diff options
author | David S. Miller <davem@davemloft.net> | 2019-04-05 17:14:19 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2019-04-05 17:14:19 -0400 |
commit | f83f7151950dd9e0f6b4a1a405bf5e55c5294e4d (patch) | |
tree | f8d9d8ee821fcc9f0a8e1a8679bc622219c70e3b /drivers/net | |
parent | 8f4043f1253292495dbf9c8be0c1b07b4b9902b7 (diff) | |
parent | 7f46774c6480174eb869a3c15167eafac467a6af (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Minor comment merge conflict in mlx5.
Staging driver has a fixup due to the skb->xmit_more changes
in 'net-next', but that driver was removed in 'net'.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
57 files changed, 447 insertions, 317 deletions
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 2f120b2ffef0..4985268e2273 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c | |||
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count); | |||
55 | 55 | ||
56 | static ssize_t perm_hwaddr_show(struct slave *slave, char *buf) | 56 | static ssize_t perm_hwaddr_show(struct slave *slave, char *buf) |
57 | { | 57 | { |
58 | return sprintf(buf, "%pM\n", slave->perm_hwaddr); | 58 | return sprintf(buf, "%*phC\n", |
59 | slave->dev->addr_len, | ||
60 | slave->perm_hwaddr); | ||
59 | } | 61 | } |
60 | static SLAVE_ATTR_RO(perm_hwaddr); | 62 | static SLAVE_ATTR_RO(perm_hwaddr); |
61 | 63 | ||
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index dce84a2a65c7..c44b2822e4dd 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c | |||
@@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, | |||
427 | return 0; | 427 | return 0; |
428 | 428 | ||
429 | lane = mv88e6390x_serdes_get_lane(chip, port); | 429 | lane = mv88e6390x_serdes_get_lane(chip, port); |
430 | if (lane < 0) | 430 | if (lane < 0 && lane != -ENODEV) |
431 | return lane; | 431 | return lane; |
432 | 432 | ||
433 | if (chip->ports[port].serdes_irq) { | 433 | if (lane >= 0) { |
434 | err = mv88e6390_serdes_irq_disable(chip, port, lane); | 434 | if (chip->ports[port].serdes_irq) { |
435 | err = mv88e6390_serdes_irq_disable(chip, port, lane); | ||
436 | if (err) | ||
437 | return err; | ||
438 | } | ||
439 | |||
440 | err = mv88e6390x_serdes_power(chip, port, false); | ||
435 | if (err) | 441 | if (err) |
436 | return err; | 442 | return err; |
437 | } | 443 | } |
438 | 444 | ||
439 | err = mv88e6390x_serdes_power(chip, port, false); | 445 | chip->ports[port].cmode = 0; |
440 | if (err) | ||
441 | return err; | ||
442 | 446 | ||
443 | if (cmode) { | 447 | if (cmode) { |
444 | err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); | 448 | err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); |
@@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, | |||
452 | if (err) | 456 | if (err) |
453 | return err; | 457 | return err; |
454 | 458 | ||
459 | chip->ports[port].cmode = cmode; | ||
460 | |||
461 | lane = mv88e6390x_serdes_get_lane(chip, port); | ||
462 | if (lane < 0) | ||
463 | return lane; | ||
464 | |||
455 | err = mv88e6390x_serdes_power(chip, port, true); | 465 | err = mv88e6390x_serdes_power(chip, port, true); |
456 | if (err) | 466 | if (err) |
457 | return err; | 467 | return err; |
@@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, | |||
463 | } | 473 | } |
464 | } | 474 | } |
465 | 475 | ||
466 | chip->ports[port].cmode = cmode; | ||
467 | |||
468 | return 0; | 476 | return 0; |
469 | } | 477 | } |
470 | 478 | ||
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index aa2be4807191..28eac9056211 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
@@ -1328,10 +1328,11 @@ int nicvf_stop(struct net_device *netdev) | |||
1328 | struct nicvf_cq_poll *cq_poll = NULL; | 1328 | struct nicvf_cq_poll *cq_poll = NULL; |
1329 | union nic_mbx mbx = {}; | 1329 | union nic_mbx mbx = {}; |
1330 | 1330 | ||
1331 | cancel_delayed_work_sync(&nic->link_change_work); | ||
1332 | |||
1333 | /* wait till all queued set_rx_mode tasks completes */ | 1331 | /* wait till all queued set_rx_mode tasks completes */ |
1334 | drain_workqueue(nic->nicvf_rx_mode_wq); | 1332 | if (nic->nicvf_rx_mode_wq) { |
1333 | cancel_delayed_work_sync(&nic->link_change_work); | ||
1334 | drain_workqueue(nic->nicvf_rx_mode_wq); | ||
1335 | } | ||
1335 | 1336 | ||
1336 | mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; | 1337 | mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; |
1337 | nicvf_send_msg_to_pf(nic, &mbx); | 1338 | nicvf_send_msg_to_pf(nic, &mbx); |
@@ -1452,7 +1453,8 @@ int nicvf_open(struct net_device *netdev) | |||
1452 | struct nicvf_cq_poll *cq_poll = NULL; | 1453 | struct nicvf_cq_poll *cq_poll = NULL; |
1453 | 1454 | ||
1454 | /* wait till all queued set_rx_mode tasks completes if any */ | 1455 | /* wait till all queued set_rx_mode tasks completes if any */ |
1455 | drain_workqueue(nic->nicvf_rx_mode_wq); | 1456 | if (nic->nicvf_rx_mode_wq) |
1457 | drain_workqueue(nic->nicvf_rx_mode_wq); | ||
1456 | 1458 | ||
1457 | netif_carrier_off(netdev); | 1459 | netif_carrier_off(netdev); |
1458 | 1460 | ||
@@ -1550,10 +1552,12 @@ int nicvf_open(struct net_device *netdev) | |||
1550 | /* Send VF config done msg to PF */ | 1552 | /* Send VF config done msg to PF */ |
1551 | nicvf_send_cfg_done(nic); | 1553 | nicvf_send_cfg_done(nic); |
1552 | 1554 | ||
1553 | INIT_DELAYED_WORK(&nic->link_change_work, | 1555 | if (nic->nicvf_rx_mode_wq) { |
1554 | nicvf_link_status_check_task); | 1556 | INIT_DELAYED_WORK(&nic->link_change_work, |
1555 | queue_delayed_work(nic->nicvf_rx_mode_wq, | 1557 | nicvf_link_status_check_task); |
1556 | &nic->link_change_work, 0); | 1558 | queue_delayed_work(nic->nicvf_rx_mode_wq, |
1559 | &nic->link_change_work, 0); | ||
1560 | } | ||
1557 | 1561 | ||
1558 | return 0; | 1562 | return 0; |
1559 | cleanup: | 1563 | cleanup: |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 5b4d3badcb73..e246f9733bb8 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c | |||
@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic, | |||
105 | /* Check if page can be recycled */ | 105 | /* Check if page can be recycled */ |
106 | if (page) { | 106 | if (page) { |
107 | ref_count = page_ref_count(page); | 107 | ref_count = page_ref_count(page); |
108 | /* Check if this page has been used once i.e 'put_page' | 108 | /* This page can be recycled if internal ref_count and page's |
109 | * called after packet transmission i.e internal ref_count | 109 | * ref_count are equal, indicating that the page has been used |
110 | * and page's ref_count are equal i.e page can be recycled. | 110 | * once for packet transmission. For non-XDP mode, internal |
111 | * ref_count is always '1'. | ||
111 | */ | 112 | */ |
112 | if (rbdr->is_xdp && (ref_count == pgcache->ref_count)) | 113 | if (rbdr->is_xdp) { |
113 | pgcache->ref_count--; | 114 | if (ref_count == pgcache->ref_count) |
114 | else | 115 | pgcache->ref_count--; |
115 | page = NULL; | 116 | else |
116 | 117 | page = NULL; | |
117 | /* In non-XDP mode, page's ref_count needs to be '1' for it | 118 | } else if (ref_count != 1) { |
118 | * to be recycled. | ||
119 | */ | ||
120 | if (!rbdr->is_xdp && (ref_count != 1)) | ||
121 | page = NULL; | 119 | page = NULL; |
120 | } | ||
122 | } | 121 | } |
123 | 122 | ||
124 | if (!page) { | 123 | if (!page) { |
@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) | |||
365 | while (head < rbdr->pgcnt) { | 364 | while (head < rbdr->pgcnt) { |
366 | pgcache = &rbdr->pgcache[head]; | 365 | pgcache = &rbdr->pgcache[head]; |
367 | if (pgcache->page && page_ref_count(pgcache->page) != 0) { | 366 | if (pgcache->page && page_ref_count(pgcache->page) != 0) { |
368 | if (!rbdr->is_xdp) { | 367 | if (rbdr->is_xdp) { |
369 | put_page(pgcache->page); | 368 | page_ref_sub(pgcache->page, |
370 | continue; | 369 | pgcache->ref_count - 1); |
371 | } | 370 | } |
372 | page_ref_sub(pgcache->page, pgcache->ref_count - 1); | ||
373 | put_page(pgcache->page); | 371 | put_page(pgcache->page); |
374 | } | 372 | } |
375 | head++; | 373 | head++; |
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c index 74849be5f004..e2919005ead3 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | |||
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total, | |||
354 | ppmax = max; | 354 | ppmax = max; |
355 | 355 | ||
356 | /* pool size must be multiple of unsigned long */ | 356 | /* pool size must be multiple of unsigned long */ |
357 | bmap = BITS_TO_LONGS(ppmax); | 357 | bmap = ppmax / BITS_PER_TYPE(unsigned long); |
358 | if (!bmap) | ||
359 | return NULL; | ||
360 | |||
358 | ppmax = (bmap * sizeof(unsigned long)) << 3; | 361 | ppmax = (bmap * sizeof(unsigned long)) << 3; |
359 | 362 | ||
360 | alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; | 363 | alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; |
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev, | |||
402 | if (reserve_factor) { | 405 | if (reserve_factor) { |
403 | ppmax_pool = ppmax / reserve_factor; | 406 | ppmax_pool = ppmax / reserve_factor; |
404 | pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max); | 407 | pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max); |
408 | if (!pool) { | ||
409 | ppmax_pool = 0; | ||
410 | reserve_factor = 0; | ||
411 | } | ||
405 | 412 | ||
406 | pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n", | 413 | pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n", |
407 | ndev->name, ppmax, ppmax_pool, pool_index_max); | 414 | ndev->name, ppmax, ppmax_pool, pool_index_max); |
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 79d03f8ee7b1..c7fa97a7e1f4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c | |||
@@ -150,7 +150,6 @@ out_buffer_fail: | |||
150 | /* free desc along with its attached buffer */ | 150 | /* free desc along with its attached buffer */ |
151 | static void hnae_free_desc(struct hnae_ring *ring) | 151 | static void hnae_free_desc(struct hnae_ring *ring) |
152 | { | 152 | { |
153 | hnae_free_buffers(ring); | ||
154 | dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, | 153 | dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, |
155 | ring->desc_num * sizeof(ring->desc[0]), | 154 | ring->desc_num * sizeof(ring->desc[0]), |
156 | ring_to_dma_dir(ring)); | 155 | ring_to_dma_dir(ring)); |
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring) | |||
183 | /* fini ring, also free the buffer for the ring */ | 182 | /* fini ring, also free the buffer for the ring */ |
184 | static void hnae_fini_ring(struct hnae_ring *ring) | 183 | static void hnae_fini_ring(struct hnae_ring *ring) |
185 | { | 184 | { |
185 | if (is_rx_ring(ring)) | ||
186 | hnae_free_buffers(ring); | ||
187 | |||
186 | hnae_free_desc(ring); | 188 | hnae_free_desc(ring); |
187 | kfree(ring->desc_cb); | 189 | kfree(ring->desc_cb); |
188 | ring->desc_cb = NULL; | 190 | ring->desc_cb = NULL; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 08a750fb60c4..d6fb83437230 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h | |||
@@ -357,7 +357,7 @@ struct hnae_buf_ops { | |||
357 | }; | 357 | }; |
358 | 358 | ||
359 | struct hnae_queue { | 359 | struct hnae_queue { |
360 | void __iomem *io_base; | 360 | u8 __iomem *io_base; |
361 | phys_addr_t phy_base; | 361 | phys_addr_t phy_base; |
362 | struct hnae_ae_dev *dev; /* the device who use this queue */ | 362 | struct hnae_ae_dev *dev; /* the device who use this queue */ |
363 | struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp; | 363 | struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index a97228c93831..6c0507921623 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | |||
@@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn) | |||
370 | static void hns_mac_param_get(struct mac_params *param, | 370 | static void hns_mac_param_get(struct mac_params *param, |
371 | struct hns_mac_cb *mac_cb) | 371 | struct hns_mac_cb *mac_cb) |
372 | { | 372 | { |
373 | param->vaddr = (void *)mac_cb->vaddr; | 373 | param->vaddr = mac_cb->vaddr; |
374 | param->mac_mode = hns_get_enet_interface(mac_cb); | 374 | param->mac_mode = hns_get_enet_interface(mac_cb); |
375 | ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr); | 375 | ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr); |
376 | param->mac_id = mac_cb->mac_id; | 376 | param->mac_id = mac_cb->mac_id; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index fbc75341bef7..22589799f1a5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | |||
@@ -187,7 +187,7 @@ struct mac_statistics { | |||
187 | /*mac para struct ,mac get param from nic or dsaf when initialize*/ | 187 | /*mac para struct ,mac get param from nic or dsaf when initialize*/ |
188 | struct mac_params { | 188 | struct mac_params { |
189 | char addr[ETH_ALEN]; | 189 | char addr[ETH_ALEN]; |
190 | void *vaddr; /*virtual address*/ | 190 | u8 __iomem *vaddr; /*virtual address*/ |
191 | struct device *dev; | 191 | struct device *dev; |
192 | u8 mac_id; | 192 | u8 mac_id; |
193 | /**< Ethernet operation mode (MAC-PHY interface and speed) */ | 193 | /**< Ethernet operation mode (MAC-PHY interface and speed) */ |
@@ -402,7 +402,7 @@ struct mac_driver { | |||
402 | enum mac_mode mac_mode; | 402 | enum mac_mode mac_mode; |
403 | u8 mac_id; | 403 | u8 mac_id; |
404 | struct hns_mac_cb *mac_cb; | 404 | struct hns_mac_cb *mac_cb; |
405 | void __iomem *io_base; | 405 | u8 __iomem *io_base; |
406 | unsigned int mac_en_flg;/*you'd better don't enable mac twice*/ | 406 | unsigned int mac_en_flg;/*you'd better don't enable mac twice*/ |
407 | unsigned int virt_dev_num; | 407 | unsigned int virt_dev_num; |
408 | struct device *dev; | 408 | struct device *dev; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index ac55db065f16..61eea6ac846f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | |||
@@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key( | |||
1602 | DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); | 1602 | DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); |
1603 | dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, | 1603 | dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, |
1604 | DSAF_TBL_TCAM_KEY_PORT_S, port); | 1604 | DSAF_TBL_TCAM_KEY_PORT_S, port); |
1605 | |||
1606 | mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan); | ||
1607 | } | 1605 | } |
1608 | 1606 | ||
1609 | /** | 1607 | /** |
@@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry( | |||
1663 | /* default config dvc to 0 */ | 1661 | /* default config dvc to 0 */ |
1664 | mac_data.tbl_ucast_dvc = 0; | 1662 | mac_data.tbl_ucast_dvc = 0; |
1665 | mac_data.tbl_ucast_out_port = mac_entry->port_num; | 1663 | mac_data.tbl_ucast_out_port = mac_entry->port_num; |
1666 | tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); | 1664 | tcam_data.tbl_tcam_data_high = mac_key.high.val; |
1667 | tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); | 1665 | tcam_data.tbl_tcam_data_low = mac_key.low.val; |
1668 | 1666 | ||
1669 | hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data); | 1667 | hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data); |
1670 | 1668 | ||
@@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
1786 | 0xff, | 1784 | 0xff, |
1787 | mc_mask); | 1785 | mc_mask); |
1788 | 1786 | ||
1789 | mask_key.high.val = le32_to_cpu(mask_key.high.val); | ||
1790 | mask_key.low.val = le32_to_cpu(mask_key.low.val); | ||
1791 | |||
1792 | pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); | 1787 | pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); |
1793 | } | 1788 | } |
1794 | 1789 | ||
@@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
1840 | dsaf_dev->ae_dev.name, mac_key.high.val, | 1835 | dsaf_dev->ae_dev.name, mac_key.high.val, |
1841 | mac_key.low.val, entry_index); | 1836 | mac_key.low.val, entry_index); |
1842 | 1837 | ||
1843 | tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); | 1838 | tcam_data.tbl_tcam_data_high = mac_key.high.val; |
1844 | tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); | 1839 | tcam_data.tbl_tcam_data_low = mac_key.low.val; |
1845 | 1840 | ||
1846 | /* config mc entry with mask */ | 1841 | /* config mc entry with mask */ |
1847 | hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, | 1842 | hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, |
@@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
1956 | /* config key mask */ | 1951 | /* config key mask */ |
1957 | hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask); | 1952 | hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask); |
1958 | 1953 | ||
1959 | mask_key.high.val = le32_to_cpu(mask_key.high.val); | ||
1960 | mask_key.low.val = le32_to_cpu(mask_key.low.val); | ||
1961 | |||
1962 | pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); | 1954 | pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); |
1963 | } | 1955 | } |
1964 | 1956 | ||
@@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
2012 | soft_mac_entry += entry_index; | 2004 | soft_mac_entry += entry_index; |
2013 | soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; | 2005 | soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; |
2014 | } else { /* not zero, just del port, update */ | 2006 | } else { /* not zero, just del port, update */ |
2015 | tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); | 2007 | tcam_data.tbl_tcam_data_high = mac_key.high.val; |
2016 | tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); | 2008 | tcam_data.tbl_tcam_data_low = mac_key.low.val; |
2017 | 2009 | ||
2018 | hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, | 2010 | hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, |
2019 | &tcam_data, | 2011 | &tcam_data, |
@@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void) | |||
2750 | return DSAF_DUMP_REGS_NUM; | 2742 | return DSAF_DUMP_REGS_NUM; |
2751 | } | 2743 | } |
2752 | 2744 | ||
2745 | static int hns_dsaf_get_port_id(u8 port) | ||
2746 | { | ||
2747 | if (port < DSAF_SERVICE_NW_NUM) | ||
2748 | return port; | ||
2749 | |||
2750 | if (port >= DSAF_BASE_INNER_PORT_NUM) | ||
2751 | return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM; | ||
2752 | |||
2753 | return -EINVAL; | ||
2754 | } | ||
2755 | |||
2753 | static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) | 2756 | static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) |
2754 | { | 2757 | { |
2755 | struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80}; | 2758 | struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80}; |
@@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) | |||
2815 | memset(&temp_key, 0x0, sizeof(temp_key)); | 2818 | memset(&temp_key, 0x0, sizeof(temp_key)); |
2816 | mask_entry.addr[0] = 0x01; | 2819 | mask_entry.addr[0] = 0x01; |
2817 | hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id, | 2820 | hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id, |
2818 | port, mask_entry.addr); | 2821 | 0xf, mask_entry.addr); |
2819 | tbl_tcam_mcast.tbl_mcast_item_vld = 1; | 2822 | tbl_tcam_mcast.tbl_mcast_item_vld = 1; |
2820 | tbl_tcam_mcast.tbl_mcast_old_en = 0; | 2823 | tbl_tcam_mcast.tbl_mcast_old_en = 0; |
2821 | 2824 | ||
2822 | if (port < DSAF_SERVICE_NW_NUM) { | 2825 | /* set MAC port to handle multicast */ |
2823 | mskid = port; | 2826 | mskid = hns_dsaf_get_port_id(port); |
2824 | } else if (port >= DSAF_BASE_INNER_PORT_NUM) { | 2827 | if (mskid == -EINVAL) { |
2825 | mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM; | ||
2826 | } else { | ||
2827 | dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n", | 2828 | dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n", |
2828 | dsaf_dev->ae_dev.name, port, | 2829 | dsaf_dev->ae_dev.name, port, |
2829 | mask_key.high.val, mask_key.low.val); | 2830 | mask_key.high.val, mask_key.low.val); |
2830 | return; | 2831 | return; |
2831 | } | 2832 | } |
2833 | dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], | ||
2834 | mskid % 32, 1); | ||
2832 | 2835 | ||
2836 | /* set pool bit map to handle multicast */ | ||
2837 | mskid = hns_dsaf_get_port_id(port_num); | ||
2838 | if (mskid == -EINVAL) { | ||
2839 | dev_err(dsaf_dev->dev, | ||
2840 | "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n", | ||
2841 | dsaf_dev->ae_dev.name, port_num, | ||
2842 | mask_key.high.val, mask_key.low.val); | ||
2843 | return; | ||
2844 | } | ||
2833 | dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], | 2845 | dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], |
2834 | mskid % 32, 1); | 2846 | mskid % 32, 1); |
2847 | |||
2835 | memcpy(&temp_key, &mask_key, sizeof(mask_key)); | 2848 | memcpy(&temp_key, &mask_key, sizeof(mask_key)); |
2836 | hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, | 2849 | hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, |
2837 | (struct dsaf_tbl_tcam_data *)(&mask_key), | 2850 | (struct dsaf_tbl_tcam_data *)(&mask_key), |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 0e1cd99831a6..76cc8887e1a8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | |||
@@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
467 | u8 mac_id, u8 port_num); | 467 | u8 mac_id, u8 port_num); |
468 | int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); | 468 | int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); |
469 | 469 | ||
470 | int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset); | ||
471 | |||
470 | #endif /* __HNS_DSAF_MAIN_H__ */ | 472 | #endif /* __HNS_DSAF_MAIN_H__ */ |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 16294cd3c954..19b94879691f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | |||
@@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en) | |||
670 | dsaf_set_field(origin, 1ull << 10, 10, en); | 670 | dsaf_set_field(origin, 1ull << 10, 10, en); |
671 | dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); | 671 | dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); |
672 | } else { | 672 | } else { |
673 | u8 *base_addr = (u8 *)mac_cb->serdes_vaddr + | 673 | u8 __iomem *base_addr = mac_cb->serdes_vaddr + |
674 | (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000); | 674 | (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000); |
675 | dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en); | 675 | dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en); |
676 | } | 676 | } |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 3d07c8a7639d..17c019106e6e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | |||
@@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb, | |||
61 | } | 61 | } |
62 | } | 62 | } |
63 | 63 | ||
64 | static void __iomem * | 64 | static u8 __iomem * |
65 | hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) | 65 | hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) |
66 | { | 66 | { |
67 | return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET; | 67 | return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET; |
@@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index) | |||
111 | dsaf_dev->ppe_common[comm_index] = NULL; | 111 | dsaf_dev->ppe_common[comm_index] = NULL; |
112 | } | 112 | } |
113 | 113 | ||
114 | static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, | 114 | static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, |
115 | int ppe_idx) | 115 | int ppe_idx) |
116 | { | 116 | { |
117 | return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; | 117 | return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; |
118 | } | 118 | } |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index f670e63a5a01..110c6e8222c7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | |||
@@ -80,7 +80,7 @@ struct hns_ppe_cb { | |||
80 | struct hns_ppe_hw_stats hw_stats; | 80 | struct hns_ppe_hw_stats hw_stats; |
81 | 81 | ||
82 | u8 index; /* index in a ppe common device */ | 82 | u8 index; /* index in a ppe common device */ |
83 | void __iomem *io_base; | 83 | u8 __iomem *io_base; |
84 | int virq; | 84 | int virq; |
85 | u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ | 85 | u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ |
86 | u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */ | 86 | u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */ |
@@ -89,7 +89,7 @@ struct hns_ppe_cb { | |||
89 | struct ppe_common_cb { | 89 | struct ppe_common_cb { |
90 | struct device *dev; | 90 | struct device *dev; |
91 | struct dsaf_device *dsaf_dev; | 91 | struct dsaf_device *dsaf_dev; |
92 | void __iomem *io_base; | 92 | u8 __iomem *io_base; |
93 | 93 | ||
94 | enum ppe_common_mode ppe_mode; | 94 | enum ppe_common_mode ppe_mode; |
95 | 95 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 6bf346c11b25..ac3518ca4d7b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | |||
@@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type) | |||
458 | mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT; | 458 | mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT; |
459 | } else { | 459 | } else { |
460 | ring = &q->tx_ring; | 460 | ring = &q->tx_ring; |
461 | ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base + | 461 | ring->io_base = ring_pair_cb->q.io_base + |
462 | HNS_RCB_TX_REG_OFFSET; | 462 | HNS_RCB_TX_REG_OFFSET; |
463 | irq_idx = HNS_RCB_IRQ_IDX_TX; | 463 | irq_idx = HNS_RCB_IRQ_IDX_TX; |
464 | mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT : | 464 | mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT : |
@@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev) | |||
764 | } | 764 | } |
765 | } | 765 | } |
766 | 766 | ||
767 | static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) | 767 | static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) |
768 | { | 768 | { |
769 | struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; | 769 | struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; |
770 | 770 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index b9733b0b8482..b9e7f11f0896 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | |||
@@ -1018,7 +1018,7 @@ | |||
1018 | #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 | 1018 | #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 |
1019 | #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 | 1019 | #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 |
1020 | 1020 | ||
1021 | static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) | 1021 | static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value) |
1022 | { | 1022 | { |
1023 | writel(value, base + reg); | 1023 | writel(value, base + reg); |
1024 | } | 1024 | } |
@@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val) | |||
1053 | #define dsaf_set_bit(origin, shift, val) \ | 1053 | #define dsaf_set_bit(origin, shift, val) \ |
1054 | dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) | 1054 | dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) |
1055 | 1055 | ||
1056 | static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, | 1056 | static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, |
1057 | u32 shift, u32 val) | 1057 | u32 shift, u32 val) |
1058 | { | 1058 | { |
1059 | u32 origin = dsaf_read_reg(base, reg); | 1059 | u32 origin = dsaf_read_reg(base, reg); |
@@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, | |||
1073 | #define dsaf_get_bit(origin, shift) \ | 1073 | #define dsaf_get_bit(origin, shift) \ |
1074 | dsaf_get_field((origin), (1ull << (shift)), (shift)) | 1074 | dsaf_get_field((origin), (1ull << (shift)), (shift)) |
1075 | 1075 | ||
1076 | static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, | 1076 | static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, |
1077 | u32 shift) | 1077 | u32 shift) |
1078 | { | 1078 | { |
1079 | u32 origin; | 1079 | u32 origin; |
@@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, | |||
1089 | dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit)) | 1089 | dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit)) |
1090 | 1090 | ||
1091 | #define dsaf_write_b(addr, data)\ | 1091 | #define dsaf_write_b(addr, data)\ |
1092 | writeb((data), (__iomem unsigned char *)(addr)) | 1092 | writeb((data), (__iomem u8 *)(addr)) |
1093 | #define dsaf_read_b(addr)\ | 1093 | #define dsaf_read_b(addr)\ |
1094 | readb((__iomem unsigned char *)(addr)) | 1094 | readb((__iomem u8 *)(addr)) |
1095 | 1095 | ||
1096 | #define hns_mac_reg_read64(drv, offset) \ | 1096 | #define hns_mac_reg_read64(drv, offset) \ |
1097 | readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset)))) | 1097 | readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset)))) |
1098 | 1098 | ||
1099 | #endif /* _DSAF_REG_H */ | 1099 | #endif /* _DSAF_REG_H */ |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index ba4316910dea..a60f207768fc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | |||
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv) | |||
129 | dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); | 129 | dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); |
130 | dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); | 130 | dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); |
131 | dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); | 131 | dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); |
132 | dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); | 132 | dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); |
133 | } | 133 | } |
134 | 134 | ||
135 | /** | 135 | /** |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index e37a0ca0db89..297b95c1b3c1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | |||
@@ -29,9 +29,6 @@ | |||
29 | 29 | ||
30 | #define SERVICE_TIMER_HZ (1 * HZ) | 30 | #define SERVICE_TIMER_HZ (1 * HZ) |
31 | 31 | ||
32 | #define NIC_TX_CLEAN_MAX_NUM 256 | ||
33 | #define NIC_RX_CLEAN_MAX_NUM 64 | ||
34 | |||
35 | #define RCB_IRQ_NOT_INITED 0 | 32 | #define RCB_IRQ_NOT_INITED 0 |
36 | #define RCB_IRQ_INITED 1 | 33 | #define RCB_IRQ_INITED 1 |
37 | #define HNS_BUFFER_SIZE_2048 2048 | 34 | #define HNS_BUFFER_SIZE_2048 2048 |
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, | |||
376 | wmb(); /* commit all data before submit */ | 373 | wmb(); /* commit all data before submit */ |
377 | assert(skb->queue_mapping < priv->ae_handle->q_num); | 374 | assert(skb->queue_mapping < priv->ae_handle->q_num); |
378 | hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num); | 375 | hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num); |
379 | ring->stats.tx_pkts++; | ||
380 | ring->stats.tx_bytes += skb->len; | ||
381 | 376 | ||
382 | return NETDEV_TX_OK; | 377 | return NETDEV_TX_OK; |
383 | 378 | ||
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, | |||
999 | /* issue prefetch for next Tx descriptor */ | 994 | /* issue prefetch for next Tx descriptor */ |
1000 | prefetch(&ring->desc_cb[ring->next_to_clean]); | 995 | prefetch(&ring->desc_cb[ring->next_to_clean]); |
1001 | } | 996 | } |
997 | /* update tx ring statistics. */ | ||
998 | ring->stats.tx_pkts += pkts; | ||
999 | ring->stats.tx_bytes += bytes; | ||
1002 | 1000 | ||
1003 | NETIF_TX_UNLOCK(ring); | 1001 | NETIF_TX_UNLOCK(ring); |
1004 | 1002 | ||
@@ -2151,7 +2149,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) | |||
2151 | hns_nic_tx_fini_pro_v2; | 2149 | hns_nic_tx_fini_pro_v2; |
2152 | 2150 | ||
2153 | netif_napi_add(priv->netdev, &rd->napi, | 2151 | netif_napi_add(priv->netdev, &rd->napi, |
2154 | hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); | 2152 | hns_nic_common_poll, NAPI_POLL_WEIGHT); |
2155 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; | 2153 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; |
2156 | } | 2154 | } |
2157 | for (i = h->q_num; i < h->q_num * 2; i++) { | 2155 | for (i = h->q_num; i < h->q_num * 2; i++) { |
@@ -2164,7 +2162,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) | |||
2164 | hns_nic_rx_fini_pro_v2; | 2162 | hns_nic_rx_fini_pro_v2; |
2165 | 2163 | ||
2166 | netif_napi_add(priv->netdev, &rd->napi, | 2164 | netif_napi_add(priv->netdev, &rd->napi, |
2167 | hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); | 2165 | hns_nic_common_poll, NAPI_POLL_WEIGHT); |
2168 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; | 2166 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; |
2169 | } | 2167 | } |
2170 | 2168 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index fffe8c1c45d3..0fb61d440d3b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | |||
@@ -3,7 +3,7 @@ | |||
3 | # Makefile for the HISILICON network device drivers. | 3 | # Makefile for the HISILICON network device drivers. |
4 | # | 4 | # |
5 | 5 | ||
6 | ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 | 6 | ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 |
7 | 7 | ||
8 | obj-$(CONFIG_HNS3_HCLGE) += hclge.o | 8 | obj-$(CONFIG_HNS3_HCLGE) += hclge.o |
9 | hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o | 9 | hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o |
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile index fb93bbd35845..6193f8fa7cf3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | |||
@@ -3,7 +3,7 @@ | |||
3 | # Makefile for the HISILICON network device drivers. | 3 | # Makefile for the HISILICON network device drivers. |
4 | # | 4 | # |
5 | 5 | ||
6 | ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 | 6 | ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 |
7 | 7 | ||
8 | obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o | 8 | obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o |
9 | hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o \ No newline at end of file | 9 | hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o \ No newline at end of file |
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index baf5cc251f32..8b8a7d00e8e0 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c | |||
@@ -39,7 +39,7 @@ struct hns_mdio_sc_reg { | |||
39 | }; | 39 | }; |
40 | 40 | ||
41 | struct hns_mdio_device { | 41 | struct hns_mdio_device { |
42 | void *vbase; /* mdio reg base address */ | 42 | u8 __iomem *vbase; /* mdio reg base address */ |
43 | struct regmap *subctrl_vbase; | 43 | struct regmap *subctrl_vbase; |
44 | struct hns_mdio_sc_reg sc_reg; | 44 | struct hns_mdio_sc_reg sc_reg; |
45 | }; | 45 | }; |
@@ -96,21 +96,17 @@ enum mdio_c45_op_seq { | |||
96 | #define MDIO_SC_CLK_ST 0x531C | 96 | #define MDIO_SC_CLK_ST 0x531C |
97 | #define MDIO_SC_RESET_ST 0x5A1C | 97 | #define MDIO_SC_RESET_ST 0x5A1C |
98 | 98 | ||
99 | static void mdio_write_reg(void *base, u32 reg, u32 value) | 99 | static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value) |
100 | { | 100 | { |
101 | u8 __iomem *reg_addr = (u8 __iomem *)base; | 101 | writel_relaxed(value, base + reg); |
102 | |||
103 | writel_relaxed(value, reg_addr + reg); | ||
104 | } | 102 | } |
105 | 103 | ||
106 | #define MDIO_WRITE_REG(a, reg, value) \ | 104 | #define MDIO_WRITE_REG(a, reg, value) \ |
107 | mdio_write_reg((a)->vbase, (reg), (value)) | 105 | mdio_write_reg((a)->vbase, (reg), (value)) |
108 | 106 | ||
109 | static u32 mdio_read_reg(void *base, u32 reg) | 107 | static u32 mdio_read_reg(u8 __iomem *base, u32 reg) |
110 | { | 108 | { |
111 | u8 __iomem *reg_addr = (u8 __iomem *)base; | 109 | return readl_relaxed(base + reg); |
112 | |||
113 | return readl_relaxed(reg_addr + reg); | ||
114 | } | 110 | } |
115 | 111 | ||
116 | #define mdio_set_field(origin, mask, shift, val) \ | 112 | #define mdio_set_field(origin, mask, shift, val) \ |
@@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg) | |||
121 | 117 | ||
122 | #define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask)) | 118 | #define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask)) |
123 | 119 | ||
124 | static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, | 120 | static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift, |
125 | u32 val) | 121 | u32 val) |
126 | { | 122 | { |
127 | u32 origin = mdio_read_reg(base, reg); | 123 | u32 origin = mdio_read_reg(base, reg); |
@@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, | |||
133 | #define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \ | 129 | #define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \ |
134 | mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val)) | 130 | mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val)) |
135 | 131 | ||
136 | static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift) | 132 | static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift) |
137 | { | 133 | { |
138 | u32 origin; | 134 | u32 origin; |
139 | 135 | ||
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 20c4e0835ba8..1de691e76b86 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
@@ -1886,6 +1886,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, | |||
1886 | */ | 1886 | */ |
1887 | adapter->state = VNIC_PROBED; | 1887 | adapter->state = VNIC_PROBED; |
1888 | 1888 | ||
1889 | reinit_completion(&adapter->init_done); | ||
1889 | rc = init_crq_queue(adapter); | 1890 | rc = init_crq_queue(adapter); |
1890 | if (rc) { | 1891 | if (rc) { |
1891 | netdev_err(adapter->netdev, | 1892 | netdev_err(adapter->netdev, |
@@ -4692,7 +4693,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter) | |||
4692 | old_num_rx_queues = adapter->req_rx_queues; | 4693 | old_num_rx_queues = adapter->req_rx_queues; |
4693 | old_num_tx_queues = adapter->req_tx_queues; | 4694 | old_num_tx_queues = adapter->req_tx_queues; |
4694 | 4695 | ||
4695 | init_completion(&adapter->init_done); | 4696 | reinit_completion(&adapter->init_done); |
4696 | adapter->init_done_rc = 0; | 4697 | adapter->init_done_rc = 0; |
4697 | ibmvnic_send_crq_init(adapter); | 4698 | ibmvnic_send_crq_init(adapter); |
4698 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { | 4699 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { |
@@ -4747,7 +4748,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) | |||
4747 | 4748 | ||
4748 | adapter->from_passive_init = false; | 4749 | adapter->from_passive_init = false; |
4749 | 4750 | ||
4750 | init_completion(&adapter->init_done); | ||
4751 | adapter->init_done_rc = 0; | 4751 | adapter->init_done_rc = 0; |
4752 | ibmvnic_send_crq_init(adapter); | 4752 | ibmvnic_send_crq_init(adapter); |
4753 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { | 4753 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { |
@@ -4826,6 +4826,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
4826 | INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); | 4826 | INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); |
4827 | INIT_LIST_HEAD(&adapter->rwi_list); | 4827 | INIT_LIST_HEAD(&adapter->rwi_list); |
4828 | spin_lock_init(&adapter->rwi_lock); | 4828 | spin_lock_init(&adapter->rwi_lock); |
4829 | init_completion(&adapter->init_done); | ||
4829 | adapter->resetting = false; | 4830 | adapter->resetting = false; |
4830 | 4831 | ||
4831 | adapter->mac_change_pending = false; | 4832 | adapter->mac_change_pending = false; |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index e2fa112bed9a..2325cee76211 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c | |||
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void) | |||
41 | /* create driver workqueue */ | 41 | /* create driver workqueue */ |
42 | fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, | 42 | fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, |
43 | fm10k_driver_name); | 43 | fm10k_driver_name); |
44 | if (!fm10k_workqueue) | ||
45 | return -ENOMEM; | ||
44 | 46 | ||
45 | fm10k_dbg_init(); | 47 | fm10k_dbg_init(); |
46 | 48 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index d684998ba2b0..d3cc3427caad 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h | |||
@@ -790,6 +790,8 @@ struct i40e_vsi { | |||
790 | 790 | ||
791 | /* VSI specific handlers */ | 791 | /* VSI specific handlers */ |
792 | irqreturn_t (*irq_handler)(int irq, void *data); | 792 | irqreturn_t (*irq_handler)(int irq, void *data); |
793 | |||
794 | unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ | ||
793 | } ____cacheline_internodealigned_in_smp; | 795 | } ____cacheline_internodealigned_in_smp; |
794 | 796 | ||
795 | struct i40e_netdev_priv { | 797 | struct i40e_netdev_priv { |
@@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) | |||
1096 | return !!vsi->xdp_prog; | 1098 | return !!vsi->xdp_prog; |
1097 | } | 1099 | } |
1098 | 1100 | ||
1099 | static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) | ||
1100 | { | ||
1101 | bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); | ||
1102 | int qid = ring->queue_index; | ||
1103 | |||
1104 | if (ring_is_xdp(ring)) | ||
1105 | qid -= ring->vsi->alloc_queue_pairs; | ||
1106 | |||
1107 | if (!xdp_on) | ||
1108 | return NULL; | ||
1109 | |||
1110 | return xdp_get_umem_from_qid(ring->vsi->netdev, qid); | ||
1111 | } | ||
1112 | |||
1113 | int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); | 1101 | int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); |
1114 | int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); | 1102 | int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); |
1115 | int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, | 1103 | int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 4c885801fa26..7874d0ec7fb0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |||
@@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
2573 | return -EOPNOTSUPP; | 2573 | return -EOPNOTSUPP; |
2574 | 2574 | ||
2575 | /* only magic packet is supported */ | 2575 | /* only magic packet is supported */ |
2576 | if (wol->wolopts && (wol->wolopts != WAKE_MAGIC) | 2576 | if (wol->wolopts & ~WAKE_MAGIC) |
2577 | | (wol->wolopts != WAKE_FILTER)) | ||
2578 | return -EOPNOTSUPP; | 2577 | return -EOPNOTSUPP; |
2579 | 2578 | ||
2580 | /* is this a new value? */ | 2579 | /* is this a new value? */ |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index da62218eb70a..b1c265012c8a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -3064,6 +3064,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) | |||
3064 | } | 3064 | } |
3065 | 3065 | ||
3066 | /** | 3066 | /** |
3067 | * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled | ||
3068 | * @ring: The Tx or Rx ring | ||
3069 | * | ||
3070 | * Returns the UMEM or NULL. | ||
3071 | **/ | ||
3072 | static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) | ||
3073 | { | ||
3074 | bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); | ||
3075 | int qid = ring->queue_index; | ||
3076 | |||
3077 | if (ring_is_xdp(ring)) | ||
3078 | qid -= ring->vsi->alloc_queue_pairs; | ||
3079 | |||
3080 | if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps)) | ||
3081 | return NULL; | ||
3082 | |||
3083 | return xdp_get_umem_from_qid(ring->vsi->netdev, qid); | ||
3084 | } | ||
3085 | |||
3086 | /** | ||
3067 | * i40e_configure_tx_ring - Configure a transmit ring context and rest | 3087 | * i40e_configure_tx_ring - Configure a transmit ring context and rest |
3068 | * @ring: The Tx ring to configure | 3088 | * @ring: The Tx ring to configure |
3069 | * | 3089 | * |
@@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) | |||
10064 | hash_init(vsi->mac_filter_hash); | 10084 | hash_init(vsi->mac_filter_hash); |
10065 | vsi->irqs_ready = false; | 10085 | vsi->irqs_ready = false; |
10066 | 10086 | ||
10087 | if (type == I40E_VSI_MAIN) { | ||
10088 | vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); | ||
10089 | if (!vsi->af_xdp_zc_qps) | ||
10090 | goto err_rings; | ||
10091 | } | ||
10092 | |||
10067 | ret = i40e_set_num_rings_in_vsi(vsi); | 10093 | ret = i40e_set_num_rings_in_vsi(vsi); |
10068 | if (ret) | 10094 | if (ret) |
10069 | goto err_rings; | 10095 | goto err_rings; |
@@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) | |||
10082 | goto unlock_pf; | 10108 | goto unlock_pf; |
10083 | 10109 | ||
10084 | err_rings: | 10110 | err_rings: |
10111 | bitmap_free(vsi->af_xdp_zc_qps); | ||
10085 | pf->next_vsi = i - 1; | 10112 | pf->next_vsi = i - 1; |
10086 | kfree(vsi); | 10113 | kfree(vsi); |
10087 | unlock_pf: | 10114 | unlock_pf: |
@@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi) | |||
10162 | i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); | 10189 | i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); |
10163 | i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); | 10190 | i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); |
10164 | 10191 | ||
10192 | bitmap_free(vsi->af_xdp_zc_qps); | ||
10165 | i40e_vsi_free_arrays(vsi, true); | 10193 | i40e_vsi_free_arrays(vsi, true); |
10166 | i40e_clear_rss_config_user(vsi); | 10194 | i40e_clear_rss_config_user(vsi); |
10167 | 10195 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 5fb4353c742b..31575c0bb884 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c | |||
@@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) | |||
146 | static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) | 146 | static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) |
147 | { | 147 | { |
148 | struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); | 148 | struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); |
149 | struct timespec64 now; | 149 | struct timespec64 now, then; |
150 | 150 | ||
151 | then = ns_to_timespec64(delta); | ||
151 | mutex_lock(&pf->tmreg_lock); | 152 | mutex_lock(&pf->tmreg_lock); |
152 | 153 | ||
153 | i40e_ptp_read(pf, &now, NULL); | 154 | i40e_ptp_read(pf, &now, NULL); |
154 | timespec64_add_ns(&now, delta); | 155 | now = timespec64_add(now, then); |
155 | i40e_ptp_write(pf, (const struct timespec64 *)&now); | 156 | i40e_ptp_write(pf, (const struct timespec64 *)&now); |
156 | 157 | ||
157 | mutex_unlock(&pf->tmreg_lock); | 158 | mutex_unlock(&pf->tmreg_lock); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index b5c182e688e3..1b17486543ac 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c | |||
@@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, | |||
102 | if (err) | 102 | if (err) |
103 | return err; | 103 | return err; |
104 | 104 | ||
105 | set_bit(qid, vsi->af_xdp_zc_qps); | ||
106 | |||
105 | if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); | 107 | if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); |
106 | 108 | ||
107 | if (if_running) { | 109 | if (if_running) { |
@@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) | |||
148 | return err; | 150 | return err; |
149 | } | 151 | } |
150 | 152 | ||
153 | clear_bit(qid, vsi->af_xdp_zc_qps); | ||
151 | i40e_xsk_umem_dma_unmap(vsi, umem); | 154 | i40e_xsk_umem_dma_unmap(vsi, umem); |
152 | 155 | ||
153 | if (if_running) { | 156 | if (if_running) { |
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 01fcfc6f3415..d2e2c50ce257 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h | |||
@@ -194,6 +194,8 @@ | |||
194 | /* enable link status from external LINK_0 and LINK_1 pins */ | 194 | /* enable link status from external LINK_0 and LINK_1 pins */ |
195 | #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ | 195 | #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ |
196 | #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ | 196 | #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ |
197 | #define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ | ||
198 | #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ | ||
197 | #define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ | 199 | #define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ |
198 | #define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ | 200 | #define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ |
199 | #define E1000_CTRL_RST 0x04000000 /* Global reset */ | 201 | #define E1000_CTRL_RST 0x04000000 /* Global reset */ |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 32d61d5a2706..acbb5b4f333d 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -8743,9 +8743,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
8743 | struct e1000_hw *hw = &adapter->hw; | 8743 | struct e1000_hw *hw = &adapter->hw; |
8744 | u32 ctrl, rctl, status; | 8744 | u32 ctrl, rctl, status; |
8745 | u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; | 8745 | u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; |
8746 | #ifdef CONFIG_PM | 8746 | bool wake; |
8747 | int retval = 0; | ||
8748 | #endif | ||
8749 | 8747 | ||
8750 | rtnl_lock(); | 8748 | rtnl_lock(); |
8751 | netif_device_detach(netdev); | 8749 | netif_device_detach(netdev); |
@@ -8758,14 +8756,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
8758 | igb_clear_interrupt_scheme(adapter); | 8756 | igb_clear_interrupt_scheme(adapter); |
8759 | rtnl_unlock(); | 8757 | rtnl_unlock(); |
8760 | 8758 | ||
8761 | #ifdef CONFIG_PM | ||
8762 | if (!runtime) { | ||
8763 | retval = pci_save_state(pdev); | ||
8764 | if (retval) | ||
8765 | return retval; | ||
8766 | } | ||
8767 | #endif | ||
8768 | |||
8769 | status = rd32(E1000_STATUS); | 8759 | status = rd32(E1000_STATUS); |
8770 | if (status & E1000_STATUS_LU) | 8760 | if (status & E1000_STATUS_LU) |
8771 | wufc &= ~E1000_WUFC_LNKC; | 8761 | wufc &= ~E1000_WUFC_LNKC; |
@@ -8782,10 +8772,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
8782 | } | 8772 | } |
8783 | 8773 | ||
8784 | ctrl = rd32(E1000_CTRL); | 8774 | ctrl = rd32(E1000_CTRL); |
8785 | /* advertise wake from D3Cold */ | ||
8786 | #define E1000_CTRL_ADVD3WUC 0x00100000 | ||
8787 | /* phy power management enable */ | ||
8788 | #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 | ||
8789 | ctrl |= E1000_CTRL_ADVD3WUC; | 8775 | ctrl |= E1000_CTRL_ADVD3WUC; |
8790 | wr32(E1000_CTRL, ctrl); | 8776 | wr32(E1000_CTRL, ctrl); |
8791 | 8777 | ||
@@ -8799,12 +8785,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
8799 | wr32(E1000_WUFC, 0); | 8785 | wr32(E1000_WUFC, 0); |
8800 | } | 8786 | } |
8801 | 8787 | ||
8802 | *enable_wake = wufc || adapter->en_mng_pt; | 8788 | wake = wufc || adapter->en_mng_pt; |
8803 | if (!*enable_wake) | 8789 | if (!wake) |
8804 | igb_power_down_link(adapter); | 8790 | igb_power_down_link(adapter); |
8805 | else | 8791 | else |
8806 | igb_power_up_link(adapter); | 8792 | igb_power_up_link(adapter); |
8807 | 8793 | ||
8794 | if (enable_wake) | ||
8795 | *enable_wake = wake; | ||
8796 | |||
8808 | /* Release control of h/w to f/w. If f/w is AMT enabled, this | 8797 | /* Release control of h/w to f/w. If f/w is AMT enabled, this |
8809 | * would have already happened in close and is redundant. | 8798 | * would have already happened in close and is redundant. |
8810 | */ | 8799 | */ |
@@ -8847,22 +8836,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev) | |||
8847 | 8836 | ||
8848 | static int __maybe_unused igb_suspend(struct device *dev) | 8837 | static int __maybe_unused igb_suspend(struct device *dev) |
8849 | { | 8838 | { |
8850 | int retval; | 8839 | return __igb_shutdown(to_pci_dev(dev), NULL, 0); |
8851 | bool wake; | ||
8852 | struct pci_dev *pdev = to_pci_dev(dev); | ||
8853 | |||
8854 | retval = __igb_shutdown(pdev, &wake, 0); | ||
8855 | if (retval) | ||
8856 | return retval; | ||
8857 | |||
8858 | if (wake) { | ||
8859 | pci_prepare_to_sleep(pdev); | ||
8860 | } else { | ||
8861 | pci_wake_from_d3(pdev, false); | ||
8862 | pci_set_power_state(pdev, PCI_D3hot); | ||
8863 | } | ||
8864 | |||
8865 | return 0; | ||
8866 | } | 8840 | } |
8867 | 8841 | ||
8868 | static int __maybe_unused igb_resume(struct device *dev) | 8842 | static int __maybe_unused igb_resume(struct device *dev) |
@@ -8933,22 +8907,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev) | |||
8933 | 8907 | ||
8934 | static int __maybe_unused igb_runtime_suspend(struct device *dev) | 8908 | static int __maybe_unused igb_runtime_suspend(struct device *dev) |
8935 | { | 8909 | { |
8936 | struct pci_dev *pdev = to_pci_dev(dev); | 8910 | return __igb_shutdown(to_pci_dev(dev), NULL, 1); |
8937 | int retval; | ||
8938 | bool wake; | ||
8939 | |||
8940 | retval = __igb_shutdown(pdev, &wake, 1); | ||
8941 | if (retval) | ||
8942 | return retval; | ||
8943 | |||
8944 | if (wake) { | ||
8945 | pci_prepare_to_sleep(pdev); | ||
8946 | } else { | ||
8947 | pci_wake_from_d3(pdev, false); | ||
8948 | pci_set_power_state(pdev, PCI_D3hot); | ||
8949 | } | ||
8950 | |||
8951 | return 0; | ||
8952 | } | 8911 | } |
8953 | 8912 | ||
8954 | static int __maybe_unused igb_runtime_resume(struct device *dev) | 8913 | static int __maybe_unused igb_runtime_resume(struct device *dev) |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index cc4907f9ff02..2fb97967961c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | |||
@@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) | |||
905 | struct pci_dev *pdev = adapter->pdev; | 905 | struct pci_dev *pdev = adapter->pdev; |
906 | struct device *dev = &adapter->netdev->dev; | 906 | struct device *dev = &adapter->netdev->dev; |
907 | struct mii_bus *bus; | 907 | struct mii_bus *bus; |
908 | int err = -ENODEV; | ||
908 | 909 | ||
909 | adapter->mii_bus = devm_mdiobus_alloc(dev); | 910 | bus = devm_mdiobus_alloc(dev); |
910 | if (!adapter->mii_bus) | 911 | if (!bus) |
911 | return -ENOMEM; | 912 | return -ENOMEM; |
912 | 913 | ||
913 | bus = adapter->mii_bus; | ||
914 | |||
915 | switch (hw->device_id) { | 914 | switch (hw->device_id) { |
916 | /* C3000 SoCs */ | 915 | /* C3000 SoCs */ |
917 | case IXGBE_DEV_ID_X550EM_A_KR: | 916 | case IXGBE_DEV_ID_X550EM_A_KR: |
@@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) | |||
949 | */ | 948 | */ |
950 | hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22; | 949 | hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22; |
951 | 950 | ||
952 | return mdiobus_register(bus); | 951 | err = mdiobus_register(bus); |
952 | if (!err) { | ||
953 | adapter->mii_bus = bus; | ||
954 | return 0; | ||
955 | } | ||
953 | 956 | ||
954 | ixgbe_no_mii_bus: | 957 | ixgbe_no_mii_bus: |
955 | devm_mdiobus_free(dev, bus); | 958 | devm_mdiobus_free(dev, bus); |
956 | adapter->mii_bus = NULL; | 959 | return err; |
957 | return -ENODEV; | ||
958 | } | 960 | } |
959 | 961 | ||
960 | /** | 962 | /** |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 122927f3a600..d5e5afbdca6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c | |||
@@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, | |||
96 | if (!eproto) | 96 | if (!eproto) |
97 | return -EINVAL; | 97 | return -EINVAL; |
98 | 98 | ||
99 | if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet)) | ||
100 | return -EOPNOTSUPP; | ||
101 | |||
102 | err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port); | 99 | err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port); |
103 | if (err) | 100 | if (err) |
104 | return err; | 101 | return err; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index b0ce68feb0f3..633b117eb13e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | |||
@@ -122,7 +122,9 @@ out: | |||
122 | return err; | 122 | return err; |
123 | } | 123 | } |
124 | 124 | ||
125 | /* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */ | 125 | /* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) |
126 | * minimum speed value is 40Gbps | ||
127 | */ | ||
126 | static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) | 128 | static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) |
127 | { | 129 | { |
128 | u32 speed; | 130 | u32 speed; |
@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) | |||
130 | int err; | 132 | int err; |
131 | 133 | ||
132 | err = mlx5e_port_linkspeed(priv->mdev, &speed); | 134 | err = mlx5e_port_linkspeed(priv->mdev, &speed); |
133 | if (err) { | 135 | if (err) |
134 | mlx5_core_warn(priv->mdev, "cannot get port speed\n"); | 136 | speed = SPEED_40000; |
135 | return 0; | 137 | speed = max_t(u32, speed, SPEED_40000); |
136 | } | ||
137 | 138 | ||
138 | xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; | 139 | xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; |
139 | 140 | ||
@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) | |||
142 | } | 143 | } |
143 | 144 | ||
144 | static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, | 145 | static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, |
145 | u32 xoff, unsigned int mtu) | 146 | u32 xoff, unsigned int max_mtu) |
146 | { | 147 | { |
147 | int i; | 148 | int i; |
148 | 149 | ||
@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, | |||
154 | } | 155 | } |
155 | 156 | ||
156 | if (port_buffer->buffer[i].size < | 157 | if (port_buffer->buffer[i].size < |
157 | (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) | 158 | (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) |
158 | return -ENOMEM; | 159 | return -ENOMEM; |
159 | 160 | ||
160 | port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; | 161 | port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; |
161 | port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu; | 162 | port_buffer->buffer[i].xon = |
163 | port_buffer->buffer[i].xoff - max_mtu; | ||
162 | } | 164 | } |
163 | 165 | ||
164 | return 0; | 166 | return 0; |
@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, | |||
166 | 168 | ||
167 | /** | 169 | /** |
168 | * update_buffer_lossy - Update buffer configuration based on pfc | 170 | * update_buffer_lossy - Update buffer configuration based on pfc |
169 | * @mtu: device's MTU | 171 | * @max_mtu: netdev's max_mtu |
170 | * @pfc_en: <input> current pfc configuration | 172 | * @pfc_en: <input> current pfc configuration |
171 | * @buffer: <input> current prio to buffer mapping | 173 | * @buffer: <input> current prio to buffer mapping |
172 | * @xoff: <input> xoff value | 174 | * @xoff: <input> xoff value |
@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, | |||
183 | * @return: 0 if no error, | 185 | * @return: 0 if no error, |
184 | * sets change to true if buffer configuration was modified. | 186 | * sets change to true if buffer configuration was modified. |
185 | */ | 187 | */ |
186 | static int update_buffer_lossy(unsigned int mtu, | 188 | static int update_buffer_lossy(unsigned int max_mtu, |
187 | u8 pfc_en, u8 *buffer, u32 xoff, | 189 | u8 pfc_en, u8 *buffer, u32 xoff, |
188 | struct mlx5e_port_buffer *port_buffer, | 190 | struct mlx5e_port_buffer *port_buffer, |
189 | bool *change) | 191 | bool *change) |
@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu, | |||
220 | } | 222 | } |
221 | 223 | ||
222 | if (changed) { | 224 | if (changed) { |
223 | err = update_xoff_threshold(port_buffer, xoff, mtu); | 225 | err = update_xoff_threshold(port_buffer, xoff, max_mtu); |
224 | if (err) | 226 | if (err) |
225 | return err; | 227 | return err; |
226 | 228 | ||
@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu, | |||
230 | return 0; | 232 | return 0; |
231 | } | 233 | } |
232 | 234 | ||
235 | #define MINIMUM_MAX_MTU 9216 | ||
233 | int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | 236 | int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, |
234 | u32 change, unsigned int mtu, | 237 | u32 change, unsigned int mtu, |
235 | struct ieee_pfc *pfc, | 238 | struct ieee_pfc *pfc, |
@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
241 | bool update_prio2buffer = false; | 244 | bool update_prio2buffer = false; |
242 | u8 buffer[MLX5E_MAX_PRIORITY]; | 245 | u8 buffer[MLX5E_MAX_PRIORITY]; |
243 | bool update_buffer = false; | 246 | bool update_buffer = false; |
247 | unsigned int max_mtu; | ||
244 | u32 total_used = 0; | 248 | u32 total_used = 0; |
245 | u8 curr_pfc_en; | 249 | u8 curr_pfc_en; |
246 | int err; | 250 | int err; |
247 | int i; | 251 | int i; |
248 | 252 | ||
249 | mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change); | 253 | mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change); |
254 | max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU); | ||
250 | 255 | ||
251 | err = mlx5e_port_query_buffer(priv, &port_buffer); | 256 | err = mlx5e_port_query_buffer(priv, &port_buffer); |
252 | if (err) | 257 | if (err) |
@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
254 | 259 | ||
255 | if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { | 260 | if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { |
256 | update_buffer = true; | 261 | update_buffer = true; |
257 | err = update_xoff_threshold(&port_buffer, xoff, mtu); | 262 | err = update_xoff_threshold(&port_buffer, xoff, max_mtu); |
258 | if (err) | 263 | if (err) |
259 | return err; | 264 | return err; |
260 | } | 265 | } |
@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
264 | if (err) | 269 | if (err) |
265 | return err; | 270 | return err; |
266 | 271 | ||
267 | err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff, | 272 | err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, |
268 | &port_buffer, &update_buffer); | 273 | &port_buffer, &update_buffer); |
269 | if (err) | 274 | if (err) |
270 | return err; | 275 | return err; |
@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
276 | if (err) | 281 | if (err) |
277 | return err; | 282 | return err; |
278 | 283 | ||
279 | err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff, | 284 | err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, |
280 | &port_buffer, &update_buffer); | 285 | xoff, &port_buffer, &update_buffer); |
281 | if (err) | 286 | if (err) |
282 | return err; | 287 | return err; |
283 | } | 288 | } |
@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
301 | return -EINVAL; | 306 | return -EINVAL; |
302 | 307 | ||
303 | update_buffer = true; | 308 | update_buffer = true; |
304 | err = update_xoff_threshold(&port_buffer, xoff, mtu); | 309 | err = update_xoff_threshold(&port_buffer, xoff, max_mtu); |
305 | if (err) | 310 | if (err) |
306 | return err; | 311 | return err; |
307 | } | 312 | } |
@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
309 | /* Need to update buffer configuration if xoff value is changed */ | 314 | /* Need to update buffer configuration if xoff value is changed */ |
310 | if (!update_buffer && xoff != priv->dcbx.xoff) { | 315 | if (!update_buffer && xoff != priv->dcbx.xoff) { |
311 | update_buffer = true; | 316 | update_buffer = true; |
312 | err = update_xoff_threshold(&port_buffer, xoff, mtu); | 317 | err = update_xoff_threshold(&port_buffer, xoff, max_mtu); |
313 | if (err) | 318 | if (err) |
314 | return err; | 319 | return err; |
315 | } | 320 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 3078491cc0d0..1539cf3de5dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c | |||
@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, | |||
45 | if (err) | 45 | if (err) |
46 | return err; | 46 | return err; |
47 | 47 | ||
48 | mutex_lock(&mdev->mlx5e_res.td.list_lock); | ||
48 | list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); | 49 | list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); |
50 | mutex_unlock(&mdev->mlx5e_res.td.list_lock); | ||
49 | 51 | ||
50 | return 0; | 52 | return 0; |
51 | } | 53 | } |
@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, | |||
53 | void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, | 55 | void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, |
54 | struct mlx5e_tir *tir) | 56 | struct mlx5e_tir *tir) |
55 | { | 57 | { |
58 | mutex_lock(&mdev->mlx5e_res.td.list_lock); | ||
56 | mlx5_core_destroy_tir(mdev, tir->tirn); | 59 | mlx5_core_destroy_tir(mdev, tir->tirn); |
57 | list_del(&tir->list); | 60 | list_del(&tir->list); |
61 | mutex_unlock(&mdev->mlx5e_res.td.list_lock); | ||
58 | } | 62 | } |
59 | 63 | ||
60 | static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, | 64 | static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, |
@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) | |||
114 | } | 118 | } |
115 | 119 | ||
116 | INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); | 120 | INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); |
121 | mutex_init(&mdev->mlx5e_res.td.list_lock); | ||
117 | 122 | ||
118 | return 0; | 123 | return 0; |
119 | 124 | ||
@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) | |||
141 | { | 146 | { |
142 | struct mlx5_core_dev *mdev = priv->mdev; | 147 | struct mlx5_core_dev *mdev = priv->mdev; |
143 | struct mlx5e_tir *tir; | 148 | struct mlx5e_tir *tir; |
144 | int err = -ENOMEM; | 149 | int err = 0; |
145 | u32 tirn = 0; | 150 | u32 tirn = 0; |
146 | int inlen; | 151 | int inlen; |
147 | void *in; | 152 | void *in; |
148 | 153 | ||
149 | inlen = MLX5_ST_SZ_BYTES(modify_tir_in); | 154 | inlen = MLX5_ST_SZ_BYTES(modify_tir_in); |
150 | in = kvzalloc(inlen, GFP_KERNEL); | 155 | in = kvzalloc(inlen, GFP_KERNEL); |
151 | if (!in) | 156 | if (!in) { |
157 | err = -ENOMEM; | ||
152 | goto out; | 158 | goto out; |
159 | } | ||
153 | 160 | ||
154 | if (enable_uc_lb) | 161 | if (enable_uc_lb) |
155 | MLX5_SET(modify_tir_in, in, ctx.self_lb_block, | 162 | MLX5_SET(modify_tir_in, in, ctx.self_lb_block, |
@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) | |||
157 | 164 | ||
158 | MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); | 165 | MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); |
159 | 166 | ||
167 | mutex_lock(&mdev->mlx5e_res.td.list_lock); | ||
160 | list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { | 168 | list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { |
161 | tirn = tir->tirn; | 169 | tirn = tir->tirn; |
162 | err = mlx5_core_modify_tir(mdev, tirn, in, inlen); | 170 | err = mlx5_core_modify_tir(mdev, tirn, in, inlen); |
@@ -168,6 +176,7 @@ out: | |||
168 | kvfree(in); | 176 | kvfree(in); |
169 | if (err) | 177 | if (err) |
170 | netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); | 178 | netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); |
179 | mutex_unlock(&mdev->mlx5e_res.td.list_lock); | ||
171 | 180 | ||
172 | return err; | 181 | return err; |
173 | } | 182 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index a0987cc5fe4a..5efce4a3ff79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
@@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev, | |||
603 | __ETHTOOL_LINK_MODE_MASK_NBITS); | 603 | __ETHTOOL_LINK_MODE_MASK_NBITS); |
604 | } | 604 | } |
605 | 605 | ||
606 | static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev, | 606 | static void ptys2ethtool_adver_link(unsigned long *advertising_modes, |
607 | unsigned long *advertising_modes, | 607 | u32 eth_proto_cap, bool ext) |
608 | u32 eth_proto_cap) | ||
609 | { | 608 | { |
610 | unsigned long proto_cap = eth_proto_cap; | 609 | unsigned long proto_cap = eth_proto_cap; |
611 | struct ptys2ethtool_config *table; | 610 | struct ptys2ethtool_config *table; |
612 | u32 max_size; | 611 | u32 max_size; |
613 | int proto; | 612 | int proto; |
614 | 613 | ||
615 | mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size); | 614 | table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; |
615 | max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) : | ||
616 | ARRAY_SIZE(ptys2legacy_ethtool_table); | ||
617 | |||
616 | for_each_set_bit(proto, &proto_cap, max_size) | 618 | for_each_set_bit(proto, &proto_cap, max_size) |
617 | bitmap_or(advertising_modes, advertising_modes, | 619 | bitmap_or(advertising_modes, advertising_modes, |
618 | table[proto].advertised, | 620 | table[proto].advertised, |
@@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap, | |||
794 | ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); | 796 | ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); |
795 | } | 797 | } |
796 | 798 | ||
797 | static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap, | 799 | static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause, |
798 | u8 tx_pause, u8 rx_pause, | 800 | struct ethtool_link_ksettings *link_ksettings, |
799 | struct ethtool_link_ksettings *link_ksettings) | 801 | bool ext) |
800 | { | 802 | { |
801 | unsigned long *advertising = link_ksettings->link_modes.advertising; | 803 | unsigned long *advertising = link_ksettings->link_modes.advertising; |
802 | ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap); | 804 | ptys2ethtool_adver_link(advertising, eth_proto_cap, ext); |
803 | 805 | ||
804 | if (rx_pause) | 806 | if (rx_pause) |
805 | ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); | 807 | ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); |
@@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp, | |||
854 | struct ethtool_link_ksettings *link_ksettings) | 856 | struct ethtool_link_ksettings *link_ksettings) |
855 | { | 857 | { |
856 | unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; | 858 | unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; |
859 | bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); | ||
857 | 860 | ||
858 | ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp); | 861 | ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext); |
859 | } | 862 | } |
860 | 863 | ||
861 | int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | 864 | int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, |
@@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | |||
872 | u8 an_disable_admin; | 875 | u8 an_disable_admin; |
873 | u8 an_status; | 876 | u8 an_status; |
874 | u8 connector_type; | 877 | u8 connector_type; |
878 | bool admin_ext; | ||
875 | bool ext; | 879 | bool ext; |
876 | int err; | 880 | int err; |
877 | 881 | ||
@@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | |||
886 | eth_proto_capability); | 890 | eth_proto_capability); |
887 | eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, | 891 | eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, |
888 | eth_proto_admin); | 892 | eth_proto_admin); |
893 | /* Fields: eth_proto_admin and ext_eth_proto_admin are | ||
894 | * mutually exclusive. Hence try reading legacy advertising | ||
895 | * when extended advertising is zero. | ||
896 | * admin_ext indicates how eth_proto_admin should be | ||
897 | * interpreted | ||
898 | */ | ||
899 | admin_ext = ext; | ||
900 | if (ext && !eth_proto_admin) { | ||
901 | eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, false, | ||
902 | eth_proto_admin); | ||
903 | admin_ext = false; | ||
904 | } | ||
905 | |||
889 | eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, | 906 | eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, |
890 | eth_proto_oper); | 907 | eth_proto_oper); |
891 | eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); | 908 | eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); |
@@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | |||
899 | ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); | 916 | ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); |
900 | 917 | ||
901 | get_supported(mdev, eth_proto_cap, link_ksettings); | 918 | get_supported(mdev, eth_proto_cap, link_ksettings); |
902 | get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings); | 919 | get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings, |
920 | admin_ext); | ||
903 | get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); | 921 | get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); |
904 | 922 | ||
905 | eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; | 923 | eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; |
@@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, | |||
997 | 1015 | ||
998 | #define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1) | 1016 | #define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1) |
999 | 1017 | ||
1000 | ext_requested = (link_ksettings->link_modes.advertising[0] > | 1018 | ext_requested = !!(link_ksettings->link_modes.advertising[0] > |
1001 | MLX5E_PTYS_EXT); | 1019 | MLX5E_PTYS_EXT || |
1020 | link_ksettings->link_modes.advertising[1]); | ||
1002 | ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); | 1021 | ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); |
1003 | 1022 | ext_requested &= ext_supported; | |
1004 | /*when ptys_extended_ethernet is set legacy link modes are deprecated */ | ||
1005 | if (ext_requested != ext_supported) | ||
1006 | return -EPROTONOSUPPORT; | ||
1007 | 1023 | ||
1008 | speed = link_ksettings->base.speed; | 1024 | speed = link_ksettings->base.speed; |
1009 | ethtool2ptys_adver_func = ext_requested ? | 1025 | ethtool2ptys_adver_func = ext_requested ? |
1010 | mlx5e_ethtool2ptys_ext_adver_link : | 1026 | mlx5e_ethtool2ptys_ext_adver_link : |
1011 | mlx5e_ethtool2ptys_adver_link; | 1027 | mlx5e_ethtool2ptys_adver_link; |
1012 | err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto); | 1028 | err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto); |
1013 | if (err) { | 1029 | if (err) { |
1014 | netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", | 1030 | netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", |
1015 | __func__, err); | 1031 | __func__, err); |
@@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, | |||
1037 | if (!an_changes && link_modes == eproto.admin) | 1053 | if (!an_changes && link_modes == eproto.admin) |
1038 | goto out; | 1054 | goto out; |
1039 | 1055 | ||
1040 | mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported); | 1056 | mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested); |
1041 | mlx5_toggle_port_link(mdev); | 1057 | mlx5_toggle_port_link(mdev); |
1042 | 1058 | ||
1043 | out: | 1059 | out: |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 2fd425a7b156..ffc4a36551c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -2161,6 +2161,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, | |||
2161 | return true; | 2161 | return true; |
2162 | } | 2162 | } |
2163 | 2163 | ||
2164 | struct ip_ttl_word { | ||
2165 | __u8 ttl; | ||
2166 | __u8 protocol; | ||
2167 | __sum16 check; | ||
2168 | }; | ||
2169 | |||
2170 | struct ipv6_hoplimit_word { | ||
2171 | __be16 payload_len; | ||
2172 | __u8 nexthdr; | ||
2173 | __u8 hop_limit; | ||
2174 | }; | ||
2175 | |||
2176 | static bool is_action_keys_supported(const struct flow_action_entry *act) | ||
2177 | { | ||
2178 | u32 mask, offset; | ||
2179 | u8 htype; | ||
2180 | |||
2181 | htype = act->mangle.htype; | ||
2182 | offset = act->mangle.offset; | ||
2183 | mask = ~act->mangle.mask; | ||
2184 | /* For IPv4 & IPv6 header check 4 byte word, | ||
2185 | * to determine that modified fields | ||
2186 | * are NOT ttl & hop_limit only. | ||
2187 | */ | ||
2188 | if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) { | ||
2189 | struct ip_ttl_word *ttl_word = | ||
2190 | (struct ip_ttl_word *)&mask; | ||
2191 | |||
2192 | if (offset != offsetof(struct iphdr, ttl) || | ||
2193 | ttl_word->protocol || | ||
2194 | ttl_word->check) { | ||
2195 | return true; | ||
2196 | } | ||
2197 | } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { | ||
2198 | struct ipv6_hoplimit_word *hoplimit_word = | ||
2199 | (struct ipv6_hoplimit_word *)&mask; | ||
2200 | |||
2201 | if (offset != offsetof(struct ipv6hdr, payload_len) || | ||
2202 | hoplimit_word->payload_len || | ||
2203 | hoplimit_word->nexthdr) { | ||
2204 | return true; | ||
2205 | } | ||
2206 | } | ||
2207 | return false; | ||
2208 | } | ||
2209 | |||
2164 | static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | 2210 | static bool modify_header_match_supported(struct mlx5_flow_spec *spec, |
2165 | struct flow_action *flow_action, | 2211 | struct flow_action *flow_action, |
2166 | u32 actions, | 2212 | u32 actions, |
@@ -2168,9 +2214,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | |||
2168 | { | 2214 | { |
2169 | const struct flow_action_entry *act; | 2215 | const struct flow_action_entry *act; |
2170 | bool modify_ip_header; | 2216 | bool modify_ip_header; |
2171 | u8 htype, ip_proto; | ||
2172 | void *headers_v; | 2217 | void *headers_v; |
2173 | u16 ethertype; | 2218 | u16 ethertype; |
2219 | u8 ip_proto; | ||
2174 | int i; | 2220 | int i; |
2175 | 2221 | ||
2176 | if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) | 2222 | if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) |
@@ -2190,9 +2236,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | |||
2190 | act->id != FLOW_ACTION_ADD) | 2236 | act->id != FLOW_ACTION_ADD) |
2191 | continue; | 2237 | continue; |
2192 | 2238 | ||
2193 | htype = act->mangle.htype; | 2239 | if (is_action_keys_supported(act)) { |
2194 | if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 || | ||
2195 | htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { | ||
2196 | modify_ip_header = true; | 2240 | modify_ip_header = true; |
2197 | break; | 2241 | break; |
2198 | } | 2242 | } |
@@ -2381,15 +2425,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, | |||
2381 | return 0; | 2425 | return 0; |
2382 | } | 2426 | } |
2383 | 2427 | ||
2384 | static inline int cmp_encap_info(struct ip_tunnel_key *a, | 2428 | struct encap_key { |
2385 | struct ip_tunnel_key *b) | 2429 | struct ip_tunnel_key *ip_tun_key; |
2430 | int tunnel_type; | ||
2431 | }; | ||
2432 | |||
2433 | static inline int cmp_encap_info(struct encap_key *a, | ||
2434 | struct encap_key *b) | ||
2386 | { | 2435 | { |
2387 | return memcmp(a, b, sizeof(*a)); | 2436 | return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) || |
2437 | a->tunnel_type != b->tunnel_type; | ||
2388 | } | 2438 | } |
2389 | 2439 | ||
2390 | static inline int hash_encap_info(struct ip_tunnel_key *key) | 2440 | static inline int hash_encap_info(struct encap_key *key) |
2391 | { | 2441 | { |
2392 | return jhash(key, sizeof(*key), 0); | 2442 | return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key), |
2443 | key->tunnel_type); | ||
2393 | } | 2444 | } |
2394 | 2445 | ||
2395 | 2446 | ||
@@ -2420,7 +2471,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, | |||
2420 | struct mlx5_esw_flow_attr *attr = flow->esw_attr; | 2471 | struct mlx5_esw_flow_attr *attr = flow->esw_attr; |
2421 | struct mlx5e_tc_flow_parse_attr *parse_attr; | 2472 | struct mlx5e_tc_flow_parse_attr *parse_attr; |
2422 | struct ip_tunnel_info *tun_info; | 2473 | struct ip_tunnel_info *tun_info; |
2423 | struct ip_tunnel_key *key; | 2474 | struct encap_key key, e_key; |
2424 | struct mlx5e_encap_entry *e; | 2475 | struct mlx5e_encap_entry *e; |
2425 | unsigned short family; | 2476 | unsigned short family; |
2426 | uintptr_t hash_key; | 2477 | uintptr_t hash_key; |
@@ -2430,13 +2481,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, | |||
2430 | parse_attr = attr->parse_attr; | 2481 | parse_attr = attr->parse_attr; |
2431 | tun_info = &parse_attr->tun_info[out_index]; | 2482 | tun_info = &parse_attr->tun_info[out_index]; |
2432 | family = ip_tunnel_info_af(tun_info); | 2483 | family = ip_tunnel_info_af(tun_info); |
2433 | key = &tun_info->key; | 2484 | key.ip_tun_key = &tun_info->key; |
2485 | key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev); | ||
2434 | 2486 | ||
2435 | hash_key = hash_encap_info(key); | 2487 | hash_key = hash_encap_info(&key); |
2436 | 2488 | ||
2437 | hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, | 2489 | hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, |
2438 | encap_hlist, hash_key) { | 2490 | encap_hlist, hash_key) { |
2439 | if (!cmp_encap_info(&e->tun_info.key, key)) { | 2491 | e_key.ip_tun_key = &e->tun_info.key; |
2492 | e_key.tunnel_type = e->tunnel_type; | ||
2493 | if (!cmp_encap_info(&e_key, &key)) { | ||
2440 | found = true; | 2494 | found = true; |
2441 | break; | 2495 | break; |
2442 | } | 2496 | } |
@@ -2717,7 +2771,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, | |||
2717 | 2771 | ||
2718 | if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || | 2772 | if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || |
2719 | hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { | 2773 | hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { |
2720 | err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL, | 2774 | err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB, |
2721 | parse_attr, hdrs, extack); | 2775 | parse_attr, hdrs, extack); |
2722 | if (err) | 2776 | if (err) |
2723 | return err; | 2777 | return err; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index ecd2c747f726..8a67fd197b79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
@@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, | |||
105 | opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); | 105 | opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); |
106 | MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); | 106 | MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); |
107 | MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); | 107 | MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); |
108 | if (vport) | 108 | MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); |
109 | MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); | ||
110 | nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, | 109 | nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, |
111 | in, nic_vport_context); | 110 | in, nic_vport_context); |
112 | 111 | ||
@@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, | |||
134 | MLX5_SET(modify_esw_vport_context_in, in, opcode, | 133 | MLX5_SET(modify_esw_vport_context_in, in, opcode, |
135 | MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); | 134 | MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); |
136 | MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); | 135 | MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); |
137 | if (vport) | 136 | MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); |
138 | MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); | ||
139 | return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); | 137 | return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); |
140 | } | 138 | } |
141 | 139 | ||
@@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw) | |||
431 | { | 429 | { |
432 | int err; | 430 | int err; |
433 | 431 | ||
432 | memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb)); | ||
433 | |||
434 | err = esw_create_legacy_vepa_table(esw); | 434 | err = esw_create_legacy_vepa_table(esw); |
435 | if (err) | 435 | if (err) |
436 | return err; | 436 | return err; |
@@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw, | |||
2157 | 2157 | ||
2158 | /* Star rule to forward all traffic to uplink vport */ | 2158 | /* Star rule to forward all traffic to uplink vport */ |
2159 | memset(spec, 0, sizeof(*spec)); | 2159 | memset(spec, 0, sizeof(*spec)); |
2160 | memset(&dest, 0, sizeof(dest)); | ||
2160 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; | 2161 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; |
2161 | dest.vport.num = MLX5_VPORT_UPLINK; | 2162 | dest.vport.num = MLX5_VPORT_UPLINK; |
2162 | flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; | 2163 | flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 6c72f33f6d09..fe770cd2151c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
@@ -1609,6 +1609,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports) | |||
1609 | { | 1609 | { |
1610 | int err; | 1610 | int err; |
1611 | 1611 | ||
1612 | memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); | ||
1612 | mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); | 1613 | mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); |
1613 | 1614 | ||
1614 | err = esw_create_offloads_fdb_tables(esw, nvports); | 1615 | err = esw_create_offloads_fdb_tables(esw, nvports); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c index 5cf5f2a9d51f..8de64e88c670 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | |||
@@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, | |||
217 | void *cmd; | 217 | void *cmd; |
218 | int ret; | 218 | int ret; |
219 | 219 | ||
220 | rcu_read_lock(); | ||
221 | flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); | ||
222 | rcu_read_unlock(); | ||
223 | |||
224 | if (!flow) { | ||
225 | WARN_ONCE(1, "Received NULL pointer for handle\n"); | ||
226 | return -EINVAL; | ||
227 | } | ||
228 | |||
220 | buf = kzalloc(size, GFP_ATOMIC); | 229 | buf = kzalloc(size, GFP_ATOMIC); |
221 | if (!buf) | 230 | if (!buf) |
222 | return -ENOMEM; | 231 | return -ENOMEM; |
223 | 232 | ||
224 | cmd = (buf + 1); | 233 | cmd = (buf + 1); |
225 | 234 | ||
226 | rcu_read_lock(); | ||
227 | flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); | ||
228 | rcu_read_unlock(); | ||
229 | mlx5_fpga_tls_flow_to_cmd(flow, cmd); | 235 | mlx5_fpga_tls_flow_to_cmd(flow, cmd); |
230 | 236 | ||
231 | MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); | 237 | MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); |
@@ -238,6 +244,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, | |||
238 | buf->complete = mlx_tls_kfree_complete; | 244 | buf->complete = mlx_tls_kfree_complete; |
239 | 245 | ||
240 | ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); | 246 | ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); |
247 | if (ret < 0) | ||
248 | kfree(buf); | ||
241 | 249 | ||
242 | return ret; | 250 | return ret; |
243 | } | 251 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 70cc906a102b..76716419370d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = { | |||
164 | .size = 8, | 164 | .size = 8, |
165 | .limit = 4 | 165 | .limit = 4 |
166 | }, | 166 | }, |
167 | .mr_cache[16] = { | ||
168 | .size = 8, | ||
169 | .limit = 4 | ||
170 | }, | ||
171 | .mr_cache[17] = { | ||
172 | .size = 8, | ||
173 | .limit = 4 | ||
174 | }, | ||
175 | .mr_cache[18] = { | ||
176 | .size = 8, | ||
177 | .limit = 4 | ||
178 | }, | ||
179 | .mr_cache[19] = { | ||
180 | .size = 4, | ||
181 | .limit = 2 | ||
182 | }, | ||
183 | .mr_cache[20] = { | ||
184 | .size = 4, | ||
185 | .limit = 2 | ||
186 | }, | ||
187 | }, | 167 | }, |
188 | }; | 168 | }; |
189 | 169 | ||
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 6e2a6caec3fb..c56e31d9f8a4 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c | |||
@@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, | |||
48 | 48 | ||
49 | tmp_push_vlan_tci = | 49 | tmp_push_vlan_tci = |
50 | FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) | | 50 | FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) | |
51 | FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) | | 51 | FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid); |
52 | NFP_FL_PUSH_VLAN_CFI; | ||
53 | push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); | 52 | push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); |
54 | } | 53 | } |
55 | 54 | ||
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 4fcaf11ed56e..0ed51e79db00 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h | |||
@@ -26,7 +26,7 @@ | |||
26 | #define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6) | 26 | #define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6) |
27 | 27 | ||
28 | #define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) | 28 | #define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) |
29 | #define NFP_FLOWER_MASK_VLAN_CFI BIT(12) | 29 | #define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12) |
30 | #define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0) | 30 | #define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0) |
31 | 31 | ||
32 | #define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12) | 32 | #define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12) |
@@ -82,7 +82,6 @@ | |||
82 | #define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0) | 82 | #define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0) |
83 | 83 | ||
84 | #define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13) | 84 | #define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13) |
85 | #define NFP_FL_PUSH_VLAN_CFI BIT(12) | ||
86 | #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) | 85 | #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) |
87 | 86 | ||
88 | #define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff) | 87 | #define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff) |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index e03c8ef2c28c..9b8b843d0340 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c | |||
@@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext, | |||
30 | 30 | ||
31 | flow_rule_match_vlan(rule, &match); | 31 | flow_rule_match_vlan(rule, &match); |
32 | /* Populate the tci field. */ | 32 | /* Populate the tci field. */ |
33 | if (match.key->vlan_id || match.key->vlan_priority) { | 33 | tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; |
34 | tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, | 34 | tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, |
35 | match.key->vlan_priority) | | 35 | match.key->vlan_priority) | |
36 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, | 36 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, |
37 | match.key->vlan_id) | | 37 | match.key->vlan_id); |
38 | NFP_FLOWER_MASK_VLAN_CFI; | 38 | ext->tci = cpu_to_be16(tmp_tci); |
39 | ext->tci = cpu_to_be16(tmp_tci); | 39 | |
40 | tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, | 40 | tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; |
41 | match.mask->vlan_priority) | | 41 | tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, |
42 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, | 42 | match.mask->vlan_priority) | |
43 | match.mask->vlan_id) | | 43 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, |
44 | NFP_FLOWER_MASK_VLAN_CFI; | 44 | match.mask->vlan_id); |
45 | msk->tci = cpu_to_be16(tmp_tci); | 45 | msk->tci = cpu_to_be16(tmp_tci); |
46 | } | ||
47 | } | 46 | } |
48 | } | 47 | } |
49 | 48 | ||
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index c3ad083d36c6..08e9bfa95f9b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | |||
@@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
195 | ret = dev_queue_xmit(skb); | 195 | ret = dev_queue_xmit(skb); |
196 | nfp_repr_inc_tx_stats(netdev, len, ret); | 196 | nfp_repr_inc_tx_stats(netdev, len, ret); |
197 | 197 | ||
198 | return ret; | 198 | return NETDEV_TX_OK; |
199 | } | 199 | } |
200 | 200 | ||
201 | static int nfp_repr_stop(struct net_device *netdev) | 201 | static int nfp_repr_stop(struct net_device *netdev) |
@@ -382,7 +382,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, | |||
382 | netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); | 382 | netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); |
383 | netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; | 383 | netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; |
384 | 384 | ||
385 | netdev->priv_flags |= IFF_NO_QUEUE; | 385 | netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL; |
386 | netdev->features |= NETIF_F_LLTX; | 386 | netdev->features |= NETIF_F_LLTX; |
387 | 387 | ||
388 | if (nfp_app_has_tc(app)) { | 388 | if (nfp_app_has_tc(app)) { |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index a8ca26c2ae0c..88eb9e05d2a1 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -5460,7 +5460,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) | |||
5460 | tp->cp_cmd |= PktCntrDisable | INTT_1; | 5460 | tp->cp_cmd |= PktCntrDisable | INTT_1; |
5461 | RTL_W16(tp, CPlusCmd, tp->cp_cmd); | 5461 | RTL_W16(tp, CPlusCmd, tp->cp_cmd); |
5462 | 5462 | ||
5463 | RTL_W16(tp, IntrMitigate, 0x5151); | 5463 | RTL_W16(tp, IntrMitigate, 0x5100); |
5464 | 5464 | ||
5465 | /* Work around for RxFIFO overflow. */ | 5465 | /* Work around for RxFIFO overflow. */ |
5466 | if (tp->mac_version == RTL_GIGA_MAC_VER_11) { | 5466 | if (tp->mac_version == RTL_GIGA_MAC_VER_11) { |
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index 40d6356a7e73..3dfb07a78952 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h | |||
@@ -29,11 +29,13 @@ | |||
29 | /* Specific functions used for Ring mode */ | 29 | /* Specific functions used for Ring mode */ |
30 | 30 | ||
31 | /* Enhanced descriptors */ | 31 | /* Enhanced descriptors */ |
32 | static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) | 32 | static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end, |
33 | int bfsize) | ||
33 | { | 34 | { |
34 | p->des1 |= cpu_to_le32((BUF_SIZE_8KiB | 35 | if (bfsize == BUF_SIZE_16KiB) |
35 | << ERDES1_BUFFER2_SIZE_SHIFT) | 36 | p->des1 |= cpu_to_le32((BUF_SIZE_8KiB |
36 | & ERDES1_BUFFER2_SIZE_MASK); | 37 | << ERDES1_BUFFER2_SIZE_SHIFT) |
38 | & ERDES1_BUFFER2_SIZE_MASK); | ||
37 | 39 | ||
38 | if (end) | 40 | if (end) |
39 | p->des1 |= cpu_to_le32(ERDES1_END_RING); | 41 | p->des1 |= cpu_to_le32(ERDES1_END_RING); |
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len) | |||
59 | } | 61 | } |
60 | 62 | ||
61 | /* Normal descriptors */ | 63 | /* Normal descriptors */ |
62 | static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end) | 64 | static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize) |
63 | { | 65 | { |
64 | p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1) | 66 | if (bfsize >= BUF_SIZE_2KiB) { |
65 | << RDES1_BUFFER2_SIZE_SHIFT) | 67 | int bfsize2; |
66 | & RDES1_BUFFER2_SIZE_MASK); | 68 | |
69 | bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1); | ||
70 | p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT) | ||
71 | & RDES1_BUFFER2_SIZE_MASK); | ||
72 | } | ||
67 | 73 | ||
68 | if (end) | 74 | if (end) |
69 | p->des1 |= cpu_to_le32(RDES1_END_RING); | 75 | p->des1 |= cpu_to_le32(RDES1_END_RING); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 7fbb6a4dbf51..e061e9f5fad7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | |||
@@ -296,7 +296,7 @@ exit: | |||
296 | } | 296 | } |
297 | 297 | ||
298 | static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, | 298 | static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, |
299 | int mode, int end) | 299 | int mode, int end, int bfsize) |
300 | { | 300 | { |
301 | dwmac4_set_rx_owner(p, disable_rx_ic); | 301 | dwmac4_set_rx_owner(p, disable_rx_ic); |
302 | } | 302 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c index 1d858fdec997..98fa471da7c0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | |||
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc, | |||
123 | } | 123 | } |
124 | 124 | ||
125 | static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, | 125 | static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, |
126 | int mode, int end) | 126 | int mode, int end, int bfsize) |
127 | { | 127 | { |
128 | dwxgmac2_set_rx_owner(p, disable_rx_ic); | 128 | dwxgmac2_set_rx_owner(p, disable_rx_ic); |
129 | } | 129 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 5ef91a790f9d..5202d6ad7919 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c | |||
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
201 | if (unlikely(rdes0 & RDES0_OWN)) | 201 | if (unlikely(rdes0 & RDES0_OWN)) |
202 | return dma_own; | 202 | return dma_own; |
203 | 203 | ||
204 | if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { | ||
205 | stats->rx_length_errors++; | ||
206 | return discard_frame; | ||
207 | } | ||
208 | |||
204 | if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { | 209 | if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { |
205 | if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { | 210 | if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { |
206 | x->rx_desc++; | 211 | x->rx_desc++; |
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
231 | * It doesn't match with the information reported into the databook. | 236 | * It doesn't match with the information reported into the databook. |
232 | * At any rate, we need to understand if the CSUM hw computation is ok | 237 | * At any rate, we need to understand if the CSUM hw computation is ok |
233 | * and report this info to the upper layers. */ | 238 | * and report this info to the upper layers. */ |
234 | ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), | 239 | if (likely(ret == good_frame)) |
235 | !!(rdes0 & RDES0_FRAME_TYPE), | 240 | ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), |
236 | !!(rdes0 & ERDES0_RX_MAC_ADDR)); | 241 | !!(rdes0 & RDES0_FRAME_TYPE), |
242 | !!(rdes0 & ERDES0_RX_MAC_ADDR)); | ||
237 | 243 | ||
238 | if (unlikely(rdes0 & RDES0_DRIBBLING)) | 244 | if (unlikely(rdes0 & RDES0_DRIBBLING)) |
239 | x->dribbling_bit++; | 245 | x->dribbling_bit++; |
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
259 | } | 265 | } |
260 | 266 | ||
261 | static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, | 267 | static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, |
262 | int mode, int end) | 268 | int mode, int end, int bfsize) |
263 | { | 269 | { |
270 | int bfsize1; | ||
271 | |||
264 | p->des0 |= cpu_to_le32(RDES0_OWN); | 272 | p->des0 |= cpu_to_le32(RDES0_OWN); |
265 | p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK); | 273 | |
274 | bfsize1 = min(bfsize, BUF_SIZE_8KiB); | ||
275 | p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK); | ||
266 | 276 | ||
267 | if (mode == STMMAC_CHAIN_MODE) | 277 | if (mode == STMMAC_CHAIN_MODE) |
268 | ehn_desc_rx_set_on_chain(p); | 278 | ehn_desc_rx_set_on_chain(p); |
269 | else | 279 | else |
270 | ehn_desc_rx_set_on_ring(p, end); | 280 | ehn_desc_rx_set_on_ring(p, end, bfsize); |
271 | 281 | ||
272 | if (disable_rx_ic) | 282 | if (disable_rx_ic) |
273 | p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); | 283 | p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 92b8944f26e3..5bb00234d961 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h | |||
@@ -33,7 +33,7 @@ struct dma_extended_desc; | |||
33 | struct stmmac_desc_ops { | 33 | struct stmmac_desc_ops { |
34 | /* DMA RX descriptor ring initialization */ | 34 | /* DMA RX descriptor ring initialization */ |
35 | void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, | 35 | void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, |
36 | int end); | 36 | int end, int bfsize); |
37 | /* DMA TX descriptor ring initialization */ | 37 | /* DMA TX descriptor ring initialization */ |
38 | void (*init_tx_desc)(struct dma_desc *p, int mode, int end); | 38 | void (*init_tx_desc)(struct dma_desc *p, int mode, int end); |
39 | /* Invoked by the xmit function to prepare the tx descriptor */ | 39 | /* Invoked by the xmit function to prepare the tx descriptor */ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index de65bb29feba..b7dd4e3c760d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c | |||
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
91 | return dma_own; | 91 | return dma_own; |
92 | 92 | ||
93 | if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { | 93 | if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { |
94 | pr_warn("%s: Oversized frame spanned multiple buffers\n", | ||
95 | __func__); | ||
96 | stats->rx_length_errors++; | 94 | stats->rx_length_errors++; |
97 | return discard_frame; | 95 | return discard_frame; |
98 | } | 96 | } |
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
135 | } | 133 | } |
136 | 134 | ||
137 | static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, | 135 | static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, |
138 | int end) | 136 | int end, int bfsize) |
139 | { | 137 | { |
138 | int bfsize1; | ||
139 | |||
140 | p->des0 |= cpu_to_le32(RDES0_OWN); | 140 | p->des0 |= cpu_to_le32(RDES0_OWN); |
141 | p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK); | 141 | |
142 | bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1); | ||
143 | p->des1 |= cpu_to_le32(bfsize & RDES1_BUFFER1_SIZE_MASK); | ||
142 | 144 | ||
143 | if (mode == STMMAC_CHAIN_MODE) | 145 | if (mode == STMMAC_CHAIN_MODE) |
144 | ndesc_rx_set_on_chain(p, end); | 146 | ndesc_rx_set_on_chain(p, end); |
145 | else | 147 | else |
146 | ndesc_rx_set_on_ring(p, end); | 148 | ndesc_rx_set_on_ring(p, end, bfsize); |
147 | 149 | ||
148 | if (disable_rx_ic) | 150 | if (disable_rx_ic) |
149 | p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); | 151 | p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 6a2e1031a62a..a26e36dbb5df 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) | |||
1136 | if (priv->extend_desc) | 1136 | if (priv->extend_desc) |
1137 | stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, | 1137 | stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, |
1138 | priv->use_riwt, priv->mode, | 1138 | priv->use_riwt, priv->mode, |
1139 | (i == DMA_RX_SIZE - 1)); | 1139 | (i == DMA_RX_SIZE - 1), |
1140 | priv->dma_buf_sz); | ||
1140 | else | 1141 | else |
1141 | stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], | 1142 | stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], |
1142 | priv->use_riwt, priv->mode, | 1143 | priv->use_riwt, priv->mode, |
1143 | (i == DMA_RX_SIZE - 1)); | 1144 | (i == DMA_RX_SIZE - 1), |
1145 | priv->dma_buf_sz); | ||
1144 | } | 1146 | } |
1145 | 1147 | ||
1146 | /** | 1148 | /** |
@@ -3352,9 +3354,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
3352 | { | 3354 | { |
3353 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 3355 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
3354 | struct stmmac_channel *ch = &priv->channel[queue]; | 3356 | struct stmmac_channel *ch = &priv->channel[queue]; |
3355 | unsigned int entry = rx_q->cur_rx; | 3357 | unsigned int next_entry = rx_q->cur_rx; |
3356 | int coe = priv->hw->rx_csum; | 3358 | int coe = priv->hw->rx_csum; |
3357 | unsigned int next_entry; | ||
3358 | unsigned int count = 0; | 3359 | unsigned int count = 0; |
3359 | bool xmac; | 3360 | bool xmac; |
3360 | 3361 | ||
@@ -3372,10 +3373,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
3372 | stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); | 3373 | stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); |
3373 | } | 3374 | } |
3374 | while (count < limit) { | 3375 | while (count < limit) { |
3375 | int status; | 3376 | int entry, status; |
3376 | struct dma_desc *p; | 3377 | struct dma_desc *p; |
3377 | struct dma_desc *np; | 3378 | struct dma_desc *np; |
3378 | 3379 | ||
3380 | entry = next_entry; | ||
3381 | |||
3379 | if (priv->extend_desc) | 3382 | if (priv->extend_desc) |
3380 | p = (struct dma_desc *)(rx_q->dma_erx + entry); | 3383 | p = (struct dma_desc *)(rx_q->dma_erx + entry); |
3381 | else | 3384 | else |
@@ -3431,11 +3434,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
3431 | * ignored | 3434 | * ignored |
3432 | */ | 3435 | */ |
3433 | if (frame_len > priv->dma_buf_sz) { | 3436 | if (frame_len > priv->dma_buf_sz) { |
3434 | netdev_err(priv->dev, | 3437 | if (net_ratelimit()) |
3435 | "len %d larger than size (%d)\n", | 3438 | netdev_err(priv->dev, |
3436 | frame_len, priv->dma_buf_sz); | 3439 | "len %d larger than size (%d)\n", |
3440 | frame_len, priv->dma_buf_sz); | ||
3437 | priv->dev->stats.rx_length_errors++; | 3441 | priv->dev->stats.rx_length_errors++; |
3438 | break; | 3442 | continue; |
3439 | } | 3443 | } |
3440 | 3444 | ||
3441 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 | 3445 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 |
@@ -3470,7 +3474,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
3470 | dev_warn(priv->device, | 3474 | dev_warn(priv->device, |
3471 | "packet dropped\n"); | 3475 | "packet dropped\n"); |
3472 | priv->dev->stats.rx_dropped++; | 3476 | priv->dev->stats.rx_dropped++; |
3473 | break; | 3477 | continue; |
3474 | } | 3478 | } |
3475 | 3479 | ||
3476 | dma_sync_single_for_cpu(priv->device, | 3480 | dma_sync_single_for_cpu(priv->device, |
@@ -3490,11 +3494,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
3490 | } else { | 3494 | } else { |
3491 | skb = rx_q->rx_skbuff[entry]; | 3495 | skb = rx_q->rx_skbuff[entry]; |
3492 | if (unlikely(!skb)) { | 3496 | if (unlikely(!skb)) { |
3493 | netdev_err(priv->dev, | 3497 | if (net_ratelimit()) |
3494 | "%s: Inconsistent Rx chain\n", | 3498 | netdev_err(priv->dev, |
3495 | priv->dev->name); | 3499 | "%s: Inconsistent Rx chain\n", |
3500 | priv->dev->name); | ||
3496 | priv->dev->stats.rx_dropped++; | 3501 | priv->dev->stats.rx_dropped++; |
3497 | break; | 3502 | continue; |
3498 | } | 3503 | } |
3499 | prefetch(skb->data - NET_IP_ALIGN); | 3504 | prefetch(skb->data - NET_IP_ALIGN); |
3500 | rx_q->rx_skbuff[entry] = NULL; | 3505 | rx_q->rx_skbuff[entry] = NULL; |
@@ -3529,7 +3534,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
3529 | priv->dev->stats.rx_packets++; | 3534 | priv->dev->stats.rx_packets++; |
3530 | priv->dev->stats.rx_bytes += frame_len; | 3535 | priv->dev->stats.rx_bytes += frame_len; |
3531 | } | 3536 | } |
3532 | entry = next_entry; | ||
3533 | } | 3537 | } |
3534 | 3538 | ||
3535 | stmmac_rx_refill(priv, queue); | 3539 | stmmac_rx_refill(priv, queue); |
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index e859ae2e42d5..49f41b64077b 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
@@ -987,6 +987,7 @@ struct netvsc_device { | |||
987 | 987 | ||
988 | wait_queue_head_t wait_drain; | 988 | wait_queue_head_t wait_drain; |
989 | bool destroy; | 989 | bool destroy; |
990 | bool tx_disable; /* if true, do not wake up queue again */ | ||
990 | 991 | ||
991 | /* Receive buffer allocated by us but manages by NetVSP */ | 992 | /* Receive buffer allocated by us but manages by NetVSP */ |
992 | void *recv_buf; | 993 | void *recv_buf; |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 9a022539d305..fdbeb7070d42 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void) | |||
110 | 110 | ||
111 | init_waitqueue_head(&net_device->wait_drain); | 111 | init_waitqueue_head(&net_device->wait_drain); |
112 | net_device->destroy = false; | 112 | net_device->destroy = false; |
113 | net_device->tx_disable = false; | ||
113 | 114 | ||
114 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; | 115 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; |
115 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; | 116 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; |
@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev, | |||
719 | } else { | 720 | } else { |
720 | struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); | 721 | struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); |
721 | 722 | ||
722 | if (netif_tx_queue_stopped(txq) && | 723 | if (netif_tx_queue_stopped(txq) && !net_device->tx_disable && |
723 | (hv_get_avail_to_write_percent(&channel->outbound) > | 724 | (hv_get_avail_to_write_percent(&channel->outbound) > |
724 | RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) { | 725 | RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) { |
725 | netif_tx_wake_queue(txq); | 726 | netif_tx_wake_queue(txq); |
@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt( | |||
874 | } else if (ret == -EAGAIN) { | 875 | } else if (ret == -EAGAIN) { |
875 | netif_tx_stop_queue(txq); | 876 | netif_tx_stop_queue(txq); |
876 | ndev_ctx->eth_stats.stop_queue++; | 877 | ndev_ctx->eth_stats.stop_queue++; |
877 | if (atomic_read(&nvchan->queue_sends) < 1) { | 878 | if (atomic_read(&nvchan->queue_sends) < 1 && |
879 | !net_device->tx_disable) { | ||
878 | netif_tx_wake_queue(txq); | 880 | netif_tx_wake_queue(txq); |
879 | ndev_ctx->eth_stats.wake_queue++; | 881 | ndev_ctx->eth_stats.wake_queue++; |
880 | ret = -ENOSPC; | 882 | ret = -ENOSPC; |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 1a08679f90ce..06393b215102 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net) | |||
109 | rcu_read_unlock(); | 109 | rcu_read_unlock(); |
110 | } | 110 | } |
111 | 111 | ||
112 | static void netvsc_tx_enable(struct netvsc_device *nvscdev, | ||
113 | struct net_device *ndev) | ||
114 | { | ||
115 | nvscdev->tx_disable = false; | ||
116 | virt_wmb(); /* ensure queue wake up mechanism is on */ | ||
117 | |||
118 | netif_tx_wake_all_queues(ndev); | ||
119 | } | ||
120 | |||
112 | static int netvsc_open(struct net_device *net) | 121 | static int netvsc_open(struct net_device *net) |
113 | { | 122 | { |
114 | struct net_device_context *ndev_ctx = netdev_priv(net); | 123 | struct net_device_context *ndev_ctx = netdev_priv(net); |
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net) | |||
129 | rdev = nvdev->extension; | 138 | rdev = nvdev->extension; |
130 | if (!rdev->link_state) { | 139 | if (!rdev->link_state) { |
131 | netif_carrier_on(net); | 140 | netif_carrier_on(net); |
132 | netif_tx_wake_all_queues(net); | 141 | netvsc_tx_enable(nvdev, net); |
133 | } | 142 | } |
134 | 143 | ||
135 | if (vf_netdev) { | 144 | if (vf_netdev) { |
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev) | |||
184 | } | 193 | } |
185 | } | 194 | } |
186 | 195 | ||
196 | static void netvsc_tx_disable(struct netvsc_device *nvscdev, | ||
197 | struct net_device *ndev) | ||
198 | { | ||
199 | if (nvscdev) { | ||
200 | nvscdev->tx_disable = true; | ||
201 | virt_wmb(); /* ensure txq will not wake up after stop */ | ||
202 | } | ||
203 | |||
204 | netif_tx_disable(ndev); | ||
205 | } | ||
206 | |||
187 | static int netvsc_close(struct net_device *net) | 207 | static int netvsc_close(struct net_device *net) |
188 | { | 208 | { |
189 | struct net_device_context *net_device_ctx = netdev_priv(net); | 209 | struct net_device_context *net_device_ctx = netdev_priv(net); |
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net) | |||
192 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); | 212 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); |
193 | int ret; | 213 | int ret; |
194 | 214 | ||
195 | netif_tx_disable(net); | 215 | netvsc_tx_disable(nvdev, net); |
196 | 216 | ||
197 | /* No need to close rndis filter if it is removed already */ | 217 | /* No need to close rndis filter if it is removed already */ |
198 | if (!nvdev) | 218 | if (!nvdev) |
@@ -918,7 +938,7 @@ static int netvsc_detach(struct net_device *ndev, | |||
918 | 938 | ||
919 | /* If device was up (receiving) then shutdown */ | 939 | /* If device was up (receiving) then shutdown */ |
920 | if (netif_running(ndev)) { | 940 | if (netif_running(ndev)) { |
921 | netif_tx_disable(ndev); | 941 | netvsc_tx_disable(nvdev, ndev); |
922 | 942 | ||
923 | ret = rndis_filter_close(nvdev); | 943 | ret = rndis_filter_close(nvdev); |
924 | if (ret) { | 944 | if (ret) { |
@@ -1906,7 +1926,7 @@ static void netvsc_link_change(struct work_struct *w) | |||
1906 | if (rdev->link_state) { | 1926 | if (rdev->link_state) { |
1907 | rdev->link_state = false; | 1927 | rdev->link_state = false; |
1908 | netif_carrier_on(net); | 1928 | netif_carrier_on(net); |
1909 | netif_tx_wake_all_queues(net); | 1929 | netvsc_tx_enable(net_device, net); |
1910 | } else { | 1930 | } else { |
1911 | notify = true; | 1931 | notify = true; |
1912 | } | 1932 | } |
@@ -1916,7 +1936,7 @@ static void netvsc_link_change(struct work_struct *w) | |||
1916 | if (!rdev->link_state) { | 1936 | if (!rdev->link_state) { |
1917 | rdev->link_state = true; | 1937 | rdev->link_state = true; |
1918 | netif_carrier_off(net); | 1938 | netif_carrier_off(net); |
1919 | netif_tx_stop_all_queues(net); | 1939 | netvsc_tx_disable(net_device, net); |
1920 | } | 1940 | } |
1921 | kfree(event); | 1941 | kfree(event); |
1922 | break; | 1942 | break; |
@@ -1925,7 +1945,7 @@ static void netvsc_link_change(struct work_struct *w) | |||
1925 | if (!rdev->link_state) { | 1945 | if (!rdev->link_state) { |
1926 | rdev->link_state = true; | 1946 | rdev->link_state = true; |
1927 | netif_carrier_off(net); | 1947 | netif_carrier_off(net); |
1928 | netif_tx_stop_all_queues(net); | 1948 | netvsc_tx_disable(net_device, net); |
1929 | event->event = RNDIS_STATUS_MEDIA_CONNECT; | 1949 | event->event = RNDIS_STATUS_MEDIA_CONNECT; |
1930 | spin_lock_irqsave(&ndev_ctx->lock, flags); | 1950 | spin_lock_irqsave(&ndev_ctx->lock, flags); |
1931 | list_add(&event->list, &ndev_ctx->reconfig_events); | 1951 | list_add(&event->list, &ndev_ctx->reconfig_events); |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 74bebbdb4b15..9195f3476b1d 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = { | |||
1203 | {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ | 1203 | {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ |
1204 | {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ | 1204 | {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ |
1205 | {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ | 1205 | {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ |
1206 | {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ | ||
1206 | {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ | 1207 | {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ |
1207 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ | 1208 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ |
1208 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ | 1209 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ |
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 7c1430ed0244..6d1a1abbed27 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c | |||
@@ -1273,6 +1273,7 @@ static void vrf_setup(struct net_device *dev) | |||
1273 | 1273 | ||
1274 | /* default to no qdisc; user can add if desired */ | 1274 | /* default to no qdisc; user can add if desired */ |
1275 | dev->priv_flags |= IFF_NO_QUEUE; | 1275 | dev->priv_flags |= IFF_NO_QUEUE; |
1276 | dev->priv_flags |= IFF_NO_RX_HANDLER; | ||
1276 | 1277 | ||
1277 | dev->min_mtu = 0; | 1278 | dev->min_mtu = 0; |
1278 | dev->max_mtu = 0; | 1279 | dev->max_mtu = 0; |