-rw-r--r--  Documentation/devicetree/bindings/net/renesas,ravb.txt | 4
-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 7
-rw-r--r--  drivers/isdn/gigaset/ser-gigaset.c | 9
-rw-r--r--  drivers/isdn/hardware/mISDN/netjet.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 40
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 14
-rw-r--r--  drivers/net/dsa/mv88e6352.c | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c | 27
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c | 1
-rw-r--r--  drivers/net/ethernet/agere/et131x.c | 2
-rw-r--r--  drivers/net/ethernet/amd/am79c961a.c | 64
-rw-r--r--  drivers/net/ethernet/amd/lance.c | 4
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 74
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 299
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 71
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 46
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 3
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 18
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 8
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/t3_hw.c | 34
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 34
-rw-r--r--  drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 184
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/catas.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_clock.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_resources.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 56
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/pd.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/port.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 69
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 14
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 16
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 4
-rw-r--r--  drivers/net/ethernet/ti/cpsw-phy-sel.c | 12
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 105
-rw-r--r--  drivers/net/geneve.c | 31
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 3
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 43
-rw-r--r--  drivers/net/phy/marvell.c | 15
-rw-r--r--  drivers/net/phy/phy_device.c | 2
-rw-r--r--  drivers/net/ppp/pppoe.c | 2
-rw-r--r--  drivers/net/usb/Kconfig | 10
-rw-r--r--  drivers/net/usb/Makefile | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_defs.h | 2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r--  drivers/net/vxlan.c | 9
-rw-r--r--  drivers/net/wan/dscc4.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/Kconfig | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 42
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 188
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rc.c | 5
-rw-r--r--  drivers/net/wireless/ti/wlcore/io.c | 8
-rw-r--r--  drivers/net/wireless/ti/wlcore/io.h | 4
-rw-r--r--  include/linux/mlx4/device.h | 13
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 2968
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/soc/ti/knav_dma.h | 4
-rw-r--r--  include/net/inet_connection_sock.h | 5
-rw-r--r--  include/net/ip_fib.h | 1
-rw-r--r--  net/appletalk/ddp.c | 2
-rw-r--r--  net/batman-adv/gateway_client.c | 7
-rw-r--r--  net/batman-adv/hard-interface.c | 25
-rw-r--r--  net/batman-adv/translation-table.c | 6
-rw-r--r--  net/bluetooth/hci_core.c | 6
-rw-r--r--  net/bridge/br_mdb.c | 4
-rw-r--r--  net/caif/cfrfml.c | 2
-rw-r--r--  net/core/dev.c | 8
-rw-r--r--  net/core/flow_dissector.c | 7
-rw-r--r--  net/dccp/ipv4.c | 14
-rw-r--r--  net/dccp/ipv6.c | 14
-rw-r--r--  net/dsa/slave.c | 2
-rw-r--r--  net/ipv4/devinet.c | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 14
-rw-r--r--  net/ipv4/ip_gre.c | 5
-rw-r--r--  net/ipv4/ip_sockglue.c | 2
-rw-r--r--  net/ipv4/ping.c | 4
-rw-r--r--  net/ipv4/raw.c | 4
-rw-r--r--  net/ipv4/route.c | 77
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 5
-rw-r--r--  net/ipv4/tcp_ipv4.c | 20
-rw-r--r--  net/ipv4/udp.c | 4
-rw-r--r--  net/ipv6/addrconf.c | 2
-rw-r--r--  net/ipv6/ip6_gre.c | 1
-rw-r--r--  net/ipv6/netfilter/nf_nat_masquerade_ipv6.c | 74
-rw-r--r--  net/ipv6/tcp_ipv6.c | 14
-rw-r--r--  net/l2tp/l2tp_netlink.c | 18
-rw-r--r--  net/netfilter/Kconfig | 2
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 5
-rw-r--r--  net/netfilter/nfnetlink.c | 16
-rw-r--r--  net/netfilter/nfnetlink_cttimeout.c | 2
-rw-r--r--  net/netfilter/nft_counter.c | 4
-rw-r--r--  net/netfilter/xt_TEE.c | 4
-rw-r--r--  net/openvswitch/vport-vxlan.c | 2
-rw-r--r--  net/sched/sch_api.c | 1
-rw-r--r--  net/sctp/protocol.c | 46
-rw-r--r--  net/tipc/link.c | 4
-rw-r--r--  net/tipc/node.c | 12
-rw-r--r--  net/unix/af_unix.c | 26
-rw-r--r--  net/unix/diag.c | 2
-rw-r--r--  net/vmw_vsock/af_vsock.c | 19
124 files changed, 3110 insertions(+), 2240 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index 81a9f9e6b45f..c8ac222eac67 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -82,8 +82,8 @@ Example:
 			  "ch16", "ch17", "ch18", "ch19",
 			  "ch20", "ch21", "ch22", "ch23",
 			  "ch24";
-	clocks = <&mstp8_clks R8A7795_CLK_ETHERAVB>;
-	power-domains = <&cpg_clocks>;
+	clocks = <&cpg CPG_MOD 812>;
+	power-domains = <&cpg>;
 	phy-mode = "rgmii-id";
 	phy-handle = <&phy0>;
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 4978dc19a4d2..2fa3303ea625 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12013,7 +12013,6 @@ F: arch/arm64/xen/
 F:	arch/arm64/include/asm/xen/
 
 XEN NETWORK BACKEND DRIVER
-M:	Ian Campbell <ian.campbell@citrix.com>
 M:	Wei Liu <wei.liu2@citrix.com>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:	netdev@vger.kernel.org
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index bc5536f00b6c..fd97534762b8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1681,9 +1681,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	}
 
 	if (qp->ibqp.uobject)
-		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
+		context->usr_page = cpu_to_be32(
+			mlx4_to_hw_uar_index(dev->dev,
+				to_mucontext(ibqp->uobject->context)->uar.index));
 	else
-		context->usr_page = cpu_to_be32(dev->priv_uar.index);
+		context->usr_page = cpu_to_be32(
+			mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
 
 	if (attr_mask & IB_QP_DEST_QPN)
 		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 2a506fe0c8a4..d1f8ab915b15 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -373,13 +373,7 @@ static void gigaset_freecshw(struct cardstate *cs)
 
 static void gigaset_device_release(struct device *dev)
 {
-	struct cardstate *cs = dev_get_drvdata(dev);
-
-	if (!cs)
-		return;
-	dev_set_drvdata(dev, NULL);
-	kfree(cs->hw.ser);
-	cs->hw.ser = NULL;
+	kfree(container_of(dev, struct ser_cardstate, dev.dev));
 }
 
 /*
@@ -408,7 +402,6 @@ static int gigaset_initcshw(struct cardstate *cs)
 		cs->hw.ser = NULL;
 		return rc;
 	}
-	dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
 
 	tasklet_init(&cs->write_tasklet,
 		     gigaset_modem_fill, (unsigned long) cs);
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 8e2944784e00..afde4edef9ae 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -392,7 +392,7 @@ read_dma(struct tiger_ch *bc, u32 idx, int cnt)
 	}
 	stat = bchannel_get_rxbuf(&bc->bch, cnt);
 	/* only transparent use the count here, HDLC overun is detected later */
-	if (stat == ENOMEM) {
+	if (stat == -ENOMEM) {
 		pr_warning("%s.B%d: No memory for %d bytes\n",
 			   card->name, bc->bch.nr, cnt);
 		return;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 56b560558884..b7f1a9919033 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -214,6 +214,8 @@ static void bond_uninit(struct net_device *bond_dev);
 static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
 						struct rtnl_link_stats64 *stats);
 static void bond_slave_arr_handler(struct work_struct *work);
+static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
+				  int mod);
 
 /*---------------------------- General routines -----------------------------*/
 
@@ -2127,6 +2129,7 @@ static void bond_miimon_commit(struct bonding *bond)
 			continue;
 
 		case BOND_LINK_UP:
+			bond_update_speed_duplex(slave);
 			bond_set_slave_link_state(slave, BOND_LINK_UP,
 						  BOND_SLAVE_NOTIFY_NOW);
 			slave->last_link_up = jiffies;
@@ -2459,7 +2462,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
 		 struct slave *slave)
 {
 	struct arphdr *arp = (struct arphdr *)skb->data;
-	struct slave *curr_active_slave;
+	struct slave *curr_active_slave, *curr_arp_slave;
 	unsigned char *arp_ptr;
 	__be32 sip, tip;
 	int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
@@ -2506,26 +2509,41 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
 		  &sip, &tip);
 
 	curr_active_slave = rcu_dereference(bond->curr_active_slave);
+	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
 
-	/* Backup slaves won't see the ARP reply, but do come through
-	 * here for each ARP probe (so we swap the sip/tip to validate
-	 * the probe). In a "redundant switch, common router" type of
-	 * configuration, the ARP probe will (hopefully) travel from
-	 * the active, through one switch, the router, then the other
-	 * switch before reaching the backup.
+	/* We 'trust' the received ARP enough to validate it if:
+	 *
+	 * (a) the slave receiving the ARP is active (which includes the
+	 * current ARP slave, if any), or
+	 *
+	 * (b) the receiving slave isn't active, but there is a currently
+	 * active slave and it received valid arp reply(s) after it became
+	 * the currently active slave, or
+	 *
+	 * (c) there is an ARP slave that sent an ARP during the prior ARP
+	 * interval, and we receive an ARP reply on any slave. We accept
+	 * these because switch FDB update delays may deliver the ARP
+	 * reply to a slave other than the sender of the ARP request.
 	 *
-	 * We 'trust' the arp requests if there is an active slave and
-	 * it received valid arp reply(s) after it became active. This
-	 * is done to avoid endless looping when we can't reach the
+	 * Note: for (b), backup slaves are receiving the broadcast ARP
+	 * request, not a reply. This request passes from the sending
+	 * slave through the L2 switch(es) to the receiving slave. Since
+	 * this is checking the request, sip/tip are swapped for
+	 * validation.
+	 *
+	 * This is done to avoid endless looping when we can't reach the
 	 * arp_ip_target and fool ourselves with our own arp requests.
 	 */
-
 	if (bond_is_active_slave(slave))
 		bond_validate_arp(bond, slave, sip, tip);
 	else if (curr_active_slave &&
 		 time_after(slave_last_rx(bond, curr_active_slave),
 			    curr_active_slave->last_link_up))
 		bond_validate_arp(bond, slave, tip, sip);
+	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
+		 bond_time_in_interval(bond,
+				       dev_trans_start(curr_arp_slave->dev), 1))
+		bond_validate_arp(bond, slave, sip, tip);
 
 out_unlock:
 	if (arp != (struct arphdr *)skb->data)
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index fc5b75675cd8..eb7192fab593 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
  */
 #define EMS_USB_ARM7_CLOCK 8000000
 
+#define CPC_TX_QUEUE_TRIGGER_LOW	25
+#define CPC_TX_QUEUE_TRIGGER_HIGH	35
+
 /*
  * CAN-Message representation in a CPC_MSG. Message object type is
  * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
 	switch (urb->status) {
 	case 0:
 		dev->free_slots = dev->intr_in_buffer[1];
+		if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){
+			if (netif_queue_stopped(netdev)){
+				netif_wake_queue(netdev);
+			}
+		}
 		break;
 
 	case -ECONNRESET: /* unlink */
@@ -526,8 +534,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
 	/* Release context */
 	context->echo_index = MAX_TX_URBS;
 
-	if (netif_queue_stopped(netdev))
-		netif_wake_queue(netdev);
 }
 
 /*
@@ -587,7 +593,7 @@ static int ems_usb_start(struct ems_usb *dev)
 	int err, i;
 
 	dev->intr_in_buffer[0] = 0;
-	dev->free_slots = 15; /* initial size */
+	dev->free_slots = 50; /* initial size */
 
 	for (i = 0; i < MAX_RX_URBS; i++) {
 		struct urb *urb = NULL;
@@ -835,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
 
 		/* Slow down tx path */
 		if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
-		    dev->free_slots < 5) {
+		    dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
 			netif_stop_queue(netdev);
 		}
 	}
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index cc6c54553418..a47f52f44b0d 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -25,6 +25,7 @@
 static const struct mv88e6xxx_switch_id mv88e6352_table[] = {
 	{ PORT_SWITCH_ID_6172, "Marvell 88E6172" },
 	{ PORT_SWITCH_ID_6176, "Marvell 88E6176" },
+	{ PORT_SWITCH_ID_6240, "Marvell 88E6240" },
 	{ PORT_SWITCH_ID_6320, "Marvell 88E6320" },
 	{ PORT_SWITCH_ID_6320_A1, "Marvell 88E6320 (A1)" },
 	{ PORT_SWITCH_ID_6320_A2, "Marvell 88e6320 (A2)" },
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index cf34681af4f6..512c8c0be1b4 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1555,7 +1555,7 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
 
 	if (vlan.vid != vid || !vlan.valid ||
 	    vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
-		return -ENOENT;
+		return -EOPNOTSUPP;
 
 	vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
 
@@ -1582,6 +1582,7 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
 			    const struct switchdev_obj_port_vlan *vlan)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	const u16 defpvid = 4000 + ds->index * DSA_MAX_PORTS + port;
 	u16 pvid, vid;
 	int err = 0;
 
@@ -1597,7 +1598,8 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
 			goto unlock;
 
 		if (vid == pvid) {
-			err = _mv88e6xxx_port_pvid_set(ds, port, 0);
+			/* restore reserved VLAN ID */
+			err = _mv88e6xxx_port_pvid_set(ds, port, defpvid);
 			if (err)
 				goto unlock;
 		}
@@ -1889,26 +1891,20 @@ unlock:
 
 int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
-	int err;
-
-	/* The port joined a bridge, so leave its reserved VLAN */
-	mutex_lock(&ps->smi_mutex);
-	err = _mv88e6xxx_port_vlan_del(ds, port, pvid);
-	if (!err)
-		err = _mv88e6xxx_port_pvid_set(ds, port, 0);
-	mutex_unlock(&ps->smi_mutex);
-	return err;
+	return 0;
 }
 
 int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members)
 {
+	return 0;
+}
+
+static int mv88e6xxx_setup_port_default_vlan(struct dsa_switch *ds, int port)
+{
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
 	int err;
 
-	/* The port left the bridge, so join its reserved VLAN */
 	mutex_lock(&ps->smi_mutex);
 	err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true);
 	if (!err)
@@ -2192,8 +2188,7 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds)
 		if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
 			continue;
 
-		/* setup the unbridged state */
-		ret = mv88e6xxx_port_bridge_leave(ds, i, 0);
+		ret = mv88e6xxx_setup_port_default_vlan(ds, i);
 		if (ret < 0)
 			return ret;
 	}
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 2777289a26c0..2f79d29f17f2 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -1501,6 +1501,7 @@ static const struct pcmcia_device_id pcnet_ids[] = {
 	PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a),
 	PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103),
 	PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121),
+	PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0009),
 	PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941),
 	PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e),
 	PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b),
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 3f3bcbea15bd..0907ab6ff309 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2380,7 +2380,7 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 					    sizeof(u32),
 					    &tx_ring->tx_status_pa,
 					    GFP_KERNEL);
-	if (!tx_ring->tx_status_pa) {
+	if (!tx_ring->tx_status) {
 		dev_err(&adapter->pdev->dev,
 			"Cannot alloc memory for Tx status block\n");
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 87e727b921dc..fcdf5dda448f 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -50,8 +50,8 @@ static const char version[] =
 static void write_rreg(u_long base, u_int reg, u_int val)
 {
 	asm volatile(
-	"str%?h	%1, [%2]	@ NET_RAP\n\t"
-	"str%?h	%0, [%2, #-4]	@ NET_RDP"
+	"strh	%1, [%2]	@ NET_RAP\n\t"
+	"strh	%0, [%2, #-4]	@ NET_RDP"
 	:
 	: "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
 }
@@ -60,8 +60,8 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
 {
 	unsigned short v;
 	asm volatile(
-	"str%?h	%1, [%2]	@ NET_RAP\n\t"
-	"ldr%?h	%0, [%2, #-4]	@ NET_RDP"
+	"strh	%1, [%2]	@ NET_RAP\n\t"
+	"ldrh	%0, [%2, #-4]	@ NET_RDP"
 	: "=r" (v)
 	: "r" (reg), "r" (ISAIO_BASE + 0x0464));
 	return v;
@@ -70,8 +70,8 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
 static inline void write_ireg(u_long base, u_int reg, u_int val)
 {
 	asm volatile(
-	"str%?h	%1, [%2]	@ NET_RAP\n\t"
-	"str%?h	%0, [%2, #8]	@ NET_IDP"
+	"strh	%1, [%2]	@ NET_RAP\n\t"
+	"strh	%0, [%2, #8]	@ NET_IDP"
 	:
 	: "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
 }
@@ -80,8 +80,8 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg)
 {
 	u_short v;
 	asm volatile(
-	"str%?h	%1, [%2]	@ NAT_RAP\n\t"
-	"ldr%?h	%0, [%2, #8]	@ NET_IDP\n\t"
+	"strh	%1, [%2]	@ NAT_RAP\n\t"
+	"ldrh	%0, [%2, #8]	@ NET_IDP\n\t"
 	: "=r" (v)
 	: "r" (reg), "r" (ISAIO_BASE + 0x0464));
 	return v;
@@ -96,7 +96,7 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne
 	offset = ISAMEM_BASE + (offset << 1);
 	length = (length + 1) & ~1;
 	if ((int)buf & 2) {
-		asm volatile("str%?h	%2, [%0], #4"
+		asm volatile("strh	%2, [%0], #4"
 		 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
 		buf += 2;
 		length -= 2;
@@ -104,20 +104,20 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne
 	while (length > 8) {
 		register unsigned int tmp asm("r2"), tmp2 asm("r3");
 		asm volatile(
-			"ldm%?ia	%0!, {%1, %2}"
+			"ldmia	%0!, {%1, %2}"
 			: "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
 		length -= 8;
 		asm volatile(
-			"str%?h	%1, [%0], #4\n\t"
-			"mov%?	%1, %1, lsr #16\n\t"
-			"str%?h	%1, [%0], #4\n\t"
-			"str%?h	%2, [%0], #4\n\t"
-			"mov%?	%2, %2, lsr #16\n\t"
-			"str%?h	%2, [%0], #4"
+			"strh	%1, [%0], #4\n\t"
+			"mov	%1, %1, lsr #16\n\t"
+			"strh	%1, [%0], #4\n\t"
+			"strh	%2, [%0], #4\n\t"
+			"mov	%2, %2, lsr #16\n\t"
+			"strh	%2, [%0], #4"
 			: "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
 	}
 	while (length > 0) {
-		asm volatile("str%?h	%2, [%0], #4"
+		asm volatile("strh	%2, [%0], #4"
 		 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
 		buf += 2;
 		length -= 2;
@@ -132,23 +132,23 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
 	if ((int)buf & 2) {
 		unsigned int tmp;
 		asm volatile(
-			"ldr%?h	%2, [%0], #4\n\t"
-			"str%?b	%2, [%1], #1\n\t"
-			"mov%?	%2, %2, lsr #8\n\t"
-			"str%?b	%2, [%1], #1"
+			"ldrh	%2, [%0], #4\n\t"
+			"strb	%2, [%1], #1\n\t"
+			"mov	%2, %2, lsr #8\n\t"
+			"strb	%2, [%1], #1"
 			: "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
 		length -= 2;
 	}
 	while (length > 8) {
 		register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
 		asm volatile(
-			"ldr%?h	%2, [%0], #4\n\t"
-			"ldr%?h	%4, [%0], #4\n\t"
-			"ldr%?h	%3, [%0], #4\n\t"
-			"orr%?	%2, %2, %4, lsl #16\n\t"
-			"ldr%?h	%4, [%0], #4\n\t"
-			"orr%?	%3, %3, %4, lsl #16\n\t"
-			"stm%?ia	%1!, {%2, %3}"
+			"ldrh	%2, [%0], #4\n\t"
+			"ldrh	%4, [%0], #4\n\t"
+			"ldrh	%3, [%0], #4\n\t"
+			"orr	%2, %2, %4, lsl #16\n\t"
+			"ldrh	%4, [%0], #4\n\t"
+			"orr	%3, %3, %4, lsl #16\n\t"
+			"stmia	%1!, {%2, %3}"
 			: "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
 			: "0" (offset), "1" (buf));
 		length -= 8;
@@ -156,10 +156,10 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
 	while (length > 0) {
 		unsigned int tmp;
 		asm volatile(
-			"ldr%?h	%2, [%0], #4\n\t"
-			"str%?b	%2, [%1], #1\n\t"
-			"mov%?	%2, %2, lsr #8\n\t"
-			"str%?b	%2, [%1], #1"
+			"ldrh	%2, [%0], #4\n\t"
+			"strb	%2, [%1], #1\n\t"
+			"mov	%2, %2, lsr #8\n\t"
+			"strb	%2, [%1], #1"
 			: "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
 		length -= 2;
 	}
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 256f590f6bb1..3a7ebfdda57d 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -547,8 +547,8 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
 	/* Make certain the data structures used by the LANCE are aligned and DMAble. */
 
 	lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
-	if(lp==NULL)
-		return -ENODEV;
+	if (!lp)
+		return -ENOMEM;
 	if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
 	dev->ml_priv = lp;
 	lp->name = chipname;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index abe1eabc0171..6446af1403f7 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -163,7 +163,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 		struct sk_buff *skb = tx_buff->skb;
 		unsigned int info = le32_to_cpu(txbd->info);
 
-		if ((info & FOR_EMAC) || !txbd->data)
+		if ((info & FOR_EMAC) || !txbd->data || !skb)
 			break;
 
 		if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
@@ -191,6 +191,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 
 		txbd->data = 0;
 		txbd->info = 0;
+		tx_buff->skb = NULL;
 
 		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
 	}
@@ -446,6 +447,9 @@ static int arc_emac_open(struct net_device *ndev)
 		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
 	}
 
+	priv->txbd_curr = 0;
+	priv->txbd_dirty = 0;
+
 	/* Clean Tx BD's */
 	memset(priv->txbd, 0, TX_RING_SZ);
 
@@ -514,6 +518,64 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
 }
 
 /**
+ * arc_free_tx_queue - free skb from tx queue
+ * @ndev: Pointer to the network device.
+ *
+ * This function must be called while EMAC disable
+ */
+static void arc_free_tx_queue(struct net_device *ndev)
+{
+	struct arc_emac_priv *priv = netdev_priv(ndev);
+	unsigned int i;
+
+	for (i = 0; i < TX_BD_NUM; i++) {
+		struct arc_emac_bd *txbd = &priv->txbd[i];
+		struct buffer_state *tx_buff = &priv->tx_buff[i];
+
+		if (tx_buff->skb) {
+			dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
+					 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
+
+			/* return the sk_buff to system */
+			dev_kfree_skb_irq(tx_buff->skb);
+		}
+
+		txbd->info = 0;
+		txbd->data = 0;
+		tx_buff->skb = NULL;
+	}
+}
+
+/**
+ * arc_free_rx_queue - free skb from rx queue
+ * @ndev: Pointer to the network device.
+ *
+ * This function must be called while EMAC disable
+ */
+static void arc_free_rx_queue(struct net_device *ndev)
+{
+	struct arc_emac_priv *priv = netdev_priv(ndev);
+	unsigned int i;
+
+	for (i = 0; i < RX_BD_NUM; i++) {
+		struct arc_emac_bd *rxbd = &priv->rxbd[i];
+		struct buffer_state *rx_buff = &priv->rx_buff[i];
+
+		if (rx_buff->skb) {
+			dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+					 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+
+			/* return the sk_buff to system */
+			dev_kfree_skb_irq(rx_buff->skb);
+		}
+
+		rxbd->info = 0;
+		rxbd->data = 0;
+		rx_buff->skb = NULL;
+	}
+}
+
+/**
  * arc_emac_stop - Close the network device.
  * @ndev: Pointer to the network device.
  *
@@ -534,6 +596,10 @@ static int arc_emac_stop(struct net_device *ndev)
 	/* Disable EMAC */
 	arc_reg_clr(priv, R_CTRL, EN_MASK);
 
+	/* Return the sk_buff to system */
+	arc_free_tx_queue(ndev);
+	arc_free_rx_queue(ndev);
+
 	return 0;
 }
 
@@ -610,7 +676,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
 	dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);
 
-	priv->tx_buff[*txbd_curr].skb = skb;
 	priv->txbd[*txbd_curr].data = cpu_to_le32(addr);
 
 	/* Make sure pointer to data buffer is set */
@@ -620,6 +685,11 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
 	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
 
+	/* Make sure info word is set */
+	wmb();
+
+	priv->tx_buff[*txbd_curr].skb = skb;
+
 	/* Increment index to point to the next BD */
 	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d946bba43726..1fb80100e5e7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6185,26 +6185,80 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
6185 shift -= 4; 6185 shift -= 4;
6186 digit = ((num & mask) >> shift); 6186 digit = ((num & mask) >> shift);
6187 if (digit == 0 && remove_leading_zeros) { 6187 if (digit == 0 && remove_leading_zeros) {
6188 mask = mask >> 4; 6188 *str_ptr = '0';
6189 continue; 6189 } else {
6190 } else if (digit < 0xa) 6190 if (digit < 0xa)
6191 *str_ptr = digit + '0'; 6191 *str_ptr = digit + '0';
6192 else 6192 else
6193 *str_ptr = digit - 0xa + 'a'; 6193 *str_ptr = digit - 0xa + 'a';
6194 remove_leading_zeros = 0; 6194
6195 str_ptr++; 6195 remove_leading_zeros = 0;
6196 (*len)--; 6196 str_ptr++;
6197 (*len)--;
6198 }
6197 mask = mask >> 4; 6199 mask = mask >> 4;
6198 if (shift == 4*4) { 6200 if (shift == 4*4) {
6201 if (remove_leading_zeros) {
6202 str_ptr++;
6203 (*len)--;
6204 }
6199 *str_ptr = '.'; 6205 *str_ptr = '.';
6200 str_ptr++; 6206 str_ptr++;
6201 (*len)--; 6207 (*len)--;
6202 remove_leading_zeros = 1; 6208 remove_leading_zeros = 1;
6203 } 6209 }
6204 } 6210 }
6211 if (remove_leading_zeros)
6212 (*len)--;
6205 return 0; 6213 return 0;
6206} 6214}
6207 6215
6216static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
6217{
6218 u8 *str_ptr = str;
6219 u32 mask = 0x00f00000;
6220 u8 shift = 8*3;
6221 u8 digit;
6222 u8 remove_leading_zeros = 1;
6223
6224 if (*len < 10) {
6225 /* Need more than 10chars for this format */
6226 *str_ptr = '\0';
6227 (*len)--;
6228 return -EINVAL;
6229 }
6230
6231 while (shift > 0) {
6232 shift -= 4;
6233 digit = ((num & mask) >> shift);
6234 if (digit == 0 && remove_leading_zeros) {
6235 *str_ptr = '0';
6236 } else {
6237 if (digit < 0xa)
6238 *str_ptr = digit + '0';
6239 else
6240 *str_ptr = digit - 0xa + 'a';
6241
6242 remove_leading_zeros = 0;
6243 str_ptr++;
6244 (*len)--;
6245 }
6246 mask = mask >> 4;
6247 if ((shift == 4*4) || (shift == 4*2)) {
6248 if (remove_leading_zeros) {
6249 str_ptr++;
6250 (*len)--;
6251 }
6252 *str_ptr = '.';
6253 str_ptr++;
6254 (*len)--;
6255 remove_leading_zeros = 1;
6256 }
6257 }
6258 if (remove_leading_zeros)
6259 (*len)--;
6260 return 0;
6261}
6208 6262
6209static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) 6263static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
6210{ 6264{
@@ -9677,8 +9731,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
9677 9731
9678 if (bnx2x_is_8483x_8485x(phy)) { 9732 if (bnx2x_is_8483x_8485x(phy)) {
9679 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); 9733 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
9680 bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, 9734 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
9681 phy->ver_addr); 9735 fw_ver1 &= 0xfff;
9736 bnx2x_save_spirom_version(bp, port, fw_ver1, phy->ver_addr);
9682 } else { 9737 } else {
9683 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ 9738 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
9684 /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ 9739 /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
@@ -9732,16 +9787,32 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
9732static void bnx2x_848xx_set_led(struct bnx2x *bp, 9787static void bnx2x_848xx_set_led(struct bnx2x *bp,
9733 struct bnx2x_phy *phy) 9788 struct bnx2x_phy *phy)
9734{ 9789{
9735 u16 val, offset, i; 9790 u16 val, led3_blink_rate, offset, i;
9736 static struct bnx2x_reg_set reg_set[] = { 9791 static struct bnx2x_reg_set reg_set[] = {
9737 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080}, 9792 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080},
9738 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018}, 9793 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018},
9739 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006}, 9794 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006},
9740 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000},
9741 {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH, 9795 {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
9742 MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ}, 9796 MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ},
9743 {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD} 9797 {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD}
9744 }; 9798 };
9799
9800 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
9801 /* Set LED5 source */
9802 bnx2x_cl45_write(bp, phy,
9803 MDIO_PMA_DEVAD,
9804 MDIO_PMA_REG_8481_LED5_MASK,
9805 0x90);
9806 led3_blink_rate = 0x000f;
9807 } else {
9808 led3_blink_rate = 0x0000;
9809 }
9810 /* Set LED3 BLINK */
9811 bnx2x_cl45_write(bp, phy,
9812 MDIO_PMA_DEVAD,
9813 MDIO_PMA_REG_8481_LED3_BLINK,
9814 led3_blink_rate);
9815
9745 /* PHYC_CTL_LED_CTL */ 9816 /* PHYC_CTL_LED_CTL */
9746 bnx2x_cl45_read(bp, phy, 9817 bnx2x_cl45_read(bp, phy,
9747 MDIO_PMA_DEVAD, 9818 MDIO_PMA_DEVAD,
@@ -9749,6 +9820,9 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
9749 val &= 0xFE00; 9820 val &= 0xFE00;
9750 val |= 0x0092; 9821 val |= 0x0092;
9751 9822
9823 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
9824 val |= 2 << 12; /* LED5 ON based on source */
9825
9752 bnx2x_cl45_write(bp, phy, 9826 bnx2x_cl45_write(bp, phy,
9753 MDIO_PMA_DEVAD, 9827 MDIO_PMA_DEVAD,
9754 MDIO_PMA_REG_8481_LINK_SIGNAL, val); 9828 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
@@ -9762,10 +9836,17 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
9762 else 9836 else
9763 offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; 9837 offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
9764 9838
9765 /* stretch_en for LED3*/ 9839 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
9840 val = MDIO_PMA_REG_84858_ALLOW_GPHY_ACT |
9841 MDIO_PMA_REG_84823_LED3_STRETCH_EN;
9842 else
9843 val = MDIO_PMA_REG_84823_LED3_STRETCH_EN;
9844
9845 /* stretch_en for LEDs */
9766 bnx2x_cl45_read_or_write(bp, phy, 9846 bnx2x_cl45_read_or_write(bp, phy,
9767 MDIO_PMA_DEVAD, offset, 9847 MDIO_PMA_DEVAD,
9768 MDIO_PMA_REG_84823_LED3_STRETCH_EN); 9848 offset,
9849 val);
9769} 9850}
9770 9851
9771static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, 9852static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
@@ -9775,7 +9856,7 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
9775 struct bnx2x *bp = params->bp; 9856 struct bnx2x *bp = params->bp;
9776 switch (action) { 9857 switch (action) {
9777 case PHY_INIT: 9858 case PHY_INIT:
9778 if (!bnx2x_is_8483x_8485x(phy)) { 9859 if (bnx2x_is_8483x_8485x(phy)) {
9779 /* Save spirom version */ 9860 /* Save spirom version */
9780 bnx2x_save_848xx_spirom_version(phy, bp, params->port); 9861 bnx2x_save_848xx_spirom_version(phy, bp, params->port);
9781 } 9862 }
@@ -10036,15 +10117,20 @@ static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy,
10036 10117
10037static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, 10118static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
10038 struct link_params *params, u16 fw_cmd, 10119 struct link_params *params, u16 fw_cmd,
10039 u16 cmd_args[], int argc) 10120 u16 cmd_args[], int argc, int process)
10040{ 10121{
10041 int idx; 10122 int idx;
10042 u16 val; 10123 u16 val;
10043 struct bnx2x *bp = params->bp; 10124 struct bnx2x *bp = params->bp;
10044 /* Write CMD_OPEN_OVERRIDE to STATUS reg */ 10125 int rc = 0;
10045 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10126
10046 MDIO_848xx_CMD_HDLR_STATUS, 10127 if (process == PHY84833_MB_PROCESS2) {
10047 PHY84833_STATUS_CMD_OPEN_OVERRIDE); 10128 /* Write CMD_OPEN_OVERRIDE to STATUS reg */
10129 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10130 MDIO_848xx_CMD_HDLR_STATUS,
10131 PHY84833_STATUS_CMD_OPEN_OVERRIDE);
10132 }
10133
10048 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { 10134 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
10049 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 10135 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
10050 MDIO_848xx_CMD_HDLR_STATUS, &val); 10136 MDIO_848xx_CMD_HDLR_STATUS, &val);
@@ -10054,15 +10140,27 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
10054 } 10140 }
10055 if (idx >= PHY848xx_CMDHDLR_WAIT) { 10141 if (idx >= PHY848xx_CMDHDLR_WAIT) {
10056 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); 10142 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
10143 /* if the status is CMD_COMPLETE_PASS or CMD_COMPLETE_ERROR
10144 * clear the status to CMD_CLEAR_COMPLETE
10145 */
10146 if (val == PHY84833_STATUS_CMD_COMPLETE_PASS ||
10147 val == PHY84833_STATUS_CMD_COMPLETE_ERROR) {
10148 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10149 MDIO_848xx_CMD_HDLR_STATUS,
10150 PHY84833_STATUS_CMD_CLEAR_COMPLETE);
10151 }
10057 return -EINVAL; 10152 return -EINVAL;
10058 } 10153 }
10059 10154 if (process == PHY84833_MB_PROCESS1 ||
10060 /* Prepare argument(s) and issue command */ 10155 process == PHY84833_MB_PROCESS2) {
10061 for (idx = 0; idx < argc; idx++) { 10156 /* Prepare argument(s) */
10062 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10157 for (idx = 0; idx < argc; idx++) {
10063 MDIO_848xx_CMD_HDLR_DATA1 + idx, 10158 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10064 cmd_args[idx]); 10159 MDIO_848xx_CMD_HDLR_DATA1 + idx,
10160 cmd_args[idx]);
10161 }
10065 } 10162 }
10163
10066 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10164 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10067 MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd); 10165 MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
10068 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { 10166 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
@@ -10076,24 +10174,30 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
10076 if ((idx >= PHY848xx_CMDHDLR_WAIT) || 10174 if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
10077 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { 10175 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
10078 DP(NETIF_MSG_LINK, "FW cmd failed.\n"); 10176 DP(NETIF_MSG_LINK, "FW cmd failed.\n");
10079 return -EINVAL; 10177 rc = -EINVAL;
10080 } 10178 }
10081 /* Gather returning data */ 10179 if (process == PHY84833_MB_PROCESS3 && rc == 0) {
10082 for (idx = 0; idx < argc; idx++) { 10180 /* Gather returning data */
10083 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 10181 for (idx = 0; idx < argc; idx++) {
10084 MDIO_848xx_CMD_HDLR_DATA1 + idx, 10182 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
10085 &cmd_args[idx]); 10183 MDIO_848xx_CMD_HDLR_DATA1 + idx,
10184 &cmd_args[idx]);
10185 }
10086 } 10186 }
10087 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10187 if (val == PHY84833_STATUS_CMD_COMPLETE_ERROR ||
10088 MDIO_848xx_CMD_HDLR_STATUS, 10188 val == PHY84833_STATUS_CMD_COMPLETE_PASS) {
10089 PHY84833_STATUS_CMD_CLEAR_COMPLETE); 10189 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10090 return 0; 10190 MDIO_848xx_CMD_HDLR_STATUS,
10191 PHY84833_STATUS_CMD_CLEAR_COMPLETE);
10192 }
10193 return rc;
10091} 10194}
10092 10195
10093static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy, 10196static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
10094 struct link_params *params, 10197 struct link_params *params,
10095 u16 fw_cmd, 10198 u16 fw_cmd,
10096 u16 cmd_args[], int argc) 10199 u16 cmd_args[], int argc,
10200 int process)
10097{ 10201{
10098 struct bnx2x *bp = params->bp; 10202 struct bnx2x *bp = params->bp;
10099 10203
@@ -10106,7 +10210,7 @@ static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
10106 argc); 10210 argc);
10107 } else { 10211 } else {
10108 return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args, 10212 return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
10109 argc); 10213 argc, process);
10110 } 10214 }
10111} 10215}
10112 10216
@@ -10133,7 +10237,7 @@ static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy,
10133 10237
10134 status = bnx2x_848xx_cmd_hdlr(phy, params, 10238 status = bnx2x_848xx_cmd_hdlr(phy, params,
10135 PHY848xx_CMD_SET_PAIR_SWAP, data, 10239 PHY848xx_CMD_SET_PAIR_SWAP, data,
10136 PHY848xx_CMDHDLR_MAX_ARGS); 10240 2, PHY84833_MB_PROCESS2);
10137 if (status == 0) 10241 if (status == 0)
10138 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); 10242 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
10139 10243
@@ -10222,8 +10326,8 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
10222 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); 10326 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
10223 10327
10224 /* Prevent Phy from working in EEE and advertising it */ 10328 /* Prevent Phy from working in EEE and advertising it */
10225 rc = bnx2x_848xx_cmd_hdlr(phy, params, 10329 rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
10226 PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); 10330 &cmd_args, 1, PHY84833_MB_PROCESS1);
10227 if (rc) { 10331 if (rc) {
10228 DP(NETIF_MSG_LINK, "EEE disable failed.\n"); 10332 DP(NETIF_MSG_LINK, "EEE disable failed.\n");
10229 return rc; 10333 return rc;
@@ -10240,8 +10344,8 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
10240 struct bnx2x *bp = params->bp; 10344 struct bnx2x *bp = params->bp;
10241 u16 cmd_args = 1; 10345 u16 cmd_args = 1;
10242 10346
10243 rc = bnx2x_848xx_cmd_hdlr(phy, params, 10347 rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
10244 PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); 10348 &cmd_args, 1, PHY84833_MB_PROCESS1);
10245 if (rc) { 10349 if (rc) {
10246 DP(NETIF_MSG_LINK, "EEE enable failed.\n"); 10350 DP(NETIF_MSG_LINK, "EEE enable failed.\n");
10247 return rc; 10351 return rc;
@@ -10362,7 +10466,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
10362 cmd_args[3] = PHY84833_CONSTANT_LATENCY; 10466 cmd_args[3] = PHY84833_CONSTANT_LATENCY;
10363 rc = bnx2x_848xx_cmd_hdlr(phy, params, 10467 rc = bnx2x_848xx_cmd_hdlr(phy, params,
10364 PHY848xx_CMD_SET_EEE_MODE, cmd_args, 10468 PHY848xx_CMD_SET_EEE_MODE, cmd_args,
10365 PHY848xx_CMDHDLR_MAX_ARGS); 10469 4, PHY84833_MB_PROCESS1);
10366 if (rc) 10470 if (rc)
10367 DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); 10471 DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
10368 } 10472 }
@@ -10416,6 +10520,32 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
10416 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; 10520 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
10417 } 10521 }
10418 10522
10523 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
10524 /* Additional settings for jumbo packets in 1000BASE-T mode */
10525 /* Allow rx extended length */
10526 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10527 MDIO_AN_REG_8481_AUX_CTRL, &val);
10528 val |= 0x4000;
10529 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10530 MDIO_AN_REG_8481_AUX_CTRL, val);
10531 /* TX FIFO Elasticity LSB */
10532 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10533 MDIO_AN_REG_8481_1G_100T_EXT_CTRL, &val);
10534 val |= 0x1;
10535 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10536 MDIO_AN_REG_8481_1G_100T_EXT_CTRL, val);
10537 /* TX FIFO Elasticity MSB */
10538 /* Enable expansion register 0x46 (Pattern Generator status) */
10539 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10540 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf46);
10541
10542 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10543 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, &val);
10544 val |= 0x4000;
10545 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10546 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, val);
10547 }
10548
10419 if (bnx2x_is_8483x_8485x(phy)) { 10549 if (bnx2x_is_8483x_8485x(phy)) {
10420 /* Bring PHY out of super isolate mode as the final step. */ 10550 /* Bring PHY out of super isolate mode as the final step. */
10421 bnx2x_cl45_read_and_write(bp, phy, 10551 bnx2x_cl45_read_and_write(bp, phy,
@@ -10555,6 +10685,17 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
10555 return link_up; 10685 return link_up;
10556} 10686}
10557 10687
10688static int bnx2x_8485x_format_ver(u32 raw_ver, u8 *str, u16 *len)
10689{
10690 int status = 0;
10691 u32 num;
10692
10693 num = ((raw_ver & 0xF80) >> 7) << 16 | ((raw_ver & 0x7F) << 8) |
10694 ((raw_ver & 0xF000) >> 12);
10695 status = bnx2x_3_seq_format_ver(num, str, len);
10696 return status;
10697}
10698
10558static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) 10699static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
10559{ 10700{
10560 int status = 0; 10701 int status = 0;
@@ -10651,10 +10792,25 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10651 0x0); 10792 0x0);
10652 10793
10653 } else { 10794 } else {
10795 /* LED 1 OFF */
10654 bnx2x_cl45_write(bp, phy, 10796 bnx2x_cl45_write(bp, phy,
10655 MDIO_PMA_DEVAD, 10797 MDIO_PMA_DEVAD,
10656 MDIO_PMA_REG_8481_LED1_MASK, 10798 MDIO_PMA_REG_8481_LED1_MASK,
10657 0x0); 10799 0x0);
10800
10801 if (phy->type ==
10802 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
10803 /* LED 2 OFF */
10804 bnx2x_cl45_write(bp, phy,
10805 MDIO_PMA_DEVAD,
10806 MDIO_PMA_REG_8481_LED2_MASK,
10807 0x0);
10808 /* LED 3 OFF */
10809 bnx2x_cl45_write(bp, phy,
10810 MDIO_PMA_DEVAD,
10811 MDIO_PMA_REG_8481_LED3_MASK,
10812 0x0);
10813 }
10658 } 10814 }
10659 break; 10815 break;
10660 case LED_MODE_FRONT_PANEL_OFF: 10816 case LED_MODE_FRONT_PANEL_OFF:
@@ -10713,6 +10869,19 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10713 MDIO_PMA_REG_8481_SIGNAL_MASK, 10869 MDIO_PMA_REG_8481_SIGNAL_MASK,
10714 0x0); 10870 0x0);
10715 } 10871 }
10872 if (phy->type ==
10873 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
10874 /* LED 2 OFF */
10875 bnx2x_cl45_write(bp, phy,
10876 MDIO_PMA_DEVAD,
10877 MDIO_PMA_REG_8481_LED2_MASK,
10878 0x0);
10879 /* LED 3 OFF */
10880 bnx2x_cl45_write(bp, phy,
10881 MDIO_PMA_DEVAD,
10882 MDIO_PMA_REG_8481_LED3_MASK,
10883 0x0);
10884 }
10716 } 10885 }
10717 break; 10886 break;
10718 case LED_MODE_ON: 10887 case LED_MODE_ON:
@@ -10776,6 +10945,25 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10776 params->port*4, 10945 params->port*4,
10777 NIG_MASK_MI_INT); 10946 NIG_MASK_MI_INT);
10778 } 10947 }
10948 }
10949 if (phy->type ==
10950 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
10951 /* Tell LED3 to constant on */
10952 bnx2x_cl45_read(bp, phy,
10953 MDIO_PMA_DEVAD,
10954 MDIO_PMA_REG_8481_LINK_SIGNAL,
10955 &val);
10956 val &= ~(7<<6);
10957 val |= (2<<6); /* A83B[8:6]= 2 */
10958 bnx2x_cl45_write(bp, phy,
10959 MDIO_PMA_DEVAD,
10960 MDIO_PMA_REG_8481_LINK_SIGNAL,
10961 val);
10962 bnx2x_cl45_write(bp, phy,
10963 MDIO_PMA_DEVAD,
10964 MDIO_PMA_REG_8481_LED3_MASK,
10965 0x20);
10966 } else {
10779 bnx2x_cl45_write(bp, phy, 10967 bnx2x_cl45_write(bp, phy,
10780 MDIO_PMA_DEVAD, 10968 MDIO_PMA_DEVAD,
10781 MDIO_PMA_REG_8481_SIGNAL_MASK, 10969 MDIO_PMA_REG_8481_SIGNAL_MASK,
@@ -10854,6 +11042,17 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10854 MDIO_PMA_REG_8481_LINK_SIGNAL, 11042 MDIO_PMA_REG_8481_LINK_SIGNAL,
10855 val); 11043 val);
10856 if (phy->type == 11044 if (phy->type ==
11045 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
11046 bnx2x_cl45_write(bp, phy,
11047 MDIO_PMA_DEVAD,
11048 MDIO_PMA_REG_8481_LED2_MASK,
11049 0x18);
11050 bnx2x_cl45_write(bp, phy,
11051 MDIO_PMA_DEVAD,
11052 MDIO_PMA_REG_8481_LED3_MASK,
11053 0x06);
11054 }
11055 if (phy->type ==
10857 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { 11056 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
10858 /* Restore LED4 source to external link, 11057 /* Restore LED4 source to external link,
10859 * and re-enable interrupts. 11058 * and re-enable interrupts.
@@ -11982,7 +12181,7 @@ static const struct bnx2x_phy phy_84858 = {
11982 .read_status = (read_status_t)bnx2x_848xx_read_status, 12181 .read_status = (read_status_t)bnx2x_848xx_read_status,
11983 .link_reset = (link_reset_t)bnx2x_848x3_link_reset, 12182 .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
11984 .config_loopback = (config_loopback_t)NULL, 12183 .config_loopback = (config_loopback_t)NULL,
11985 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, 12184 .format_fw_ver = (format_fw_ver_t)bnx2x_8485x_format_ver,
11986 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, 12185 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
11987 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, 12186 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
11988 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func 12187 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
@@ -13807,8 +14006,10 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
13807 if (CHIP_IS_E3(bp)) { 14006 if (CHIP_IS_E3(bp)) {
13808 struct bnx2x_phy *phy = &params->phy[INT_PHY]; 14007 struct bnx2x_phy *phy = &params->phy[INT_PHY];
13809 bnx2x_set_aer_mmd(params, phy); 14008 bnx2x_set_aer_mmd(params, phy);
13810 if ((phy->supported & SUPPORTED_20000baseKR2_Full) && 14009 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
13811 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) 14010 (phy->speed_cap_mask &
14011 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
14012 (phy->req_line_speed == SPEED_20000))
13812 bnx2x_check_kr2_wa(params, vars, phy); 14013 bnx2x_check_kr2_wa(params, vars, phy);
13813 bnx2x_check_over_curr(params, vars); 14014 bnx2x_check_over_curr(params, vars);
13814 if (vars->rx_tx_asic_rst) 14015 if (vars->rx_tx_asic_rst)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 4dead49bd5cb..a43dea259b12 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7296,6 +7296,8 @@ Theotherbitsarereservedandshouldbezero*/
7296#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 7296#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
7297#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec 7297#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec
7298#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 7298#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
7299/* BCM84858 only */
7300#define MDIO_PMA_REG_84858_ALLOW_GPHY_ACT 0x8000
7299 7301
7300/* BCM84833 only */ 7302/* BCM84833 only */
7301#define MDIO_84833_TOP_CFG_FW_REV 0x400f 7303#define MDIO_84833_TOP_CFG_FW_REV 0x400f
@@ -7337,6 +7339,10 @@ The other bits are reserved and should be zero*/
7337#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040 7339#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
7338#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080 7340#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
7339#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5 7341#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
7342/* Mailbox Process */
7343#define PHY84833_MB_PROCESS1 1
7344#define PHY84833_MB_PROCESS2 2
7345#define PHY84833_MB_PROCESS3 3
7340 7346
7341/* Mailbox status set used by 84858 only */ 7347/* Mailbox status set used by 84858 only */
7342#define PHY84858_STATUS_CMD_RECEIVED 0x0001 7348#define PHY84858_STATUS_CMD_RECEIVED 0x0001
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5dc89e527e7d..8ab000dd52d9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -69,7 +69,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
69#define BNXT_RX_DMA_OFFSET NET_SKB_PAD 69#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
70#define BNXT_RX_COPY_THRESH 256 70#define BNXT_RX_COPY_THRESH 256
71 71
72#define BNXT_TX_PUSH_THRESH 92 72#define BNXT_TX_PUSH_THRESH 164
73 73
74enum board_idx { 74enum board_idx {
75 BCM57301, 75 BCM57301,
@@ -223,11 +223,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
223 } 223 }
224 224
225 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { 225 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
226 struct tx_push_bd *push = txr->tx_push; 226 struct tx_push_buffer *tx_push_buf = txr->tx_push;
227 struct tx_bd *tx_push = &push->txbd1; 227 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
228 struct tx_bd_ext *tx_push1 = &push->txbd2; 228 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
229 void *pdata = tx_push1 + 1; 229 void *pdata = tx_push_buf->data;
230 int j; 230 u64 *end;
231 int j, push_len;
231 232
232 /* Set COAL_NOW to be ready quickly for the next push */ 233 /* Set COAL_NOW to be ready quickly for the next push */
233 tx_push->tx_bd_len_flags_type = 234 tx_push->tx_bd_len_flags_type =
@@ -247,6 +248,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
247 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 248 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
248 tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action); 249 tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
249 250
251 end = PTR_ALIGN(pdata + length + 1, 8) - 1;
252 *end = 0;
253
250 skb_copy_from_linear_data(skb, pdata, len); 254 skb_copy_from_linear_data(skb, pdata, len);
251 pdata += len; 255 pdata += len;
252 for (j = 0; j < last_frag; j++) { 256 for (j = 0; j < last_frag; j++) {
@@ -261,22 +265,29 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
261 pdata += skb_frag_size(frag); 265 pdata += skb_frag_size(frag);
262 } 266 }
263 267
264 memcpy(txbd, tx_push, sizeof(*txbd)); 268 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
269 txbd->tx_bd_haddr = txr->data_mapping;
265 prod = NEXT_TX(prod); 270 prod = NEXT_TX(prod);
266 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 271 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
267 memcpy(txbd, tx_push1, sizeof(*txbd)); 272 memcpy(txbd, tx_push1, sizeof(*txbd));
268 prod = NEXT_TX(prod); 273 prod = NEXT_TX(prod);
269 push->doorbell = 274 tx_push->doorbell =
270 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); 275 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
271 txr->tx_prod = prod; 276 txr->tx_prod = prod;
272 277
273 netdev_tx_sent_queue(txq, skb->len); 278 netdev_tx_sent_queue(txq, skb->len);
274 279
275 __iowrite64_copy(txr->tx_doorbell, push, 280 push_len = (length + sizeof(*tx_push) + 7) / 8;
276 (length + sizeof(*push) + 8) / 8); 281 if (push_len > 16) {
282 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
283 __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
284 push_len - 16);
285 } else {
286 __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
287 push_len);
288 }
277 289
278 tx_buf->is_push = 1; 290 tx_buf->is_push = 1;
279
280 goto tx_done; 291 goto tx_done;
281 } 292 }
282 293
@@ -1753,7 +1764,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
1753 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 1764 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
1754 bp->tx_push_thresh); 1765 bp->tx_push_thresh);
1755 1766
1756 if (push_size > 128) { 1767 if (push_size > 256) {
1757 push_size = 0; 1768 push_size = 0;
1758 bp->tx_push_thresh = 0; 1769 bp->tx_push_thresh = 0;
1759 } 1770 }
@@ -1772,7 +1783,6 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
1772 return rc; 1783 return rc;
1773 1784
1774 if (bp->tx_push_size) { 1785 if (bp->tx_push_size) {
1775 struct tx_bd *txbd;
1776 dma_addr_t mapping; 1786 dma_addr_t mapping;
1777 1787
1778 /* One pre-allocated DMA buffer to backup 1788 /* One pre-allocated DMA buffer to backup
@@ -1786,13 +1796,11 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
1786 if (!txr->tx_push) 1796 if (!txr->tx_push)
1787 return -ENOMEM; 1797 return -ENOMEM;
1788 1798
1789 txbd = &txr->tx_push->txbd1;
1790
1791 mapping = txr->tx_push_mapping + 1799 mapping = txr->tx_push_mapping +
1792 sizeof(struct tx_push_bd); 1800 sizeof(struct tx_push_bd);
1793 txbd->tx_bd_haddr = cpu_to_le64(mapping); 1801 txr->data_mapping = cpu_to_le64(mapping);
1794 1802
1795 memset(txbd + 1, 0, sizeof(struct tx_bd_ext)); 1803 memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
1796 } 1804 }
1797 ring->queue_id = bp->q_info[j].queue_id; 1805 ring->queue_id = bp->q_info[j].queue_id;
1798 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 1806 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -4546,20 +4554,18 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
4546 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 4554 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4547 link_info->force_pause_setting != link_info->req_flow_ctrl) 4555 link_info->force_pause_setting != link_info->req_flow_ctrl)
4548 update_pause = true; 4556 update_pause = true;
4549 if (link_info->req_duplex != link_info->duplex_setting)
4550 update_link = true;
4551 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 4557 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4552 if (BNXT_AUTO_MODE(link_info->auto_mode)) 4558 if (BNXT_AUTO_MODE(link_info->auto_mode))
4553 update_link = true; 4559 update_link = true;
4554 if (link_info->req_link_speed != link_info->force_link_speed) 4560 if (link_info->req_link_speed != link_info->force_link_speed)
4555 update_link = true; 4561 update_link = true;
4562 if (link_info->req_duplex != link_info->duplex_setting)
4563 update_link = true;
4556 } else { 4564 } else {
4557 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 4565 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
4558 update_link = true; 4566 update_link = true;
4559 if (link_info->advertising != link_info->auto_link_speeds) 4567 if (link_info->advertising != link_info->auto_link_speeds)
4560 update_link = true; 4568 update_link = true;
4561 if (link_info->req_link_speed != link_info->auto_link_speed)
4562 update_link = true;
4563 } 4569 }
4564 4570
4565 if (update_link) 4571 if (update_link)
@@ -4636,7 +4642,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4636 if (link_re_init) { 4642 if (link_re_init) {
4637 rc = bnxt_update_phy_setting(bp); 4643 rc = bnxt_update_phy_setting(bp);
4638 if (rc) 4644 if (rc)
4639 goto open_err; 4645 netdev_warn(bp->dev, "failed to update phy settings\n");
4640 } 4646 }
4641 4647
4642 if (irq_re_init) { 4648 if (irq_re_init) {
@@ -4654,6 +4660,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4654 /* Enable TX queues */ 4660 /* Enable TX queues */
4655 bnxt_tx_enable(bp); 4661 bnxt_tx_enable(bp);
4656 mod_timer(&bp->timer, jiffies + bp->current_interval); 4662 mod_timer(&bp->timer, jiffies + bp->current_interval);
4663 bnxt_update_link(bp, true);
4657 4664
4658 return 0; 4665 return 0;
4659 4666
@@ -5670,22 +5677,16 @@ static int bnxt_probe_phy(struct bnxt *bp)
5670 } 5677 }
5671 5678
5672 /* initialize the ethtool setting copy with NVM settings */ 5679 /* initialize the ethtool setting copy with NVM settings */
5673 if (BNXT_AUTO_MODE(link_info->auto_mode)) 5680 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
5674 link_info->autoneg |= BNXT_AUTONEG_SPEED; 5681 link_info->autoneg = BNXT_AUTONEG_SPEED |
5675 5682 BNXT_AUTONEG_FLOW_CTRL;
5676 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { 5683 link_info->advertising = link_info->auto_link_speeds;
5677 if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
5678 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
5679 link_info->req_flow_ctrl = link_info->auto_pause_setting; 5684 link_info->req_flow_ctrl = link_info->auto_pause_setting;
5680 } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) { 5685 } else {
5686 link_info->req_link_speed = link_info->force_link_speed;
5687 link_info->req_duplex = link_info->duplex_setting;
5681 link_info->req_flow_ctrl = link_info->force_pause_setting; 5688 link_info->req_flow_ctrl = link_info->force_pause_setting;
5682 } 5689 }
5683 link_info->req_duplex = link_info->duplex_setting;
5684 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5685 link_info->req_link_speed = link_info->auto_link_speed;
5686 else
5687 link_info->req_link_speed = link_info->force_link_speed;
5688 link_info->advertising = link_info->auto_link_speeds;
5689 snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d", 5690 snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
5690 link_info->phy_ver[0], 5691 link_info->phy_ver[0],
5691 link_info->phy_ver[1], 5692 link_info->phy_ver[1],
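
The bnxt_start_xmit() changes above raise BNXT_TX_PUSH_THRESH to 164 bytes and split the doorbell copy once the pushed data exceeds 16 64-bit words. A small userspace sketch of that word-count arithmetic follows; push_copy() is an invented name, memcpy() stands in for __iowrite64_copy(), and the 28-byte push header size is assumed. The real driver writes to an MMIO doorbell window, which is not modelled here.

/* Sketch of the push length math used by the new TX push path. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PUSH_HDR_BYTES 28	/* assumed sizeof(struct tx_push_bd) for this sketch */

static void push_copy(uint64_t *doorbell, const uint64_t *buf, size_t length)
{
	/* Round header plus frame up to whole 64-bit words, mirroring
	 * (length + sizeof(*tx_push) + 7) / 8 in the diff.
	 */
	size_t push_len = (length + PUSH_HDR_BYTES + 7) / 8;

	if (push_len > 16) {
		memcpy(doorbell, buf, 16 * 8);
		/* remaining words, mirroring the second __iowrite64_copy() call */
		memcpy(doorbell + 16, buf + 16, (push_len - 16) * 8);
		printf("split copy: 16 + %zu words\n", push_len - 16);
	} else {
		memcpy(doorbell, buf, push_len * 8);
		printf("single copy: %zu words\n", push_len);
	}
}

int main(void)
{
	uint64_t doorbell[64] = { 0 }, buf[64] = { 0 };

	push_copy(doorbell, buf, 60);	/* small frame: single copy */
	push_copy(doorbell, buf, 164);	/* at the new push threshold: split copy */
	return 0;
}
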
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 8af3ca8efcef..2be51b332652 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -411,8 +411,8 @@ struct rx_tpa_end_cmp_ext {
411 411
412#define BNXT_NUM_TESTS(bp) 0 412#define BNXT_NUM_TESTS(bp) 0
413 413
414#define BNXT_DEFAULT_RX_RING_SIZE 1023 414#define BNXT_DEFAULT_RX_RING_SIZE 511
415#define BNXT_DEFAULT_TX_RING_SIZE 512 415#define BNXT_DEFAULT_TX_RING_SIZE 511
416 416
417#define MAX_TPA 64 417#define MAX_TPA 64
418 418
@@ -523,10 +523,16 @@ struct bnxt_ring_struct {
523 523
524struct tx_push_bd { 524struct tx_push_bd {
525 __le32 doorbell; 525 __le32 doorbell;
526 struct tx_bd txbd1; 526 __le32 tx_bd_len_flags_type;
527 u32 tx_bd_opaque;
527 struct tx_bd_ext txbd2; 528 struct tx_bd_ext txbd2;
528}; 529};
529 530
531struct tx_push_buffer {
532 struct tx_push_bd push_bd;
533 u32 data[25];
534};
535
530struct bnxt_tx_ring_info { 536struct bnxt_tx_ring_info {
531 struct bnxt_napi *bnapi; 537 struct bnxt_napi *bnapi;
532 u16 tx_prod; 538 u16 tx_prod;
@@ -538,8 +544,9 @@ struct bnxt_tx_ring_info {
538 544
539 dma_addr_t tx_desc_mapping[MAX_TX_PAGES]; 545 dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
540 546
541 struct tx_push_bd *tx_push; 547 struct tx_push_buffer *tx_push;
542 dma_addr_t tx_push_mapping; 548 dma_addr_t tx_push_mapping;
549 __le64 data_mapping;
543 550
544#define BNXT_DEV_STATE_CLOSING 0x1 551#define BNXT_DEV_STATE_CLOSING 0x1
545 u32 dev_state; 552 u32 dev_state;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 922b898e7a32..3238817dfd5f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -486,15 +486,8 @@ static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
486 speed_mask |= SUPPORTED_2500baseX_Full; 486 speed_mask |= SUPPORTED_2500baseX_Full;
487 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) 487 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
488 speed_mask |= SUPPORTED_10000baseT_Full; 488 speed_mask |= SUPPORTED_10000baseT_Full;
489 /* TODO: support 25GB, 50GB with different cable type */
490 if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
491 speed_mask |= SUPPORTED_20000baseMLD2_Full |
492 SUPPORTED_20000baseKR2_Full;
493 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) 489 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
494 speed_mask |= SUPPORTED_40000baseKR4_Full | 490 speed_mask |= SUPPORTED_40000baseCR4_Full;
495 SUPPORTED_40000baseCR4_Full |
496 SUPPORTED_40000baseSR4_Full |
497 SUPPORTED_40000baseLR4_Full;
498 491
499 return speed_mask; 492 return speed_mask;
500} 493}
@@ -514,15 +507,8 @@ static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
514 speed_mask |= ADVERTISED_2500baseX_Full; 507 speed_mask |= ADVERTISED_2500baseX_Full;
515 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) 508 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
516 speed_mask |= ADVERTISED_10000baseT_Full; 509 speed_mask |= ADVERTISED_10000baseT_Full;
517 /* TODO: how to advertise 20, 25, 40, 50GB with different cable type ?*/
518 if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
519 speed_mask |= ADVERTISED_20000baseMLD2_Full |
520 ADVERTISED_20000baseKR2_Full;
521 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) 510 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
522 speed_mask |= ADVERTISED_40000baseKR4_Full | 511 speed_mask |= ADVERTISED_40000baseCR4_Full;
523 ADVERTISED_40000baseCR4_Full |
524 ADVERTISED_40000baseSR4_Full |
525 ADVERTISED_40000baseLR4_Full;
526 return speed_mask; 512 return speed_mask;
527} 513}
528 514
@@ -557,11 +543,12 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
557 u16 ethtool_speed; 543 u16 ethtool_speed;
558 544
559 cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); 545 cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
546 cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
560 547
561 if (link_info->auto_link_speeds) 548 if (link_info->auto_link_speeds)
562 cmd->supported |= SUPPORTED_Autoneg; 549 cmd->supported |= SUPPORTED_Autoneg;
563 550
564 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 551 if (link_info->autoneg) {
565 cmd->advertising = 552 cmd->advertising =
566 bnxt_fw_to_ethtool_advertised_spds(link_info); 553 bnxt_fw_to_ethtool_advertised_spds(link_info);
567 cmd->advertising |= ADVERTISED_Autoneg; 554 cmd->advertising |= ADVERTISED_Autoneg;
@@ -570,28 +557,16 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
570 cmd->autoneg = AUTONEG_DISABLE; 557 cmd->autoneg = AUTONEG_DISABLE;
571 cmd->advertising = 0; 558 cmd->advertising = 0;
572 } 559 }
573 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { 560 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) {
574 if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 561 if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
575 BNXT_LINK_PAUSE_BOTH) { 562 BNXT_LINK_PAUSE_BOTH) {
576 cmd->advertising |= ADVERTISED_Pause; 563 cmd->advertising |= ADVERTISED_Pause;
577 cmd->supported |= SUPPORTED_Pause;
578 } else { 564 } else {
579 cmd->advertising |= ADVERTISED_Asym_Pause; 565 cmd->advertising |= ADVERTISED_Asym_Pause;
580 cmd->supported |= SUPPORTED_Asym_Pause;
581 if (link_info->auto_pause_setting & 566 if (link_info->auto_pause_setting &
582 BNXT_LINK_PAUSE_RX) 567 BNXT_LINK_PAUSE_RX)
583 cmd->advertising |= ADVERTISED_Pause; 568 cmd->advertising |= ADVERTISED_Pause;
584 } 569 }
585 } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
586 if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
587 BNXT_LINK_PAUSE_BOTH) {
588 cmd->supported |= SUPPORTED_Pause;
589 } else {
590 cmd->supported |= SUPPORTED_Asym_Pause;
591 if (link_info->force_pause_setting &
592 BNXT_LINK_PAUSE_RX)
593 cmd->supported |= SUPPORTED_Pause;
594 }
595 } 570 }
596 571
597 cmd->port = PORT_NONE; 572 cmd->port = PORT_NONE;
@@ -670,6 +645,9 @@ static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
670 if (advertising & ADVERTISED_10000baseT_Full) 645 if (advertising & ADVERTISED_10000baseT_Full)
671 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; 646 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
672 647
648 if (advertising & ADVERTISED_40000baseCR4_Full)
649 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
650
673 return fw_speed_mask; 651 return fw_speed_mask;
674} 652}
675 653
@@ -729,7 +707,7 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
729 speed = ethtool_cmd_speed(cmd); 707 speed = ethtool_cmd_speed(cmd);
730 link_info->req_link_speed = bnxt_get_fw_speed(dev, speed); 708 link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
731 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; 709 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
732 link_info->autoneg &= ~BNXT_AUTONEG_SPEED; 710 link_info->autoneg = 0;
733 link_info->advertising = 0; 711 link_info->advertising = 0;
734 } 712 }
735 713
@@ -748,8 +726,7 @@ static void bnxt_get_pauseparam(struct net_device *dev,
748 726
749 if (BNXT_VF(bp)) 727 if (BNXT_VF(bp))
750 return; 728 return;
751 epause->autoneg = !!(link_info->auto_pause_setting & 729 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
752 BNXT_LINK_PAUSE_BOTH);
753 epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0); 730 epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0);
754 epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0); 731 epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0);
755} 732}
@@ -765,6 +742,9 @@ static int bnxt_set_pauseparam(struct net_device *dev,
765 return rc; 742 return rc;
766 743
767 if (epause->autoneg) { 744 if (epause->autoneg) {
745 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
746 return -EINVAL;
747
768 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 748 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
769 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH; 749 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
770 } else { 750 } else {
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b15a60d787c7..d7e01a74e927 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2445,8 +2445,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
2445 } 2445 }
2446 2446
2447 /* Link UP/DOWN event */ 2447 /* Link UP/DOWN event */
2448 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && 2448 if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
2449 (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) {
2450 phy_mac_interrupt(priv->phydev, 2449 phy_mac_interrupt(priv->phydev,
2451 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP)); 2450 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
2452 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT; 2451 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 872765527081..34d269cd5579 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1683,7 +1683,7 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
1683 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); 1683 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
1684 /* droq creation and local register settings. */ 1684 /* droq creation and local register settings. */
1685 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); 1685 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
1686 if (ret_val == -1) 1686 if (ret_val < 0)
1687 return ret_val; 1687 return ret_val;
1688 1688
1689 if (ret_val == 1) { 1689 if (ret_val == 1) {
@@ -2524,7 +2524,7 @@ static void handle_timestamp(struct octeon_device *oct,
2524 2524
2525 octeon_swap_8B_data(&resp->timestamp, 1); 2525 octeon_swap_8B_data(&resp->timestamp, 1);
2526 2526
2527 if (unlikely((skb_shinfo(skb)->tx_flags | SKBTX_IN_PROGRESS) != 0)) { 2527 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2528 struct skb_shared_hwtstamps ts; 2528 struct skb_shared_hwtstamps ts;
2529 u64 ns = resp->timestamp; 2529 u64 ns = resp->timestamp;
2530 2530
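
The handle_timestamp() change above swaps a bitwise OR for the intended AND when testing SKBTX_IN_PROGRESS: OR-ing a non-zero flag constant into anything is always non-zero, so the old check could never filter out untimestamped skbs. A tiny standalone demonstration, with an illustrative flag value:

#include <stdio.h>

#define SKBTX_IN_PROGRESS 0x04 /* illustrative value for this sketch */

int main(void)
{
	unsigned char tx_flags = 0;            /* timestamping not requested */

	printf("OR  test: %d\n", (tx_flags | SKBTX_IN_PROGRESS) != 0); /* 1: always true */
	printf("AND test: %d\n", (tx_flags & SKBTX_IN_PROGRESS) != 0); /* 0: tests the bit */
	return 0;
}
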
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 4dba86eaa045..174072b3740b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -983,5 +983,5 @@ int octeon_create_droq(struct octeon_device *oct,
983 983
984create_droq_fail: 984create_droq_fail:
985 octeon_delete_droq(oct, q_no); 985 octeon_delete_droq(oct, q_no);
986 return -1; 986 return -ENOMEM;
987} 987}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index c24cb2a86a42..a009bc30dc4d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -574,8 +574,7 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
574 574
575static void nicvf_rcv_pkt_handler(struct net_device *netdev, 575static void nicvf_rcv_pkt_handler(struct net_device *netdev,
576 struct napi_struct *napi, 576 struct napi_struct *napi,
577 struct cmp_queue *cq, 577 struct cqe_rx_t *cqe_rx)
578 struct cqe_rx_t *cqe_rx, int cqe_type)
579{ 578{
580 struct sk_buff *skb; 579 struct sk_buff *skb;
581 struct nicvf *nic = netdev_priv(netdev); 580 struct nicvf *nic = netdev_priv(netdev);
@@ -591,7 +590,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
591 } 590 }
592 591
593 /* Check for errors */ 592 /* Check for errors */
594 err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); 593 err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
595 if (err && !cqe_rx->rb_cnt) 594 if (err && !cqe_rx->rb_cnt)
596 return; 595 return;
597 596
@@ -682,8 +681,7 @@ loop:
682 cq_idx, cq_desc->cqe_type); 681 cq_idx, cq_desc->cqe_type);
683 switch (cq_desc->cqe_type) { 682 switch (cq_desc->cqe_type) {
684 case CQE_TYPE_RX: 683 case CQE_TYPE_RX:
685 nicvf_rcv_pkt_handler(netdev, napi, cq, 684 nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
686 cq_desc, CQE_TYPE_RX);
687 work_done++; 685 work_done++;
688 break; 686 break;
689 case CQE_TYPE_SEND: 687 case CQE_TYPE_SEND:
@@ -1125,7 +1123,6 @@ int nicvf_stop(struct net_device *netdev)
1125 1123
1126 /* Clear multiqset info */ 1124 /* Clear multiqset info */
1127 nic->pnicvf = nic; 1125 nic->pnicvf = nic;
1128 nic->sqs_count = 0;
1129 1126
1130 return 0; 1127 return 0;
1131} 1128}
@@ -1354,6 +1351,9 @@ void nicvf_update_stats(struct nicvf *nic)
1354 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + 1351 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
1355 stats->tx_bcast_frames_ok + 1352 stats->tx_bcast_frames_ok +
1356 stats->tx_mcast_frames_ok; 1353 stats->tx_mcast_frames_ok;
1354 drv_stats->rx_frames_ok = stats->rx_ucast_frames +
1355 stats->rx_bcast_frames +
1356 stats->rx_mcast_frames;
1357 drv_stats->rx_drops = stats->rx_drop_red + 1357 drv_stats->rx_drops = stats->rx_drop_red +
1358 stats->rx_drop_overrun; 1358 stats->rx_drop_overrun;
1359 drv_stats->tx_drops = stats->tx_drops; 1359 drv_stats->tx_drops = stats->tx_drops;
@@ -1538,6 +1538,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1538 1538
1539 nicvf_send_vf_struct(nic); 1539 nicvf_send_vf_struct(nic);
1540 1540
1541 if (!pass1_silicon(nic->pdev))
1542 nic->hw_tso = true;
1543
1541 /* Check if this VF is in QS only mode */ 1544 /* Check if this VF is in QS only mode */
1542 if (nic->sqs_mode) 1545 if (nic->sqs_mode)
1543 return 0; 1546 return 0;
@@ -1557,9 +1560,6 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1557 1560
1558 netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 1561 netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
1559 1562
1560 if (!pass1_silicon(nic->pdev))
1561 nic->hw_tso = true;
1562
1563 netdev->netdev_ops = &nicvf_netdev_ops; 1563 netdev->netdev_ops = &nicvf_netdev_ops;
1564 netdev->watchdog_timeo = NICVF_TX_TIMEOUT; 1564 netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
1565 1565
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d0d1b5490061..767347b1f631 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1329,16 +1329,12 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1329} 1329}
1330 1330
1331/* Check for errors in the receive cmp.queue entry */ 1331/* Check for errors in the receive cmp.queue entry */
1332int nicvf_check_cqe_rx_errs(struct nicvf *nic, 1332int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1333 struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
1334{ 1333{
1335 struct nicvf_hw_stats *stats = &nic->hw_stats; 1334 struct nicvf_hw_stats *stats = &nic->hw_stats;
1336 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1337 1335
1338 if (!cqe_rx->err_level && !cqe_rx->err_opcode) { 1336 if (!cqe_rx->err_level && !cqe_rx->err_opcode)
1339 drv_stats->rx_frames_ok++;
1340 return 0; 1337 return 0;
1341 }
1342 1338
1343 if (netif_msg_rx_err(nic)) 1339 if (netif_msg_rx_err(nic))
1344 netdev_err(nic->netdev, 1340 netdev_err(nic->netdev,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index c5030a7f213a..6673e1133523 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -338,8 +338,7 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
338/* Stats */ 338/* Stats */
339void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); 339void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
340void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); 340void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
341int nicvf_check_cqe_rx_errs(struct nicvf *nic, 341int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
342 struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
343int nicvf_check_cqe_tx_errs(struct nicvf *nic, 342int nicvf_check_cqe_tx_errs(struct nicvf *nic,
344 struct cmp_queue *cq, struct cqe_send_t *cqe_tx); 343 struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
345#endif /* NICVF_QUEUES_H */ 344#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index ee04caa6c4d8..a89721fad633 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -681,6 +681,24 @@ int t3_seeprom_wp(struct adapter *adapter, int enable)
681 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); 681 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
682} 682}
683 683
684static int vpdstrtouint(char *s, int len, unsigned int base, unsigned int *val)
685{
686 char tok[len + 1];
687
688 memcpy(tok, s, len);
689 tok[len] = 0;
690 return kstrtouint(strim(tok), base, val);
691}
692
693static int vpdstrtou16(char *s, int len, unsigned int base, u16 *val)
694{
695 char tok[len + 1];
696
697 memcpy(tok, s, len);
698 tok[len] = 0;
699 return kstrtou16(strim(tok), base, val);
700}
701
684/** 702/**
685 * get_vpd_params - read VPD parameters from VPD EEPROM 703 * get_vpd_params - read VPD parameters from VPD EEPROM
686 * @adapter: adapter to read 704 * @adapter: adapter to read
@@ -709,19 +727,19 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
709 return ret; 727 return ret;
710 } 728 }
711 729
712 ret = kstrtouint(vpd.cclk_data, 10, &p->cclk); 730 ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
713 if (ret) 731 if (ret)
714 return ret; 732 return ret;
715 ret = kstrtouint(vpd.mclk_data, 10, &p->mclk); 733 ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk);
716 if (ret) 734 if (ret)
717 return ret; 735 return ret;
718 ret = kstrtouint(vpd.uclk_data, 10, &p->uclk); 736 ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk);
719 if (ret) 737 if (ret)
720 return ret; 738 return ret;
721 ret = kstrtouint(vpd.mdc_data, 10, &p->mdc); 739 ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc);
722 if (ret) 740 if (ret)
723 return ret; 741 return ret;
724 ret = kstrtouint(vpd.mt_data, 10, &p->mem_timing); 742 ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing);
725 if (ret) 743 if (ret)
726 return ret; 744 return ret;
727 memcpy(p->sn, vpd.sn_data, SERNUM_LEN); 745 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
@@ -733,10 +751,12 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
733 } else { 751 } else {
734 p->port_type[0] = hex_to_bin(vpd.port0_data[0]); 752 p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
735 p->port_type[1] = hex_to_bin(vpd.port1_data[0]); 753 p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
736 ret = kstrtou16(vpd.xaui0cfg_data, 16, &p->xauicfg[0]); 754 ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16,
755 &p->xauicfg[0]);
737 if (ret) 756 if (ret)
738 return ret; 757 return ret;
739 ret = kstrtou16(vpd.xaui1cfg_data, 16, &p->xauicfg[1]); 758 ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16,
759 &p->xauicfg[1]);
740 if (ret) 760 if (ret)
741 return ret; 761 return ret;
742 } 762 }
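
The new vpdstrtouint()/vpdstrtou16() helpers above copy each fixed-width VPD field into a NUL-terminated scratch buffer and trim it before parsing, rather than handing the raw, unterminated field to kstrtouint(). A rough userspace analogue follows; vpd_field_to_uint() is an invented name, and strtoul()/isspace() stand in for the kernel's kstrtouint()/strim().

/* Userspace analogue of the bounded, terminated, trimmed VPD field parse. */
#include <ctype.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int vpd_field_to_uint(const char *s, size_t len, int base, unsigned int *val)
{
	char tok[32];
	char *p, *end;

	if (len >= sizeof(tok))
		return -EINVAL;
	memcpy(tok, s, len);
	tok[len] = '\0';                            /* field is not NUL-terminated */

	p = tok;
	while (isspace((unsigned char)*p))          /* trim leading blanks */
		p++;
	for (end = p + strlen(p); end > p && isspace((unsigned char)end[-1]); end--)
		end[-1] = '\0';                     /* trim trailing blanks */

	errno = 0;
	*val = (unsigned int)strtoul(p, &end, base);
	return (errno || *end) ? -EINVAL : 0;
}

int main(void)
{
	unsigned int cclk;
	char raw[6] = { '2', '0', '0', ' ', ' ', 'X' };  /* 5-byte field, 'X' is adjacent junk */

	if (!vpd_field_to_uint(raw, 5, 10, &cclk))
		printf("cclk = %u\n", cclk);             /* prints 200 */
	return 0;
}
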
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a8dda635456d..06bc2d2e7a73 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -165,6 +165,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
165 CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */ 165 CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */
166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ 166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ 167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
168 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
168 169
169 /* T6 adapters: 170 /* T6 adapters:
170 */ 171 */
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index cf94b72dbacd..48d91941408d 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -128,7 +128,6 @@ struct board_info {
128 struct resource *data_res; 128 struct resource *data_res;
129 struct resource *addr_req; /* resources requested */ 129 struct resource *addr_req; /* resources requested */
130 struct resource *data_req; 130 struct resource *data_req;
131 struct resource *irq_res;
132 131
133 int irq_wake; 132 int irq_wake;
134 133
@@ -1300,22 +1299,16 @@ static int
1300dm9000_open(struct net_device *dev) 1299dm9000_open(struct net_device *dev)
1301{ 1300{
1302 struct board_info *db = netdev_priv(dev); 1301 struct board_info *db = netdev_priv(dev);
1303 unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
1304 1302
1305 if (netif_msg_ifup(db)) 1303 if (netif_msg_ifup(db))
1306 dev_dbg(db->dev, "enabling %s\n", dev->name); 1304 dev_dbg(db->dev, "enabling %s\n", dev->name);
1307 1305
1308 /* If there is no IRQ type specified, default to something that 1306 /* If there is no IRQ type specified, tell the user that this is a
1309 * may work, and tell the user that this is a problem */ 1307 * problem
1310 1308 */
1311 if (irqflags == IRQF_TRIGGER_NONE) 1309 if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
1312 irqflags = irq_get_trigger_type(dev->irq);
1313
1314 if (irqflags == IRQF_TRIGGER_NONE)
1315 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); 1310 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1316 1311
1317 irqflags |= IRQF_SHARED;
1318
1319 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ 1312 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
1320 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ 1313 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
1321 mdelay(1); /* delay needed by DM9000B */ 1314 mdelay(1); /* delay needed by DM9000B */
@@ -1323,7 +1316,8 @@ dm9000_open(struct net_device *dev)
1323 /* Initialize DM9000 board */ 1316 /* Initialize DM9000 board */
1324 dm9000_init_dm9000(dev); 1317 dm9000_init_dm9000(dev);
1325 1318
1326 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) 1319 if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
1320 dev->name, dev))
1327 return -EAGAIN; 1321 return -EAGAIN;
1328 /* Now that we have an interrupt handler hooked up we can unmask 1322 /* Now that we have an interrupt handler hooked up we can unmask
1329 * our interrupts 1323 * our interrupts
@@ -1500,15 +1494,22 @@ dm9000_probe(struct platform_device *pdev)
1500 1494
1501 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1495 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1502 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1496 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1503 db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1504 1497
1505 if (db->addr_res == NULL || db->data_res == NULL || 1498 if (!db->addr_res || !db->data_res) {
1506 db->irq_res == NULL) { 1499 dev_err(db->dev, "insufficient resources addr=%p data=%p\n",
1507 dev_err(db->dev, "insufficient resources\n"); 1500 db->addr_res, db->data_res);
1508 ret = -ENOENT; 1501 ret = -ENOENT;
1509 goto out; 1502 goto out;
1510 } 1503 }
1511 1504
1505 ndev->irq = platform_get_irq(pdev, 0);
1506 if (ndev->irq < 0) {
1507 dev_err(db->dev, "interrupt resource unavailable: %d\n",
1508 ndev->irq);
1509 ret = ndev->irq;
1510 goto out;
1511 }
1512
1512 db->irq_wake = platform_get_irq(pdev, 1); 1513 db->irq_wake = platform_get_irq(pdev, 1);
1513 if (db->irq_wake >= 0) { 1514 if (db->irq_wake >= 0) {
1514 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake); 1515 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
@@ -1570,7 +1571,6 @@ dm9000_probe(struct platform_device *pdev)
1570 1571
1571 /* fill in parameters for net-dev structure */ 1572 /* fill in parameters for net-dev structure */
1572 ndev->base_addr = (unsigned long)db->io_addr; 1573 ndev->base_addr = (unsigned long)db->io_addr;
1573 ndev->irq = db->irq_res->start;
1574 1574
1575 /* ensure at least we have a default set of IO routines */ 1575 /* ensure at least we have a default set of IO routines */
1576 dm9000_set_io(db, iosize); 1576 dm9000_set_io(db, iosize);
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index a7139f588ad2..678f5018d0be 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -469,8 +469,8 @@ static int fmvj18x_config(struct pcmcia_device *link)
469 goto failed; 469 goto failed;
470 } 470 }
471 /* Read MACID from CIS */ 471 /* Read MACID from CIS */
472 for (i = 5; i < 11; i++) 472 for (i = 0; i < 6; i++)
473 dev->dev_addr[i] = buf[i]; 473 dev->dev_addr[i] = buf[i + 5];
474 kfree(buf); 474 kfree(buf);
475 } else { 475 } else {
476 if (pcmcia_get_mac_from_cis(link, dev)) 476 if (pcmcia_get_mac_from_cis(link, dev))
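
The fmvj18x_cs change above fixes the MACID copy loop: the old version wrote dev_addr[5..10], past the end of the six-byte address, while the new one indexes the destination 0..5 and reads the CIS buffer at offset i + 5. A standalone illustration follows; the buffer contents are made up.

#include <stdio.h>

int main(void)
{
	unsigned char buf[11] = { 0, 0, 0, 0, 0,             /* CIS header bytes */
				  0x00, 0x09, 0x3d, 0x01, 0x02, 0x03 };
	unsigned char dev_addr[6];
	int i;

	for (i = 0; i < 6; i++)          /* fixed loop: dest 0..5, source i + 5 */
		dev_addr[i] = buf[i + 5];

	for (i = 0; i < 6; i++)
		printf("%02x%c", dev_addr[i], i == 5 ? '\n' : ':');
	return 0;
}
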
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 662c2ee268c7..b0ae69f84493 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -370,6 +370,11 @@ struct mvneta_port {
370 struct net_device *dev; 370 struct net_device *dev;
371 struct notifier_block cpu_notifier; 371 struct notifier_block cpu_notifier;
372 int rxq_def; 372 int rxq_def;
373 /* Protect the access to the percpu interrupt registers,
374 * ensuring that the configuration remains coherent.
375 */
376 spinlock_t lock;
377 bool is_stopped;
373 378
374 /* Core clock */ 379 /* Core clock */
375 struct clk *clk; 380 struct clk *clk;
@@ -1038,6 +1043,43 @@ static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
1038 } 1043 }
1039} 1044}
1040 1045
1046static void mvneta_percpu_unmask_interrupt(void *arg)
1047{
1048 struct mvneta_port *pp = arg;
1049
1050 /* All the queues are unmasked, but actually only the ones
1051 * mapped to this CPU will be unmasked
1052 */
1053 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1054 MVNETA_RX_INTR_MASK_ALL |
1055 MVNETA_TX_INTR_MASK_ALL |
1056 MVNETA_MISCINTR_INTR_MASK);
1057}
1058
1059static void mvneta_percpu_mask_interrupt(void *arg)
1060{
1061 struct mvneta_port *pp = arg;
1062
1063 /* All the queues are masked, but actually only the ones
1064 * mapped to this CPU will be masked
1065 */
1066 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1067 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1068 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1069}
1070
1071static void mvneta_percpu_clear_intr_cause(void *arg)
1072{
1073 struct mvneta_port *pp = arg;
1074
1075 /* All the queues are cleared, but actually only the ones
1076 * mapped to this CPU will be cleared
1077 */
1078 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1079 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1080 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1081}
1082
1041/* This method sets defaults to the NETA port: 1083/* This method sets defaults to the NETA port:
1042 * Clears interrupt Cause and Mask registers. 1084 * Clears interrupt Cause and Mask registers.
1043 * Clears all MAC tables. 1085 * Clears all MAC tables.
@@ -1055,14 +1097,10 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
1055 int max_cpu = num_present_cpus(); 1097 int max_cpu = num_present_cpus();
1056 1098
1057 /* Clear all Cause registers */ 1099 /* Clear all Cause registers */
1058 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); 1100 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1059 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1060 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1061 1101
1062 /* Mask all interrupts */ 1102 /* Mask all interrupts */
1063 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 1103 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1064 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1065 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1066 mvreg_write(pp, MVNETA_INTR_ENABLE, 0); 1104 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1067 1105
1068 /* Enable MBUS Retry bit16 */ 1106 /* Enable MBUS Retry bit16 */
@@ -2528,34 +2566,9 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
2528 return 0; 2566 return 0;
2529} 2567}
2530 2568
2531static void mvneta_percpu_unmask_interrupt(void *arg)
2532{
2533 struct mvneta_port *pp = arg;
2534
2535 /* All the queues are unmasked, but actually only the ones
2536 * mapped to this CPU will be unmasked
2537 */
2538 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2539 MVNETA_RX_INTR_MASK_ALL |
2540 MVNETA_TX_INTR_MASK_ALL |
2541 MVNETA_MISCINTR_INTR_MASK);
2542}
2543
2544static void mvneta_percpu_mask_interrupt(void *arg)
2545{
2546 struct mvneta_port *pp = arg;
2547
2548 /* All the queues are masked, but actually only the ones
2549 * mapped to this CPU will be masked
2550 */
2551 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2552 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2553 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2554}
2555
2556static void mvneta_start_dev(struct mvneta_port *pp) 2569static void mvneta_start_dev(struct mvneta_port *pp)
2557{ 2570{
2558 unsigned int cpu; 2571 int cpu;
2559 2572
2560 mvneta_max_rx_size_set(pp, pp->pkt_size); 2573 mvneta_max_rx_size_set(pp, pp->pkt_size);
2561 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); 2574 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -2564,16 +2577,15 @@ static void mvneta_start_dev(struct mvneta_port *pp)
2564 mvneta_port_enable(pp); 2577 mvneta_port_enable(pp);
2565 2578
2566 /* Enable polling on the port */ 2579 /* Enable polling on the port */
2567 for_each_present_cpu(cpu) { 2580 for_each_online_cpu(cpu) {
2568 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 2581 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2569 2582
2570 napi_enable(&port->napi); 2583 napi_enable(&port->napi);
2571 } 2584 }
2572 2585
2573 /* Unmask interrupts. It has to be done from each CPU */ 2586 /* Unmask interrupts. It has to be done from each CPU */
2574 for_each_online_cpu(cpu) 2587 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2575 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, 2588
2576 pp, true);
2577 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2589 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2578 MVNETA_CAUSE_PHY_STATUS_CHANGE | 2590 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2579 MVNETA_CAUSE_LINK_CHANGE | 2591 MVNETA_CAUSE_LINK_CHANGE |
@@ -2589,7 +2601,7 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
2589 2601
2590 phy_stop(pp->phy_dev); 2602 phy_stop(pp->phy_dev);
2591 2603
2592 for_each_present_cpu(cpu) { 2604 for_each_online_cpu(cpu) {
2593 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 2605 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2594 2606
2595 napi_disable(&port->napi); 2607 napi_disable(&port->napi);
@@ -2604,13 +2616,10 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
2604 mvneta_port_disable(pp); 2616 mvneta_port_disable(pp);
2605 2617
2606 /* Clear all ethernet port interrupts */ 2618 /* Clear all ethernet port interrupts */
2607 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 2619 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
2608 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2609 2620
2610 /* Mask all ethernet port interrupts */ 2621 /* Mask all ethernet port interrupts */
2611 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2622 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
2612 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2613 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2614 2623
2615 mvneta_tx_reset(pp); 2624 mvneta_tx_reset(pp);
2616 mvneta_rx_reset(pp); 2625 mvneta_rx_reset(pp);
@@ -2847,11 +2856,20 @@ static void mvneta_percpu_disable(void *arg)
2847 disable_percpu_irq(pp->dev->irq); 2856 disable_percpu_irq(pp->dev->irq);
2848} 2857}
2849 2858
2859/* Electing a CPU must be done in an atomic way: it should be done
2860 * after or before the removal/insertion of a CPU and this function is
2861 * not reentrant.
2862 */
2850static void mvneta_percpu_elect(struct mvneta_port *pp) 2863static void mvneta_percpu_elect(struct mvneta_port *pp)
2851{ 2864{
2852 int online_cpu_idx, max_cpu, cpu, i = 0; 2865 int elected_cpu = 0, max_cpu, cpu, i = 0;
2866
2867 /* Use the cpu associated to the rxq when it is online, in all
2868 * the other cases, use the cpu 0 which can't be offline.
2869 */
2870 if (cpu_online(pp->rxq_def))
2871 elected_cpu = pp->rxq_def;
2853 2872
2854 online_cpu_idx = pp->rxq_def % num_online_cpus();
2855 max_cpu = num_present_cpus(); 2873 max_cpu = num_present_cpus();
2856 2874
2857 for_each_online_cpu(cpu) { 2875 for_each_online_cpu(cpu) {
@@ -2862,7 +2880,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
2862 if ((rxq % max_cpu) == cpu) 2880 if ((rxq % max_cpu) == cpu)
2863 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); 2881 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
2864 2882
2865 if (i == online_cpu_idx) 2883 if (cpu == elected_cpu)
2866 /* Map the default receive queue to the 2884 /* Map the default receive queue to the
2867 * elected CPU 2885 * elected CPU
2868 */ 2886 */
@@ -2873,7 +2891,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
2873 * the CPU bound to the default RX queue 2891 * the CPU bound to the default RX queue
2874 */ 2892 */
2875 if (txq_number == 1) 2893 if (txq_number == 1)
2876 txq_map = (i == online_cpu_idx) ? 2894 txq_map = (cpu == elected_cpu) ?
2877 MVNETA_CPU_TXQ_ACCESS(1) : 0; 2895 MVNETA_CPU_TXQ_ACCESS(1) : 0;
2878 else 2896 else
2879 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & 2897 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
@@ -2902,6 +2920,14 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2902 switch (action) { 2920 switch (action) {
2903 case CPU_ONLINE: 2921 case CPU_ONLINE:
2904 case CPU_ONLINE_FROZEN: 2922 case CPU_ONLINE_FROZEN:
2923 spin_lock(&pp->lock);
2924 /* Configuring the driver for a new CPU while the
2925 * driver is stopping is racy, so just avoid it.
2926 */
2927 if (pp->is_stopped) {
2928 spin_unlock(&pp->lock);
2929 break;
2930 }
2905 netif_tx_stop_all_queues(pp->dev); 2931 netif_tx_stop_all_queues(pp->dev);
2906 2932
2907 /* We have to synchronise on the napi of each CPU 2933 /* We have to synchronise on the napi of each CPU
@@ -2917,9 +2943,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2917 } 2943 }
2918 2944
2919 /* Mask all ethernet port interrupts */ 2945 /* Mask all ethernet port interrupts */
2920 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2946 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
2921 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2922 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2923 napi_enable(&port->napi); 2947 napi_enable(&port->napi);
2924 2948
2925 2949
@@ -2934,27 +2958,25 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2934 */ 2958 */
2935 mvneta_percpu_elect(pp); 2959 mvneta_percpu_elect(pp);
2936 2960
2937 /* Unmask all ethernet port interrupts, as this 2961 /* Unmask all ethernet port interrupts */
2938 * notifier is called for each CPU then the CPU to 2962 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2939 * Queue mapping is applied
2940 */
2941 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2942 MVNETA_RX_INTR_MASK(rxq_number) |
2943 MVNETA_TX_INTR_MASK(txq_number) |
2944 MVNETA_MISCINTR_INTR_MASK);
2945 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2963 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2946 MVNETA_CAUSE_PHY_STATUS_CHANGE | 2964 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2947 MVNETA_CAUSE_LINK_CHANGE | 2965 MVNETA_CAUSE_LINK_CHANGE |
2948 MVNETA_CAUSE_PSC_SYNC_CHANGE); 2966 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2949 netif_tx_start_all_queues(pp->dev); 2967 netif_tx_start_all_queues(pp->dev);
2968 spin_unlock(&pp->lock);
2950 break; 2969 break;
2951 case CPU_DOWN_PREPARE: 2970 case CPU_DOWN_PREPARE:
2952 case CPU_DOWN_PREPARE_FROZEN: 2971 case CPU_DOWN_PREPARE_FROZEN:
2953 netif_tx_stop_all_queues(pp->dev); 2972 netif_tx_stop_all_queues(pp->dev);
2973 /* Thanks to this lock we are sure that any pending
2974 * cpu election is done
2975 */
2976 spin_lock(&pp->lock);
2954 /* Mask all ethernet port interrupts */ 2977 /* Mask all ethernet port interrupts */
2955 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2978 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
2956 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); 2979 spin_unlock(&pp->lock);
2957 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2958 2980
2959 napi_synchronize(&port->napi); 2981 napi_synchronize(&port->napi);
2960 napi_disable(&port->napi); 2982 napi_disable(&port->napi);
@@ -2968,12 +2990,11 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2968 case CPU_DEAD: 2990 case CPU_DEAD:
2969 case CPU_DEAD_FROZEN: 2991 case CPU_DEAD_FROZEN:
2970 /* Check if a new CPU must be elected now this one is down */ 2992 /* Check if a new CPU must be elected now this one is down */
2993 spin_lock(&pp->lock);
2971 mvneta_percpu_elect(pp); 2994 mvneta_percpu_elect(pp);
2995 spin_unlock(&pp->lock);
2972 /* Unmask all ethernet port interrupts */ 2996 /* Unmask all ethernet port interrupts */
2973 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 2997 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2974 MVNETA_RX_INTR_MASK(rxq_number) |
2975 MVNETA_TX_INTR_MASK(txq_number) |
2976 MVNETA_MISCINTR_INTR_MASK);
2977 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2998 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2978 MVNETA_CAUSE_PHY_STATUS_CHANGE | 2999 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2979 MVNETA_CAUSE_LINK_CHANGE | 3000 MVNETA_CAUSE_LINK_CHANGE |
@@ -2988,7 +3009,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2988static int mvneta_open(struct net_device *dev) 3009static int mvneta_open(struct net_device *dev)
2989{ 3010{
2990 struct mvneta_port *pp = netdev_priv(dev); 3011 struct mvneta_port *pp = netdev_priv(dev);
2991 int ret, cpu; 3012 int ret;
2992 3013
2993 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 3014 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2994 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + 3015 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
@@ -3010,22 +3031,12 @@ static int mvneta_open(struct net_device *dev)
3010 goto err_cleanup_txqs; 3031 goto err_cleanup_txqs;
3011 } 3032 }
3012 3033
3013 /* Even though the documentation says that request_percpu_irq
3014 * doesn't enable the interrupts automatically, it actually
3015 * does so on the local CPU.
3016 *
3017 * Make sure it's disabled.
3018 */
3019 mvneta_percpu_disable(pp);
3020
3021 /* Enable per-CPU interrupt on all the CPU to handle our RX 3034 /* Enable per-CPU interrupt on all the CPU to handle our RX
3022 * queue interrupts 3035 * queue interrupts
3023 */ 3036 */
3024 for_each_online_cpu(cpu) 3037 on_each_cpu(mvneta_percpu_enable, pp, true);
3025 smp_call_function_single(cpu, mvneta_percpu_enable,
3026 pp, true);
3027
3028 3038
3039 pp->is_stopped = false;
3029 /* Register a CPU notifier to handle the case where our CPU 3040 /* Register a CPU notifier to handle the case where our CPU
3030 * might be taken offline. 3041 * might be taken offline.
3031 */ 3042 */
@@ -3057,13 +3068,20 @@ err_cleanup_rxqs:
3057static int mvneta_stop(struct net_device *dev) 3068static int mvneta_stop(struct net_device *dev)
3058{ 3069{
3059 struct mvneta_port *pp = netdev_priv(dev); 3070 struct mvneta_port *pp = netdev_priv(dev);
3060 int cpu;
3061 3071
3072 /* Inform the notifiers that we are stopping, so they don't set up
3073 * the driver for new CPUs
3074 */
3075 spin_lock(&pp->lock);
3076 pp->is_stopped = true;
3062 mvneta_stop_dev(pp); 3077 mvneta_stop_dev(pp);
3063 mvneta_mdio_remove(pp); 3078 mvneta_mdio_remove(pp);
3064 unregister_cpu_notifier(&pp->cpu_notifier); 3079 unregister_cpu_notifier(&pp->cpu_notifier);
3065 for_each_present_cpu(cpu) 3080 /* Now that the notifiers are unregistered, we can release the
3066 smp_call_function_single(cpu, mvneta_percpu_disable, pp, true); 3081 * lock
3082 */
3083 spin_unlock(&pp->lock);
3084 on_each_cpu(mvneta_percpu_disable, pp, true);
3067 free_percpu_irq(dev->irq, pp->ports); 3085 free_percpu_irq(dev->irq, pp->ports);
3068 mvneta_cleanup_rxqs(pp); 3086 mvneta_cleanup_rxqs(pp);
3069 mvneta_cleanup_txqs(pp); 3087 mvneta_cleanup_txqs(pp);
@@ -3312,9 +3330,7 @@ static int mvneta_config_rss(struct mvneta_port *pp)
3312 3330
3313 netif_tx_stop_all_queues(pp->dev); 3331 netif_tx_stop_all_queues(pp->dev);
3314 3332
3315 for_each_online_cpu(cpu) 3333 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3316 smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
3317 pp, true);
3318 3334
3319 /* We have to synchronise on the napi of each CPU */ 3335 /* We have to synchronise on the napi of each CPU */
3320 for_each_online_cpu(cpu) { 3336 for_each_online_cpu(cpu) {
@@ -3335,7 +3351,9 @@ static int mvneta_config_rss(struct mvneta_port *pp)
3335 mvreg_write(pp, MVNETA_PORT_CONFIG, val); 3351 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
3336 3352
3337 /* Update the elected CPU matching the new rxq_def */ 3353 /* Update the elected CPU matching the new rxq_def */
3354 spin_lock(&pp->lock);
3338 mvneta_percpu_elect(pp); 3355 mvneta_percpu_elect(pp);
3356 spin_unlock(&pp->lock);
3339 3357
3340 /* We have to synchronise on the napi of each CPU */ 3358 /* We have to synchronise on the napi of each CPU */
3341 for_each_online_cpu(cpu) { 3359 for_each_online_cpu(cpu) {
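
The mvneta changes above serialize the CPU-hotplug notifier, mvneta_percpu_elect() and the stop path behind the new pp->lock plus an is_stopped flag. A minimal pthread sketch of that guard pattern follows; the names (port_cfg, notifier_online, port_stop) are invented for the sketch and a mutex stands in for the kernel spinlock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct port_cfg {
	pthread_mutex_t lock;
	bool is_stopped;
};

static void notifier_online(struct port_cfg *pp)
{
	pthread_mutex_lock(&pp->lock);
	if (pp->is_stopped) {           /* stop in progress: skip reconfiguration */
		pthread_mutex_unlock(&pp->lock);
		return;
	}
	printf("reconfiguring queues for new CPU\n");
	pthread_mutex_unlock(&pp->lock);
}

static void port_stop(struct port_cfg *pp)
{
	pthread_mutex_lock(&pp->lock);
	pp->is_stopped = true;          /* later notifier calls become no-ops */
	printf("stopping port\n");
	pthread_mutex_unlock(&pp->lock);
}

int main(void)
{
	struct port_cfg pp = { PTHREAD_MUTEX_INITIALIZER, false };

	notifier_online(&pp);   /* runs the reconfiguration */
	port_stop(&pp);
	notifier_online(&pp);   /* backs off: port is being torn down */
	return 0;
}

The point of the flag is that a notifier firing after the stop path has begun simply backs off instead of reprogramming a port that is being torn down.
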
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index a4beccf1fd46..c797971aefab 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3061,7 +3061,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3061 3061
3062 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 3062 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3063 if (!pe) 3063 if (!pe)
3064 return -1; 3064 return -ENOMEM;
3065 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 3065 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3066 pe->index = tid; 3066 pe->index = tid;
3067 3067
@@ -3077,7 +3077,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3077 if (pmap == 0) { 3077 if (pmap == 0) {
3078 if (add) { 3078 if (add) {
3079 kfree(pe); 3079 kfree(pe);
3080 return -1; 3080 return -EINVAL;
3081 } 3081 }
3082 mvpp2_prs_hw_inv(priv, pe->index); 3082 mvpp2_prs_hw_inv(priv, pe->index);
3083 priv->prs_shadow[pe->index].valid = false; 3083 priv->prs_shadow[pe->index].valid = false;
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 715de8affcc9..c7e939945259 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -182,10 +182,17 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
182 err = mlx4_reset_slave(dev); 182 err = mlx4_reset_slave(dev);
183 else 183 else
184 err = mlx4_reset_master(dev); 184 err = mlx4_reset_master(dev);
185 BUG_ON(err != 0);
186 185
186 if (!err) {
187 mlx4_err(dev, "device was reset successfully\n");
188 } else {
189 /* EEH could have disabled the PCI channel during reset. That's
190 * recoverable and the PCI error flow will handle it.
191 */
192 if (!pci_channel_offline(dev->persist->pdev))
193 BUG_ON(1);
194 }
187 dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR; 195 dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR;
188 mlx4_err(dev, "device was reset successfully\n");
189 mutex_unlock(&persist->device_state_mutex); 196 mutex_unlock(&persist->device_state_mutex);
190 197
191 /* At that step HW was already reset, now notify clients */ 198 /* At that step HW was already reset, now notify clients */
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 3348e646db70..a849da92f857 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -318,7 +318,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
318 if (timestamp_en) 318 if (timestamp_en)
319 cq_context->flags |= cpu_to_be32(1 << 19); 319 cq_context->flags |= cpu_to_be32(1 << 19);
320 320
321 cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); 321 cq_context->logsize_usrpage =
322 cpu_to_be32((ilog2(nent) << 24) |
323 mlx4_to_hw_uar_index(dev, uar->index));
322 cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn; 324 cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
323 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; 325 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
324 326
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 038f9ce391e6..1494997c4f7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
236 .enable = mlx4_en_phc_enable, 236 .enable = mlx4_en_phc_enable,
237}; 237};
238 238
239#define MLX4_EN_WRAP_AROUND_SEC 10ULL
240
241/* This function calculates the max shift that enables the user range
242 * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
243 */
244static u32 freq_to_shift(u16 freq)
245{
246 u32 freq_khz = freq * 1000;
247 u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
248 u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
249 max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
250 /* calculate max possible multiplier in order to fit in 64bit */
251 u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
252
253 /* This comes from the reverse of clocksource_khz2mult */
254 return ilog2(div_u64(max_mul * freq_khz, 1000000));
255}
256
239void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) 257void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
240{ 258{
241 struct mlx4_dev *dev = mdev->dev; 259 struct mlx4_dev *dev = mdev->dev;
@@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
254 memset(&mdev->cycles, 0, sizeof(mdev->cycles)); 272 memset(&mdev->cycles, 0, sizeof(mdev->cycles));
255 mdev->cycles.read = mlx4_en_read_clock; 273 mdev->cycles.read = mlx4_en_read_clock;
256 mdev->cycles.mask = CLOCKSOURCE_MASK(48); 274 mdev->cycles.mask = CLOCKSOURCE_MASK(48);
257 /* Using shift to make calculation more accurate. Since current HW 275 mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
258 * clock frequency is 427 MHz, and cycles are given using a 48 bits
259 * register, the biggest shift when calculating using u64, is 14
260 * (max_cycles * multiplier < 2^64)
261 */
262 mdev->cycles.shift = 14;
263 mdev->cycles.mult = 276 mdev->cycles.mult =
264 clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift); 277 clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
265 mdev->nominal_c_mult = mdev->cycles.mult; 278 mdev->nominal_c_mult = mdev->cycles.mult;
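
The en_clock.c hunk above replaces the hard-coded cycles.shift of 14 with freq_to_shift(), which picks the largest shift that still lets MLX4_EN_WRAP_AROUND_SEC seconds' worth of core-clock cycles be multiplied without overflowing 64 bits (the reverse of clocksource_khz2mult()). A minimal user-space sketch of the same arithmetic follows; it is illustrative only, and the ilog2/roundup helpers are stand-ins for the kernel's, not driver code.

#include <stdint.h>
#include <stdio.h>

#define WRAP_AROUND_SEC 10ULL

static unsigned int ilog2_u64(uint64_t v)          /* floor(log2(v)), v > 0 */
{
        return 63 - __builtin_clzll(v);
}

static uint64_t roundup_pow_of_two_u64(uint64_t v)
{
        return v <= 1 ? 1 : 1ULL << (ilog2_u64(v - 1) + 1);
}

/* Mirrors the driver's freq_to_shift(); freq is the HCA core clock in MHz. */
static uint32_t freq_to_shift(uint16_t freq)
{
        uint64_t freq_khz = (uint64_t)freq * 1000;
        uint64_t max_val_cycles = freq_khz * 1000 * WRAP_AROUND_SEC;
        /* round up to the nearest (2^n - 1), as the kernel code does */
        uint64_t rounded = (max_val_cycles & (max_val_cycles + 1)) == 0 ?
                           max_val_cycles :
                           roundup_pow_of_two_u64(max_val_cycles) - 1;
        /* largest multiplier that keeps max_cycles * mult inside 64 bits */
        uint64_t max_mul = UINT64_MAX / rounded;

        /* reverse of clocksource_khz2mult(): mult = (10^6 << shift) / khz */
        return ilog2_u64(max_mul * freq_khz / 1000000);
}

int main(void)
{
        /* a 427 MHz core clock, as in the removed comment, yields a shift
         * of about 20 where the old code hard-coded 14 */
        printf("shift = %u\n", freq_to_shift(427));
        return 0;
}
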
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 0c7e3f69a73b..f191a1612589 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2344,8 +2344,6 @@ out:
2344 /* set offloads */ 2344 /* set offloads */
2345 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 2345 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2346 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; 2346 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
2347 priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2348 priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2349} 2347}
2350 2348
2351static void mlx4_en_del_vxlan_offloads(struct work_struct *work) 2349static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2356,8 +2354,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2356 /* unset offloads */ 2354 /* unset offloads */
2357 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 2355 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2358 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); 2356 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
2359 priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
2360 priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
2361 2357
2362 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2358 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2363 VXLAN_STEER_BY_OUTER_MAC, 0); 2359 VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2980,6 +2976,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2980 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 2976 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
2981 } 2977 }
2982 2978
2979 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
2980 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2981 dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2982 }
2983
2983 mdev->pndev[port] = dev; 2984 mdev->pndev[port] = dev;
2984 mdev->upper[port] = NULL; 2985 mdev->upper[port] = NULL;
2985 2986
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index ee99e67187f5..3904b5fc0b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
238 stats->collisions = 0; 238 stats->collisions = 0;
239 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); 239 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
240 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); 240 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
241 stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 241 stats->rx_over_errors = 0;
242 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); 242 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
243 stats->rx_frame_errors = 0; 243 stats->rx_frame_errors = 0;
244 stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 244 stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
245 stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 245 stats->rx_missed_errors = 0;
246 stats->tx_aborted_errors = 0; 246 stats->tx_aborted_errors = 0;
247 stats->tx_carrier_errors = 0; 247 stats->tx_carrier_errors = 0;
248 stats->tx_fifo_errors = 0; 248 stats->tx_fifo_errors = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 12aab5a659d3..02e925d6f734 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -58,7 +58,8 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
58 } else { 58 } else {
59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4; 59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
60 } 60 }
61 context->usr_page = cpu_to_be32(mdev->priv_uar.index); 61 context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
62 mdev->priv_uar.index));
62 context->local_qpn = cpu_to_be32(qpn); 63 context->local_qpn = cpu_to_be32(qpn);
63 context->pri_path.ackto = 1 & 0x07; 64 context->pri_path.ackto = 1 & 0x07;
64 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; 65 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 4421bf5463f6..e0946ab22010 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -213,7 +213,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
213 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 213 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
214 ring->cqn, user_prio, &ring->context); 214 ring->cqn, user_prio, &ring->context);
215 if (ring->bf_alloced) 215 if (ring->bf_alloced)
216 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); 216 ring->context.usr_page =
217 cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
218 ring->bf.uar->index));
217 219
218 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, 220 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
219 &ring->qp, &ring->qp_state); 221 &ring->qp, &ring->qp_state);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 4696053165f8..f613977455e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -940,9 +940,10 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
940 940
941 if (!priv->eq_table.uar_map[index]) { 941 if (!priv->eq_table.uar_map[index]) {
942 priv->eq_table.uar_map[index] = 942 priv->eq_table.uar_map[index] =
943 ioremap(pci_resource_start(dev->persist->pdev, 2) + 943 ioremap(
944 ((eq->eqn / 4) << PAGE_SHIFT), 944 pci_resource_start(dev->persist->pdev, 2) +
945 PAGE_SIZE); 945 ((eq->eqn / 4) << (dev->uar_page_shift)),
946 (1 << (dev->uar_page_shift)));
946 if (!priv->eq_table.uar_map[index]) { 947 if (!priv->eq_table.uar_map[index]) {
947 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", 948 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
948 eq->eqn); 949 eq->eqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f1b6d219e445..2cc3c626c3fe 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -168,6 +168,20 @@ struct mlx4_port_config {
168 168
169static atomic_t pf_loading = ATOMIC_INIT(0); 169static atomic_t pf_loading = ATOMIC_INIT(0);
170 170
171static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
172 struct mlx4_dev_cap *dev_cap)
173{
 174 /* reserved_uars is calculated in system page size units.
 175 * Therefore, an adjustment is added when the uar page size is less
176 * than the system page size
177 */
178 dev->caps.reserved_uars =
179 max_t(int,
180 mlx4_get_num_reserved_uar(dev),
181 dev_cap->reserved_uars /
182 (1 << (PAGE_SHIFT - dev->uar_page_shift)));
183}
184
171int mlx4_check_port_params(struct mlx4_dev *dev, 185int mlx4_check_port_params(struct mlx4_dev *dev,
172 enum mlx4_port_type *port_type) 186 enum mlx4_port_type *port_type)
173{ 187{
@@ -386,8 +400,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
386 dev->caps.reserved_mtts = dev_cap->reserved_mtts; 400 dev->caps.reserved_mtts = dev_cap->reserved_mtts;
387 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 401 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
388 402
389 /* The first 128 UARs are used for EQ doorbells */
390 dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
391 dev->caps.reserved_pds = dev_cap->reserved_pds; 403 dev->caps.reserved_pds = dev_cap->reserved_pds;
392 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 404 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
393 dev_cap->reserved_xrcds : 0; 405 dev_cap->reserved_xrcds : 0;
@@ -405,6 +417,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
405 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 417 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
406 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 418 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
407 419
420 /* Save uar page shift */
421 if (!mlx4_is_slave(dev)) {
422 /* Virtual PCI function needs to determine UAR page size from
423 * firmware. Only master PCI function can set the uar page size
424 */
425 dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
426 mlx4_set_num_reserved_uars(dev, dev_cap);
427 }
428
408 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) { 429 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
409 struct mlx4_init_hca_param hca_param; 430 struct mlx4_init_hca_param hca_param;
410 431
@@ -815,16 +836,25 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
815 return -ENODEV; 836 return -ENODEV;
816 } 837 }
817 838
818 /* slave gets uar page size from QUERY_HCA fw command */ 839 /* Set uar_page_shift for VF */
819 dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); 840 dev->uar_page_shift = hca_param.uar_page_sz + 12;
820 841
821 /* TODO: relax this assumption */ 842 /* Make sure the master uar page size is valid */
822 if (dev->caps.uar_page_size != PAGE_SIZE) { 843 if (dev->uar_page_shift > PAGE_SHIFT) {
823 mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", 844 mlx4_err(dev,
824 dev->caps.uar_page_size, PAGE_SIZE); 845 "Invalid configuration: uar page size is larger than system page size\n");
825 return -ENODEV; 846 return -ENODEV;
826 } 847 }
827 848
849 /* Set reserved_uars based on the uar_page_shift */
850 mlx4_set_num_reserved_uars(dev, &dev_cap);
851
852 /* Although uar page size in FW differs from system page size,
853 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
 854 * still work with the assumption that uar page size == system page size
855 */
856 dev->caps.uar_page_size = PAGE_SIZE;
857
828 memset(&func_cap, 0, sizeof(func_cap)); 858 memset(&func_cap, 0, sizeof(func_cap));
829 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); 859 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
830 if (err) { 860 if (err) {
@@ -2179,8 +2209,12 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
2179 2209
2180 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 2210 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
2181 2211
2182 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 2212 /* Always set UAR page size 4KB, set log_uar_sz accordingly */
2183 init_hca.uar_page_sz = PAGE_SHIFT - 12; 2213 init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
2214 PAGE_SHIFT -
2215 DEFAULT_UAR_PAGE_SHIFT;
2216 init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
2217
2184 init_hca.mw_enabled = 0; 2218 init_hca.mw_enabled = 0;
2185 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2219 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2186 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 2220 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
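
The main.c changes above fix the UAR page size to 4KB (DEFAULT_UAR_PAGE_SHIFT) regardless of the system page size, and mlx4_set_num_reserved_uars() then scales the firmware's reserved-UAR count by 2^(PAGE_SHIFT - uar_page_shift), i.e. by the number of 4KB UAR pages per system page, and clamps it to a minimum. A standalone sketch of that scaling is shown below; the page shifts and the minimum are illustrative assumptions, not values taken from this patch.

#include <stdio.h>

/* Illustrative stand-ins; the real values come from the kernel and firmware. */
#define SYS_PAGE_SHIFT          16      /* e.g. a 64KB-page kernel */
#define UAR_PAGE_SHIFT          12      /* DEFAULT_UAR_PAGE_SHIFT: 4KB UARs */
#define MIN_RESERVED_UAR         8      /* stand-in for mlx4_get_num_reserved_uar() */

static int reserved_uars(int fw_reserved_uar_count)
{
        /* One system page holds 2^(SYS_PAGE_SHIFT - UAR_PAGE_SHIFT) 4KB UAR
         * pages; the firmware count is divided by that factor and then
         * floored at a driver-defined minimum.
         */
        int scaled = fw_reserved_uar_count /
                     (1 << (SYS_PAGE_SHIFT - UAR_PAGE_SHIFT));

        return scaled > MIN_RESERVED_UAR ? scaled : MIN_RESERVED_UAR;
}

int main(void)
{
        printf("reserved UARs = %d\n", reserved_uars(256)); /* 256/16 = 16 */
        return 0;
}
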
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 609c59dc854e..b3cc3ab63799 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -269,9 +269,15 @@ EXPORT_SYMBOL_GPL(mlx4_bf_free);
269 269
270int mlx4_init_uar_table(struct mlx4_dev *dev) 270int mlx4_init_uar_table(struct mlx4_dev *dev)
271{ 271{
272 if (dev->caps.num_uars <= 128) { 272 int num_reserved_uar = mlx4_get_num_reserved_uar(dev);
273 mlx4_err(dev, "Only %d UAR pages (need more than 128)\n", 273
274 dev->caps.num_uars); 274 mlx4_dbg(dev, "uar_page_shift = %d", dev->uar_page_shift);
275 mlx4_dbg(dev, "Effective reserved_uars=%d", dev->caps.reserved_uars);
276
277 if (dev->caps.num_uars <= num_reserved_uar) {
278 mlx4_err(
279 dev, "Only %d UAR pages (need more than %d)\n",
280 dev->caps.num_uars, num_reserved_uar);
275 mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n"); 281 mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
276 return -ENODEV; 282 return -ENODEV;
277 } 283 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b46dbe29ef6c..25ce1b030a00 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -915,11 +915,13 @@ static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
915 915
916 spin_lock_irq(mlx4_tlock(dev)); 916 spin_lock_irq(mlx4_tlock(dev));
917 r = find_res(dev, counter_index, RES_COUNTER); 917 r = find_res(dev, counter_index, RES_COUNTER);
918 if (!r || r->owner != slave) 918 if (!r || r->owner != slave) {
919 ret = -EINVAL; 919 ret = -EINVAL;
920 counter = container_of(r, struct res_counter, com); 920 } else {
921 if (!counter->port) 921 counter = container_of(r, struct res_counter, com);
922 counter->port = port; 922 if (!counter->port)
923 counter->port = port;
924 }
923 925
924 spin_unlock_irq(mlx4_tlock(dev)); 926 spin_unlock_irq(mlx4_tlock(dev));
925 return ret; 927 return ret;
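
The resource_tracker.c hunk above avoids calling container_of() on a find_res() result that may be NULL: container_of() is plain pointer arithmetic, so it turns a NULL member pointer into a bogus near-NULL struct pointer that a later dereference would trip over. A tiny generic illustration of that hazard follows (not driver code; the struct layout is invented).

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct res_common  { int id; };
struct res_counter { int port; struct res_common com; };

int main(void)
{
        struct res_common *r = NULL;    /* e.g. the lookup came back empty */
        struct res_counter *counter = container_of(r, struct res_counter, com);

        /* counter is now a non-NULL garbage pointer just below address 0;
         * touching counter->port would be undefined behaviour.  Hence the
         * patch only reaches container_of() after the "r && r->owner ==
         * slave" check has passed.
         */
        printf("bogus pointer: %p\n", (void *)counter);
        return 0;
}
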
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6a3e430f1062..d4e1c3045200 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2024,18 +2024,37 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
2024 vf_stats); 2024 vf_stats);
2025} 2025}
2026 2026
2027static struct net_device_ops mlx5e_netdev_ops = { 2027static const struct net_device_ops mlx5e_netdev_ops_basic = {
2028 .ndo_open = mlx5e_open, 2028 .ndo_open = mlx5e_open,
2029 .ndo_stop = mlx5e_close, 2029 .ndo_stop = mlx5e_close,
2030 .ndo_start_xmit = mlx5e_xmit, 2030 .ndo_start_xmit = mlx5e_xmit,
2031 .ndo_get_stats64 = mlx5e_get_stats, 2031 .ndo_get_stats64 = mlx5e_get_stats,
2032 .ndo_set_rx_mode = mlx5e_set_rx_mode, 2032 .ndo_set_rx_mode = mlx5e_set_rx_mode,
2033 .ndo_set_mac_address = mlx5e_set_mac, 2033 .ndo_set_mac_address = mlx5e_set_mac,
2034 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, 2034 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
2035 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, 2035 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
2036 .ndo_set_features = mlx5e_set_features, 2036 .ndo_set_features = mlx5e_set_features,
2037 .ndo_change_mtu = mlx5e_change_mtu, 2037 .ndo_change_mtu = mlx5e_change_mtu,
2038 .ndo_do_ioctl = mlx5e_ioctl, 2038 .ndo_do_ioctl = mlx5e_ioctl,
2039};
2040
2041static const struct net_device_ops mlx5e_netdev_ops_sriov = {
2042 .ndo_open = mlx5e_open,
2043 .ndo_stop = mlx5e_close,
2044 .ndo_start_xmit = mlx5e_xmit,
2045 .ndo_get_stats64 = mlx5e_get_stats,
2046 .ndo_set_rx_mode = mlx5e_set_rx_mode,
2047 .ndo_set_mac_address = mlx5e_set_mac,
2048 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
2049 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
2050 .ndo_set_features = mlx5e_set_features,
2051 .ndo_change_mtu = mlx5e_change_mtu,
2052 .ndo_do_ioctl = mlx5e_ioctl,
2053 .ndo_set_vf_mac = mlx5e_set_vf_mac,
2054 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
2055 .ndo_get_vf_config = mlx5e_get_vf_config,
2056 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
2057 .ndo_get_vf_stats = mlx5e_get_vf_stats,
2039}; 2058};
2040 2059
2041static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 2060static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2137,18 +2156,11 @@ static void mlx5e_build_netdev(struct net_device *netdev)
2137 2156
2138 SET_NETDEV_DEV(netdev, &mdev->pdev->dev); 2157 SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
2139 2158
2140 if (priv->params.num_tc > 1) 2159 if (MLX5_CAP_GEN(mdev, vport_group_manager))
2141 mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue; 2160 netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
2142 2161 else
2143 if (MLX5_CAP_GEN(mdev, vport_group_manager)) { 2162 netdev->netdev_ops = &mlx5e_netdev_ops_basic;
2144 mlx5e_netdev_ops.ndo_set_vf_mac = mlx5e_set_vf_mac;
2145 mlx5e_netdev_ops.ndo_set_vf_vlan = mlx5e_set_vf_vlan;
2146 mlx5e_netdev_ops.ndo_get_vf_config = mlx5e_get_vf_config;
2147 mlx5e_netdev_ops.ndo_set_vf_link_state = mlx5e_set_vf_link_state;
2148 mlx5e_netdev_ops.ndo_get_vf_stats = mlx5e_get_vf_stats;
2149 }
2150 2163
2151 netdev->netdev_ops = &mlx5e_netdev_ops;
2152 netdev->watchdog_timeo = 15 * HZ; 2164 netdev->watchdog_timeo = 15 * HZ;
2153 2165
2154 netdev->ethtool_ops = &mlx5e_ethtool_ops; 2166 netdev->ethtool_ops = &mlx5e_ethtool_ops;
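
The mlx5e change above replaces one writable mlx5e_netdev_ops table that was patched at probe time with two const tables (basic and SR-IOV) selected per device. Patching a single shared ops structure is fragile when devices with different capabilities probe on the same host, and const tables can live in read-only memory. A hedged sketch of the pattern outside the kernel follows; all names are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

struct dev_ops {
        void (*open)(void);
        void (*set_vf_mac)(void);       /* only meaningful with SR-IOV */
};

static void generic_open(void)     { puts("open"); }
static void sriov_set_vf_mac(void) { puts("set VF MAC"); }

/* Two immutable tables instead of one mutable table patched at probe time. */
static const struct dev_ops ops_basic = { .open = generic_open };
static const struct dev_ops ops_sriov = { .open = generic_open,
                                          .set_vf_mac = sriov_set_vf_mac };

static const struct dev_ops *pick_ops(bool is_vport_group_manager)
{
        return is_vport_group_manager ? &ops_sriov : &ops_basic;
}

int main(void)
{
        const struct dev_ops *ops = pick_ops(true);

        ops->open();
        if (ops->set_vf_mac)
                ops->set_vf_mac();
        return 0;
}
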
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index 726f5435b32f..ae65b9940aed 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -49,7 +49,7 @@
49#define MLXSW_PORT_MID 0xd000 49#define MLXSW_PORT_MID 0xd000
50 50
51#define MLXSW_PORT_MAX_PHY_PORTS 0x40 51#define MLXSW_PORT_MAX_PHY_PORTS 0x40
52#define MLXSW_PORT_MAX_PORTS MLXSW_PORT_MAX_PHY_PORTS 52#define MLXSW_PORT_MAX_PORTS (MLXSW_PORT_MAX_PHY_PORTS + 1)
53 53
54#define MLXSW_PORT_DEVID_BITS_OFFSET 10 54#define MLXSW_PORT_DEVID_BITS_OFFSET 10
55#define MLXSW_PORT_PHY_BITS_OFFSET 4 55#define MLXSW_PORT_PHY_BITS_OFFSET 4
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index bb77e2207804..ffe4c0305733 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -873,6 +873,62 @@ static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
873 } 873 }
874} 874}
875 875
876/* SPAFT - Switch Port Acceptable Frame Types
877 * ------------------------------------------
878 * The Switch Port Acceptable Frame Types register configures the frame
879 * admittance of the port.
880 */
881#define MLXSW_REG_SPAFT_ID 0x2010
882#define MLXSW_REG_SPAFT_LEN 0x08
883
884static const struct mlxsw_reg_info mlxsw_reg_spaft = {
885 .id = MLXSW_REG_SPAFT_ID,
886 .len = MLXSW_REG_SPAFT_LEN,
887};
888
889/* reg_spaft_local_port
890 * Local port number.
891 * Access: Index
892 *
893 * Note: CPU port is not supported (all tag types are allowed).
894 */
895MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8);
896
897/* reg_spaft_sub_port
898 * Virtual port within the physical port.
899 * Should be set to 0 when virtual ports are not enabled on the port.
900 * Access: RW
901 */
902MLXSW_ITEM32(reg, spaft, sub_port, 0x00, 8, 8);
903
904/* reg_spaft_allow_untagged
905 * When set, untagged frames on the ingress are allowed (default).
906 * Access: RW
907 */
908MLXSW_ITEM32(reg, spaft, allow_untagged, 0x04, 31, 1);
909
910/* reg_spaft_allow_prio_tagged
911 * When set, priority tagged frames on the ingress are allowed (default).
912 * Access: RW
913 */
914MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1);
915
916/* reg_spaft_allow_tagged
917 * When set, tagged frames on the ingress are allowed (default).
918 * Access: RW
919 */
920MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1);
921
922static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
923 bool allow_untagged)
924{
925 MLXSW_REG_ZERO(spaft, payload);
926 mlxsw_reg_spaft_local_port_set(payload, local_port);
927 mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged);
928 mlxsw_reg_spaft_allow_prio_tagged_set(payload, true);
929 mlxsw_reg_spaft_allow_tagged_set(payload, true);
930}
931
876/* SFGC - Switch Flooding Group Configuration 932/* SFGC - Switch Flooding Group Configuration
877 * ------------------------------------------ 933 * ------------------------------------------
878 * The following register controls the association of flooding tables and MIDs 934 * The following register controls the association of flooding tables and MIDs
@@ -3203,6 +3259,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
3203 return "SPVID"; 3259 return "SPVID";
3204 case MLXSW_REG_SPVM_ID: 3260 case MLXSW_REG_SPVM_ID:
3205 return "SPVM"; 3261 return "SPVM";
3262 case MLXSW_REG_SPAFT_ID:
3263 return "SPAFT";
3206 case MLXSW_REG_SFGC_ID: 3264 case MLXSW_REG_SFGC_ID:
3207 return "SFGC"; 3265 return "SFGC";
3208 case MLXSW_REG_SFTR_ID: 3266 case MLXSW_REG_SFTR_ID:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 217856bdd400..09ce451c283b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2123,6 +2123,8 @@ static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2123 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port)) 2123 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2124 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); 2124 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2125 2125
2126 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2127
2126 mlxsw_sp_port->learning = 0; 2128 mlxsw_sp_port->learning = 0;
2127 mlxsw_sp_port->learning_sync = 0; 2129 mlxsw_sp_port->learning_sync = 0;
2128 mlxsw_sp_port->uc_flood = 0; 2130 mlxsw_sp_port->uc_flood = 0;
@@ -2746,6 +2748,13 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2746 goto err_vport_flood_set; 2748 goto err_vport_flood_set;
2747 } 2749 }
2748 2750
2751 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
2752 MLXSW_REG_SPMS_STATE_FORWARDING);
2753 if (err) {
2754 netdev_err(dev, "Failed to set STP state\n");
2755 goto err_port_stp_state_set;
2756 }
2757
2749 if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport)) 2758 if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
2750 netdev_err(dev, "Failed to flush FDB\n"); 2759 netdev_err(dev, "Failed to flush FDB\n");
2751 2760
@@ -2763,6 +2772,7 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2763 2772
2764 return 0; 2773 return 0;
2765 2774
2775err_port_stp_state_set:
2766err_vport_flood_set: 2776err_vport_flood_set:
2767err_port_vid_learning_set: 2777err_port_vid_learning_set:
2768err_port_vid_to_fid_validate: 2778err_port_vid_to_fid_validate:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 7f42eb1c320e..3b89ed2f3c76 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -254,5 +254,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
254int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, 254int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
255 bool set, bool only_uc); 255 bool set, bool only_uc);
256void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); 256void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
257int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
257 258
258#endif 259#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index e492ca2cdecd..7b56098acc58 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -370,7 +370,8 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
370 return err; 370 return err;
371} 371}
372 372
373static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 373static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
374 u16 vid)
374{ 375{
375 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 376 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
376 char spvid_pl[MLXSW_REG_SPVID_LEN]; 377 char spvid_pl[MLXSW_REG_SPVID_LEN];
@@ -379,6 +380,53 @@ static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
379 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 380 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
380} 381}
381 382
383static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
384 bool allow)
385{
386 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
387 char spaft_pl[MLXSW_REG_SPAFT_LEN];
388
389 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
390 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
391}
392
393int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
394{
395 struct net_device *dev = mlxsw_sp_port->dev;
396 int err;
397
398 if (!vid) {
399 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
400 if (err) {
401 netdev_err(dev, "Failed to disallow untagged traffic\n");
402 return err;
403 }
404 } else {
405 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
406 if (err) {
407 netdev_err(dev, "Failed to set PVID\n");
408 return err;
409 }
410
411 /* Only allow if not already allowed. */
412 if (!mlxsw_sp_port->pvid) {
413 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
414 true);
415 if (err) {
416 netdev_err(dev, "Failed to allow untagged traffic\n");
417 goto err_port_allow_untagged_set;
418 }
419 }
420 }
421
422 mlxsw_sp_port->pvid = vid;
423 return 0;
424
425err_port_allow_untagged_set:
426 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
427 return err;
428}
429
382static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) 430static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
383{ 431{
384 char sfmr_pl[MLXSW_REG_SFMR_LEN]; 432 char sfmr_pl[MLXSW_REG_SFMR_LEN];
@@ -540,7 +588,12 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
540 netdev_err(dev, "Unable to add PVID %d\n", vid_begin); 588 netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
541 goto err_port_pvid_set; 589 goto err_port_pvid_set;
542 } 590 }
543 mlxsw_sp_port->pvid = vid_begin; 591 } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
592 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
593 if (err) {
594 netdev_err(dev, "Unable to del PVID\n");
595 goto err_port_pvid_set;
596 }
544 } 597 }
545 598
 546 /* Changing activity bits only if HW operation succeeded */ 599
@@ -892,20 +945,18 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
892 return err; 945 return err;
893 } 946 }
894 947
948 if (init)
949 goto out;
950
895 pvid = mlxsw_sp_port->pvid; 951 pvid = mlxsw_sp_port->pvid;
896 if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) { 952 if (pvid >= vid_begin && pvid <= vid_end) {
897 /* Default VLAN is always 1 */ 953 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
898 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
899 if (err) { 954 if (err) {
900 netdev_err(dev, "Unable to del PVID %d\n", pvid); 955 netdev_err(dev, "Unable to del PVID %d\n", pvid);
901 return err; 956 return err;
902 } 957 }
903 mlxsw_sp_port->pvid = 1;
904 } 958 }
905 959
906 if (init)
907 goto out;
908
909 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, 960 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
910 false, false); 961 false, false);
911 if (err) { 962 if (err) {
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 17d5571d0432..537974cfd427 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6137,28 +6137,28 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
6137 sw_cnt_1ms_ini = 16000000/rg_saw_cnt; 6137 sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
6138 sw_cnt_1ms_ini &= 0x0fff; 6138 sw_cnt_1ms_ini &= 0x0fff;
6139 data = r8168_mac_ocp_read(tp, 0xd412); 6139 data = r8168_mac_ocp_read(tp, 0xd412);
6140 data &= 0x0fff; 6140 data &= ~0x0fff;
6141 data |= sw_cnt_1ms_ini; 6141 data |= sw_cnt_1ms_ini;
6142 r8168_mac_ocp_write(tp, 0xd412, data); 6142 r8168_mac_ocp_write(tp, 0xd412, data);
6143 } 6143 }
6144 6144
6145 data = r8168_mac_ocp_read(tp, 0xe056); 6145 data = r8168_mac_ocp_read(tp, 0xe056);
6146 data &= 0xf0; 6146 data &= ~0xf0;
6147 data |= 0x07; 6147 data |= 0x70;
6148 r8168_mac_ocp_write(tp, 0xe056, data); 6148 r8168_mac_ocp_write(tp, 0xe056, data);
6149 6149
6150 data = r8168_mac_ocp_read(tp, 0xe052); 6150 data = r8168_mac_ocp_read(tp, 0xe052);
6151 data &= 0x8008; 6151 data &= ~0x6000;
6152 data |= 0x6000; 6152 data |= 0x8008;
6153 r8168_mac_ocp_write(tp, 0xe052, data); 6153 r8168_mac_ocp_write(tp, 0xe052, data);
6154 6154
6155 data = r8168_mac_ocp_read(tp, 0xe0d6); 6155 data = r8168_mac_ocp_read(tp, 0xe0d6);
6156 data &= 0x01ff; 6156 data &= ~0x01ff;
6157 data |= 0x017f; 6157 data |= 0x017f;
6158 r8168_mac_ocp_write(tp, 0xe0d6, data); 6158 r8168_mac_ocp_write(tp, 0xe0d6, data);
6159 6159
6160 data = r8168_mac_ocp_read(tp, 0xd420); 6160 data = r8168_mac_ocp_read(tp, 0xd420);
6161 data &= 0x0fff; 6161 data &= ~0x0fff;
6162 data |= 0x047f; 6162 data |= 0x047f;
6163 r8168_mac_ocp_write(tp, 0xd420, data); 6163 r8168_mac_ocp_write(tp, 0xd420, data);
6164 6164
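
The r8169 hunk above fixes a read-modify-write mistake: data &= 0x0fff preserves the very field bits that are about to be rewritten and discards the rest of the register, so the following OR merges stale field bits and loses the untouched ones; the fix inverts the masks (data &= ~0x0fff, and likewise for the other registers). A small generic helper showing the intended pattern (illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Replace the bits selected by 'mask' with 'val', leaving the rest intact. */
static uint16_t rmw16(uint16_t reg, uint16_t mask, uint16_t val)
{
        return (reg & ~mask) | (val & mask);
}

int main(void)
{
        uint16_t reg = 0xabcd;

        /* wrong: reg & 0x0fff keeps the old field, so the OR merges stale bits */
        printf("buggy : 0x%04x\n", (unsigned)((reg & 0x0fff) | 0x047f));
        /* right: clear the field first, then set the new value */
        printf("fixed : 0x%04x\n", (unsigned)rmw16(reg, 0x0fff, 0x047f));
        return 0;
}
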
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ac43ed914fcf..744d7806a9ee 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1139,7 +1139,8 @@ static int ravb_set_ringparam(struct net_device *ndev,
1139 if (netif_running(ndev)) { 1139 if (netif_running(ndev)) {
1140 netif_device_detach(ndev); 1140 netif_device_detach(ndev);
1141 /* Stop PTP Clock driver */ 1141 /* Stop PTP Clock driver */
1142 ravb_ptp_stop(ndev); 1142 if (priv->chip_id == RCAR_GEN2)
1143 ravb_ptp_stop(ndev);
1143 /* Wait for DMA stopping */ 1144 /* Wait for DMA stopping */
1144 error = ravb_stop_dma(ndev); 1145 error = ravb_stop_dma(ndev);
1145 if (error) { 1146 if (error) {
@@ -1170,7 +1171,8 @@ static int ravb_set_ringparam(struct net_device *ndev,
1170 ravb_emac_init(ndev); 1171 ravb_emac_init(ndev);
1171 1172
1172 /* Initialise PTP Clock driver */ 1173 /* Initialise PTP Clock driver */
1173 ravb_ptp_init(ndev, priv->pdev); 1174 if (priv->chip_id == RCAR_GEN2)
1175 ravb_ptp_init(ndev, priv->pdev);
1174 1176
1175 netif_device_attach(ndev); 1177 netif_device_attach(ndev);
1176 } 1178 }
@@ -1298,7 +1300,8 @@ static void ravb_tx_timeout_work(struct work_struct *work)
1298 netif_tx_stop_all_queues(ndev); 1300 netif_tx_stop_all_queues(ndev);
1299 1301
1300 /* Stop PTP Clock driver */ 1302 /* Stop PTP Clock driver */
1301 ravb_ptp_stop(ndev); 1303 if (priv->chip_id == RCAR_GEN2)
1304 ravb_ptp_stop(ndev);
1302 1305
1303 /* Wait for DMA stopping */ 1306 /* Wait for DMA stopping */
1304 ravb_stop_dma(ndev); 1307 ravb_stop_dma(ndev);
@@ -1311,7 +1314,8 @@ static void ravb_tx_timeout_work(struct work_struct *work)
1311 ravb_emac_init(ndev); 1314 ravb_emac_init(ndev);
1312 1315
1313 /* Initialise PTP Clock driver */ 1316 /* Initialise PTP Clock driver */
1314 ravb_ptp_init(ndev, priv->pdev); 1317 if (priv->chip_id == RCAR_GEN2)
1318 ravb_ptp_init(ndev, priv->pdev);
1315 1319
1316 netif_tx_start_all_queues(ndev); 1320 netif_tx_start_all_queues(ndev);
1317} 1321}
@@ -1814,10 +1818,6 @@ static int ravb_probe(struct platform_device *pdev)
1814 CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC); 1818 CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC);
1815 } 1819 }
1816 1820
1817 /* Set CSEL value */
1818 ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
1819 CCC);
1820
1821 /* Set GTI value */ 1821 /* Set GTI value */
1822 error = ravb_set_gti(ndev); 1822 error = ravb_set_gti(ndev);
1823 if (error) 1823 if (error)
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 0e2fc1a844ab..db7db8ac4ca3 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2342,8 +2342,8 @@ static int smc_drv_probe(struct platform_device *pdev)
2342 } 2342 }
2343 2343
2344 ndev->irq = platform_get_irq(pdev, 0); 2344 ndev->irq = platform_get_irq(pdev, 0);
2345 if (ndev->irq <= 0) { 2345 if (ndev->irq < 0) {
2346 ret = -ENODEV; 2346 ret = ndev->irq;
2347 goto out_release_io; 2347 goto out_release_io;
2348 } 2348 }
2349 /* 2349 /*
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index e9cc61e1ec74..c3e85acfdc70 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -63,8 +63,12 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
63 mode = AM33XX_GMII_SEL_MODE_RGMII; 63 mode = AM33XX_GMII_SEL_MODE_RGMII;
64 break; 64 break;
65 65
66 case PHY_INTERFACE_MODE_MII:
67 default: 66 default:
67 dev_warn(priv->dev,
68 "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
69 phy_modes(phy_mode));
70 /* fallthrough */
71 case PHY_INTERFACE_MODE_MII:
68 mode = AM33XX_GMII_SEL_MODE_MII; 72 mode = AM33XX_GMII_SEL_MODE_MII;
69 break; 73 break;
70 }; 74 };
@@ -106,8 +110,12 @@ static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
106 mode = AM33XX_GMII_SEL_MODE_RGMII; 110 mode = AM33XX_GMII_SEL_MODE_RGMII;
107 break; 111 break;
108 112
109 case PHY_INTERFACE_MODE_MII:
110 default: 113 default:
114 dev_warn(priv->dev,
115 "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
116 phy_modes(phy_mode));
117 /* fallthrough */
118 case PHY_INTERFACE_MODE_MII:
111 mode = AM33XX_GMII_SEL_MODE_MII; 119 mode = AM33XX_GMII_SEL_MODE_MII;
112 break; 120 break;
113 }; 121 };
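
Both cpsw_gmii_sel_* hunks above move default: ahead of case PHY_INTERFACE_MODE_MII: so that unknown PHY modes print a warning and then deliberately fall through to MII, instead of being selected silently. A compact generic illustration of that warn-then-fallthrough arrangement (the enum and messages are invented):

#include <stdio.h>

enum phy_mode { MODE_RGMII, MODE_RMII, MODE_MII, MODE_UNKNOWN };

static const char *select_mode(enum phy_mode m)
{
        switch (m) {
        case MODE_RGMII:
                return "rgmii";
        case MODE_RMII:
                return "rmii";
        default:
                /* warn once, then deliberately fall through to the MII case */
                fprintf(stderr, "unsupported PHY mode %d, defaulting to MII\n", m);
                /* fallthrough */
        case MODE_MII:
                return "mii";
        }
}

int main(void)
{
        printf("%s\n", select_mode(MODE_UNKNOWN));      /* warns, prints "mii" */
        return 0;
}
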
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index c61d66d38634..029841f98c32 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -117,21 +117,17 @@ static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
117 *ndesc = le32_to_cpu(desc->next_desc); 117 *ndesc = le32_to_cpu(desc->next_desc);
118} 118}
119 119
120static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc) 120static u32 get_sw_data(int index, struct knav_dma_desc *desc)
121{ 121{
122 *pad0 = le32_to_cpu(desc->pad[0]); 122 /* No Endian conversion needed as this data is untouched by hw */
123 *pad1 = le32_to_cpu(desc->pad[1]); 123 return desc->sw_data[index];
124 *pad2 = le32_to_cpu(desc->pad[2]);
125} 124}
126 125
127static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc) 126/* use these macros to get sw data */
128{ 127#define GET_SW_DATA0(desc) get_sw_data(0, desc)
129 u64 pad64; 128#define GET_SW_DATA1(desc) get_sw_data(1, desc)
130 129#define GET_SW_DATA2(desc) get_sw_data(2, desc)
131 pad64 = le32_to_cpu(desc->pad[0]) + 130#define GET_SW_DATA3(desc) get_sw_data(3, desc)
132 ((u64)le32_to_cpu(desc->pad[1]) << 32);
133 *padptr = (void *)(uintptr_t)pad64;
134}
135 131
136static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len, 132static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
137 struct knav_dma_desc *desc) 133 struct knav_dma_desc *desc)
@@ -163,13 +159,18 @@ static void set_desc_info(u32 desc_info, u32 pkt_info,
163 desc->packet_info = cpu_to_le32(pkt_info); 159 desc->packet_info = cpu_to_le32(pkt_info);
164} 160}
165 161
166static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc) 162static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
167{ 163{
168 desc->pad[0] = cpu_to_le32(pad0); 164 /* No Endian conversion needed as this data is untouched by hw */
169 desc->pad[1] = cpu_to_le32(pad1); 165 desc->sw_data[index] = data;
170 desc->pad[2] = cpu_to_le32(pad1);
171} 166}
172 167
168/* use these macros to set sw data */
169#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
170#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
171#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
172#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)
173
173static void set_org_pkt_info(dma_addr_t buff, u32 buff_len, 174static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
174 struct knav_dma_desc *desc) 175 struct knav_dma_desc *desc)
175{ 176{
@@ -581,7 +582,6 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
581 dma_addr_t dma_desc, dma_buf; 582 dma_addr_t dma_desc, dma_buf;
582 unsigned int buf_len, dma_sz = sizeof(*ndesc); 583 unsigned int buf_len, dma_sz = sizeof(*ndesc);
583 void *buf_ptr; 584 void *buf_ptr;
584 u32 pad[2];
585 u32 tmp; 585 u32 tmp;
586 586
587 get_words(&dma_desc, 1, &desc->next_desc); 587 get_words(&dma_desc, 1, &desc->next_desc);
@@ -593,14 +593,20 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
593 break; 593 break;
594 } 594 }
595 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc); 595 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
596 get_pad_ptr(&buf_ptr, ndesc); 596 /* warning!!!! We are retrieving the virtual ptr in the sw_data
597 * field as a 32bit value. Will not work on 64bit machines
598 */
599 buf_ptr = (void *)GET_SW_DATA0(ndesc);
600 buf_len = (int)GET_SW_DATA1(desc);
597 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE); 601 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
598 __free_page(buf_ptr); 602 __free_page(buf_ptr);
599 knav_pool_desc_put(netcp->rx_pool, desc); 603 knav_pool_desc_put(netcp->rx_pool, desc);
600 } 604 }
601 605 /* warning!!!! We are retrieving the virtual ptr in the sw_data
602 get_pad_info(&pad[0], &pad[1], &buf_len, desc); 606 * field as a 32bit value. Will not work on 64bit machines
603 buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); 607 */
608 buf_ptr = (void *)GET_SW_DATA0(desc);
609 buf_len = (int)GET_SW_DATA1(desc);
604 610
605 if (buf_ptr) 611 if (buf_ptr)
606 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr); 612 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
@@ -639,7 +645,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
639 dma_addr_t dma_desc, dma_buff; 645 dma_addr_t dma_desc, dma_buff;
640 struct netcp_packet p_info; 646 struct netcp_packet p_info;
641 struct sk_buff *skb; 647 struct sk_buff *skb;
642 u32 pad[2];
643 void *org_buf_ptr; 648 void *org_buf_ptr;
644 649
645 dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); 650 dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
@@ -653,8 +658,11 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
653 } 658 }
654 659
655 get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc); 660 get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
656 get_pad_info(&pad[0], &pad[1], &org_buf_len, desc); 661 /* warning!!!! We are retrieving the virtual ptr in the sw_data
657 org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); 662 * field as a 32bit value. Will not work on 64bit machines
663 */
664 org_buf_ptr = (void *)GET_SW_DATA0(desc);
665 org_buf_len = (int)GET_SW_DATA1(desc);
658 666
659 if (unlikely(!org_buf_ptr)) { 667 if (unlikely(!org_buf_ptr)) {
660 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); 668 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
@@ -679,7 +687,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
679 /* Fill in the page fragment list */ 687 /* Fill in the page fragment list */
680 while (dma_desc) { 688 while (dma_desc) {
681 struct page *page; 689 struct page *page;
682 void *ptr;
683 690
684 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); 691 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
685 if (unlikely(!ndesc)) { 692 if (unlikely(!ndesc)) {
@@ -688,8 +695,10 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
688 } 695 }
689 696
690 get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc); 697 get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
691 get_pad_ptr(&ptr, ndesc); 698 /* warning!!!! We are retrieving the virtual ptr in the sw_data
692 page = ptr; 699 * field as a 32bit value. Will not work on 64bit machines
700 */
701 page = (struct page *)GET_SW_DATA0(desc);
693 702
694 if (likely(dma_buff && buf_len && page)) { 703 if (likely(dma_buff && buf_len && page)) {
695 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, 704 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
@@ -777,7 +786,10 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
777 } 786 }
778 787
779 get_org_pkt_info(&dma, &buf_len, desc); 788 get_org_pkt_info(&dma, &buf_len, desc);
780 get_pad_ptr(&buf_ptr, desc); 789 /* warning!!!! We are retrieving the virtual ptr in the sw_data
790 * field as a 32bit value. Will not work on 64bit machines
791 */
792 buf_ptr = (void *)GET_SW_DATA0(desc);
781 793
782 if (unlikely(!dma)) { 794 if (unlikely(!dma)) {
783 dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n"); 795 dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
@@ -829,7 +841,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
829 struct page *page; 841 struct page *page;
830 dma_addr_t dma; 842 dma_addr_t dma;
831 void *bufptr; 843 void *bufptr;
832 u32 pad[3]; 844 u32 sw_data[2];
833 845
834 /* Allocate descriptor */ 846 /* Allocate descriptor */
835 hwdesc = knav_pool_desc_get(netcp->rx_pool); 847 hwdesc = knav_pool_desc_get(netcp->rx_pool);
@@ -846,7 +858,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
846 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 858 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
847 859
848 bufptr = netdev_alloc_frag(primary_buf_len); 860 bufptr = netdev_alloc_frag(primary_buf_len);
849 pad[2] = primary_buf_len; 861 sw_data[1] = primary_buf_len;
850 862
851 if (unlikely(!bufptr)) { 863 if (unlikely(!bufptr)) {
852 dev_warn_ratelimited(netcp->ndev_dev, 864 dev_warn_ratelimited(netcp->ndev_dev,
@@ -858,9 +870,10 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
858 if (unlikely(dma_mapping_error(netcp->dev, dma))) 870 if (unlikely(dma_mapping_error(netcp->dev, dma)))
859 goto fail; 871 goto fail;
860 872
861 pad[0] = lower_32_bits((uintptr_t)bufptr); 873 /* warning!!!! We are saving the virtual ptr in the sw_data
862 pad[1] = upper_32_bits((uintptr_t)bufptr); 874 * field as a 32bit value. Will not work on 64bit machines
863 875 */
876 sw_data[0] = (u32)bufptr;
864 } else { 877 } else {
865 /* Allocate a secondary receive queue entry */ 878 /* Allocate a secondary receive queue entry */
866 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD); 879 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
@@ -870,9 +883,11 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
870 } 883 }
871 buf_len = PAGE_SIZE; 884 buf_len = PAGE_SIZE;
872 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE); 885 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
873 pad[0] = lower_32_bits(dma); 886 /* warning!!!! We are saving the virtual ptr in the sw_data
874 pad[1] = upper_32_bits(dma); 887 * field as a 32bit value. Will not work on 64bit machines
875 pad[2] = 0; 888 */
889 sw_data[0] = (u32)page;
890 sw_data[1] = 0;
876 } 891 }
877 892
878 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC; 893 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
@@ -882,7 +897,8 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
882 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) << 897 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
883 KNAV_DMA_DESC_RETQ_SHIFT; 898 KNAV_DMA_DESC_RETQ_SHIFT;
884 set_org_pkt_info(dma, buf_len, hwdesc); 899 set_org_pkt_info(dma, buf_len, hwdesc);
885 set_pad_info(pad[0], pad[1], pad[2], hwdesc); 900 SET_SW_DATA0(sw_data[0], hwdesc);
901 SET_SW_DATA1(sw_data[1], hwdesc);
886 set_desc_info(desc_info, pkt_info, hwdesc); 902 set_desc_info(desc_info, pkt_info, hwdesc);
887 903
888 /* Push to FDQs */ 904 /* Push to FDQs */
@@ -971,7 +987,6 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
971 unsigned int budget) 987 unsigned int budget)
972{ 988{
973 struct knav_dma_desc *desc; 989 struct knav_dma_desc *desc;
974 void *ptr;
975 struct sk_buff *skb; 990 struct sk_buff *skb;
976 unsigned int dma_sz; 991 unsigned int dma_sz;
977 dma_addr_t dma; 992 dma_addr_t dma;
@@ -988,8 +1003,10 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
988 continue; 1003 continue;
989 } 1004 }
990 1005
991 get_pad_ptr(&ptr, desc); 1006 /* warning!!!! We are retrieving the virtual ptr in the sw_data
992 skb = ptr; 1007 * field as a 32bit value. Will not work on 64bit machines
1008 */
1009 skb = (struct sk_buff *)GET_SW_DATA0(desc);
993 netcp_free_tx_desc_chain(netcp, desc, dma_sz); 1010 netcp_free_tx_desc_chain(netcp, desc, dma_sz);
994 if (!skb) { 1011 if (!skb) {
995 dev_err(netcp->ndev_dev, "No skb in Tx desc\n"); 1012 dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
@@ -1194,10 +1211,10 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
1194 } 1211 }
1195 1212
1196 set_words(&tmp, 1, &desc->packet_info); 1213 set_words(&tmp, 1, &desc->packet_info);
1197 tmp = lower_32_bits((uintptr_t)&skb); 1214 /* warning!!!! We are saving the virtual ptr in the sw_data
1198 set_words(&tmp, 1, &desc->pad[0]); 1215 * field as a 32bit value. Will not work on 64bit machines
1199 tmp = upper_32_bits((uintptr_t)&skb); 1216 */
1200 set_words(&tmp, 1, &desc->pad[1]); 1217 SET_SW_DATA0((u32)skb, desc);
1201 1218
1202 if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) { 1219 if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
1203 tmp = tx_pipe->switch_to_port; 1220 tmp = tx_pipe->switch_to_port;
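
The netcp rework above keeps host-private values (buffer pointer, buffer length, skb pointer) in the descriptor's sw_data[] words without endian conversion, since the DMA engine never touches them; as the in-code warnings note, casting a virtual pointer to a single u32 only round-trips while pointers are 32 bits wide. A short standalone check of that assumption (plain C, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int buf = 0;
        void *ptr = &buf;

        /* Storing a pointer in one 32-bit sw_data word truncates it on LP64. */
        uint32_t sw_data0 = (uint32_t)(uintptr_t)ptr;
        void *back = (void *)(uintptr_t)sw_data0;

        printf("sizeof(void *) = %zu\n", sizeof(void *));
        printf("round-trip %s\n", back == ptr ? "survives (32-bit build)"
                                              : "truncated (64-bit build)");
        return 0;
}
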
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 028e3873c310..0bf7edd99573 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1039,17 +1039,34 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
1039 return geneve_xmit_skb(skb, dev, info); 1039 return geneve_xmit_skb(skb, dev, info);
1040} 1040}
1041 1041
1042static int geneve_change_mtu(struct net_device *dev, int new_mtu) 1042static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
1043{ 1043{
1044 /* GENEVE overhead is not fixed, so we can't enforce a more 1044 /* The max_mtu calculation does not take account of GENEVE
1045 * precise max MTU. 1045 * options, to avoid excluding potentially valid
1046 * configurations.
1046 */ 1047 */
1047 if (new_mtu < 68 || new_mtu > IP_MAX_MTU) 1048 int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
1049 - dev->hard_header_len;
1050
1051 if (new_mtu < 68)
1048 return -EINVAL; 1052 return -EINVAL;
1053
1054 if (new_mtu > max_mtu) {
1055 if (strict)
1056 return -EINVAL;
1057
1058 new_mtu = max_mtu;
1059 }
1060
1049 dev->mtu = new_mtu; 1061 dev->mtu = new_mtu;
1050 return 0; 1062 return 0;
1051} 1063}
1052 1064
1065static int geneve_change_mtu(struct net_device *dev, int new_mtu)
1066{
1067 return __geneve_change_mtu(dev, new_mtu, true);
1068}
1069
1053static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 1070static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
1054{ 1071{
1055 struct ip_tunnel_info *info = skb_tunnel_info(skb); 1072 struct ip_tunnel_info *info = skb_tunnel_info(skb);
@@ -1161,6 +1178,7 @@ static void geneve_setup(struct net_device *dev)
1161 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 1178 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1162 1179
1163 netif_keep_dst(dev); 1180 netif_keep_dst(dev);
1181 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1164 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; 1182 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
1165 eth_hw_addr_random(dev); 1183 eth_hw_addr_random(dev);
1166} 1184}
@@ -1452,14 +1470,15 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
1452 return dev; 1470 return dev;
1453 1471
1454 err = geneve_configure(net, dev, &geneve_remote_unspec, 1472 err = geneve_configure(net, dev, &geneve_remote_unspec,
1455 0, 0, 0, htons(dst_port), true, 0); 1473 0, 0, 0, htons(dst_port), true,
1474 GENEVE_F_UDP_ZERO_CSUM6_RX);
1456 if (err) 1475 if (err)
1457 goto err; 1476 goto err;
1458 1477
1459 /* openvswitch users expect packet sizes to be unrestricted, 1478 /* openvswitch users expect packet sizes to be unrestricted,
1460 * so set the largest MTU we can. 1479 * so set the largest MTU we can.
1461 */ 1480 */
1462 err = geneve_change_mtu(dev, IP_MAX_MTU); 1481 err = __geneve_change_mtu(dev, IP_MAX_MTU, false);
1463 if (err) 1482 if (err)
1464 goto err; 1483 goto err;
1465 1484
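
The geneve change above bounds new_mtu by the actual tunnel overhead, max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr) - dev->hard_header_len, rejecting larger values on the strict (user-requested) path and clamping on the openvswitch fallback path. A worked instance of that arithmetic with typical header sizes follows; the concrete sizes are common defaults assumed for illustration, not values taken from this diff.

#include <stdio.h>

int main(void)
{
        int ip_max_mtu  = 65535;        /* IP_MAX_MTU */
        int geneve_base = 8 + 8;        /* UDP header + base GENEVE header */
        int iphdr_len   = 20;           /* outer IPv4 header, no options */
        int hard_header = 14;           /* Ethernet hard_header_len */

        int max_mtu = ip_max_mtu - geneve_base - iphdr_len - hard_header;

        printf("max geneve MTU = %d\n", max_mtu);       /* 65485 */
        return 0;
}
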
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 1d3a66563bac..98e34fee45c7 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1089,6 +1089,9 @@ static int netvsc_probe(struct hv_device *dev,
1089 net->ethtool_ops = &ethtool_ops; 1089 net->ethtool_ops = &ethtool_ops;
1090 SET_NETDEV_DEV(net, &dev->device); 1090 SET_NETDEV_DEV(net, &dev->device);
1091 1091
1092 /* We always need headroom for rndis header */
1093 net->needed_headroom = RNDIS_AND_PPI_SIZE;
1094
1092 /* Notify the netvsc driver of the new device */ 1095 /* Notify the netvsc driver of the new device */
1093 memset(&device_info, 0, sizeof(device_info)); 1096 memset(&device_info, 0, sizeof(device_info));
1094 device_info.ring_size = ring_size; 1097 device_info.ring_size = ring_size;
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index bf241a3ec5e5..db507e3bcab9 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -250,10 +250,6 @@ static int bcm7xxx_config_init(struct phy_device *phydev)
250 phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO); 250 phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO);
251 phy_read(phydev, MII_BCM7XXX_AUX_MODE); 251 phy_read(phydev, MII_BCM7XXX_AUX_MODE);
252 252
253 /* Workaround only required for 100Mbits/sec capable PHYs */
254 if (phydev->supported & PHY_GBIT_FEATURES)
255 return 0;
256
257 /* set shadow mode 2 */ 253 /* set shadow mode 2 */
258 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 254 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
259 MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2); 255 MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2);
@@ -270,7 +266,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev)
270 phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555); 266 phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555);
271 267
272 /* reset shadow mode 2 */ 268 /* reset shadow mode 2 */
273 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, MII_BCM7XXX_SHD_MODE_2, 0); 269 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0, MII_BCM7XXX_SHD_MODE_2);
274 if (ret < 0) 270 if (ret < 0)
275 return ret; 271 return ret;
276 272
@@ -307,11 +303,6 @@ static int bcm7xxx_suspend(struct phy_device *phydev)
307 return 0; 303 return 0;
308} 304}
309 305
310static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
311{
312 return 0;
313}
314
315#define BCM7XXX_28NM_GPHY(_oui, _name) \ 306#define BCM7XXX_28NM_GPHY(_oui, _name) \
316{ \ 307{ \
317 .phy_id = (_oui), \ 308 .phy_id = (_oui), \
@@ -337,7 +328,7 @@ static struct phy_driver bcm7xxx_driver[] = {
337 .phy_id = PHY_ID_BCM7425, 328 .phy_id = PHY_ID_BCM7425,
338 .phy_id_mask = 0xfffffff0, 329 .phy_id_mask = 0xfffffff0,
339 .name = "Broadcom BCM7425", 330 .name = "Broadcom BCM7425",
340 .features = PHY_GBIT_FEATURES | 331 .features = PHY_BASIC_FEATURES |
341 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 332 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
342 .flags = PHY_IS_INTERNAL, 333 .flags = PHY_IS_INTERNAL,
343 .config_init = bcm7xxx_config_init, 334 .config_init = bcm7xxx_config_init,
@@ -349,7 +340,7 @@ static struct phy_driver bcm7xxx_driver[] = {
349 .phy_id = PHY_ID_BCM7429, 340 .phy_id = PHY_ID_BCM7429,
350 .phy_id_mask = 0xfffffff0, 341 .phy_id_mask = 0xfffffff0,
351 .name = "Broadcom BCM7429", 342 .name = "Broadcom BCM7429",
352 .features = PHY_GBIT_FEATURES | 343 .features = PHY_BASIC_FEATURES |
353 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 344 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
354 .flags = PHY_IS_INTERNAL, 345 .flags = PHY_IS_INTERNAL,
355 .config_init = bcm7xxx_config_init, 346 .config_init = bcm7xxx_config_init,
@@ -361,7 +352,7 @@ static struct phy_driver bcm7xxx_driver[] = {
361 .phy_id = PHY_ID_BCM7435, 352 .phy_id = PHY_ID_BCM7435,
362 .phy_id_mask = 0xfffffff0, 353 .phy_id_mask = 0xfffffff0,
363 .name = "Broadcom BCM7435", 354 .name = "Broadcom BCM7435",
364 .features = PHY_GBIT_FEATURES | 355 .features = PHY_BASIC_FEATURES |
365 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 356 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
366 .flags = PHY_IS_INTERNAL, 357 .flags = PHY_IS_INTERNAL,
367 .config_init = bcm7xxx_config_init, 358 .config_init = bcm7xxx_config_init,
@@ -369,30 +360,6 @@ static struct phy_driver bcm7xxx_driver[] = {
369 .read_status = genphy_read_status, 360 .read_status = genphy_read_status,
370 .suspend = bcm7xxx_suspend, 361 .suspend = bcm7xxx_suspend,
371 .resume = bcm7xxx_config_init, 362 .resume = bcm7xxx_config_init,
372}, {
373 .phy_id = PHY_BCM_OUI_4,
374 .phy_id_mask = 0xffff0000,
375 .name = "Broadcom BCM7XXX 40nm",
376 .features = PHY_GBIT_FEATURES |
377 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
378 .flags = PHY_IS_INTERNAL,
379 .config_init = bcm7xxx_config_init,
380 .config_aneg = genphy_config_aneg,
381 .read_status = genphy_read_status,
382 .suspend = bcm7xxx_suspend,
383 .resume = bcm7xxx_config_init,
384}, {
385 .phy_id = PHY_BCM_OUI_5,
386 .phy_id_mask = 0xffffff00,
387 .name = "Broadcom BCM7XXX 65nm",
388 .features = PHY_BASIC_FEATURES |
389 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
390 .flags = PHY_IS_INTERNAL,
391 .config_init = bcm7xxx_dummy_config_init,
392 .config_aneg = genphy_config_aneg,
393 .read_status = genphy_read_status,
394 .suspend = bcm7xxx_suspend,
395 .resume = bcm7xxx_config_init,
396} }; 363} };
397 364
398static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { 365static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
@@ -404,8 +371,6 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
404 { PHY_ID_BCM7439, 0xfffffff0, }, 371 { PHY_ID_BCM7439, 0xfffffff0, },
405 { PHY_ID_BCM7435, 0xfffffff0, }, 372 { PHY_ID_BCM7435, 0xfffffff0, },
406 { PHY_ID_BCM7445, 0xfffffff0, }, 373 { PHY_ID_BCM7445, 0xfffffff0, },
407 { PHY_BCM_OUI_4, 0xffff0000 },
408 { PHY_BCM_OUI_5, 0xffffff00 },
409 { } 374 { }
410}; 375};
411 376
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e3eb96443c97..ab1d0fcaf1d9 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -446,6 +446,12 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
446 if (err < 0) 446 if (err < 0)
447 return err; 447 return err;
448 448
449 return 0;
450}
451
452static int marvell_config_init(struct phy_device *phydev)
453{
454 /* Set registers from marvell,reg-init DT property */
449 return marvell_of_reg_init(phydev); 455 return marvell_of_reg_init(phydev);
450} 456}
451 457
@@ -495,7 +501,7 @@ static int m88e1116r_config_init(struct phy_device *phydev)
495 501
496 mdelay(500); 502 mdelay(500);
497 503
498 return 0; 504 return marvell_config_init(phydev);
499} 505}
500 506
501static int m88e3016_config_init(struct phy_device *phydev) 507static int m88e3016_config_init(struct phy_device *phydev)
@@ -514,7 +520,7 @@ static int m88e3016_config_init(struct phy_device *phydev)
514 if (reg < 0) 520 if (reg < 0)
515 return reg; 521 return reg;
516 522
517 return 0; 523 return marvell_config_init(phydev);
518} 524}
519 525
520static int m88e1111_config_init(struct phy_device *phydev) 526static int m88e1111_config_init(struct phy_device *phydev)
@@ -1078,6 +1084,7 @@ static struct phy_driver marvell_drivers[] = {
1078 .features = PHY_GBIT_FEATURES, 1084 .features = PHY_GBIT_FEATURES,
1079 .probe = marvell_probe, 1085 .probe = marvell_probe,
1080 .flags = PHY_HAS_INTERRUPT, 1086 .flags = PHY_HAS_INTERRUPT,
1087 .config_init = &marvell_config_init,
1081 .config_aneg = &marvell_config_aneg, 1088 .config_aneg = &marvell_config_aneg,
1082 .read_status = &genphy_read_status, 1089 .read_status = &genphy_read_status,
1083 .ack_interrupt = &marvell_ack_interrupt, 1090 .ack_interrupt = &marvell_ack_interrupt,
@@ -1149,6 +1156,7 @@ static struct phy_driver marvell_drivers[] = {
1149 .features = PHY_GBIT_FEATURES, 1156 .features = PHY_GBIT_FEATURES,
1150 .flags = PHY_HAS_INTERRUPT, 1157 .flags = PHY_HAS_INTERRUPT,
1151 .probe = marvell_probe, 1158 .probe = marvell_probe,
1159 .config_init = &marvell_config_init,
1152 .config_aneg = &m88e1121_config_aneg, 1160 .config_aneg = &m88e1121_config_aneg,
1153 .read_status = &marvell_read_status, 1161 .read_status = &marvell_read_status,
1154 .ack_interrupt = &marvell_ack_interrupt, 1162 .ack_interrupt = &marvell_ack_interrupt,
@@ -1167,6 +1175,7 @@ static struct phy_driver marvell_drivers[] = {
1167 .features = PHY_GBIT_FEATURES, 1175 .features = PHY_GBIT_FEATURES,
1168 .flags = PHY_HAS_INTERRUPT, 1176 .flags = PHY_HAS_INTERRUPT,
1169 .probe = marvell_probe, 1177 .probe = marvell_probe,
1178 .config_init = &marvell_config_init,
1170 .config_aneg = &m88e1318_config_aneg, 1179 .config_aneg = &m88e1318_config_aneg,
1171 .read_status = &marvell_read_status, 1180 .read_status = &marvell_read_status,
1172 .ack_interrupt = &marvell_ack_interrupt, 1181 .ack_interrupt = &marvell_ack_interrupt,
@@ -1259,6 +1268,7 @@ static struct phy_driver marvell_drivers[] = {
1259 .features = PHY_GBIT_FEATURES, 1268 .features = PHY_GBIT_FEATURES,
1260 .flags = PHY_HAS_INTERRUPT, 1269 .flags = PHY_HAS_INTERRUPT,
1261 .probe = marvell_probe, 1270 .probe = marvell_probe,
1271 .config_init = &marvell_config_init,
1262 .config_aneg = &m88e1510_config_aneg, 1272 .config_aneg = &m88e1510_config_aneg,
1263 .read_status = &marvell_read_status, 1273 .read_status = &marvell_read_status,
1264 .ack_interrupt = &marvell_ack_interrupt, 1274 .ack_interrupt = &marvell_ack_interrupt,
@@ -1277,6 +1287,7 @@ static struct phy_driver marvell_drivers[] = {
1277 .features = PHY_GBIT_FEATURES, 1287 .features = PHY_GBIT_FEATURES,
1278 .flags = PHY_HAS_INTERRUPT, 1288 .flags = PHY_HAS_INTERRUPT,
1279 .probe = marvell_probe, 1289 .probe = marvell_probe,
1290 .config_init = &marvell_config_init,
1280 .config_aneg = &m88e1510_config_aneg, 1291 .config_aneg = &m88e1510_config_aneg,
1281 .read_status = &marvell_read_status, 1292 .read_status = &marvell_read_status,
1282 .ack_interrupt = &marvell_ack_interrupt, 1293 .ack_interrupt = &marvell_ack_interrupt,
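Taken together, the marvell.c hunks above move the marvell,reg-init DT handling out of m88e1510_config_aneg() into a shared marvell_config_init() helper, wire that helper up as .config_init for the listed drivers, and have the chip-specific m88e1116r/m88e3016 init paths chain into it instead of returning 0.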
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index bad3f005faee..e551f3a89cfd 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1410,7 +1410,7 @@ int genphy_config_init(struct phy_device *phydev)
1410 1410
1411 features = (SUPPORTED_TP | SUPPORTED_MII 1411 features = (SUPPORTED_TP | SUPPORTED_MII
1412 | SUPPORTED_AUI | SUPPORTED_FIBRE | 1412 | SUPPORTED_AUI | SUPPORTED_FIBRE |
1413 SUPPORTED_BNC); 1413 SUPPORTED_BNC | SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1414 1414
1415 /* Do we support autonegotiation? */ 1415 /* Do we support autonegotiation? */
1416 val = phy_read(phydev, MII_BMSR); 1416 val = phy_read(phydev, MII_BMSR);
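With SUPPORTED_Pause and SUPPORTED_Asym_Pause added to the base feature mask here, genphy_config_init() appears intended to stop stripping pause advertisement from drivers that declare it, which is what the bcm7xxx entries above rely on when they set PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause.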
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index f3c63022eb3c..4ddae8118c85 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -395,6 +395,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
395 395
396 if (!__pppoe_xmit(sk_pppox(relay_po), skb)) 396 if (!__pppoe_xmit(sk_pppox(relay_po), skb))
397 goto abort_put; 397 goto abort_put;
398
399 sock_put(sk_pppox(relay_po));
398 } else { 400 } else {
399 if (sock_queue_rcv_skb(sk, skb)) 401 if (sock_queue_rcv_skb(sk, skb))
400 goto abort_kfree; 402 goto abort_kfree;
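The one-line pppoe fix above balances the reference taken on the relay socket when the skb is handed off successfully; previously only the abort_put error path dropped it. A minimal stand-alone analogue of that pairing (plain C, illustrative names only, not the kernel API):

#include <assert.h>
#include <stdio.h>

struct item { int refcnt; };

static struct item *get_item(struct item *it) { it->refcnt++; return it; }
static void put_item(struct item *it)         { it->refcnt--; }

static int relay(struct item *it, int xmit_ok)
{
        get_item(it);            /* lookup takes a reference */
        if (!xmit_ok) {
                put_item(it);    /* error path already dropped it */
                return -1;
        }
        put_item(it);            /* mirrors the added sock_put(): drop on success too */
        return 0;
}

int main(void)
{
        struct item it = { .refcnt = 1 };

        relay(&it, 1);
        relay(&it, 0);
        assert(it.refcnt == 1);  /* balanced on both paths */
        puts("reference count balanced");
        return 0;
}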
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 7f83504dfa69..cdde59089f72 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -395,6 +395,10 @@ config USB_NET_RNDIS_HOST
395 The protocol specification is incomplete, and is controlled by 395 The protocol specification is incomplete, and is controlled by
396 (and for) Microsoft; it isn't an "Open" ecosystem or market. 396 (and for) Microsoft; it isn't an "Open" ecosystem or market.
397 397
398config USB_NET_CDC_SUBSET_ENABLE
399 tristate
400 depends on USB_NET_CDC_SUBSET
401
398config USB_NET_CDC_SUBSET 402config USB_NET_CDC_SUBSET
399 tristate "Simple USB Network Links (CDC Ethernet subset)" 403 tristate "Simple USB Network Links (CDC Ethernet subset)"
400 depends on USB_USBNET 404 depends on USB_USBNET
@@ -413,6 +417,7 @@ config USB_NET_CDC_SUBSET
413config USB_ALI_M5632 417config USB_ALI_M5632
414 bool "ALi M5632 based 'USB 2.0 Data Link' cables" 418 bool "ALi M5632 based 'USB 2.0 Data Link' cables"
415 depends on USB_NET_CDC_SUBSET 419 depends on USB_NET_CDC_SUBSET
420 select USB_NET_CDC_SUBSET_ENABLE
416 help 421 help
417 Choose this option if you're using a host-to-host cable 422 Choose this option if you're using a host-to-host cable
418 based on this design, which supports USB 2.0 high speed. 423 based on this design, which supports USB 2.0 high speed.
@@ -420,6 +425,7 @@ config USB_ALI_M5632
420config USB_AN2720 425config USB_AN2720
421 bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)" 426 bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
422 depends on USB_NET_CDC_SUBSET 427 depends on USB_NET_CDC_SUBSET
428 select USB_NET_CDC_SUBSET_ENABLE
423 help 429 help
424 Choose this option if you're using a host-to-host cable 430 Choose this option if you're using a host-to-host cable
425 based on this design. Note that AnchorChips is now a 431 based on this design. Note that AnchorChips is now a
@@ -428,6 +434,7 @@ config USB_AN2720
428config USB_BELKIN 434config USB_BELKIN
429 bool "eTEK based host-to-host cables (Advance, Belkin, ...)" 435 bool "eTEK based host-to-host cables (Advance, Belkin, ...)"
430 depends on USB_NET_CDC_SUBSET 436 depends on USB_NET_CDC_SUBSET
437 select USB_NET_CDC_SUBSET_ENABLE
431 default y 438 default y
432 help 439 help
433 Choose this option if you're using a host-to-host cable 440 Choose this option if you're using a host-to-host cable
@@ -437,6 +444,7 @@ config USB_BELKIN
437config USB_ARMLINUX 444config USB_ARMLINUX
438 bool "Embedded ARM Linux links (iPaq, ...)" 445 bool "Embedded ARM Linux links (iPaq, ...)"
439 depends on USB_NET_CDC_SUBSET 446 depends on USB_NET_CDC_SUBSET
447 select USB_NET_CDC_SUBSET_ENABLE
440 default y 448 default y
441 help 449 help
442 Choose this option to support the "usb-eth" networking driver 450 Choose this option to support the "usb-eth" networking driver
@@ -454,6 +462,7 @@ config USB_ARMLINUX
454config USB_EPSON2888 462config USB_EPSON2888
455 bool "Epson 2888 based firmware (DEVELOPMENT)" 463 bool "Epson 2888 based firmware (DEVELOPMENT)"
456 depends on USB_NET_CDC_SUBSET 464 depends on USB_NET_CDC_SUBSET
465 select USB_NET_CDC_SUBSET_ENABLE
457 help 466 help
458 Choose this option to support the usb networking links used 467 Choose this option to support the usb networking links used
459 by some sample firmware from Epson. 468 by some sample firmware from Epson.
@@ -461,6 +470,7 @@ config USB_EPSON2888
461config USB_KC2190 470config USB_KC2190
462 bool "KT Technology KC2190 based cables (InstaNet)" 471 bool "KT Technology KC2190 based cables (InstaNet)"
463 depends on USB_NET_CDC_SUBSET 472 depends on USB_NET_CDC_SUBSET
473 select USB_NET_CDC_SUBSET_ENABLE
464 help 474 help
465 Choose this option if you're using a host-to-host cable 475 Choose this option if you're using a host-to-host cable
466 with one of these chips. 476 with one of these chips.
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b5f04068dbe4..37fb46aee341 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
23obj-$(CONFIG_USB_NET_NET1080) += net1080.o 23obj-$(CONFIG_USB_NET_NET1080) += net1080.o
24obj-$(CONFIG_USB_NET_PLUSB) += plusb.o 24obj-$(CONFIG_USB_NET_PLUSB) += plusb.o
25obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o 25obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o
26obj-$(CONFIG_USB_NET_CDC_SUBSET) += cdc_subset.o 26obj-$(CONFIG_USB_NET_CDC_SUBSET_ENABLE) += cdc_subset.o
27obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o 27obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o
28obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o 28obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
29obj-$(CONFIG_USB_USBNET) += usbnet.o 29obj-$(CONFIG_USB_USBNET) += usbnet.o
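The Kconfig/Makefile pair above introduces a hidden helper symbol: USB_NET_CDC_SUBSET stays the user-visible tristate, but cdc_subset.o is now keyed off USB_NET_CDC_SUBSET_ENABLE, which is only set when at least one of the bool sub-options (ALI_M5632, AN2720, BELKIN, ARMLINUX, EPSON2888, KC2190) selects it, so the module is only built when it will actually contain at least one of those minidrivers.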
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 23e9880791fc..570deef53f74 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -637,6 +637,7 @@ static const struct usb_device_id products[] = {
637 637
638 /* 3. Combined interface devices matching on interface number */ 638 /* 3. Combined interface devices matching on interface number */
639 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ 639 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
640 {QMI_FIXED_INTF(0x05c6, 0x6001, 3)}, /* 4G LTE usb-modem U901 */
640 {QMI_FIXED_INTF(0x05c6, 0x7000, 0)}, 641 {QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
641 {QMI_FIXED_INTF(0x05c6, 0x7001, 1)}, 642 {QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
642 {QMI_FIXED_INTF(0x05c6, 0x7002, 1)}, 643 {QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 221a53025fd0..72ba8ae7f09a 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -377,7 +377,7 @@ union Vmxnet3_GenericDesc {
377#define VMXNET3_TX_RING_MAX_SIZE 4096 377#define VMXNET3_TX_RING_MAX_SIZE 4096
378#define VMXNET3_TC_RING_MAX_SIZE 4096 378#define VMXNET3_TC_RING_MAX_SIZE 4096
379#define VMXNET3_RX_RING_MAX_SIZE 4096 379#define VMXNET3_RX_RING_MAX_SIZE 4096
380#define VMXNET3_RX_RING2_MAX_SIZE 2048 380#define VMXNET3_RX_RING2_MAX_SIZE 4096
381#define VMXNET3_RC_RING_MAX_SIZE 8192 381#define VMXNET3_RC_RING_MAX_SIZE 8192
382 382
383/* a list of reasons for queue stop */ 383/* a list of reasons for queue stop */
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index bdb8a6c0f8aa..729c344e6774 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.5.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k"
73 73
 74/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040500 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040600
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a31cd954b308..e6944b29588e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2171,9 +2171,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
2171#endif 2171#endif
2172 } 2172 }
2173 2173
2174 if (vxlan->flags & VXLAN_F_COLLECT_METADATA && 2174 if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
2175 info && info->mode & IP_TUNNEL_INFO_TX) { 2175 if (info && info->mode & IP_TUNNEL_INFO_TX)
2176 vxlan_xmit_one(skb, dev, NULL, false); 2176 vxlan_xmit_one(skb, dev, NULL, false);
2177 else
2178 kfree_skb(skb);
2177 return NETDEV_TX_OK; 2179 return NETDEV_TX_OK;
2178 } 2180 }
2179 2181
@@ -2537,6 +2539,7 @@ static void vxlan_setup(struct net_device *dev)
2537 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 2539 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2538 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; 2540 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2539 netif_keep_dst(dev); 2541 netif_keep_dst(dev);
2542 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2540 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; 2543 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
2541 2544
2542 INIT_LIST_HEAD(&vxlan->next); 2545 INIT_LIST_HEAD(&vxlan->next);
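In the vxlan_xmit() hunk above, a collect-metadata device handed an skb without IP_TUNNEL_INFO_TX used to return NETDEV_TX_OK without freeing the buffer; the added kfree_skb() closes that leak. A stand-alone sketch of the consume-or-free contract (plain C with illustrative names, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

struct pkt { char data[64]; };

static int xmit_one(struct pkt *p) { free(p); return 0; }  /* consumes p */

static int xmit(struct pkt *p, int have_tx_info)
{
        if (have_tx_info)
                return xmit_one(p);
        free(p);        /* previously this path returned without freeing */
        return 0;
}

int main(void)
{
        xmit(calloc(1, sizeof(struct pkt)), 1);
        xmit(calloc(1, sizeof(struct pkt)), 0);
        puts("both paths release the buffer");
        return 0;
}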
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 7a72407208b1..629225980463 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1626,7 +1626,7 @@ try:
1626 if (state & Xpr) { 1626 if (state & Xpr) {
1627 void __iomem *scc_addr; 1627 void __iomem *scc_addr;
1628 unsigned long ring; 1628 unsigned long ring;
1629 int i; 1629 unsigned int i;
1630 1630
1631 /* 1631 /*
1632 * - the busy condition happens (sometimes); 1632 * - the busy condition happens (sometimes);
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 866067789330..7438fbeef744 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -53,7 +53,6 @@ config IWLWIFI_LEDS
53 53
54config IWLDVM 54config IWLDVM
55 tristate "Intel Wireless WiFi DVM Firmware support" 55 tristate "Intel Wireless WiFi DVM Firmware support"
56 depends on m
57 help 56 help
58 This is the driver that supports the DVM firmware. The list 57 This is the driver that supports the DVM firmware. The list
59 of the devices that use this firmware is available here: 58 of the devices that use this firmware is available here:
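Dropping "depends on m" lifts the module-only restriction on IWLDVM: the tristate can now be set to y (built-in) as well as m.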
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index c84a0299d43e..bce9b3420a13 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -70,12 +71,15 @@
70 71
71/* Highest firmware API version supported */ 72/* Highest firmware API version supported */
72#define IWL8000_UCODE_API_MAX 20 73#define IWL8000_UCODE_API_MAX 20
74#define IWL8265_UCODE_API_MAX 20
73 75
74/* Oldest version we won't warn about */ 76/* Oldest version we won't warn about */
75#define IWL8000_UCODE_API_OK 13 77#define IWL8000_UCODE_API_OK 13
78#define IWL8265_UCODE_API_OK 20
76 79
77/* Lowest firmware API version supported */ 80/* Lowest firmware API version supported */
78#define IWL8000_UCODE_API_MIN 13 81#define IWL8000_UCODE_API_MIN 13
82#define IWL8265_UCODE_API_MIN 20
79 83
80/* NVM versions */ 84/* NVM versions */
81#define IWL8000_NVM_VERSION 0x0a1d 85#define IWL8000_NVM_VERSION 0x0a1d
@@ -93,6 +97,10 @@
93#define IWL8000_MODULE_FIRMWARE(api) \ 97#define IWL8000_MODULE_FIRMWARE(api) \
94 IWL8000_FW_PRE "-" __stringify(api) ".ucode" 98 IWL8000_FW_PRE "-" __stringify(api) ".ucode"
95 99
100#define IWL8265_FW_PRE "iwlwifi-8265-"
101#define IWL8265_MODULE_FIRMWARE(api) \
102 IWL8265_FW_PRE __stringify(api) ".ucode"
103
96#define NVM_HW_SECTION_NUM_FAMILY_8000 10 104#define NVM_HW_SECTION_NUM_FAMILY_8000 10
97#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B" 105#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B"
98#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C" 106#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
@@ -144,10 +152,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
144 .support_tx_backoff = true, 152 .support_tx_backoff = true,
145}; 153};
146 154
147#define IWL_DEVICE_8000 \ 155#define IWL_DEVICE_8000_COMMON \
148 .ucode_api_max = IWL8000_UCODE_API_MAX, \
149 .ucode_api_ok = IWL8000_UCODE_API_OK, \
150 .ucode_api_min = IWL8000_UCODE_API_MIN, \
151 .device_family = IWL_DEVICE_FAMILY_8000, \ 156 .device_family = IWL_DEVICE_FAMILY_8000, \
152 .max_inst_size = IWL60_RTC_INST_SIZE, \ 157 .max_inst_size = IWL60_RTC_INST_SIZE, \
153 .max_data_size = IWL60_RTC_DATA_SIZE, \ 158 .max_data_size = IWL60_RTC_DATA_SIZE, \
@@ -167,10 +172,28 @@ static const struct iwl_tt_params iwl8000_tt_params = {
167 .thermal_params = &iwl8000_tt_params, \ 172 .thermal_params = &iwl8000_tt_params, \
168 .apmg_not_supported = true 173 .apmg_not_supported = true
169 174
175#define IWL_DEVICE_8000 \
176 IWL_DEVICE_8000_COMMON, \
177 .ucode_api_max = IWL8000_UCODE_API_MAX, \
178 .ucode_api_ok = IWL8000_UCODE_API_OK, \
179 .ucode_api_min = IWL8000_UCODE_API_MIN \
180
181#define IWL_DEVICE_8260 \
182 IWL_DEVICE_8000_COMMON, \
183 .ucode_api_max = IWL8000_UCODE_API_MAX, \
184 .ucode_api_ok = IWL8000_UCODE_API_OK, \
185 .ucode_api_min = IWL8000_UCODE_API_MIN \
186
187#define IWL_DEVICE_8265 \
188 IWL_DEVICE_8000_COMMON, \
189 .ucode_api_max = IWL8265_UCODE_API_MAX, \
190 .ucode_api_ok = IWL8265_UCODE_API_OK, \
191 .ucode_api_min = IWL8265_UCODE_API_MIN \
192
170const struct iwl_cfg iwl8260_2n_cfg = { 193const struct iwl_cfg iwl8260_2n_cfg = {
171 .name = "Intel(R) Dual Band Wireless N 8260", 194 .name = "Intel(R) Dual Band Wireless N 8260",
172 .fw_name_pre = IWL8000_FW_PRE, 195 .fw_name_pre = IWL8000_FW_PRE,
173 IWL_DEVICE_8000, 196 IWL_DEVICE_8260,
174 .ht_params = &iwl8000_ht_params, 197 .ht_params = &iwl8000_ht_params,
175 .nvm_ver = IWL8000_NVM_VERSION, 198 .nvm_ver = IWL8000_NVM_VERSION,
176 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 199 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -179,7 +202,7 @@ const struct iwl_cfg iwl8260_2n_cfg = {
179const struct iwl_cfg iwl8260_2ac_cfg = { 202const struct iwl_cfg iwl8260_2ac_cfg = {
180 .name = "Intel(R) Dual Band Wireless AC 8260", 203 .name = "Intel(R) Dual Band Wireless AC 8260",
181 .fw_name_pre = IWL8000_FW_PRE, 204 .fw_name_pre = IWL8000_FW_PRE,
182 IWL_DEVICE_8000, 205 IWL_DEVICE_8260,
183 .ht_params = &iwl8000_ht_params, 206 .ht_params = &iwl8000_ht_params,
184 .nvm_ver = IWL8000_NVM_VERSION, 207 .nvm_ver = IWL8000_NVM_VERSION,
185 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 208 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -188,8 +211,8 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
188 211
189const struct iwl_cfg iwl8265_2ac_cfg = { 212const struct iwl_cfg iwl8265_2ac_cfg = {
190 .name = "Intel(R) Dual Band Wireless AC 8265", 213 .name = "Intel(R) Dual Band Wireless AC 8265",
191 .fw_name_pre = IWL8000_FW_PRE, 214 .fw_name_pre = IWL8265_FW_PRE,
192 IWL_DEVICE_8000, 215 IWL_DEVICE_8265,
193 .ht_params = &iwl8000_ht_params, 216 .ht_params = &iwl8000_ht_params,
194 .nvm_ver = IWL8000_NVM_VERSION, 217 .nvm_ver = IWL8000_NVM_VERSION,
195 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 218 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -209,7 +232,7 @@ const struct iwl_cfg iwl4165_2ac_cfg = {
209const struct iwl_cfg iwl8260_2ac_sdio_cfg = { 232const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
210 .name = "Intel(R) Dual Band Wireless-AC 8260", 233 .name = "Intel(R) Dual Band Wireless-AC 8260",
211 .fw_name_pre = IWL8000_FW_PRE, 234 .fw_name_pre = IWL8000_FW_PRE,
212 IWL_DEVICE_8000, 235 IWL_DEVICE_8260,
213 .ht_params = &iwl8000_ht_params, 236 .ht_params = &iwl8000_ht_params,
214 .nvm_ver = IWL8000_NVM_VERSION, 237 .nvm_ver = IWL8000_NVM_VERSION,
215 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 238 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -236,3 +259,4 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
236}; 259};
237 260
238MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK)); 261MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
262MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_OK));
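Note the asymmetry in the firmware-name macros above: IWL8265_FW_PRE already ends in a dash, so its MODULE_FIRMWARE variant concatenates the API number directly, whereas the 8000 macro inserts the "-" itself. A small stand-alone check of the expansion, assuming the API_OK value of 20 defined above:

#include <stdio.h>

#define __stringify_1(x)  #x
#define __stringify(x)    __stringify_1(x)

#define IWL8265_FW_PRE "iwlwifi-8265-"
#define IWL8265_MODULE_FIRMWARE(api) \
        IWL8265_FW_PRE __stringify(api) ".ucode"

int main(void)
{
        puts(IWL8265_MODULE_FIRMWARE(20));   /* prints iwlwifi-8265-20.ucode */
        return 0;
}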
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 7acb49075683..ab4c2a0470b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -243,8 +243,10 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
243 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 243 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
244 char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev); 244 char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
245 245
246 snprintf(drv->firmware_name, sizeof(drv->firmware_name), 246 if (rev_step != 'A')
247 "%s%c-%s.ucode", name_pre, rev_step, tag); 247 snprintf(drv->firmware_name,
248 sizeof(drv->firmware_name), "%s%c-%s.ucode",
249 name_pre, rev_step, tag);
248 } 250 }
249 251
250 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n", 252 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
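The guard added above means only non-A hardware steps get the step letter folded into the firmware file name; for A-step parts the name assembled earlier in the function (outside this hunk) is left untouched. A stand-alone illustration of the format string, with prefix and tag values made up for the example:

#include <stdio.h>

int main(void)
{
        char name[64] = "iwlwifi-8000-13.ucode";   /* assumed pre-built default */
        char rev_step = 'B';

        if (rev_step != 'A')
                snprintf(name, sizeof(name), "%s%c-%s.ucode",
                         "iwlwifi-8000", rev_step, "13");

        puts(name);   /* iwlwifi-8000B-13.ucode */
        return 0;
}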
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 9a15642f80dd..ea1e177c2ea1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1298,6 +1298,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
1298 return -EBUSY; 1298 return -EBUSY;
1299 } 1299 }
1300 1300
1301 /* we don't support "match all" in the firmware */
1302 if (!req->n_match_sets)
1303 return -EOPNOTSUPP;
1304
1301 ret = iwl_mvm_check_running_scans(mvm, type); 1305 ret = iwl_mvm_check_running_scans(mvm, type);
1302 if (ret) 1306 if (ret)
1303 return ret; 1307 return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index cc3888e2700d..73c95594eabe 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -490,6 +490,15 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
490 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); 490 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
491} 491}
492 492
493static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
494{
495 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
496
497 IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
498 trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
499 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
500}
501
493static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) 502static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
494{ 503{
495 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 504 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index ccafbd8cf4b3..152cf9ad9566 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1438,9 +1438,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1438 inta & ~trans_pcie->inta_mask); 1438 inta & ~trans_pcie->inta_mask);
1439 } 1439 }
1440 1440
1441 /* Re-enable all interrupts */ 1441 /* we are loading the firmware, enable FH_TX interrupt only */
1442 /* only Re-enable if disabled by irq */ 1442 if (handled & CSR_INT_BIT_FH_TX)
1443 if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1443 iwl_enable_fw_load_int(trans);
1444 /* only Re-enable all interrupt if disabled by irq */
1445 else if (test_bit(STATUS_INT_ENABLED, &trans->status))
1444 iwl_enable_interrupts(trans); 1446 iwl_enable_interrupts(trans);
1445 /* Re-enable RF_KILL if it occurred */ 1447 /* Re-enable RF_KILL if it occurred */
1446 else if (handled & CSR_INT_BIT_RF_KILL) 1448 else if (handled & CSR_INT_BIT_RF_KILL)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index d60a467a983c..5a854c609477 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1021,82 +1021,6 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
1021 &first_ucode_section); 1021 &first_ucode_section);
1022} 1022}
1023 1023
1024static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1025 const struct fw_img *fw, bool run_in_rfkill)
1026{
1027 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1028 bool hw_rfkill;
1029 int ret;
1030
1031 mutex_lock(&trans_pcie->mutex);
1032
1033 /* Someone called stop_device, don't try to start_fw */
1034 if (trans_pcie->is_down) {
1035 IWL_WARN(trans,
1036 "Can't start_fw since the HW hasn't been started\n");
1037 ret = EIO;
1038 goto out;
1039 }
1040
1041 /* This may fail if AMT took ownership of the device */
1042 if (iwl_pcie_prepare_card_hw(trans)) {
1043 IWL_WARN(trans, "Exit HW not ready\n");
1044 ret = -EIO;
1045 goto out;
1046 }
1047
1048 iwl_enable_rfkill_int(trans);
1049
1050 /* If platform's RF_KILL switch is NOT set to KILL */
1051 hw_rfkill = iwl_is_rfkill_set(trans);
1052 if (hw_rfkill)
1053 set_bit(STATUS_RFKILL, &trans->status);
1054 else
1055 clear_bit(STATUS_RFKILL, &trans->status);
1056 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1057 if (hw_rfkill && !run_in_rfkill) {
1058 ret = -ERFKILL;
1059 goto out;
1060 }
1061
1062 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1063
1064 ret = iwl_pcie_nic_init(trans);
1065 if (ret) {
1066 IWL_ERR(trans, "Unable to init nic\n");
1067 goto out;
1068 }
1069
1070 /* make sure rfkill handshake bits are cleared */
1071 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1072 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1073 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1074
1075 /* clear (again), then enable host interrupts */
1076 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1077 iwl_enable_interrupts(trans);
1078
1079 /* really make sure rfkill handshake bits are cleared */
1080 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1081 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1082
1083 /* Load the given image to the HW */
1084 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1085 ret = iwl_pcie_load_given_ucode_8000(trans, fw);
1086 else
1087 ret = iwl_pcie_load_given_ucode(trans, fw);
1088
1089out:
1090 mutex_unlock(&trans_pcie->mutex);
1091 return ret;
1092}
1093
1094static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1095{
1096 iwl_pcie_reset_ict(trans);
1097 iwl_pcie_tx_start(trans, scd_addr);
1098}
1099
1100static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) 1024static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1101{ 1025{
1102 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1026 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1127,7 +1051,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1127 * already dead. 1051 * already dead.
1128 */ 1052 */
1129 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { 1053 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
1130 IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); 1054 IWL_DEBUG_INFO(trans,
1055 "DEVICE_ENABLED bit was set and is now cleared\n");
1131 iwl_pcie_tx_stop(trans); 1056 iwl_pcie_tx_stop(trans);
1132 iwl_pcie_rx_stop(trans); 1057 iwl_pcie_rx_stop(trans);
1133 1058
@@ -1161,7 +1086,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1161 iwl_disable_interrupts(trans); 1086 iwl_disable_interrupts(trans);
1162 spin_unlock(&trans_pcie->irq_lock); 1087 spin_unlock(&trans_pcie->irq_lock);
1163 1088
1164
1165 /* clear all status bits */ 1089 /* clear all status bits */
1166 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1090 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1167 clear_bit(STATUS_INT_ENABLED, &trans->status); 1091 clear_bit(STATUS_INT_ENABLED, &trans->status);
@@ -1194,10 +1118,116 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1194 if (hw_rfkill != was_hw_rfkill) 1118 if (hw_rfkill != was_hw_rfkill)
1195 iwl_trans_pcie_rf_kill(trans, hw_rfkill); 1119 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1196 1120
1197 /* re-take ownership to prevent other users from stealing the deivce */ 1121 /* re-take ownership to prevent other users from stealing the device */
1198 iwl_pcie_prepare_card_hw(trans); 1122 iwl_pcie_prepare_card_hw(trans);
1199} 1123}
1200 1124
1125static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1126 const struct fw_img *fw, bool run_in_rfkill)
1127{
1128 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1129 bool hw_rfkill;
1130 int ret;
1131
1132 /* This may fail if AMT took ownership of the device */
1133 if (iwl_pcie_prepare_card_hw(trans)) {
1134 IWL_WARN(trans, "Exit HW not ready\n");
1135 ret = -EIO;
1136 goto out;
1137 }
1138
1139 iwl_enable_rfkill_int(trans);
1140
1141 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1142
1143 /*
1144 * We enabled the RF-Kill interrupt and the handler may very
1145 * well be running. Disable the interrupts to make sure no other
1146 * interrupt can be fired.
1147 */
1148 iwl_disable_interrupts(trans);
1149
1150 /* Make sure it finished running */
1151 synchronize_irq(trans_pcie->pci_dev->irq);
1152
1153 mutex_lock(&trans_pcie->mutex);
1154
1155 /* If platform's RF_KILL switch is NOT set to KILL */
1156 hw_rfkill = iwl_is_rfkill_set(trans);
1157 if (hw_rfkill)
1158 set_bit(STATUS_RFKILL, &trans->status);
1159 else
1160 clear_bit(STATUS_RFKILL, &trans->status);
1161 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1162 if (hw_rfkill && !run_in_rfkill) {
1163 ret = -ERFKILL;
1164 goto out;
1165 }
1166
1167 /* Someone called stop_device, don't try to start_fw */
1168 if (trans_pcie->is_down) {
1169 IWL_WARN(trans,
1170 "Can't start_fw since the HW hasn't been started\n");
1171 ret = -EIO;
1172 goto out;
1173 }
1174
1175 /* make sure rfkill handshake bits are cleared */
1176 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1177 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1178 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1179
1180 /* clear (again), then enable host interrupts */
1181 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1182
1183 ret = iwl_pcie_nic_init(trans);
1184 if (ret) {
1185 IWL_ERR(trans, "Unable to init nic\n");
1186 goto out;
1187 }
1188
1189 /*
1190 * Now, we load the firmware and don't want to be interrupted, even
1191 * by the RF-Kill interrupt (hence mask all the interrupt besides the
1192 * FH_TX interrupt which is needed to load the firmware). If the
1193 * RF-Kill switch is toggled, we will find out after having loaded
1194 * the firmware and return the proper value to the caller.
1195 */
1196 iwl_enable_fw_load_int(trans);
1197
1198 /* really make sure rfkill handshake bits are cleared */
1199 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1200 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1201
1202 /* Load the given image to the HW */
1203 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1204 ret = iwl_pcie_load_given_ucode_8000(trans, fw);
1205 else
1206 ret = iwl_pcie_load_given_ucode(trans, fw);
1207 iwl_enable_interrupts(trans);
1208
1209 /* re-check RF-Kill state since we may have missed the interrupt */
1210 hw_rfkill = iwl_is_rfkill_set(trans);
1211 if (hw_rfkill)
1212 set_bit(STATUS_RFKILL, &trans->status);
1213 else
1214 clear_bit(STATUS_RFKILL, &trans->status);
1215
1216 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1217 if (hw_rfkill && !run_in_rfkill)
1218 ret = -ERFKILL;
1219
1220out:
1221 mutex_unlock(&trans_pcie->mutex);
1222 return ret;
1223}
1224
1225static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1226{
1227 iwl_pcie_reset_ict(trans);
1228 iwl_pcie_tx_start(trans, scd_addr);
1229}
1230
1201static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) 1231static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1202{ 1232{
1203 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1233 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
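The reordered start_fw path above narrows the interrupt mask to the FH_TX source for the duration of the firmware load and only widens it again afterwards, at which point the RF-kill switch state is re-read because its interrupt may have been missed. A stand-alone sketch of that gating (bit positions are illustrative, not the real CSR layout):

#include <stdint.h>
#include <stdio.h>

#define INT_BIT_FH_TX  (1u << 27)    /* assumed bit, for illustration only */
#define INT_ALL        0xffffffffu

static uint32_t int_mask;

static void enable_fw_load_int(void) { int_mask = INT_BIT_FH_TX; }
static void enable_interrupts(void)  { int_mask = INT_ALL; }
static int  read_rfkill_switch(void) { return 0; }  /* stub */

int main(void)
{
        enable_fw_load_int();
        printf("mask while loading firmware: %#010x\n", int_mask);
        /* ... firmware sections are pushed, completions arrive via FH_TX ... */
        enable_interrupts();
        printf("mask after load: %#010x, rfkill=%d (re-checked)\n",
               int_mask, read_rfkill_switch());
        return 0;
}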
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index 74c14ce28238..28f7010e7108 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -138,6 +138,11 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
138 ((wireless_mode == WIRELESS_MODE_N_5G) || 138 ((wireless_mode == WIRELESS_MODE_N_5G) ||
139 (wireless_mode == WIRELESS_MODE_N_24G))) 139 (wireless_mode == WIRELESS_MODE_N_24G)))
140 rate->flags |= IEEE80211_TX_RC_MCS; 140 rate->flags |= IEEE80211_TX_RC_MCS;
141 if (sta && sta->vht_cap.vht_supported &&
142 (wireless_mode == WIRELESS_MODE_AC_5G ||
143 wireless_mode == WIRELESS_MODE_AC_24G ||
144 wireless_mode == WIRELESS_MODE_AC_ONLY))
145 rate->flags |= IEEE80211_TX_RC_VHT_MCS;
141 } 146 }
142} 147}
143 148
diff --git a/drivers/net/wireless/ti/wlcore/io.c b/drivers/net/wireless/ti/wlcore/io.c
index 9ac118e727e9..564ca750c5ee 100644
--- a/drivers/net/wireless/ti/wlcore/io.c
+++ b/drivers/net/wireless/ti/wlcore/io.c
@@ -175,14 +175,14 @@ int wlcore_set_partition(struct wl1271 *wl,
175 if (ret < 0) 175 if (ret < 0)
176 goto out; 176 goto out;
177 177
178 /* We don't need the size of the last partition, as it is
179 * automatically calculated based on the total memory size and
180 * the sizes of the previous partitions.
181 */
178 ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start); 182 ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
179 if (ret < 0) 183 if (ret < 0)
180 goto out; 184 goto out;
181 185
182 ret = wlcore_raw_write32(wl, HW_PART3_SIZE_ADDR, p->mem3.size);
183 if (ret < 0)
184 goto out;
185
186out: 186out:
187 return ret; 187 return ret;
188} 188}
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 6c257b54f415..10cf3747694d 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -36,8 +36,8 @@
36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12) 36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16) 37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20) 38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
39#define HW_PART3_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 24) 39#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
40#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 28) 40
41#define HW_ACCESS_REGISTER_SIZE 4 41#define HW_ACCESS_REGISTER_SIZE 4
42 42
43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000 43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
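The io.c and io.h hunks go together: since the size of the last partition is derived from the total memory size and the earlier partitions, the write to HW_PART3_SIZE_ADDR is dropped and the header now places HW_PART3_START_ADDR at offset +24, the slot previously labelled as the size register.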
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 430a929f048b..a0e8cc8dcc67 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -44,6 +44,8 @@
44 44
45#include <linux/timecounter.h> 45#include <linux/timecounter.h>
46 46
47#define DEFAULT_UAR_PAGE_SHIFT 12
48
47#define MAX_MSIX_P_PORT 17 49#define MAX_MSIX_P_PORT 17
48#define MAX_MSIX 64 50#define MAX_MSIX 64
49#define MIN_MSIX_P_PORT 5 51#define MIN_MSIX_P_PORT 5
@@ -856,6 +858,7 @@ struct mlx4_dev {
856 u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; 858 u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
857 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; 859 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
858 struct mlx4_vf_dev *dev_vfs; 860 struct mlx4_vf_dev *dev_vfs;
861 u8 uar_page_shift;
859}; 862};
860 863
861struct mlx4_clock_params { 864struct mlx4_clock_params {
@@ -1528,4 +1531,14 @@ int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
1528int mlx4_get_internal_clock_params(struct mlx4_dev *dev, 1531int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1529 struct mlx4_clock_params *params); 1532 struct mlx4_clock_params *params);
1530 1533
1534static inline int mlx4_to_hw_uar_index(struct mlx4_dev *dev, int index)
1535{
1536 return (index << (PAGE_SHIFT - dev->uar_page_shift));
1537}
1538
1539static inline int mlx4_get_num_reserved_uar(struct mlx4_dev *dev)
1540{
1541 /* The first 128 UARs are used for EQ doorbells */
1542 return (128 >> (PAGE_SHIFT - dev->uar_page_shift));
1543}
1531#endif /* MLX4_DEVICE_H */ 1544#endif /* MLX4_DEVICE_H */
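A worked example of the two helpers added above, as stand-alone C. With 4K kernel pages backing 4K UARs nothing changes; with 64K kernel pages backing 4K UARs (a shift difference of 4) each kernel page holds 16 UARs, so the HW index becomes sw_index << 4 and the 128 EQ-doorbell UARs collapse into 128 >> 4 = 8 reserved kernel pages:

#include <stdio.h>

static int to_hw_uar_index(int idx, int page_shift, int uar_page_shift)
{
        return idx << (page_shift - uar_page_shift);
}

static int num_reserved_uar(int page_shift, int uar_page_shift)
{
        return 128 >> (page_shift - uar_page_shift);
}

int main(void)
{
        printf("4K/4K:  hw(5)=%d reserved=%d\n",
               to_hw_uar_index(5, 12, 12), num_reserved_uar(12, 12));
        printf("64K/4K: hw(5)=%d reserved=%d\n",
               to_hw_uar_index(5, 16, 12), num_reserved_uar(16, 12));
        return 0;
}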
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 231ab6bcea76..51f1e540fc2b 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -207,15 +207,15 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
207 u8 outer_dmac[0x1]; 207 u8 outer_dmac[0x1];
208 u8 outer_smac[0x1]; 208 u8 outer_smac[0x1];
209 u8 outer_ether_type[0x1]; 209 u8 outer_ether_type[0x1];
210 u8 reserved_0[0x1]; 210 u8 reserved_at_3[0x1];
211 u8 outer_first_prio[0x1]; 211 u8 outer_first_prio[0x1];
212 u8 outer_first_cfi[0x1]; 212 u8 outer_first_cfi[0x1];
213 u8 outer_first_vid[0x1]; 213 u8 outer_first_vid[0x1];
214 u8 reserved_1[0x1]; 214 u8 reserved_at_7[0x1];
215 u8 outer_second_prio[0x1]; 215 u8 outer_second_prio[0x1];
216 u8 outer_second_cfi[0x1]; 216 u8 outer_second_cfi[0x1];
217 u8 outer_second_vid[0x1]; 217 u8 outer_second_vid[0x1];
218 u8 reserved_2[0x1]; 218 u8 reserved_at_b[0x1];
219 u8 outer_sip[0x1]; 219 u8 outer_sip[0x1];
220 u8 outer_dip[0x1]; 220 u8 outer_dip[0x1];
221 u8 outer_frag[0x1]; 221 u8 outer_frag[0x1];
@@ -230,21 +230,21 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
230 u8 outer_gre_protocol[0x1]; 230 u8 outer_gre_protocol[0x1];
231 u8 outer_gre_key[0x1]; 231 u8 outer_gre_key[0x1];
232 u8 outer_vxlan_vni[0x1]; 232 u8 outer_vxlan_vni[0x1];
233 u8 reserved_3[0x5]; 233 u8 reserved_at_1a[0x5];
234 u8 source_eswitch_port[0x1]; 234 u8 source_eswitch_port[0x1];
235 235
236 u8 inner_dmac[0x1]; 236 u8 inner_dmac[0x1];
237 u8 inner_smac[0x1]; 237 u8 inner_smac[0x1];
238 u8 inner_ether_type[0x1]; 238 u8 inner_ether_type[0x1];
239 u8 reserved_4[0x1]; 239 u8 reserved_at_23[0x1];
240 u8 inner_first_prio[0x1]; 240 u8 inner_first_prio[0x1];
241 u8 inner_first_cfi[0x1]; 241 u8 inner_first_cfi[0x1];
242 u8 inner_first_vid[0x1]; 242 u8 inner_first_vid[0x1];
243 u8 reserved_5[0x1]; 243 u8 reserved_at_27[0x1];
244 u8 inner_second_prio[0x1]; 244 u8 inner_second_prio[0x1];
245 u8 inner_second_cfi[0x1]; 245 u8 inner_second_cfi[0x1];
246 u8 inner_second_vid[0x1]; 246 u8 inner_second_vid[0x1];
247 u8 reserved_6[0x1]; 247 u8 reserved_at_2b[0x1];
248 u8 inner_sip[0x1]; 248 u8 inner_sip[0x1];
249 u8 inner_dip[0x1]; 249 u8 inner_dip[0x1];
250 u8 inner_frag[0x1]; 250 u8 inner_frag[0x1];
@@ -256,37 +256,37 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
256 u8 inner_tcp_sport[0x1]; 256 u8 inner_tcp_sport[0x1];
257 u8 inner_tcp_dport[0x1]; 257 u8 inner_tcp_dport[0x1];
258 u8 inner_tcp_flags[0x1]; 258 u8 inner_tcp_flags[0x1];
259 u8 reserved_7[0x9]; 259 u8 reserved_at_37[0x9];
260 260
261 u8 reserved_8[0x40]; 261 u8 reserved_at_40[0x40];
262}; 262};
263 263
264struct mlx5_ifc_flow_table_prop_layout_bits { 264struct mlx5_ifc_flow_table_prop_layout_bits {
265 u8 ft_support[0x1]; 265 u8 ft_support[0x1];
266 u8 reserved_0[0x2]; 266 u8 reserved_at_1[0x2];
267 u8 flow_modify_en[0x1]; 267 u8 flow_modify_en[0x1];
268 u8 modify_root[0x1]; 268 u8 modify_root[0x1];
269 u8 identified_miss_table_mode[0x1]; 269 u8 identified_miss_table_mode[0x1];
270 u8 flow_table_modify[0x1]; 270 u8 flow_table_modify[0x1];
271 u8 reserved_1[0x19]; 271 u8 reserved_at_7[0x19];
272 272
273 u8 reserved_2[0x2]; 273 u8 reserved_at_20[0x2];
274 u8 log_max_ft_size[0x6]; 274 u8 log_max_ft_size[0x6];
275 u8 reserved_3[0x10]; 275 u8 reserved_at_28[0x10];
276 u8 max_ft_level[0x8]; 276 u8 max_ft_level[0x8];
277 277
278 u8 reserved_4[0x20]; 278 u8 reserved_at_40[0x20];
279 279
280 u8 reserved_5[0x18]; 280 u8 reserved_at_60[0x18];
281 u8 log_max_ft_num[0x8]; 281 u8 log_max_ft_num[0x8];
282 282
283 u8 reserved_6[0x18]; 283 u8 reserved_at_80[0x18];
284 u8 log_max_destination[0x8]; 284 u8 log_max_destination[0x8];
285 285
286 u8 reserved_7[0x18]; 286 u8 reserved_at_a0[0x18];
287 u8 log_max_flow[0x8]; 287 u8 log_max_flow[0x8];
288 288
289 u8 reserved_8[0x40]; 289 u8 reserved_at_c0[0x40];
290 290
291 struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support; 291 struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
292 292
@@ -298,13 +298,13 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
298 u8 receive[0x1]; 298 u8 receive[0x1];
299 u8 write[0x1]; 299 u8 write[0x1];
300 u8 read[0x1]; 300 u8 read[0x1];
301 u8 reserved_0[0x1]; 301 u8 reserved_at_4[0x1];
302 u8 srq_receive[0x1]; 302 u8 srq_receive[0x1];
303 u8 reserved_1[0x1a]; 303 u8 reserved_at_6[0x1a];
304}; 304};
305 305
306struct mlx5_ifc_ipv4_layout_bits { 306struct mlx5_ifc_ipv4_layout_bits {
307 u8 reserved_0[0x60]; 307 u8 reserved_at_0[0x60];
308 308
309 u8 ipv4[0x20]; 309 u8 ipv4[0x20];
310}; 310};
@@ -316,7 +316,7 @@ struct mlx5_ifc_ipv6_layout_bits {
316union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { 316union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
317 struct mlx5_ifc_ipv6_layout_bits ipv6_layout; 317 struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
318 struct mlx5_ifc_ipv4_layout_bits ipv4_layout; 318 struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
319 u8 reserved_0[0x80]; 319 u8 reserved_at_0[0x80];
320}; 320};
321 321
322struct mlx5_ifc_fte_match_set_lyr_2_4_bits { 322struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
@@ -336,15 +336,15 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
336 u8 ip_dscp[0x6]; 336 u8 ip_dscp[0x6];
337 u8 ip_ecn[0x2]; 337 u8 ip_ecn[0x2];
338 u8 vlan_tag[0x1]; 338 u8 vlan_tag[0x1];
339 u8 reserved_0[0x1]; 339 u8 reserved_at_91[0x1];
340 u8 frag[0x1]; 340 u8 frag[0x1];
341 u8 reserved_1[0x4]; 341 u8 reserved_at_93[0x4];
342 u8 tcp_flags[0x9]; 342 u8 tcp_flags[0x9];
343 343
344 u8 tcp_sport[0x10]; 344 u8 tcp_sport[0x10];
345 u8 tcp_dport[0x10]; 345 u8 tcp_dport[0x10];
346 346
347 u8 reserved_2[0x20]; 347 u8 reserved_at_c0[0x20];
348 348
349 u8 udp_sport[0x10]; 349 u8 udp_sport[0x10];
350 u8 udp_dport[0x10]; 350 u8 udp_dport[0x10];
@@ -355,9 +355,9 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
355}; 355};
356 356
357struct mlx5_ifc_fte_match_set_misc_bits { 357struct mlx5_ifc_fte_match_set_misc_bits {
358 u8 reserved_0[0x20]; 358 u8 reserved_at_0[0x20];
359 359
360 u8 reserved_1[0x10]; 360 u8 reserved_at_20[0x10];
361 u8 source_port[0x10]; 361 u8 source_port[0x10];
362 362
363 u8 outer_second_prio[0x3]; 363 u8 outer_second_prio[0x3];
@@ -369,31 +369,31 @@ struct mlx5_ifc_fte_match_set_misc_bits {
369 369
370 u8 outer_second_vlan_tag[0x1]; 370 u8 outer_second_vlan_tag[0x1];
371 u8 inner_second_vlan_tag[0x1]; 371 u8 inner_second_vlan_tag[0x1];
372 u8 reserved_2[0xe]; 372 u8 reserved_at_62[0xe];
373 u8 gre_protocol[0x10]; 373 u8 gre_protocol[0x10];
374 374
375 u8 gre_key_h[0x18]; 375 u8 gre_key_h[0x18];
376 u8 gre_key_l[0x8]; 376 u8 gre_key_l[0x8];
377 377
378 u8 vxlan_vni[0x18]; 378 u8 vxlan_vni[0x18];
379 u8 reserved_3[0x8]; 379 u8 reserved_at_b8[0x8];
380 380
381 u8 reserved_4[0x20]; 381 u8 reserved_at_c0[0x20];
382 382
383 u8 reserved_5[0xc]; 383 u8 reserved_at_e0[0xc];
384 u8 outer_ipv6_flow_label[0x14]; 384 u8 outer_ipv6_flow_label[0x14];
385 385
386 u8 reserved_6[0xc]; 386 u8 reserved_at_100[0xc];
387 u8 inner_ipv6_flow_label[0x14]; 387 u8 inner_ipv6_flow_label[0x14];
388 388
389 u8 reserved_7[0xe0]; 389 u8 reserved_at_120[0xe0];
390}; 390};
391 391
392struct mlx5_ifc_cmd_pas_bits { 392struct mlx5_ifc_cmd_pas_bits {
393 u8 pa_h[0x20]; 393 u8 pa_h[0x20];
394 394
395 u8 pa_l[0x14]; 395 u8 pa_l[0x14];
396 u8 reserved_0[0xc]; 396 u8 reserved_at_34[0xc];
397}; 397};
398 398
399struct mlx5_ifc_uint64_bits { 399struct mlx5_ifc_uint64_bits {
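The renames throughout this header follow one rule: reserved_<n> becomes reserved_at_<offset>, where the suffix is the field's starting bit offset in hex inside the structure, obtained by summing the widths of the preceding fields. For mlx5_ifc_cmd_pas_bits just above, pa_h occupies 0x20 bits and pa_l another 0x14, so the trailing pad starts at bit 0x34 and is renamed reserved_at_34. A trivial stand-alone check of that sum:

#include <stdio.h>

int main(void)
{
        unsigned pa_h_bits = 0x20, pa_l_bits = 0x14;

        printf("reserved_at_%x\n", pa_h_bits + pa_l_bits);   /* reserved_at_34 */
        return 0;
}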
@@ -418,31 +418,31 @@ enum {
418struct mlx5_ifc_ads_bits { 418struct mlx5_ifc_ads_bits {
419 u8 fl[0x1]; 419 u8 fl[0x1];
420 u8 free_ar[0x1]; 420 u8 free_ar[0x1];
421 u8 reserved_0[0xe]; 421 u8 reserved_at_2[0xe];
422 u8 pkey_index[0x10]; 422 u8 pkey_index[0x10];
423 423
424 u8 reserved_1[0x8]; 424 u8 reserved_at_20[0x8];
425 u8 grh[0x1]; 425 u8 grh[0x1];
426 u8 mlid[0x7]; 426 u8 mlid[0x7];
427 u8 rlid[0x10]; 427 u8 rlid[0x10];
428 428
429 u8 ack_timeout[0x5]; 429 u8 ack_timeout[0x5];
430 u8 reserved_2[0x3]; 430 u8 reserved_at_45[0x3];
431 u8 src_addr_index[0x8]; 431 u8 src_addr_index[0x8];
432 u8 reserved_3[0x4]; 432 u8 reserved_at_50[0x4];
433 u8 stat_rate[0x4]; 433 u8 stat_rate[0x4];
434 u8 hop_limit[0x8]; 434 u8 hop_limit[0x8];
435 435
436 u8 reserved_4[0x4]; 436 u8 reserved_at_60[0x4];
437 u8 tclass[0x8]; 437 u8 tclass[0x8];
438 u8 flow_label[0x14]; 438 u8 flow_label[0x14];
439 439
440 u8 rgid_rip[16][0x8]; 440 u8 rgid_rip[16][0x8];
441 441
442 u8 reserved_5[0x4]; 442 u8 reserved_at_100[0x4];
443 u8 f_dscp[0x1]; 443 u8 f_dscp[0x1];
444 u8 f_ecn[0x1]; 444 u8 f_ecn[0x1];
445 u8 reserved_6[0x1]; 445 u8 reserved_at_106[0x1];
446 u8 f_eth_prio[0x1]; 446 u8 f_eth_prio[0x1];
447 u8 ecn[0x2]; 447 u8 ecn[0x2];
448 u8 dscp[0x6]; 448 u8 dscp[0x6];
@@ -458,25 +458,25 @@ struct mlx5_ifc_ads_bits {
458}; 458};
459 459
460struct mlx5_ifc_flow_table_nic_cap_bits { 460struct mlx5_ifc_flow_table_nic_cap_bits {
461 u8 reserved_0[0x200]; 461 u8 reserved_at_0[0x200];
462 462
463 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; 463 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
464 464
465 u8 reserved_1[0x200]; 465 u8 reserved_at_400[0x200];
466 466
467 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer; 467 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
468 468
469 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit; 469 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
470 470
471 u8 reserved_2[0x200]; 471 u8 reserved_at_a00[0x200];
472 472
473 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; 473 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
474 474
475 u8 reserved_3[0x7200]; 475 u8 reserved_at_e00[0x7200];
476}; 476};
477 477
478struct mlx5_ifc_flow_table_eswitch_cap_bits { 478struct mlx5_ifc_flow_table_eswitch_cap_bits {
479 u8 reserved_0[0x200]; 479 u8 reserved_at_0[0x200];
480 480
481 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; 481 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
482 482
@@ -484,7 +484,7 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
484 484
485 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress; 485 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
486 486
487 u8 reserved_1[0x7800]; 487 u8 reserved_at_800[0x7800];
488}; 488};
489 489
490struct mlx5_ifc_e_switch_cap_bits { 490struct mlx5_ifc_e_switch_cap_bits {
@@ -493,9 +493,9 @@ struct mlx5_ifc_e_switch_cap_bits {
493 u8 vport_svlan_insert[0x1]; 493 u8 vport_svlan_insert[0x1];
494 u8 vport_cvlan_insert_if_not_exist[0x1]; 494 u8 vport_cvlan_insert_if_not_exist[0x1];
495 u8 vport_cvlan_insert_overwrite[0x1]; 495 u8 vport_cvlan_insert_overwrite[0x1];
496 u8 reserved_0[0x1b]; 496 u8 reserved_at_5[0x1b];
497 497
498 u8 reserved_1[0x7e0]; 498 u8 reserved_at_20[0x7e0];
499}; 499};
500 500
501struct mlx5_ifc_per_protocol_networking_offload_caps_bits { 501struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
@@ -504,51 +504,51 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
504 u8 lro_cap[0x1]; 504 u8 lro_cap[0x1];
505 u8 lro_psh_flag[0x1]; 505 u8 lro_psh_flag[0x1];
506 u8 lro_time_stamp[0x1]; 506 u8 lro_time_stamp[0x1];
507 u8 reserved_0[0x3]; 507 u8 reserved_at_5[0x3];
508 u8 self_lb_en_modifiable[0x1]; 508 u8 self_lb_en_modifiable[0x1];
509 u8 reserved_1[0x2]; 509 u8 reserved_at_9[0x2];
510 u8 max_lso_cap[0x5]; 510 u8 max_lso_cap[0x5];
511 u8 reserved_2[0x4]; 511 u8 reserved_at_10[0x4];
512 u8 rss_ind_tbl_cap[0x4]; 512 u8 rss_ind_tbl_cap[0x4];
513 u8 reserved_3[0x3]; 513 u8 reserved_at_18[0x3];
514 u8 tunnel_lso_const_out_ip_id[0x1]; 514 u8 tunnel_lso_const_out_ip_id[0x1];
515 u8 reserved_4[0x2]; 515 u8 reserved_at_1c[0x2];
516 u8 tunnel_statless_gre[0x1]; 516 u8 tunnel_statless_gre[0x1];
517 u8 tunnel_stateless_vxlan[0x1]; 517 u8 tunnel_stateless_vxlan[0x1];
518 518
519 u8 reserved_5[0x20]; 519 u8 reserved_at_20[0x20];
520 520
521 u8 reserved_6[0x10]; 521 u8 reserved_at_40[0x10];
522 u8 lro_min_mss_size[0x10]; 522 u8 lro_min_mss_size[0x10];
523 523
524 u8 reserved_7[0x120]; 524 u8 reserved_at_60[0x120];
525 525
526 u8 lro_timer_supported_periods[4][0x20]; 526 u8 lro_timer_supported_periods[4][0x20];
527 527
528 u8 reserved_8[0x600]; 528 u8 reserved_at_200[0x600];
529}; 529};
530 530
531struct mlx5_ifc_roce_cap_bits { 531struct mlx5_ifc_roce_cap_bits {
532 u8 roce_apm[0x1]; 532 u8 roce_apm[0x1];
533 u8 reserved_0[0x1f]; 533 u8 reserved_at_1[0x1f];
534 534
535 u8 reserved_1[0x60]; 535 u8 reserved_at_20[0x60];
536 536
537 u8 reserved_2[0xc]; 537 u8 reserved_at_80[0xc];
538 u8 l3_type[0x4]; 538 u8 l3_type[0x4];
539 u8 reserved_3[0x8]; 539 u8 reserved_at_90[0x8];
540 u8 roce_version[0x8]; 540 u8 roce_version[0x8];
541 541
542 u8 reserved_4[0x10]; 542 u8 reserved_at_a0[0x10];
543 u8 r_roce_dest_udp_port[0x10]; 543 u8 r_roce_dest_udp_port[0x10];
544 544
545 u8 r_roce_max_src_udp_port[0x10]; 545 u8 r_roce_max_src_udp_port[0x10];
546 u8 r_roce_min_src_udp_port[0x10]; 546 u8 r_roce_min_src_udp_port[0x10];
547 547
548 u8 reserved_5[0x10]; 548 u8 reserved_at_e0[0x10];
549 u8 roce_address_table_size[0x10]; 549 u8 roce_address_table_size[0x10];
550 550
551 u8 reserved_6[0x700]; 551 u8 reserved_at_100[0x700];
552}; 552};
553 553
554enum { 554enum {
@@ -576,35 +576,35 @@ enum {
576}; 576};
577 577
578struct mlx5_ifc_atomic_caps_bits { 578struct mlx5_ifc_atomic_caps_bits {
579 u8 reserved_0[0x40]; 579 u8 reserved_at_0[0x40];
580 580
581 u8 atomic_req_8B_endianess_mode[0x2]; 581 u8 atomic_req_8B_endianess_mode[0x2];
582 u8 reserved_1[0x4]; 582 u8 reserved_at_42[0x4];
583 u8 supported_atomic_req_8B_endianess_mode_1[0x1]; 583 u8 supported_atomic_req_8B_endianess_mode_1[0x1];
584 584
585 u8 reserved_2[0x19]; 585 u8 reserved_at_47[0x19];
586 586
587 u8 reserved_3[0x20]; 587 u8 reserved_at_60[0x20];
588 588
589 u8 reserved_4[0x10]; 589 u8 reserved_at_80[0x10];
590 u8 atomic_operations[0x10]; 590 u8 atomic_operations[0x10];
591 591
592 u8 reserved_5[0x10]; 592 u8 reserved_at_a0[0x10];
593 u8 atomic_size_qp[0x10]; 593 u8 atomic_size_qp[0x10];
594 594
595 u8 reserved_6[0x10]; 595 u8 reserved_at_c0[0x10];
596 u8 atomic_size_dc[0x10]; 596 u8 atomic_size_dc[0x10];
597 597
598 u8 reserved_7[0x720]; 598 u8 reserved_at_e0[0x720];
599}; 599};
600 600
601struct mlx5_ifc_odp_cap_bits { 601struct mlx5_ifc_odp_cap_bits {
602 u8 reserved_0[0x40]; 602 u8 reserved_at_0[0x40];
603 603
604 u8 sig[0x1]; 604 u8 sig[0x1];
605 u8 reserved_1[0x1f]; 605 u8 reserved_at_41[0x1f];
606 606
607 u8 reserved_2[0x20]; 607 u8 reserved_at_60[0x20];
608 608
609 struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps; 609 struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
610 610
@@ -612,7 +612,7 @@ struct mlx5_ifc_odp_cap_bits {
612 612
613 struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps; 613 struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
614 614
615 u8 reserved_3[0x720]; 615 u8 reserved_at_e0[0x720];
616}; 616};
617 617
618enum { 618enum {
@@ -660,55 +660,55 @@ enum {
660}; 660};
661 661
662struct mlx5_ifc_cmd_hca_cap_bits { 662struct mlx5_ifc_cmd_hca_cap_bits {
663 u8 reserved_0[0x80]; 663 u8 reserved_at_0[0x80];
664 664
665 u8 log_max_srq_sz[0x8]; 665 u8 log_max_srq_sz[0x8];
666 u8 log_max_qp_sz[0x8]; 666 u8 log_max_qp_sz[0x8];
667 u8 reserved_1[0xb]; 667 u8 reserved_at_90[0xb];
668 u8 log_max_qp[0x5]; 668 u8 log_max_qp[0x5];
669 669
670 u8 reserved_2[0xb]; 670 u8 reserved_at_a0[0xb];
671 u8 log_max_srq[0x5]; 671 u8 log_max_srq[0x5];
672 u8 reserved_3[0x10]; 672 u8 reserved_at_b0[0x10];
673 673
674 u8 reserved_4[0x8]; 674 u8 reserved_at_c0[0x8];
675 u8 log_max_cq_sz[0x8]; 675 u8 log_max_cq_sz[0x8];
676 u8 reserved_5[0xb]; 676 u8 reserved_at_d0[0xb];
677 u8 log_max_cq[0x5]; 677 u8 log_max_cq[0x5];
678 678
679 u8 log_max_eq_sz[0x8]; 679 u8 log_max_eq_sz[0x8];
680 u8 reserved_6[0x2]; 680 u8 reserved_at_e8[0x2];
681 u8 log_max_mkey[0x6]; 681 u8 log_max_mkey[0x6];
682 u8 reserved_7[0xc]; 682 u8 reserved_at_f0[0xc];
683 u8 log_max_eq[0x4]; 683 u8 log_max_eq[0x4];
684 684
685 u8 max_indirection[0x8]; 685 u8 max_indirection[0x8];
686 u8 reserved_8[0x1]; 686 u8 reserved_at_108[0x1];
687 u8 log_max_mrw_sz[0x7]; 687 u8 log_max_mrw_sz[0x7];
688 u8 reserved_9[0x2]; 688 u8 reserved_at_110[0x2];
689 u8 log_max_bsf_list_size[0x6]; 689 u8 log_max_bsf_list_size[0x6];
690 u8 reserved_10[0x2]; 690 u8 reserved_at_118[0x2];
691 u8 log_max_klm_list_size[0x6]; 691 u8 log_max_klm_list_size[0x6];
692 692
693 u8 reserved_11[0xa]; 693 u8 reserved_at_120[0xa];
694 u8 log_max_ra_req_dc[0x6]; 694 u8 log_max_ra_req_dc[0x6];
695 u8 reserved_12[0xa]; 695 u8 reserved_at_130[0xa];
696 u8 log_max_ra_res_dc[0x6]; 696 u8 log_max_ra_res_dc[0x6];
697 697
698 u8 reserved_13[0xa]; 698 u8 reserved_at_140[0xa];
699 u8 log_max_ra_req_qp[0x6]; 699 u8 log_max_ra_req_qp[0x6];
700 u8 reserved_14[0xa]; 700 u8 reserved_at_150[0xa];
701 u8 log_max_ra_res_qp[0x6]; 701 u8 log_max_ra_res_qp[0x6];
702 702
703 u8 pad_cap[0x1]; 703 u8 pad_cap[0x1];
704 u8 cc_query_allowed[0x1]; 704 u8 cc_query_allowed[0x1];
705 u8 cc_modify_allowed[0x1]; 705 u8 cc_modify_allowed[0x1];
706 u8 reserved_15[0xd]; 706 u8 reserved_at_163[0xd];
707 u8 gid_table_size[0x10]; 707 u8 gid_table_size[0x10];
708 708
709 u8 out_of_seq_cnt[0x1]; 709 u8 out_of_seq_cnt[0x1];
710 u8 vport_counters[0x1]; 710 u8 vport_counters[0x1];
711 u8 reserved_16[0x4]; 711 u8 reserved_at_182[0x4];
712 u8 max_qp_cnt[0xa]; 712 u8 max_qp_cnt[0xa];
713 u8 pkey_table_size[0x10]; 713 u8 pkey_table_size[0x10];
714 714
@@ -716,158 +716,158 @@ struct mlx5_ifc_cmd_hca_cap_bits {
716 u8 vhca_group_manager[0x1]; 716 u8 vhca_group_manager[0x1];
717 u8 ib_virt[0x1]; 717 u8 ib_virt[0x1];
718 u8 eth_virt[0x1]; 718 u8 eth_virt[0x1];
719 u8 reserved_17[0x1]; 719 u8 reserved_at_1a4[0x1];
720 u8 ets[0x1]; 720 u8 ets[0x1];
721 u8 nic_flow_table[0x1]; 721 u8 nic_flow_table[0x1];
722 u8 eswitch_flow_table[0x1]; 722 u8 eswitch_flow_table[0x1];
723 u8 early_vf_enable; 723 u8 early_vf_enable;
724 u8 reserved_18[0x2]; 724 u8 reserved_at_1a8[0x2];
725 u8 local_ca_ack_delay[0x5]; 725 u8 local_ca_ack_delay[0x5];
726 u8 reserved_19[0x6]; 726 u8 reserved_at_1af[0x6];
727 u8 port_type[0x2]; 727 u8 port_type[0x2];
728 u8 num_ports[0x8]; 728 u8 num_ports[0x8];
729 729
730 u8 reserved_20[0x3]; 730 u8 reserved_at_1bf[0x3];
731 u8 log_max_msg[0x5]; 731 u8 log_max_msg[0x5];
732 u8 reserved_21[0x18]; 732 u8 reserved_at_1c7[0x18];
733 733
734 u8 stat_rate_support[0x10]; 734 u8 stat_rate_support[0x10];
735 u8 reserved_22[0xc]; 735 u8 reserved_at_1ef[0xc];
736 u8 cqe_version[0x4]; 736 u8 cqe_version[0x4];
737 737
738 u8 compact_address_vector[0x1]; 738 u8 compact_address_vector[0x1];
739 u8 reserved_23[0xe]; 739 u8 reserved_at_200[0xe];
740 u8 drain_sigerr[0x1]; 740 u8 drain_sigerr[0x1];
741 u8 cmdif_checksum[0x2]; 741 u8 cmdif_checksum[0x2];
742 u8 sigerr_cqe[0x1]; 742 u8 sigerr_cqe[0x1];
743 u8 reserved_24[0x1]; 743 u8 reserved_at_212[0x1];
744 u8 wq_signature[0x1]; 744 u8 wq_signature[0x1];
745 u8 sctr_data_cqe[0x1]; 745 u8 sctr_data_cqe[0x1];
746 u8 reserved_25[0x1]; 746 u8 reserved_at_215[0x1];
747 u8 sho[0x1]; 747 u8 sho[0x1];
748 u8 tph[0x1]; 748 u8 tph[0x1];
749 u8 rf[0x1]; 749 u8 rf[0x1];
750 u8 dct[0x1]; 750 u8 dct[0x1];
751 u8 reserved_26[0x1]; 751 u8 reserved_at_21a[0x1];
752 u8 eth_net_offloads[0x1]; 752 u8 eth_net_offloads[0x1];
753 u8 roce[0x1]; 753 u8 roce[0x1];
754 u8 atomic[0x1]; 754 u8 atomic[0x1];
755 u8 reserved_27[0x1]; 755 u8 reserved_at_21e[0x1];
756 756
757 u8 cq_oi[0x1]; 757 u8 cq_oi[0x1];
758 u8 cq_resize[0x1]; 758 u8 cq_resize[0x1];
759 u8 cq_moderation[0x1]; 759 u8 cq_moderation[0x1];
760 u8 reserved_28[0x3]; 760 u8 reserved_at_222[0x3];
761 u8 cq_eq_remap[0x1]; 761 u8 cq_eq_remap[0x1];
762 u8 pg[0x1]; 762 u8 pg[0x1];
763 u8 block_lb_mc[0x1]; 763 u8 block_lb_mc[0x1];
764 u8 reserved_29[0x1]; 764 u8 reserved_at_228[0x1];
765 u8 scqe_break_moderation[0x1]; 765 u8 scqe_break_moderation[0x1];
766 u8 reserved_30[0x1]; 766 u8 reserved_at_22a[0x1];
767 u8 cd[0x1]; 767 u8 cd[0x1];
768 u8 reserved_31[0x1]; 768 u8 reserved_at_22c[0x1];
769 u8 apm[0x1]; 769 u8 apm[0x1];
770 u8 reserved_32[0x7]; 770 u8 reserved_at_22e[0x7];
771 u8 qkv[0x1]; 771 u8 qkv[0x1];
772 u8 pkv[0x1]; 772 u8 pkv[0x1];
773 u8 reserved_33[0x4]; 773 u8 reserved_at_237[0x4];
774 u8 xrc[0x1]; 774 u8 xrc[0x1];
775 u8 ud[0x1]; 775 u8 ud[0x1];
776 u8 uc[0x1]; 776 u8 uc[0x1];
777 u8 rc[0x1]; 777 u8 rc[0x1];
778 778
779 u8 reserved_34[0xa]; 779 u8 reserved_at_23f[0xa];
780 u8 uar_sz[0x6]; 780 u8 uar_sz[0x6];
781 u8 reserved_35[0x8]; 781 u8 reserved_at_24f[0x8];
782 u8 log_pg_sz[0x8]; 782 u8 log_pg_sz[0x8];
783 783
784 u8 bf[0x1]; 784 u8 bf[0x1];
785 u8 reserved_36[0x1]; 785 u8 reserved_at_260[0x1];
786 u8 pad_tx_eth_packet[0x1]; 786 u8 pad_tx_eth_packet[0x1];
787 u8 reserved_37[0x8]; 787 u8 reserved_at_262[0x8];
788 u8 log_bf_reg_size[0x5]; 788 u8 log_bf_reg_size[0x5];
789 u8 reserved_38[0x10]; 789 u8 reserved_at_26f[0x10];
790 790
791 u8 reserved_39[0x10]; 791 u8 reserved_at_27f[0x10];
792 u8 max_wqe_sz_sq[0x10]; 792 u8 max_wqe_sz_sq[0x10];
793 793
794 u8 reserved_40[0x10]; 794 u8 reserved_at_29f[0x10];
795 u8 max_wqe_sz_rq[0x10]; 795 u8 max_wqe_sz_rq[0x10];
796 796
797 u8 reserved_41[0x10]; 797 u8 reserved_at_2bf[0x10];
798 u8 max_wqe_sz_sq_dc[0x10]; 798 u8 max_wqe_sz_sq_dc[0x10];
799 799
800 u8 reserved_42[0x7]; 800 u8 reserved_at_2df[0x7];
801 u8 max_qp_mcg[0x19]; 801 u8 max_qp_mcg[0x19];
802 802
803 u8 reserved_43[0x18]; 803 u8 reserved_at_2ff[0x18];
804 u8 log_max_mcg[0x8]; 804 u8 log_max_mcg[0x8];
805 805
806 u8 reserved_44[0x3]; 806 u8 reserved_at_31f[0x3];
807 u8 log_max_transport_domain[0x5]; 807 u8 log_max_transport_domain[0x5];
808 u8 reserved_45[0x3]; 808 u8 reserved_at_327[0x3];
809 u8 log_max_pd[0x5]; 809 u8 log_max_pd[0x5];
810 u8 reserved_46[0xb]; 810 u8 reserved_at_32f[0xb];
811 u8 log_max_xrcd[0x5]; 811 u8 log_max_xrcd[0x5];
812 812
813 u8 reserved_47[0x20]; 813 u8 reserved_at_33f[0x20];
814 814
815 u8 reserved_48[0x3]; 815 u8 reserved_at_35f[0x3];
816 u8 log_max_rq[0x5]; 816 u8 log_max_rq[0x5];
817 u8 reserved_49[0x3]; 817 u8 reserved_at_367[0x3];
818 u8 log_max_sq[0x5]; 818 u8 log_max_sq[0x5];
819 u8 reserved_50[0x3]; 819 u8 reserved_at_36f[0x3];
820 u8 log_max_tir[0x5]; 820 u8 log_max_tir[0x5];
821 u8 reserved_51[0x3]; 821 u8 reserved_at_377[0x3];
822 u8 log_max_tis[0x5]; 822 u8 log_max_tis[0x5];
823 823
824 u8 basic_cyclic_rcv_wqe[0x1]; 824 u8 basic_cyclic_rcv_wqe[0x1];
825 u8 reserved_52[0x2]; 825 u8 reserved_at_380[0x2];
826 u8 log_max_rmp[0x5]; 826 u8 log_max_rmp[0x5];
827 u8 reserved_53[0x3]; 827 u8 reserved_at_387[0x3];
828 u8 log_max_rqt[0x5]; 828 u8 log_max_rqt[0x5];
829 u8 reserved_54[0x3]; 829 u8 reserved_at_38f[0x3];
830 u8 log_max_rqt_size[0x5]; 830 u8 log_max_rqt_size[0x5];
831 u8 reserved_55[0x3]; 831 u8 reserved_at_397[0x3];
832 u8 log_max_tis_per_sq[0x5]; 832 u8 log_max_tis_per_sq[0x5];
833 833
834 u8 reserved_56[0x3]; 834 u8 reserved_at_39f[0x3];
835 u8 log_max_stride_sz_rq[0x5]; 835 u8 log_max_stride_sz_rq[0x5];
836 u8 reserved_57[0x3]; 836 u8 reserved_at_3a7[0x3];
837 u8 log_min_stride_sz_rq[0x5]; 837 u8 log_min_stride_sz_rq[0x5];
838 u8 reserved_58[0x3]; 838 u8 reserved_at_3af[0x3];
839 u8 log_max_stride_sz_sq[0x5]; 839 u8 log_max_stride_sz_sq[0x5];
840 u8 reserved_59[0x3]; 840 u8 reserved_at_3b7[0x3];
841 u8 log_min_stride_sz_sq[0x5]; 841 u8 log_min_stride_sz_sq[0x5];
842 842
843 u8 reserved_60[0x1b]; 843 u8 reserved_at_3bf[0x1b];
844 u8 log_max_wq_sz[0x5]; 844 u8 log_max_wq_sz[0x5];
845 845
846 u8 nic_vport_change_event[0x1]; 846 u8 nic_vport_change_event[0x1];
847 u8 reserved_61[0xa]; 847 u8 reserved_at_3e0[0xa];
848 u8 log_max_vlan_list[0x5]; 848 u8 log_max_vlan_list[0x5];
849 u8 reserved_62[0x3]; 849 u8 reserved_at_3ef[0x3];
850 u8 log_max_current_mc_list[0x5]; 850 u8 log_max_current_mc_list[0x5];
851 u8 reserved_63[0x3]; 851 u8 reserved_at_3f7[0x3];
852 u8 log_max_current_uc_list[0x5]; 852 u8 log_max_current_uc_list[0x5];
853 853
854 u8 reserved_64[0x80]; 854 u8 reserved_at_3ff[0x80];
855 855
856 u8 reserved_65[0x3]; 856 u8 reserved_at_47f[0x3];
857 u8 log_max_l2_table[0x5]; 857 u8 log_max_l2_table[0x5];
858 u8 reserved_66[0x8]; 858 u8 reserved_at_487[0x8];
859 u8 log_uar_page_sz[0x10]; 859 u8 log_uar_page_sz[0x10];
860 860
861 u8 reserved_67[0x20]; 861 u8 reserved_at_49f[0x20];
862 u8 device_frequency_mhz[0x20]; 862 u8 device_frequency_mhz[0x20];
863 u8 device_frequency_khz[0x20]; 863 u8 device_frequency_khz[0x20];
864 u8 reserved_68[0x5f]; 864 u8 reserved_at_4ff[0x5f];
865 u8 cqe_zip[0x1]; 865 u8 cqe_zip[0x1];
866 866
867 u8 cqe_zip_timeout[0x10]; 867 u8 cqe_zip_timeout[0x10];
868 u8 cqe_zip_max_num[0x10]; 868 u8 cqe_zip_max_num[0x10];
869 869
870 u8 reserved_69[0x220]; 870 u8 reserved_at_57f[0x220];
871}; 871};
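Every hunk in this file follows one mechanical rule, visible by comparing the two columns above: a filler that used to be numbered sequentially (reserved_0, reserved_1, ...) becomes reserved_at_<offset>, where <offset> is the filler's bit position inside the structure, written in lower-case hex. Each u8 name[0xN] element in these layouts stands for N bits, so the new names can be re-derived from the field widths alone. The program below is a minimal standalone sketch of that derivation, not part of the kernel tree; the field list is copied from mlx5_ifc_cq_error_bits further down in this diff.

#include <stdio.h>
#include <string.h>

struct ifc_field {
        const char   *name;
        unsigned int  bits;
};

/* widths copied from mlx5_ifc_cq_error_bits; "reserved" entries are the fillers */
static const struct ifc_field cq_error[] = {
        { "reserved", 0x08 }, { "cqn",      0x18 },
        { "reserved", 0x20 },
        { "reserved", 0x18 }, { "syndrome", 0x08 },
        { "reserved", 0x80 },
};

int main(void)
{
        unsigned int i, off = 0;

        for (i = 0; i < sizeof(cq_error) / sizeof(cq_error[0]); i++) {
                if (!strcmp(cq_error[i].name, "reserved"))
                        printf("u8 reserved_at_%x[0x%x];\n", off, cq_error[i].bits);
                off += cq_error[i].bits;
        }
        return 0;       /* prints reserved_at_0, _20, _40 and _60, as in that hunk */
}

Running it prints the same four reserved_at_ names that appear in the right-hand column of the cq_error hunk.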
872 872
873enum mlx5_flow_destination_type { 873enum mlx5_flow_destination_type {
@@ -880,7 +880,7 @@ struct mlx5_ifc_dest_format_struct_bits {
880 u8 destination_type[0x8]; 880 u8 destination_type[0x8];
881 u8 destination_id[0x18]; 881 u8 destination_id[0x18];
882 882
883 u8 reserved_0[0x20]; 883 u8 reserved_at_20[0x20];
884}; 884};
885 885
886struct mlx5_ifc_fte_match_param_bits { 886struct mlx5_ifc_fte_match_param_bits {
@@ -890,7 +890,7 @@ struct mlx5_ifc_fte_match_param_bits {
890 890
891 struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers; 891 struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
892 892
893 u8 reserved_0[0xa00]; 893 u8 reserved_at_600[0xa00];
894}; 894};
895 895
896enum { 896enum {
@@ -922,18 +922,18 @@ struct mlx5_ifc_wq_bits {
922 u8 wq_signature[0x1]; 922 u8 wq_signature[0x1];
923 u8 end_padding_mode[0x2]; 923 u8 end_padding_mode[0x2];
924 u8 cd_slave[0x1]; 924 u8 cd_slave[0x1];
925 u8 reserved_0[0x18]; 925 u8 reserved_at_8[0x18];
926 926
927 u8 hds_skip_first_sge[0x1]; 927 u8 hds_skip_first_sge[0x1];
928 u8 log2_hds_buf_size[0x3]; 928 u8 log2_hds_buf_size[0x3];
929 u8 reserved_1[0x7]; 929 u8 reserved_at_24[0x7];
930 u8 page_offset[0x5]; 930 u8 page_offset[0x5];
931 u8 lwm[0x10]; 931 u8 lwm[0x10];
932 932
933 u8 reserved_2[0x8]; 933 u8 reserved_at_40[0x8];
934 u8 pd[0x18]; 934 u8 pd[0x18];
935 935
936 u8 reserved_3[0x8]; 936 u8 reserved_at_60[0x8];
937 u8 uar_page[0x18]; 937 u8 uar_page[0x18];
938 938
939 u8 dbr_addr[0x40]; 939 u8 dbr_addr[0x40];
@@ -942,60 +942,60 @@ struct mlx5_ifc_wq_bits {
942 942
943 u8 sw_counter[0x20]; 943 u8 sw_counter[0x20];
944 944
945 u8 reserved_4[0xc]; 945 u8 reserved_at_100[0xc];
946 u8 log_wq_stride[0x4]; 946 u8 log_wq_stride[0x4];
947 u8 reserved_5[0x3]; 947 u8 reserved_at_110[0x3];
948 u8 log_wq_pg_sz[0x5]; 948 u8 log_wq_pg_sz[0x5];
949 u8 reserved_6[0x3]; 949 u8 reserved_at_118[0x3];
950 u8 log_wq_sz[0x5]; 950 u8 log_wq_sz[0x5];
951 951
952 u8 reserved_7[0x4e0]; 952 u8 reserved_at_120[0x4e0];
953 953
954 struct mlx5_ifc_cmd_pas_bits pas[0]; 954 struct mlx5_ifc_cmd_pas_bits pas[0];
955}; 955};
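mlx5_ifc_wq_bits above is a good place to see why the rename is low-risk for the rest of the driver: driver code does not spell out numeric offsets, it goes through the accessor macros in include/linux/mlx5/device.h (MLX5_SET(), MLX5_GET() and friends, not shown in this diff), which derive the offset from the field name at compile time, so only the fillers, which nothing reads or writes, change names. The fragment below is a simplified, self-contained stand-in for those macros rather than the kernel implementation: it writes a field MSB-first at its bit offset, which matches the big-endian, most-significant-bit-first convention these layouts assume for fields that do not cross a 32-bit boundary. The offsets used (pd at bit 0x48, uar_page at 0x68, both 0x18 bits wide, total layout 0x600 bits) are read off the hunk above.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* write 'width' bits of 'val' into 'buf', MSB-first, starting at bit 'off' */
static void ifc_set(uint8_t *buf, unsigned int off, unsigned int width,
                    uint32_t val)
{
        unsigned int i;

        for (i = 0; i < width; i++) {
                unsigned int bit  = off + i;
                uint8_t      mask = (uint8_t)(1u << (7 - (bit & 7)));

                if (val & (1u << (width - 1 - i)))
                        buf[bit >> 3] |= mask;
                else
                        buf[bit >> 3] &= (uint8_t)~mask;
        }
}

int main(void)
{
        uint8_t wqc[0x600 / 8];                 /* mlx5_ifc_wq_bits is 0x600 bits */

        memset(wqc, 0, sizeof(wqc));
        ifc_set(wqc, 0x48, 0x18, 0x1234);       /* wq.pd       (bit 0x48, 24 bits) */
        ifc_set(wqc, 0x68, 0x18, 0x0042);       /* wq.uar_page (bit 0x68, 24 bits) */
        printf("dword 2 = %02x%02x%02x%02x\n", wqc[8], wqc[9], wqc[10], wqc[11]);
        return 0;                               /* prints "dword 2 = 00001234" */
}

In the driver proper the same write is typically a single MLX5_SET(wq, ...) call; the point is only that the field name, not a hand-maintained number, is the unit of reference, which is exactly what the reserved_at_ names now make explicit for the fillers as well.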
956 956
957struct mlx5_ifc_rq_num_bits { 957struct mlx5_ifc_rq_num_bits {
958 u8 reserved_0[0x8]; 958 u8 reserved_at_0[0x8];
959 u8 rq_num[0x18]; 959 u8 rq_num[0x18];
960}; 960};
961 961
962struct mlx5_ifc_mac_address_layout_bits { 962struct mlx5_ifc_mac_address_layout_bits {
963 u8 reserved_0[0x10]; 963 u8 reserved_at_0[0x10];
964 u8 mac_addr_47_32[0x10]; 964 u8 mac_addr_47_32[0x10];
965 965
966 u8 mac_addr_31_0[0x20]; 966 u8 mac_addr_31_0[0x20];
967}; 967};
968 968
969struct mlx5_ifc_vlan_layout_bits { 969struct mlx5_ifc_vlan_layout_bits {
970 u8 reserved_0[0x14]; 970 u8 reserved_at_0[0x14];
971 u8 vlan[0x0c]; 971 u8 vlan[0x0c];
972 972
973 u8 reserved_1[0x20]; 973 u8 reserved_at_20[0x20];
974}; 974};
975 975
976struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { 976struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
977 u8 reserved_0[0xa0]; 977 u8 reserved_at_0[0xa0];
978 978
979 u8 min_time_between_cnps[0x20]; 979 u8 min_time_between_cnps[0x20];
980 980
981 u8 reserved_1[0x12]; 981 u8 reserved_at_c0[0x12];
982 u8 cnp_dscp[0x6]; 982 u8 cnp_dscp[0x6];
983 u8 reserved_2[0x5]; 983 u8 reserved_at_d8[0x5];
984 u8 cnp_802p_prio[0x3]; 984 u8 cnp_802p_prio[0x3];
985 985
986 u8 reserved_3[0x720]; 986 u8 reserved_at_e0[0x720];
987}; 987};
988 988
989struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits { 989struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
990 u8 reserved_0[0x60]; 990 u8 reserved_at_0[0x60];
991 991
992 u8 reserved_1[0x4]; 992 u8 reserved_at_60[0x4];
993 u8 clamp_tgt_rate[0x1]; 993 u8 clamp_tgt_rate[0x1];
994 u8 reserved_2[0x3]; 994 u8 reserved_at_65[0x3];
995 u8 clamp_tgt_rate_after_time_inc[0x1]; 995 u8 clamp_tgt_rate_after_time_inc[0x1];
996 u8 reserved_3[0x17]; 996 u8 reserved_at_69[0x17];
997 997
998 u8 reserved_4[0x20]; 998 u8 reserved_at_80[0x20];
999 999
1000 u8 rpg_time_reset[0x20]; 1000 u8 rpg_time_reset[0x20];
1001 1001
@@ -1015,7 +1015,7 @@ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
1015 1015
1016 u8 rpg_min_rate[0x20]; 1016 u8 rpg_min_rate[0x20];
1017 1017
1018 u8 reserved_5[0xe0]; 1018 u8 reserved_at_1c0[0xe0];
1019 1019
1020 u8 rate_to_set_on_first_cnp[0x20]; 1020 u8 rate_to_set_on_first_cnp[0x20];
1021 1021
@@ -1025,15 +1025,15 @@ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
1025 1025
1026 u8 rate_reduce_monitor_period[0x20]; 1026 u8 rate_reduce_monitor_period[0x20];
1027 1027
1028 u8 reserved_6[0x20]; 1028 u8 reserved_at_320[0x20];
1029 1029
1030 u8 initial_alpha_value[0x20]; 1030 u8 initial_alpha_value[0x20];
1031 1031
1032 u8 reserved_7[0x4a0]; 1032 u8 reserved_at_360[0x4a0];
1033}; 1033};
1034 1034
1035struct mlx5_ifc_cong_control_802_1qau_rp_bits { 1035struct mlx5_ifc_cong_control_802_1qau_rp_bits {
1036 u8 reserved_0[0x80]; 1036 u8 reserved_at_0[0x80];
1037 1037
1038 u8 rppp_max_rps[0x20]; 1038 u8 rppp_max_rps[0x20];
1039 1039
@@ -1055,7 +1055,7 @@ struct mlx5_ifc_cong_control_802_1qau_rp_bits {
1055 1055
1056 u8 rpg_min_rate[0x20]; 1056 u8 rpg_min_rate[0x20];
1057 1057
1058 u8 reserved_1[0x640]; 1058 u8 reserved_at_1c0[0x640];
1059}; 1059};
1060 1060
1061enum { 1061enum {
@@ -1205,7 +1205,7 @@ struct mlx5_ifc_phys_layer_cntrs_bits {
1205 1205
1206 u8 successful_recovery_events[0x20]; 1206 u8 successful_recovery_events[0x20];
1207 1207
1208 u8 reserved_0[0x180]; 1208 u8 reserved_at_640[0x180];
1209}; 1209};
1210 1210
1211struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits { 1211struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
@@ -1213,7 +1213,7 @@ struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
1213 1213
1214 u8 transmit_queue_low[0x20]; 1214 u8 transmit_queue_low[0x20];
1215 1215
1216 u8 reserved_0[0x780]; 1216 u8 reserved_at_40[0x780];
1217}; 1217};
1218 1218
1219struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { 1219struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
@@ -1221,7 +1221,7 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
1221 1221
1222 u8 rx_octets_low[0x20]; 1222 u8 rx_octets_low[0x20];
1223 1223
1224 u8 reserved_0[0xc0]; 1224 u8 reserved_at_40[0xc0];
1225 1225
1226 u8 rx_frames_high[0x20]; 1226 u8 rx_frames_high[0x20];
1227 1227
@@ -1231,7 +1231,7 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
1231 1231
1232 u8 tx_octets_low[0x20]; 1232 u8 tx_octets_low[0x20];
1233 1233
1234 u8 reserved_1[0xc0]; 1234 u8 reserved_at_180[0xc0];
1235 1235
1236 u8 tx_frames_high[0x20]; 1236 u8 tx_frames_high[0x20];
1237 1237
@@ -1257,7 +1257,7 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
1257 1257
1258 u8 rx_pause_transition_low[0x20]; 1258 u8 rx_pause_transition_low[0x20];
1259 1259
1260 u8 reserved_2[0x400]; 1260 u8 reserved_at_3c0[0x400];
1261}; 1261};
1262 1262
1263struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { 1263struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
@@ -1265,7 +1265,7 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
1265 1265
1266 u8 port_transmit_wait_low[0x20]; 1266 u8 port_transmit_wait_low[0x20];
1267 1267
1268 u8 reserved_0[0x780]; 1268 u8 reserved_at_40[0x780];
1269}; 1269};
1270 1270
1271struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { 1271struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
@@ -1333,7 +1333,7 @@ struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
1333 1333
1334 u8 dot3out_pause_frames_low[0x20]; 1334 u8 dot3out_pause_frames_low[0x20];
1335 1335
1336 u8 reserved_0[0x3c0]; 1336 u8 reserved_at_400[0x3c0];
1337}; 1337};
1338 1338
1339struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits { 1339struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
@@ -1421,7 +1421,7 @@ struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
1421 1421
1422 u8 ether_stats_pkts8192to10239octets_low[0x20]; 1422 u8 ether_stats_pkts8192to10239octets_low[0x20];
1423 1423
1424 u8 reserved_0[0x280]; 1424 u8 reserved_at_540[0x280];
1425}; 1425};
1426 1426
1427struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits { 1427struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
@@ -1477,7 +1477,7 @@ struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
1477 1477
1478 u8 if_out_broadcast_pkts_low[0x20]; 1478 u8 if_out_broadcast_pkts_low[0x20];
1479 1479
1480 u8 reserved_0[0x480]; 1480 u8 reserved_at_340[0x480];
1481}; 1481};
1482 1482
1483struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { 1483struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
@@ -1557,54 +1557,54 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
1557 1557
1558 u8 a_pause_mac_ctrl_frames_transmitted_low[0x20]; 1558 u8 a_pause_mac_ctrl_frames_transmitted_low[0x20];
1559 1559
1560 u8 reserved_0[0x300]; 1560 u8 reserved_at_4c0[0x300];
1561}; 1561};
1562 1562
1563struct mlx5_ifc_cmd_inter_comp_event_bits { 1563struct mlx5_ifc_cmd_inter_comp_event_bits {
1564 u8 command_completion_vector[0x20]; 1564 u8 command_completion_vector[0x20];
1565 1565
1566 u8 reserved_0[0xc0]; 1566 u8 reserved_at_20[0xc0];
1567}; 1567};
1568 1568
1569struct mlx5_ifc_stall_vl_event_bits { 1569struct mlx5_ifc_stall_vl_event_bits {
1570 u8 reserved_0[0x18]; 1570 u8 reserved_at_0[0x18];
1571 u8 port_num[0x1]; 1571 u8 port_num[0x1];
1572 u8 reserved_1[0x3]; 1572 u8 reserved_at_19[0x3];
1573 u8 vl[0x4]; 1573 u8 vl[0x4];
1574 1574
1575 u8 reserved_2[0xa0]; 1575 u8 reserved_at_20[0xa0];
1576}; 1576};
1577 1577
1578struct mlx5_ifc_db_bf_congestion_event_bits { 1578struct mlx5_ifc_db_bf_congestion_event_bits {
1579 u8 event_subtype[0x8]; 1579 u8 event_subtype[0x8];
1580 u8 reserved_0[0x8]; 1580 u8 reserved_at_8[0x8];
1581 u8 congestion_level[0x8]; 1581 u8 congestion_level[0x8];
1582 u8 reserved_1[0x8]; 1582 u8 reserved_at_18[0x8];
1583 1583
1584 u8 reserved_2[0xa0]; 1584 u8 reserved_at_20[0xa0];
1585}; 1585};
1586 1586
1587struct mlx5_ifc_gpio_event_bits { 1587struct mlx5_ifc_gpio_event_bits {
1588 u8 reserved_0[0x60]; 1588 u8 reserved_at_0[0x60];
1589 1589
1590 u8 gpio_event_hi[0x20]; 1590 u8 gpio_event_hi[0x20];
1591 1591
1592 u8 gpio_event_lo[0x20]; 1592 u8 gpio_event_lo[0x20];
1593 1593
1594 u8 reserved_1[0x40]; 1594 u8 reserved_at_a0[0x40];
1595}; 1595};
1596 1596
1597struct mlx5_ifc_port_state_change_event_bits { 1597struct mlx5_ifc_port_state_change_event_bits {
1598 u8 reserved_0[0x40]; 1598 u8 reserved_at_0[0x40];
1599 1599
1600 u8 port_num[0x4]; 1600 u8 port_num[0x4];
1601 u8 reserved_1[0x1c]; 1601 u8 reserved_at_44[0x1c];
1602 1602
1603 u8 reserved_2[0x80]; 1603 u8 reserved_at_60[0x80];
1604}; 1604};
1605 1605
1606struct mlx5_ifc_dropped_packet_logged_bits { 1606struct mlx5_ifc_dropped_packet_logged_bits {
1607 u8 reserved_0[0xe0]; 1607 u8 reserved_at_0[0xe0];
1608}; 1608};
1609 1609
1610enum { 1610enum {
@@ -1613,15 +1613,15 @@ enum {
1613}; 1613};
1614 1614
1615struct mlx5_ifc_cq_error_bits { 1615struct mlx5_ifc_cq_error_bits {
1616 u8 reserved_0[0x8]; 1616 u8 reserved_at_0[0x8];
1617 u8 cqn[0x18]; 1617 u8 cqn[0x18];
1618 1618
1619 u8 reserved_1[0x20]; 1619 u8 reserved_at_20[0x20];
1620 1620
1621 u8 reserved_2[0x18]; 1621 u8 reserved_at_40[0x18];
1622 u8 syndrome[0x8]; 1622 u8 syndrome[0x8];
1623 1623
1624 u8 reserved_3[0x80]; 1624 u8 reserved_at_60[0x80];
1625}; 1625};
1626 1626
1627struct mlx5_ifc_rdma_page_fault_event_bits { 1627struct mlx5_ifc_rdma_page_fault_event_bits {
@@ -1629,14 +1629,14 @@ struct mlx5_ifc_rdma_page_fault_event_bits {
1629 1629
1630 u8 r_key[0x20]; 1630 u8 r_key[0x20];
1631 1631
1632 u8 reserved_0[0x10]; 1632 u8 reserved_at_40[0x10];
1633 u8 packet_len[0x10]; 1633 u8 packet_len[0x10];
1634 1634
1635 u8 rdma_op_len[0x20]; 1635 u8 rdma_op_len[0x20];
1636 1636
1637 u8 rdma_va[0x40]; 1637 u8 rdma_va[0x40];
1638 1638
1639 u8 reserved_1[0x5]; 1639 u8 reserved_at_c0[0x5];
1640 u8 rdma[0x1]; 1640 u8 rdma[0x1];
1641 u8 write[0x1]; 1641 u8 write[0x1];
1642 u8 requestor[0x1]; 1642 u8 requestor[0x1];
@@ -1646,15 +1646,15 @@ struct mlx5_ifc_rdma_page_fault_event_bits {
1646struct mlx5_ifc_wqe_associated_page_fault_event_bits { 1646struct mlx5_ifc_wqe_associated_page_fault_event_bits {
1647 u8 bytes_committed[0x20]; 1647 u8 bytes_committed[0x20];
1648 1648
1649 u8 reserved_0[0x10]; 1649 u8 reserved_at_20[0x10];
1650 u8 wqe_index[0x10]; 1650 u8 wqe_index[0x10];
1651 1651
1652 u8 reserved_1[0x10]; 1652 u8 reserved_at_40[0x10];
1653 u8 len[0x10]; 1653 u8 len[0x10];
1654 1654
1655 u8 reserved_2[0x60]; 1655 u8 reserved_at_60[0x60];
1656 1656
1657 u8 reserved_3[0x5]; 1657 u8 reserved_at_c0[0x5];
1658 u8 rdma[0x1]; 1658 u8 rdma[0x1];
1659 u8 write_read[0x1]; 1659 u8 write_read[0x1];
1660 u8 requestor[0x1]; 1660 u8 requestor[0x1];
@@ -1662,26 +1662,26 @@ struct mlx5_ifc_wqe_associated_page_fault_event_bits {
1662}; 1662};
1663 1663
1664struct mlx5_ifc_qp_events_bits { 1664struct mlx5_ifc_qp_events_bits {
1665 u8 reserved_0[0xa0]; 1665 u8 reserved_at_0[0xa0];
1666 1666
1667 u8 type[0x8]; 1667 u8 type[0x8];
1668 u8 reserved_1[0x18]; 1668 u8 reserved_at_a8[0x18];
1669 1669
1670 u8 reserved_2[0x8]; 1670 u8 reserved_at_c0[0x8];
1671 u8 qpn_rqn_sqn[0x18]; 1671 u8 qpn_rqn_sqn[0x18];
1672}; 1672};
1673 1673
1674struct mlx5_ifc_dct_events_bits { 1674struct mlx5_ifc_dct_events_bits {
1675 u8 reserved_0[0xc0]; 1675 u8 reserved_at_0[0xc0];
1676 1676
1677 u8 reserved_1[0x8]; 1677 u8 reserved_at_c0[0x8];
1678 u8 dct_number[0x18]; 1678 u8 dct_number[0x18];
1679}; 1679};
1680 1680
1681struct mlx5_ifc_comp_event_bits { 1681struct mlx5_ifc_comp_event_bits {
1682 u8 reserved_0[0xc0]; 1682 u8 reserved_at_0[0xc0];
1683 1683
1684 u8 reserved_1[0x8]; 1684 u8 reserved_at_c0[0x8];
1685 u8 cq_number[0x18]; 1685 u8 cq_number[0x18];
1686}; 1686};
1687 1687
@@ -1754,41 +1754,41 @@ enum {
1754 1754
1755struct mlx5_ifc_qpc_bits { 1755struct mlx5_ifc_qpc_bits {
1756 u8 state[0x4]; 1756 u8 state[0x4];
1757 u8 reserved_0[0x4]; 1757 u8 reserved_at_4[0x4];
1758 u8 st[0x8]; 1758 u8 st[0x8];
1759 u8 reserved_1[0x3]; 1759 u8 reserved_at_10[0x3];
1760 u8 pm_state[0x2]; 1760 u8 pm_state[0x2];
1761 u8 reserved_2[0x7]; 1761 u8 reserved_at_15[0x7];
1762 u8 end_padding_mode[0x2]; 1762 u8 end_padding_mode[0x2];
1763 u8 reserved_3[0x2]; 1763 u8 reserved_at_1e[0x2];
1764 1764
1765 u8 wq_signature[0x1]; 1765 u8 wq_signature[0x1];
1766 u8 block_lb_mc[0x1]; 1766 u8 block_lb_mc[0x1];
1767 u8 atomic_like_write_en[0x1]; 1767 u8 atomic_like_write_en[0x1];
1768 u8 latency_sensitive[0x1]; 1768 u8 latency_sensitive[0x1];
1769 u8 reserved_4[0x1]; 1769 u8 reserved_at_24[0x1];
1770 u8 drain_sigerr[0x1]; 1770 u8 drain_sigerr[0x1];
1771 u8 reserved_5[0x2]; 1771 u8 reserved_at_26[0x2];
1772 u8 pd[0x18]; 1772 u8 pd[0x18];
1773 1773
1774 u8 mtu[0x3]; 1774 u8 mtu[0x3];
1775 u8 log_msg_max[0x5]; 1775 u8 log_msg_max[0x5];
1776 u8 reserved_6[0x1]; 1776 u8 reserved_at_48[0x1];
1777 u8 log_rq_size[0x4]; 1777 u8 log_rq_size[0x4];
1778 u8 log_rq_stride[0x3]; 1778 u8 log_rq_stride[0x3];
1779 u8 no_sq[0x1]; 1779 u8 no_sq[0x1];
1780 u8 log_sq_size[0x4]; 1780 u8 log_sq_size[0x4];
1781 u8 reserved_7[0x6]; 1781 u8 reserved_at_55[0x6];
1782 u8 rlky[0x1]; 1782 u8 rlky[0x1];
1783 u8 reserved_8[0x4]; 1783 u8 reserved_at_5c[0x4];
1784 1784
1785 u8 counter_set_id[0x8]; 1785 u8 counter_set_id[0x8];
1786 u8 uar_page[0x18]; 1786 u8 uar_page[0x18];
1787 1787
1788 u8 reserved_9[0x8]; 1788 u8 reserved_at_80[0x8];
1789 u8 user_index[0x18]; 1789 u8 user_index[0x18];
1790 1790
1791 u8 reserved_10[0x3]; 1791 u8 reserved_at_a0[0x3];
1792 u8 log_page_size[0x5]; 1792 u8 log_page_size[0x5];
1793 u8 remote_qpn[0x18]; 1793 u8 remote_qpn[0x18];
1794 1794
@@ -1797,66 +1797,66 @@ struct mlx5_ifc_qpc_bits {
1797 struct mlx5_ifc_ads_bits secondary_address_path; 1797 struct mlx5_ifc_ads_bits secondary_address_path;
1798 1798
1799 u8 log_ack_req_freq[0x4]; 1799 u8 log_ack_req_freq[0x4];
1800 u8 reserved_11[0x4]; 1800 u8 reserved_at_384[0x4];
1801 u8 log_sra_max[0x3]; 1801 u8 log_sra_max[0x3];
1802 u8 reserved_12[0x2]; 1802 u8 reserved_at_38b[0x2];
1803 u8 retry_count[0x3]; 1803 u8 retry_count[0x3];
1804 u8 rnr_retry[0x3]; 1804 u8 rnr_retry[0x3];
1805 u8 reserved_13[0x1]; 1805 u8 reserved_at_393[0x1];
1806 u8 fre[0x1]; 1806 u8 fre[0x1];
1807 u8 cur_rnr_retry[0x3]; 1807 u8 cur_rnr_retry[0x3];
1808 u8 cur_retry_count[0x3]; 1808 u8 cur_retry_count[0x3];
1809 u8 reserved_14[0x5]; 1809 u8 reserved_at_39b[0x5];
1810 1810
1811 u8 reserved_15[0x20]; 1811 u8 reserved_at_3a0[0x20];
1812 1812
1813 u8 reserved_16[0x8]; 1813 u8 reserved_at_3c0[0x8];
1814 u8 next_send_psn[0x18]; 1814 u8 next_send_psn[0x18];
1815 1815
1816 u8 reserved_17[0x8]; 1816 u8 reserved_at_3e0[0x8];
1817 u8 cqn_snd[0x18]; 1817 u8 cqn_snd[0x18];
1818 1818
1819 u8 reserved_18[0x40]; 1819 u8 reserved_at_400[0x40];
1820 1820
1821 u8 reserved_19[0x8]; 1821 u8 reserved_at_440[0x8];
1822 u8 last_acked_psn[0x18]; 1822 u8 last_acked_psn[0x18];
1823 1823
1824 u8 reserved_20[0x8]; 1824 u8 reserved_at_460[0x8];
1825 u8 ssn[0x18]; 1825 u8 ssn[0x18];
1826 1826
1827 u8 reserved_21[0x8]; 1827 u8 reserved_at_480[0x8];
1828 u8 log_rra_max[0x3]; 1828 u8 log_rra_max[0x3];
1829 u8 reserved_22[0x1]; 1829 u8 reserved_at_48b[0x1];
1830 u8 atomic_mode[0x4]; 1830 u8 atomic_mode[0x4];
1831 u8 rre[0x1]; 1831 u8 rre[0x1];
1832 u8 rwe[0x1]; 1832 u8 rwe[0x1];
1833 u8 rae[0x1]; 1833 u8 rae[0x1];
1834 u8 reserved_23[0x1]; 1834 u8 reserved_at_493[0x1];
1835 u8 page_offset[0x6]; 1835 u8 page_offset[0x6];
1836 u8 reserved_24[0x3]; 1836 u8 reserved_at_49a[0x3];
1837 u8 cd_slave_receive[0x1]; 1837 u8 cd_slave_receive[0x1];
1838 u8 cd_slave_send[0x1]; 1838 u8 cd_slave_send[0x1];
1839 u8 cd_master[0x1]; 1839 u8 cd_master[0x1];
1840 1840
1841 u8 reserved_25[0x3]; 1841 u8 reserved_at_4a0[0x3];
1842 u8 min_rnr_nak[0x5]; 1842 u8 min_rnr_nak[0x5];
1843 u8 next_rcv_psn[0x18]; 1843 u8 next_rcv_psn[0x18];
1844 1844
1845 u8 reserved_26[0x8]; 1845 u8 reserved_at_4c0[0x8];
1846 u8 xrcd[0x18]; 1846 u8 xrcd[0x18];
1847 1847
1848 u8 reserved_27[0x8]; 1848 u8 reserved_at_4e0[0x8];
1849 u8 cqn_rcv[0x18]; 1849 u8 cqn_rcv[0x18];
1850 1850
1851 u8 dbr_addr[0x40]; 1851 u8 dbr_addr[0x40];
1852 1852
1853 u8 q_key[0x20]; 1853 u8 q_key[0x20];
1854 1854
1855 u8 reserved_28[0x5]; 1855 u8 reserved_at_560[0x5];
1856 u8 rq_type[0x3]; 1856 u8 rq_type[0x3];
1857 u8 srqn_rmpn[0x18]; 1857 u8 srqn_rmpn[0x18];
1858 1858
1859 u8 reserved_29[0x8]; 1859 u8 reserved_at_580[0x8];
1860 u8 rmsn[0x18]; 1860 u8 rmsn[0x18];
1861 1861
1862 u8 hw_sq_wqebb_counter[0x10]; 1862 u8 hw_sq_wqebb_counter[0x10];
@@ -1866,33 +1866,33 @@ struct mlx5_ifc_qpc_bits {
1866 1866
1867 u8 sw_rq_counter[0x20]; 1867 u8 sw_rq_counter[0x20];
1868 1868
1869 u8 reserved_30[0x20]; 1869 u8 reserved_at_600[0x20];
1870 1870
1871 u8 reserved_31[0xf]; 1871 u8 reserved_at_620[0xf];
1872 u8 cgs[0x1]; 1872 u8 cgs[0x1];
1873 u8 cs_req[0x8]; 1873 u8 cs_req[0x8];
1874 u8 cs_res[0x8]; 1874 u8 cs_res[0x8];
1875 1875
1876 u8 dc_access_key[0x40]; 1876 u8 dc_access_key[0x40];
1877 1877
1878 u8 reserved_32[0xc0]; 1878 u8 reserved_at_680[0xc0];
1879}; 1879};
1880 1880
1881struct mlx5_ifc_roce_addr_layout_bits { 1881struct mlx5_ifc_roce_addr_layout_bits {
1882 u8 source_l3_address[16][0x8]; 1882 u8 source_l3_address[16][0x8];
1883 1883
1884 u8 reserved_0[0x3]; 1884 u8 reserved_at_80[0x3];
1885 u8 vlan_valid[0x1]; 1885 u8 vlan_valid[0x1];
1886 u8 vlan_id[0xc]; 1886 u8 vlan_id[0xc];
1887 u8 source_mac_47_32[0x10]; 1887 u8 source_mac_47_32[0x10];
1888 1888
1889 u8 source_mac_31_0[0x20]; 1889 u8 source_mac_31_0[0x20];
1890 1890
1891 u8 reserved_1[0x14]; 1891 u8 reserved_at_c0[0x14];
1892 u8 roce_l3_type[0x4]; 1892 u8 roce_l3_type[0x4];
1893 u8 roce_version[0x8]; 1893 u8 roce_version[0x8];
1894 1894
1895 u8 reserved_2[0x20]; 1895 u8 reserved_at_e0[0x20];
1896}; 1896};
1897 1897
1898union mlx5_ifc_hca_cap_union_bits { 1898union mlx5_ifc_hca_cap_union_bits {
@@ -1904,7 +1904,7 @@ union mlx5_ifc_hca_cap_union_bits {
1904 struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; 1904 struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
1905 struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; 1905 struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
1906 struct mlx5_ifc_e_switch_cap_bits e_switch_cap; 1906 struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
1907 u8 reserved_0[0x8000]; 1907 u8 reserved_at_0[0x8000];
1908}; 1908};
1909 1909
1910enum { 1910enum {
@@ -1914,24 +1914,24 @@ enum {
1914}; 1914};
1915 1915
1916struct mlx5_ifc_flow_context_bits { 1916struct mlx5_ifc_flow_context_bits {
1917 u8 reserved_0[0x20]; 1917 u8 reserved_at_0[0x20];
1918 1918
1919 u8 group_id[0x20]; 1919 u8 group_id[0x20];
1920 1920
1921 u8 reserved_1[0x8]; 1921 u8 reserved_at_40[0x8];
1922 u8 flow_tag[0x18]; 1922 u8 flow_tag[0x18];
1923 1923
1924 u8 reserved_2[0x10]; 1924 u8 reserved_at_60[0x10];
1925 u8 action[0x10]; 1925 u8 action[0x10];
1926 1926
1927 u8 reserved_3[0x8]; 1927 u8 reserved_at_80[0x8];
1928 u8 destination_list_size[0x18]; 1928 u8 destination_list_size[0x18];
1929 1929
1930 u8 reserved_4[0x160]; 1930 u8 reserved_at_a0[0x160];
1931 1931
1932 struct mlx5_ifc_fte_match_param_bits match_value; 1932 struct mlx5_ifc_fte_match_param_bits match_value;
1933 1933
1934 u8 reserved_5[0x600]; 1934 u8 reserved_at_1200[0x600];
1935 1935
1936 struct mlx5_ifc_dest_format_struct_bits destination[0]; 1936 struct mlx5_ifc_dest_format_struct_bits destination[0];
1937}; 1937};
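In these layouts the array length is the field width in bits, so the total declared width of a pseudo-struct is its size in bits, and the kernel's size helpers (MLX5_ST_SZ_BYTES()/MLX5_ST_SZ_DW() in include/linux/mlx5/device.h, not part of this diff) effectively divide that total by 8 or 32. A side effect of the offset-based names is that the closing filler now states the total on its face: its offset plus its width is the structure's bit length, ignoring trailing zero-length arrays such as destination[0] above. The standalone sketch below just does that arithmetic for two of the layouts in this diff; it is not kernel code.

#include <stdio.h>

int main(void)
{
        /* mlx5_ifc_flow_context_bits: last filler is reserved_at_1200[0x600] */
        unsigned int flow_context_bits = 0x1200 + 0x600;
        /* mlx5_ifc_fte_match_param_bits: last filler is reserved_at_600[0xa00] */
        unsigned int match_param_bits  = 0x600 + 0xa00;

        printf("flow_context: %u bytes, %u dwords\n",
               flow_context_bits / 8, flow_context_bits / 32);
        printf("match_param:  %u bytes, %u dwords\n",
               match_param_bits / 8, match_param_bits / 32);
        return 0;       /* 768 bytes / 192 dwords and 512 bytes / 128 dwords */
}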
@@ -1944,43 +1944,43 @@ enum {
1944struct mlx5_ifc_xrc_srqc_bits { 1944struct mlx5_ifc_xrc_srqc_bits {
1945 u8 state[0x4]; 1945 u8 state[0x4];
1946 u8 log_xrc_srq_size[0x4]; 1946 u8 log_xrc_srq_size[0x4];
1947 u8 reserved_0[0x18]; 1947 u8 reserved_at_8[0x18];
1948 1948
1949 u8 wq_signature[0x1]; 1949 u8 wq_signature[0x1];
1950 u8 cont_srq[0x1]; 1950 u8 cont_srq[0x1];
1951 u8 reserved_1[0x1]; 1951 u8 reserved_at_22[0x1];
1952 u8 rlky[0x1]; 1952 u8 rlky[0x1];
1953 u8 basic_cyclic_rcv_wqe[0x1]; 1953 u8 basic_cyclic_rcv_wqe[0x1];
1954 u8 log_rq_stride[0x3]; 1954 u8 log_rq_stride[0x3];
1955 u8 xrcd[0x18]; 1955 u8 xrcd[0x18];
1956 1956
1957 u8 page_offset[0x6]; 1957 u8 page_offset[0x6];
1958 u8 reserved_2[0x2]; 1958 u8 reserved_at_46[0x2];
1959 u8 cqn[0x18]; 1959 u8 cqn[0x18];
1960 1960
1961 u8 reserved_3[0x20]; 1961 u8 reserved_at_60[0x20];
1962 1962
1963 u8 user_index_equal_xrc_srqn[0x1]; 1963 u8 user_index_equal_xrc_srqn[0x1];
1964 u8 reserved_4[0x1]; 1964 u8 reserved_at_81[0x1];
1965 u8 log_page_size[0x6]; 1965 u8 log_page_size[0x6];
1966 u8 user_index[0x18]; 1966 u8 user_index[0x18];
1967 1967
1968 u8 reserved_5[0x20]; 1968 u8 reserved_at_a0[0x20];
1969 1969
1970 u8 reserved_6[0x8]; 1970 u8 reserved_at_c0[0x8];
1971 u8 pd[0x18]; 1971 u8 pd[0x18];
1972 1972
1973 u8 lwm[0x10]; 1973 u8 lwm[0x10];
1974 u8 wqe_cnt[0x10]; 1974 u8 wqe_cnt[0x10];
1975 1975
1976 u8 reserved_7[0x40]; 1976 u8 reserved_at_100[0x40];
1977 1977
1978 u8 db_record_addr_h[0x20]; 1978 u8 db_record_addr_h[0x20];
1979 1979
1980 u8 db_record_addr_l[0x1e]; 1980 u8 db_record_addr_l[0x1e];
1981 u8 reserved_8[0x2]; 1981 u8 reserved_at_17e[0x2];
1982 1982
1983 u8 reserved_9[0x80]; 1983 u8 reserved_at_180[0x80];
1984}; 1984};
1985 1985
1986struct mlx5_ifc_traffic_counter_bits { 1986struct mlx5_ifc_traffic_counter_bits {
@@ -1990,16 +1990,16 @@ struct mlx5_ifc_traffic_counter_bits {
1990}; 1990};
1991 1991
1992struct mlx5_ifc_tisc_bits { 1992struct mlx5_ifc_tisc_bits {
1993 u8 reserved_0[0xc]; 1993 u8 reserved_at_0[0xc];
1994 u8 prio[0x4]; 1994 u8 prio[0x4];
1995 u8 reserved_1[0x10]; 1995 u8 reserved_at_10[0x10];
1996 1996
1997 u8 reserved_2[0x100]; 1997 u8 reserved_at_20[0x100];
1998 1998
1999 u8 reserved_3[0x8]; 1999 u8 reserved_at_120[0x8];
2000 u8 transport_domain[0x18]; 2000 u8 transport_domain[0x18];
2001 2001
2002 u8 reserved_4[0x3c0]; 2002 u8 reserved_at_140[0x3c0];
2003}; 2003};
2004 2004
2005enum { 2005enum {
@@ -2024,31 +2024,31 @@ enum {
2024}; 2024};
2025 2025
2026struct mlx5_ifc_tirc_bits { 2026struct mlx5_ifc_tirc_bits {
2027 u8 reserved_0[0x20]; 2027 u8 reserved_at_0[0x20];
2028 2028
2029 u8 disp_type[0x4]; 2029 u8 disp_type[0x4];
2030 u8 reserved_1[0x1c]; 2030 u8 reserved_at_24[0x1c];
2031 2031
2032 u8 reserved_2[0x40]; 2032 u8 reserved_at_40[0x40];
2033 2033
2034 u8 reserved_3[0x4]; 2034 u8 reserved_at_80[0x4];
2035 u8 lro_timeout_period_usecs[0x10]; 2035 u8 lro_timeout_period_usecs[0x10];
2036 u8 lro_enable_mask[0x4]; 2036 u8 lro_enable_mask[0x4];
2037 u8 lro_max_ip_payload_size[0x8]; 2037 u8 lro_max_ip_payload_size[0x8];
2038 2038
2039 u8 reserved_4[0x40]; 2039 u8 reserved_at_a0[0x40];
2040 2040
2041 u8 reserved_5[0x8]; 2041 u8 reserved_at_e0[0x8];
2042 u8 inline_rqn[0x18]; 2042 u8 inline_rqn[0x18];
2043 2043
2044 u8 rx_hash_symmetric[0x1]; 2044 u8 rx_hash_symmetric[0x1];
2045 u8 reserved_6[0x1]; 2045 u8 reserved_at_101[0x1];
2046 u8 tunneled_offload_en[0x1]; 2046 u8 tunneled_offload_en[0x1];
2047 u8 reserved_7[0x5]; 2047 u8 reserved_at_103[0x5];
2048 u8 indirect_table[0x18]; 2048 u8 indirect_table[0x18];
2049 2049
2050 u8 rx_hash_fn[0x4]; 2050 u8 rx_hash_fn[0x4];
2051 u8 reserved_8[0x2]; 2051 u8 reserved_at_124[0x2];
2052 u8 self_lb_block[0x2]; 2052 u8 self_lb_block[0x2];
2053 u8 transport_domain[0x18]; 2053 u8 transport_domain[0x18];
2054 2054
@@ -2058,7 +2058,7 @@ struct mlx5_ifc_tirc_bits {
2058 2058
2059 struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner; 2059 struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
2060 2060
2061 u8 reserved_9[0x4c0]; 2061 u8 reserved_at_2c0[0x4c0];
2062}; 2062};
2063 2063
2064enum { 2064enum {
@@ -2069,39 +2069,39 @@ enum {
2069struct mlx5_ifc_srqc_bits { 2069struct mlx5_ifc_srqc_bits {
2070 u8 state[0x4]; 2070 u8 state[0x4];
2071 u8 log_srq_size[0x4]; 2071 u8 log_srq_size[0x4];
2072 u8 reserved_0[0x18]; 2072 u8 reserved_at_8[0x18];
2073 2073
2074 u8 wq_signature[0x1]; 2074 u8 wq_signature[0x1];
2075 u8 cont_srq[0x1]; 2075 u8 cont_srq[0x1];
2076 u8 reserved_1[0x1]; 2076 u8 reserved_at_22[0x1];
2077 u8 rlky[0x1]; 2077 u8 rlky[0x1];
2078 u8 reserved_2[0x1]; 2078 u8 reserved_at_24[0x1];
2079 u8 log_rq_stride[0x3]; 2079 u8 log_rq_stride[0x3];
2080 u8 xrcd[0x18]; 2080 u8 xrcd[0x18];
2081 2081
2082 u8 page_offset[0x6]; 2082 u8 page_offset[0x6];
2083 u8 reserved_3[0x2]; 2083 u8 reserved_at_46[0x2];
2084 u8 cqn[0x18]; 2084 u8 cqn[0x18];
2085 2085
2086 u8 reserved_4[0x20]; 2086 u8 reserved_at_60[0x20];
2087 2087
2088 u8 reserved_5[0x2]; 2088 u8 reserved_at_80[0x2];
2089 u8 log_page_size[0x6]; 2089 u8 log_page_size[0x6];
2090 u8 reserved_6[0x18]; 2090 u8 reserved_at_88[0x18];
2091 2091
2092 u8 reserved_7[0x20]; 2092 u8 reserved_at_a0[0x20];
2093 2093
2094 u8 reserved_8[0x8]; 2094 u8 reserved_at_c0[0x8];
2095 u8 pd[0x18]; 2095 u8 pd[0x18];
2096 2096
2097 u8 lwm[0x10]; 2097 u8 lwm[0x10];
2098 u8 wqe_cnt[0x10]; 2098 u8 wqe_cnt[0x10];
2099 2099
2100 u8 reserved_9[0x40]; 2100 u8 reserved_at_100[0x40];
2101 2101
2102 u8 dbr_addr[0x40]; 2102 u8 dbr_addr[0x40];
2103 2103
2104 u8 reserved_10[0x80]; 2104 u8 reserved_at_180[0x80];
2105}; 2105};
2106 2106
2107enum { 2107enum {
@@ -2115,39 +2115,39 @@ struct mlx5_ifc_sqc_bits {
2115 u8 cd_master[0x1]; 2115 u8 cd_master[0x1];
2116 u8 fre[0x1]; 2116 u8 fre[0x1];
2117 u8 flush_in_error_en[0x1]; 2117 u8 flush_in_error_en[0x1];
2118 u8 reserved_0[0x4]; 2118 u8 reserved_at_4[0x4];
2119 u8 state[0x4]; 2119 u8 state[0x4];
2120 u8 reserved_1[0x14]; 2120 u8 reserved_at_c[0x14];
2121 2121
2122 u8 reserved_2[0x8]; 2122 u8 reserved_at_20[0x8];
2123 u8 user_index[0x18]; 2123 u8 user_index[0x18];
2124 2124
2125 u8 reserved_3[0x8]; 2125 u8 reserved_at_40[0x8];
2126 u8 cqn[0x18]; 2126 u8 cqn[0x18];
2127 2127
2128 u8 reserved_4[0xa0]; 2128 u8 reserved_at_60[0xa0];
2129 2129
2130 u8 tis_lst_sz[0x10]; 2130 u8 tis_lst_sz[0x10];
2131 u8 reserved_5[0x10]; 2131 u8 reserved_at_110[0x10];
2132 2132
2133 u8 reserved_6[0x40]; 2133 u8 reserved_at_120[0x40];
2134 2134
2135 u8 reserved_7[0x8]; 2135 u8 reserved_at_160[0x8];
2136 u8 tis_num_0[0x18]; 2136 u8 tis_num_0[0x18];
2137 2137
2138 struct mlx5_ifc_wq_bits wq; 2138 struct mlx5_ifc_wq_bits wq;
2139}; 2139};
2140 2140
2141struct mlx5_ifc_rqtc_bits { 2141struct mlx5_ifc_rqtc_bits {
2142 u8 reserved_0[0xa0]; 2142 u8 reserved_at_0[0xa0];
2143 2143
2144 u8 reserved_1[0x10]; 2144 u8 reserved_at_a0[0x10];
2145 u8 rqt_max_size[0x10]; 2145 u8 rqt_max_size[0x10];
2146 2146
2147 u8 reserved_2[0x10]; 2147 u8 reserved_at_c0[0x10];
2148 u8 rqt_actual_size[0x10]; 2148 u8 rqt_actual_size[0x10];
2149 2149
2150 u8 reserved_3[0x6a0]; 2150 u8 reserved_at_e0[0x6a0];
2151 2151
2152 struct mlx5_ifc_rq_num_bits rq_num[0]; 2152 struct mlx5_ifc_rq_num_bits rq_num[0];
2153}; 2153};
@@ -2165,27 +2165,27 @@ enum {
2165 2165
2166struct mlx5_ifc_rqc_bits { 2166struct mlx5_ifc_rqc_bits {
2167 u8 rlky[0x1]; 2167 u8 rlky[0x1];
2168 u8 reserved_0[0x2]; 2168 u8 reserved_at_1[0x2];
2169 u8 vsd[0x1]; 2169 u8 vsd[0x1];
2170 u8 mem_rq_type[0x4]; 2170 u8 mem_rq_type[0x4];
2171 u8 state[0x4]; 2171 u8 state[0x4];
2172 u8 reserved_1[0x1]; 2172 u8 reserved_at_c[0x1];
2173 u8 flush_in_error_en[0x1]; 2173 u8 flush_in_error_en[0x1];
2174 u8 reserved_2[0x12]; 2174 u8 reserved_at_e[0x12];
2175 2175
2176 u8 reserved_3[0x8]; 2176 u8 reserved_at_20[0x8];
2177 u8 user_index[0x18]; 2177 u8 user_index[0x18];
2178 2178
2179 u8 reserved_4[0x8]; 2179 u8 reserved_at_40[0x8];
2180 u8 cqn[0x18]; 2180 u8 cqn[0x18];
2181 2181
2182 u8 counter_set_id[0x8]; 2182 u8 counter_set_id[0x8];
2183 u8 reserved_5[0x18]; 2183 u8 reserved_at_68[0x18];
2184 2184
2185 u8 reserved_6[0x8]; 2185 u8 reserved_at_80[0x8];
2186 u8 rmpn[0x18]; 2186 u8 rmpn[0x18];
2187 2187
2188 u8 reserved_7[0xe0]; 2188 u8 reserved_at_a0[0xe0];
2189 2189
2190 struct mlx5_ifc_wq_bits wq; 2190 struct mlx5_ifc_wq_bits wq;
2191}; 2191};
@@ -2196,31 +2196,31 @@ enum {
2196}; 2196};
2197 2197
2198struct mlx5_ifc_rmpc_bits { 2198struct mlx5_ifc_rmpc_bits {
2199 u8 reserved_0[0x8]; 2199 u8 reserved_at_0[0x8];
2200 u8 state[0x4]; 2200 u8 state[0x4];
2201 u8 reserved_1[0x14]; 2201 u8 reserved_at_c[0x14];
2202 2202
2203 u8 basic_cyclic_rcv_wqe[0x1]; 2203 u8 basic_cyclic_rcv_wqe[0x1];
2204 u8 reserved_2[0x1f]; 2204 u8 reserved_at_21[0x1f];
2205 2205
2206 u8 reserved_3[0x140]; 2206 u8 reserved_at_40[0x140];
2207 2207
2208 struct mlx5_ifc_wq_bits wq; 2208 struct mlx5_ifc_wq_bits wq;
2209}; 2209};
2210 2210
2211struct mlx5_ifc_nic_vport_context_bits { 2211struct mlx5_ifc_nic_vport_context_bits {
2212 u8 reserved_0[0x1f]; 2212 u8 reserved_at_0[0x1f];
2213 u8 roce_en[0x1]; 2213 u8 roce_en[0x1];
2214 2214
2215 u8 arm_change_event[0x1]; 2215 u8 arm_change_event[0x1];
2216 u8 reserved_1[0x1a]; 2216 u8 reserved_at_21[0x1a];
2217 u8 event_on_mtu[0x1]; 2217 u8 event_on_mtu[0x1];
2218 u8 event_on_promisc_change[0x1]; 2218 u8 event_on_promisc_change[0x1];
2219 u8 event_on_vlan_change[0x1]; 2219 u8 event_on_vlan_change[0x1];
2220 u8 event_on_mc_address_change[0x1]; 2220 u8 event_on_mc_address_change[0x1];
2221 u8 event_on_uc_address_change[0x1]; 2221 u8 event_on_uc_address_change[0x1];
2222 2222
2223 u8 reserved_2[0xf0]; 2223 u8 reserved_at_40[0xf0];
2224 2224
2225 u8 mtu[0x10]; 2225 u8 mtu[0x10];
2226 2226
@@ -2228,21 +2228,21 @@ struct mlx5_ifc_nic_vport_context_bits {
2228 u8 port_guid[0x40]; 2228 u8 port_guid[0x40];
2229 u8 node_guid[0x40]; 2229 u8 node_guid[0x40];
2230 2230
2231 u8 reserved_3[0x140]; 2231 u8 reserved_at_200[0x140];
2232 u8 qkey_violation_counter[0x10]; 2232 u8 qkey_violation_counter[0x10];
2233 u8 reserved_4[0x430]; 2233 u8 reserved_at_350[0x430];
2234 2234
2235 u8 promisc_uc[0x1]; 2235 u8 promisc_uc[0x1];
2236 u8 promisc_mc[0x1]; 2236 u8 promisc_mc[0x1];
2237 u8 promisc_all[0x1]; 2237 u8 promisc_all[0x1];
2238 u8 reserved_5[0x2]; 2238 u8 reserved_at_783[0x2];
2239 u8 allowed_list_type[0x3]; 2239 u8 allowed_list_type[0x3];
2240 u8 reserved_6[0xc]; 2240 u8 reserved_at_788[0xc];
2241 u8 allowed_list_size[0xc]; 2241 u8 allowed_list_size[0xc];
2242 2242
2243 struct mlx5_ifc_mac_address_layout_bits permanent_address; 2243 struct mlx5_ifc_mac_address_layout_bits permanent_address;
2244 2244
2245 u8 reserved_7[0x20]; 2245 u8 reserved_at_7e0[0x20];
2246 2246
2247 u8 current_uc_mac_address[0][0x40]; 2247 u8 current_uc_mac_address[0][0x40];
2248}; 2248};
@@ -2254,9 +2254,9 @@ enum {
2254}; 2254};
2255 2255
2256struct mlx5_ifc_mkc_bits { 2256struct mlx5_ifc_mkc_bits {
2257 u8 reserved_0[0x1]; 2257 u8 reserved_at_0[0x1];
2258 u8 free[0x1]; 2258 u8 free[0x1];
2259 u8 reserved_1[0xd]; 2259 u8 reserved_at_2[0xd];
2260 u8 small_fence_on_rdma_read_response[0x1]; 2260 u8 small_fence_on_rdma_read_response[0x1];
2261 u8 umr_en[0x1]; 2261 u8 umr_en[0x1];
2262 u8 a[0x1]; 2262 u8 a[0x1];
@@ -2265,19 +2265,19 @@ struct mlx5_ifc_mkc_bits {
2265 u8 lw[0x1]; 2265 u8 lw[0x1];
2266 u8 lr[0x1]; 2266 u8 lr[0x1];
2267 u8 access_mode[0x2]; 2267 u8 access_mode[0x2];
2268 u8 reserved_2[0x8]; 2268 u8 reserved_at_18[0x8];
2269 2269
2270 u8 qpn[0x18]; 2270 u8 qpn[0x18];
2271 u8 mkey_7_0[0x8]; 2271 u8 mkey_7_0[0x8];
2272 2272
2273 u8 reserved_3[0x20]; 2273 u8 reserved_at_40[0x20];
2274 2274
2275 u8 length64[0x1]; 2275 u8 length64[0x1];
2276 u8 bsf_en[0x1]; 2276 u8 bsf_en[0x1];
2277 u8 sync_umr[0x1]; 2277 u8 sync_umr[0x1];
2278 u8 reserved_4[0x2]; 2278 u8 reserved_at_63[0x2];
2279 u8 expected_sigerr_count[0x1]; 2279 u8 expected_sigerr_count[0x1];
2280 u8 reserved_5[0x1]; 2280 u8 reserved_at_66[0x1];
2281 u8 en_rinval[0x1]; 2281 u8 en_rinval[0x1];
2282 u8 pd[0x18]; 2282 u8 pd[0x18];
2283 2283
@@ -2287,18 +2287,18 @@ struct mlx5_ifc_mkc_bits {
2287 2287
2288 u8 bsf_octword_size[0x20]; 2288 u8 bsf_octword_size[0x20];
2289 2289
2290 u8 reserved_6[0x80]; 2290 u8 reserved_at_120[0x80];
2291 2291
2292 u8 translations_octword_size[0x20]; 2292 u8 translations_octword_size[0x20];
2293 2293
2294 u8 reserved_7[0x1b]; 2294 u8 reserved_at_1c0[0x1b];
2295 u8 log_page_size[0x5]; 2295 u8 log_page_size[0x5];
2296 2296
2297 u8 reserved_8[0x20]; 2297 u8 reserved_at_1e0[0x20];
2298}; 2298};
2299 2299
2300struct mlx5_ifc_pkey_bits { 2300struct mlx5_ifc_pkey_bits {
2301 u8 reserved_0[0x10]; 2301 u8 reserved_at_0[0x10];
2302 u8 pkey[0x10]; 2302 u8 pkey[0x10];
2303}; 2303};
2304 2304
@@ -2309,19 +2309,19 @@ struct mlx5_ifc_array128_auto_bits {
2309struct mlx5_ifc_hca_vport_context_bits { 2309struct mlx5_ifc_hca_vport_context_bits {
2310 u8 field_select[0x20]; 2310 u8 field_select[0x20];
2311 2311
2312 u8 reserved_0[0xe0]; 2312 u8 reserved_at_20[0xe0];
2313 2313
2314 u8 sm_virt_aware[0x1]; 2314 u8 sm_virt_aware[0x1];
2315 u8 has_smi[0x1]; 2315 u8 has_smi[0x1];
2316 u8 has_raw[0x1]; 2316 u8 has_raw[0x1];
2317 u8 grh_required[0x1]; 2317 u8 grh_required[0x1];
2318 u8 reserved_1[0xc]; 2318 u8 reserved_at_104[0xc];
2319 u8 port_physical_state[0x4]; 2319 u8 port_physical_state[0x4];
2320 u8 vport_state_policy[0x4]; 2320 u8 vport_state_policy[0x4];
2321 u8 port_state[0x4]; 2321 u8 port_state[0x4];
2322 u8 vport_state[0x4]; 2322 u8 vport_state[0x4];
2323 2323
2324 u8 reserved_2[0x20]; 2324 u8 reserved_at_120[0x20];
2325 2325
2326 u8 system_image_guid[0x40]; 2326 u8 system_image_guid[0x40];
2327 2327
@@ -2337,33 +2337,33 @@ struct mlx5_ifc_hca_vport_context_bits {
2337 2337
2338 u8 cap_mask2_field_select[0x20]; 2338 u8 cap_mask2_field_select[0x20];
2339 2339
2340 u8 reserved_3[0x80]; 2340 u8 reserved_at_280[0x80];
2341 2341
2342 u8 lid[0x10]; 2342 u8 lid[0x10];
2343 u8 reserved_4[0x4]; 2343 u8 reserved_at_310[0x4];
2344 u8 init_type_reply[0x4]; 2344 u8 init_type_reply[0x4];
2345 u8 lmc[0x3]; 2345 u8 lmc[0x3];
2346 u8 subnet_timeout[0x5]; 2346 u8 subnet_timeout[0x5];
2347 2347
2348 u8 sm_lid[0x10]; 2348 u8 sm_lid[0x10];
2349 u8 sm_sl[0x4]; 2349 u8 sm_sl[0x4];
2350 u8 reserved_5[0xc]; 2350 u8 reserved_at_334[0xc];
2351 2351
2352 u8 qkey_violation_counter[0x10]; 2352 u8 qkey_violation_counter[0x10];
2353 u8 pkey_violation_counter[0x10]; 2353 u8 pkey_violation_counter[0x10];
2354 2354
2355 u8 reserved_6[0xca0]; 2355 u8 reserved_at_360[0xca0];
2356}; 2356};
2357 2357
2358struct mlx5_ifc_esw_vport_context_bits { 2358struct mlx5_ifc_esw_vport_context_bits {
2359 u8 reserved_0[0x3]; 2359 u8 reserved_at_0[0x3];
2360 u8 vport_svlan_strip[0x1]; 2360 u8 vport_svlan_strip[0x1];
2361 u8 vport_cvlan_strip[0x1]; 2361 u8 vport_cvlan_strip[0x1];
2362 u8 vport_svlan_insert[0x1]; 2362 u8 vport_svlan_insert[0x1];
2363 u8 vport_cvlan_insert[0x2]; 2363 u8 vport_cvlan_insert[0x2];
2364 u8 reserved_1[0x18]; 2364 u8 reserved_at_8[0x18];
2365 2365
2366 u8 reserved_2[0x20]; 2366 u8 reserved_at_20[0x20];
2367 2367
2368 u8 svlan_cfi[0x1]; 2368 u8 svlan_cfi[0x1];
2369 u8 svlan_pcp[0x3]; 2369 u8 svlan_pcp[0x3];
@@ -2372,7 +2372,7 @@ struct mlx5_ifc_esw_vport_context_bits {
2372 u8 cvlan_pcp[0x3]; 2372 u8 cvlan_pcp[0x3];
2373 u8 cvlan_id[0xc]; 2373 u8 cvlan_id[0xc];
2374 2374
2375 u8 reserved_3[0x7a0]; 2375 u8 reserved_at_60[0x7a0];
2376}; 2376};
2377 2377
2378enum { 2378enum {
@@ -2387,41 +2387,41 @@ enum {
2387 2387
2388struct mlx5_ifc_eqc_bits { 2388struct mlx5_ifc_eqc_bits {
2389 u8 status[0x4]; 2389 u8 status[0x4];
2390 u8 reserved_0[0x9]; 2390 u8 reserved_at_4[0x9];
2391 u8 ec[0x1]; 2391 u8 ec[0x1];
2392 u8 oi[0x1]; 2392 u8 oi[0x1];
2393 u8 reserved_1[0x5]; 2393 u8 reserved_at_f[0x5];
2394 u8 st[0x4]; 2394 u8 st[0x4];
2395 u8 reserved_2[0x8]; 2395 u8 reserved_at_18[0x8];
2396 2396
2397 u8 reserved_3[0x20]; 2397 u8 reserved_at_20[0x20];
2398 2398
2399 u8 reserved_4[0x14]; 2399 u8 reserved_at_40[0x14];
2400 u8 page_offset[0x6]; 2400 u8 page_offset[0x6];
2401 u8 reserved_5[0x6]; 2401 u8 reserved_at_5a[0x6];
2402 2402
2403 u8 reserved_6[0x3]; 2403 u8 reserved_at_60[0x3];
2404 u8 log_eq_size[0x5]; 2404 u8 log_eq_size[0x5];
2405 u8 uar_page[0x18]; 2405 u8 uar_page[0x18];
2406 2406
2407 u8 reserved_7[0x20]; 2407 u8 reserved_at_80[0x20];
2408 2408
2409 u8 reserved_8[0x18]; 2409 u8 reserved_at_a0[0x18];
2410 u8 intr[0x8]; 2410 u8 intr[0x8];
2411 2411
2412 u8 reserved_9[0x3]; 2412 u8 reserved_at_c0[0x3];
2413 u8 log_page_size[0x5]; 2413 u8 log_page_size[0x5];
2414 u8 reserved_10[0x18]; 2414 u8 reserved_at_c8[0x18];
2415 2415
2416 u8 reserved_11[0x60]; 2416 u8 reserved_at_e0[0x60];
2417 2417
2418 u8 reserved_12[0x8]; 2418 u8 reserved_at_140[0x8];
2419 u8 consumer_counter[0x18]; 2419 u8 consumer_counter[0x18];
2420 2420
2421 u8 reserved_13[0x8]; 2421 u8 reserved_at_160[0x8];
2422 u8 producer_counter[0x18]; 2422 u8 producer_counter[0x18];
2423 2423
2424 u8 reserved_14[0x80]; 2424 u8 reserved_at_180[0x80];
2425}; 2425};
2426 2426
2427enum { 2427enum {
@@ -2445,14 +2445,14 @@ enum {
2445}; 2445};
2446 2446
2447struct mlx5_ifc_dctc_bits { 2447struct mlx5_ifc_dctc_bits {
2448 u8 reserved_0[0x4]; 2448 u8 reserved_at_0[0x4];
2449 u8 state[0x4]; 2449 u8 state[0x4];
2450 u8 reserved_1[0x18]; 2450 u8 reserved_at_8[0x18];
2451 2451
2452 u8 reserved_2[0x8]; 2452 u8 reserved_at_20[0x8];
2453 u8 user_index[0x18]; 2453 u8 user_index[0x18];
2454 2454
2455 u8 reserved_3[0x8]; 2455 u8 reserved_at_40[0x8];
2456 u8 cqn[0x18]; 2456 u8 cqn[0x18];
2457 2457
2458 u8 counter_set_id[0x8]; 2458 u8 counter_set_id[0x8];
@@ -2464,45 +2464,45 @@ struct mlx5_ifc_dctc_bits {
2464 u8 latency_sensitive[0x1]; 2464 u8 latency_sensitive[0x1];
2465 u8 rlky[0x1]; 2465 u8 rlky[0x1];
2466 u8 free_ar[0x1]; 2466 u8 free_ar[0x1];
2467 u8 reserved_4[0xd]; 2467 u8 reserved_at_73[0xd];
2468 2468
2469 u8 reserved_5[0x8]; 2469 u8 reserved_at_80[0x8];
2470 u8 cs_res[0x8]; 2470 u8 cs_res[0x8];
2471 u8 reserved_6[0x3]; 2471 u8 reserved_at_90[0x3];
2472 u8 min_rnr_nak[0x5]; 2472 u8 min_rnr_nak[0x5];
2473 u8 reserved_7[0x8]; 2473 u8 reserved_at_98[0x8];
2474 2474
2475 u8 reserved_8[0x8]; 2475 u8 reserved_at_a0[0x8];
2476 u8 srqn[0x18]; 2476 u8 srqn[0x18];
2477 2477
2478 u8 reserved_9[0x8]; 2478 u8 reserved_at_c0[0x8];
2479 u8 pd[0x18]; 2479 u8 pd[0x18];
2480 2480
2481 u8 tclass[0x8]; 2481 u8 tclass[0x8];
2482 u8 reserved_10[0x4]; 2482 u8 reserved_at_e8[0x4];
2483 u8 flow_label[0x14]; 2483 u8 flow_label[0x14];
2484 2484
2485 u8 dc_access_key[0x40]; 2485 u8 dc_access_key[0x40];
2486 2486
2487 u8 reserved_11[0x5]; 2487 u8 reserved_at_140[0x5];
2488 u8 mtu[0x3]; 2488 u8 mtu[0x3];
2489 u8 port[0x8]; 2489 u8 port[0x8];
2490 u8 pkey_index[0x10]; 2490 u8 pkey_index[0x10];
2491 2491
2492 u8 reserved_12[0x8]; 2492 u8 reserved_at_160[0x8];
2493 u8 my_addr_index[0x8]; 2493 u8 my_addr_index[0x8];
2494 u8 reserved_13[0x8]; 2494 u8 reserved_at_170[0x8];
2495 u8 hop_limit[0x8]; 2495 u8 hop_limit[0x8];
2496 2496
2497 u8 dc_access_key_violation_count[0x20]; 2497 u8 dc_access_key_violation_count[0x20];
2498 2498
2499 u8 reserved_14[0x14]; 2499 u8 reserved_at_1a0[0x14];
2500 u8 dei_cfi[0x1]; 2500 u8 dei_cfi[0x1];
2501 u8 eth_prio[0x3]; 2501 u8 eth_prio[0x3];
2502 u8 ecn[0x2]; 2502 u8 ecn[0x2];
2503 u8 dscp[0x6]; 2503 u8 dscp[0x6];
2504 2504
2505 u8 reserved_15[0x40]; 2505 u8 reserved_at_1c0[0x40];
2506}; 2506};
2507 2507
2508enum { 2508enum {
@@ -2524,54 +2524,54 @@ enum {
2524 2524
2525struct mlx5_ifc_cqc_bits { 2525struct mlx5_ifc_cqc_bits {
2526 u8 status[0x4]; 2526 u8 status[0x4];
2527 u8 reserved_0[0x4]; 2527 u8 reserved_at_4[0x4];
2528 u8 cqe_sz[0x3]; 2528 u8 cqe_sz[0x3];
2529 u8 cc[0x1]; 2529 u8 cc[0x1];
2530 u8 reserved_1[0x1]; 2530 u8 reserved_at_c[0x1];
2531 u8 scqe_break_moderation_en[0x1]; 2531 u8 scqe_break_moderation_en[0x1];
2532 u8 oi[0x1]; 2532 u8 oi[0x1];
2533 u8 reserved_2[0x2]; 2533 u8 reserved_at_f[0x2];
2534 u8 cqe_zip_en[0x1]; 2534 u8 cqe_zip_en[0x1];
2535 u8 mini_cqe_res_format[0x2]; 2535 u8 mini_cqe_res_format[0x2];
2536 u8 st[0x4]; 2536 u8 st[0x4];
2537 u8 reserved_3[0x8]; 2537 u8 reserved_at_18[0x8];
2538 2538
2539 u8 reserved_4[0x20]; 2539 u8 reserved_at_20[0x20];
2540 2540
2541 u8 reserved_5[0x14]; 2541 u8 reserved_at_40[0x14];
2542 u8 page_offset[0x6]; 2542 u8 page_offset[0x6];
2543 u8 reserved_6[0x6]; 2543 u8 reserved_at_5a[0x6];
2544 2544
2545 u8 reserved_7[0x3]; 2545 u8 reserved_at_60[0x3];
2546 u8 log_cq_size[0x5]; 2546 u8 log_cq_size[0x5];
2547 u8 uar_page[0x18]; 2547 u8 uar_page[0x18];
2548 2548
2549 u8 reserved_8[0x4]; 2549 u8 reserved_at_80[0x4];
2550 u8 cq_period[0xc]; 2550 u8 cq_period[0xc];
2551 u8 cq_max_count[0x10]; 2551 u8 cq_max_count[0x10];
2552 2552
2553 u8 reserved_9[0x18]; 2553 u8 reserved_at_a0[0x18];
2554 u8 c_eqn[0x8]; 2554 u8 c_eqn[0x8];
2555 2555
2556 u8 reserved_10[0x3]; 2556 u8 reserved_at_c0[0x3];
2557 u8 log_page_size[0x5]; 2557 u8 log_page_size[0x5];
2558 u8 reserved_11[0x18]; 2558 u8 reserved_at_c8[0x18];
2559 2559
2560 u8 reserved_12[0x20]; 2560 u8 reserved_at_e0[0x20];
2561 2561
2562 u8 reserved_13[0x8]; 2562 u8 reserved_at_100[0x8];
2563 u8 last_notified_index[0x18]; 2563 u8 last_notified_index[0x18];
2564 2564
2565 u8 reserved_14[0x8]; 2565 u8 reserved_at_120[0x8];
2566 u8 last_solicit_index[0x18]; 2566 u8 last_solicit_index[0x18];
2567 2567
2568 u8 reserved_15[0x8]; 2568 u8 reserved_at_140[0x8];
2569 u8 consumer_counter[0x18]; 2569 u8 consumer_counter[0x18];
2570 2570
2571 u8 reserved_16[0x8]; 2571 u8 reserved_at_160[0x8];
2572 u8 producer_counter[0x18]; 2572 u8 producer_counter[0x18];
2573 2573
2574 u8 reserved_17[0x40]; 2574 u8 reserved_at_180[0x40];
2575 2575
2576 u8 dbr_addr[0x40]; 2576 u8 dbr_addr[0x40];
2577}; 2577};
@@ -2580,16 +2580,16 @@ union mlx5_ifc_cong_control_roce_ecn_auto_bits {
2580 struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp; 2580 struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
2581 struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp; 2581 struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
2582 struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np; 2582 struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
2583 u8 reserved_0[0x800]; 2583 u8 reserved_at_0[0x800];
2584}; 2584};
2585 2585
2586struct mlx5_ifc_query_adapter_param_block_bits { 2586struct mlx5_ifc_query_adapter_param_block_bits {
2587 u8 reserved_0[0xc0]; 2587 u8 reserved_at_0[0xc0];
2588 2588
2589 u8 reserved_1[0x8]; 2589 u8 reserved_at_c0[0x8];
2590 u8 ieee_vendor_id[0x18]; 2590 u8 ieee_vendor_id[0x18];
2591 2591
2592 u8 reserved_2[0x10]; 2592 u8 reserved_at_e0[0x10];
2593 u8 vsd_vendor_id[0x10]; 2593 u8 vsd_vendor_id[0x10];
2594 2594
2595 u8 vsd[208][0x8]; 2595 u8 vsd[208][0x8];
@@ -2600,14 +2600,14 @@ struct mlx5_ifc_query_adapter_param_block_bits {
2600union mlx5_ifc_modify_field_select_resize_field_select_auto_bits { 2600union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
2601 struct mlx5_ifc_modify_field_select_bits modify_field_select; 2601 struct mlx5_ifc_modify_field_select_bits modify_field_select;
2602 struct mlx5_ifc_resize_field_select_bits resize_field_select; 2602 struct mlx5_ifc_resize_field_select_bits resize_field_select;
2603 u8 reserved_0[0x20]; 2603 u8 reserved_at_0[0x20];
2604}; 2604};
2605 2605
2606union mlx5_ifc_field_select_802_1_r_roce_auto_bits { 2606union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
2607 struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp; 2607 struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
2608 struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp; 2608 struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
2609 struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np; 2609 struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
2610 u8 reserved_0[0x20]; 2610 u8 reserved_at_0[0x20];
2611}; 2611};
2612 2612
2613union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { 2613union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
@@ -2619,7 +2619,7 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
2619 struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; 2619 struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
2620 struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; 2620 struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
2621 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; 2621 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
2622 u8 reserved_0[0x7c0]; 2622 u8 reserved_at_0[0x7c0];
2623}; 2623};
2624 2624
2625union mlx5_ifc_event_auto_bits { 2625union mlx5_ifc_event_auto_bits {
@@ -2635,23 +2635,23 @@ union mlx5_ifc_event_auto_bits {
2635 struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event; 2635 struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
2636 struct mlx5_ifc_stall_vl_event_bits stall_vl_event; 2636 struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
2637 struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event; 2637 struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
2638 u8 reserved_0[0xe0]; 2638 u8 reserved_at_0[0xe0];
2639}; 2639};
2640 2640
2641struct mlx5_ifc_health_buffer_bits { 2641struct mlx5_ifc_health_buffer_bits {
2642 u8 reserved_0[0x100]; 2642 u8 reserved_at_0[0x100];
2643 2643
2644 u8 assert_existptr[0x20]; 2644 u8 assert_existptr[0x20];
2645 2645
2646 u8 assert_callra[0x20]; 2646 u8 assert_callra[0x20];
2647 2647
2648 u8 reserved_1[0x40]; 2648 u8 reserved_at_140[0x40];
2649 2649
2650 u8 fw_version[0x20]; 2650 u8 fw_version[0x20];
2651 2651
2652 u8 hw_id[0x20]; 2652 u8 hw_id[0x20];
2653 2653
2654 u8 reserved_2[0x20]; 2654 u8 reserved_at_1c0[0x20];
2655 2655
2656 u8 irisc_index[0x8]; 2656 u8 irisc_index[0x8];
2657 u8 synd[0x8]; 2657 u8 synd[0x8];
@@ -2660,20 +2660,20 @@ struct mlx5_ifc_health_buffer_bits {
2660 2660
2661struct mlx5_ifc_register_loopback_control_bits { 2661struct mlx5_ifc_register_loopback_control_bits {
2662 u8 no_lb[0x1]; 2662 u8 no_lb[0x1];
2663 u8 reserved_0[0x7]; 2663 u8 reserved_at_1[0x7];
2664 u8 port[0x8]; 2664 u8 port[0x8];
2665 u8 reserved_1[0x10]; 2665 u8 reserved_at_10[0x10];
2666 2666
2667 u8 reserved_2[0x60]; 2667 u8 reserved_at_20[0x60];
2668}; 2668};
2669 2669
2670struct mlx5_ifc_teardown_hca_out_bits { 2670struct mlx5_ifc_teardown_hca_out_bits {
2671 u8 status[0x8]; 2671 u8 status[0x8];
2672 u8 reserved_0[0x18]; 2672 u8 reserved_at_8[0x18];
2673 2673
2674 u8 syndrome[0x20]; 2674 u8 syndrome[0x20];
2675 2675
2676 u8 reserved_1[0x40]; 2676 u8 reserved_at_40[0x40];
2677}; 2677};
2678 2678
2679enum { 2679enum {
@@ -2683,108 +2683,108 @@ enum {
2683 2683
2684struct mlx5_ifc_teardown_hca_in_bits { 2684struct mlx5_ifc_teardown_hca_in_bits {
2685 u8 opcode[0x10]; 2685 u8 opcode[0x10];
2686 u8 reserved_0[0x10]; 2686 u8 reserved_at_10[0x10];
2687 2687
2688 u8 reserved_1[0x10]; 2688 u8 reserved_at_20[0x10];
2689 u8 op_mod[0x10]; 2689 u8 op_mod[0x10];
2690 2690
2691 u8 reserved_2[0x10]; 2691 u8 reserved_at_40[0x10];
2692 u8 profile[0x10]; 2692 u8 profile[0x10];
2693 2693
2694 u8 reserved_3[0x20]; 2694 u8 reserved_at_60[0x20];
2695}; 2695};
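From here on the diff is dominated by command input/output layouts, and the ones in this stretch all share the same framing: the *_in structures open with opcode[0x10] and op_mod[0x10], the *_out structures with status[0x8], a reserved_at_8[0x18] filler and a 32-bit syndrome. In the driver those header fields are read with MLX5_GET() against the specific *_out type; the standalone sketch below only illustrates where they sit in the byte stream (MSB-first, as in the earlier sketch), using a made-up 16-byte response in the shape of teardown_hca_out.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* read 'width' bits from 'buf', MSB-first, starting at bit 'off' */
static uint32_t ifc_get(const uint8_t *buf, unsigned int off, unsigned int width)
{
        uint32_t     v = 0;
        unsigned int i;

        for (i = 0; i < width; i++) {
                unsigned int bit = off + i;

                v = (v << 1) | ((uint32_t)(buf[bit >> 3] >> (7 - (bit & 7))) & 1);
        }
        return v;
}

int main(void)
{
        /* made-up response in the shape of teardown_hca_out: status 0x02,
         * syndrome 0xdeadbeef, everything else zero */
        const uint8_t out[16] = { 0x02, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef };

        printf("status   = 0x%02" PRIx32 "\n", ifc_get(out, 0x00, 0x08));
        printf("syndrome = 0x%08" PRIx32 "\n", ifc_get(out, 0x20, 0x20));
        return 0;       /* 0x02 and 0xdeadbeef */
}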
2696 2696
2697struct mlx5_ifc_sqerr2rts_qp_out_bits { 2697struct mlx5_ifc_sqerr2rts_qp_out_bits {
2698 u8 status[0x8]; 2698 u8 status[0x8];
2699 u8 reserved_0[0x18]; 2699 u8 reserved_at_8[0x18];
2700 2700
2701 u8 syndrome[0x20]; 2701 u8 syndrome[0x20];
2702 2702
2703 u8 reserved_1[0x40]; 2703 u8 reserved_at_40[0x40];
2704}; 2704};
2705 2705
2706struct mlx5_ifc_sqerr2rts_qp_in_bits { 2706struct mlx5_ifc_sqerr2rts_qp_in_bits {
2707 u8 opcode[0x10]; 2707 u8 opcode[0x10];
2708 u8 reserved_0[0x10]; 2708 u8 reserved_at_10[0x10];
2709 2709
2710 u8 reserved_1[0x10]; 2710 u8 reserved_at_20[0x10];
2711 u8 op_mod[0x10]; 2711 u8 op_mod[0x10];
2712 2712
2713 u8 reserved_2[0x8]; 2713 u8 reserved_at_40[0x8];
2714 u8 qpn[0x18]; 2714 u8 qpn[0x18];
2715 2715
2716 u8 reserved_3[0x20]; 2716 u8 reserved_at_60[0x20];
2717 2717
2718 u8 opt_param_mask[0x20]; 2718 u8 opt_param_mask[0x20];
2719 2719
2720 u8 reserved_4[0x20]; 2720 u8 reserved_at_a0[0x20];
2721 2721
2722 struct mlx5_ifc_qpc_bits qpc; 2722 struct mlx5_ifc_qpc_bits qpc;
2723 2723
2724 u8 reserved_5[0x80]; 2724 u8 reserved_at_800[0x80];
2725}; 2725};
2726 2726
2727struct mlx5_ifc_sqd2rts_qp_out_bits { 2727struct mlx5_ifc_sqd2rts_qp_out_bits {
2728 u8 status[0x8]; 2728 u8 status[0x8];
2729 u8 reserved_0[0x18]; 2729 u8 reserved_at_8[0x18];
2730 2730
2731 u8 syndrome[0x20]; 2731 u8 syndrome[0x20];
2732 2732
2733 u8 reserved_1[0x40]; 2733 u8 reserved_at_40[0x40];
2734}; 2734};
2735 2735
2736struct mlx5_ifc_sqd2rts_qp_in_bits { 2736struct mlx5_ifc_sqd2rts_qp_in_bits {
2737 u8 opcode[0x10]; 2737 u8 opcode[0x10];
2738 u8 reserved_0[0x10]; 2738 u8 reserved_at_10[0x10];
2739 2739
2740 u8 reserved_1[0x10]; 2740 u8 reserved_at_20[0x10];
2741 u8 op_mod[0x10]; 2741 u8 op_mod[0x10];
2742 2742
2743 u8 reserved_2[0x8]; 2743 u8 reserved_at_40[0x8];
2744 u8 qpn[0x18]; 2744 u8 qpn[0x18];
2745 2745
2746 u8 reserved_3[0x20]; 2746 u8 reserved_at_60[0x20];
2747 2747
2748 u8 opt_param_mask[0x20]; 2748 u8 opt_param_mask[0x20];
2749 2749
2750 u8 reserved_4[0x20]; 2750 u8 reserved_at_a0[0x20];
2751 2751
2752 struct mlx5_ifc_qpc_bits qpc; 2752 struct mlx5_ifc_qpc_bits qpc;
2753 2753
2754 u8 reserved_5[0x80]; 2754 u8 reserved_at_800[0x80];
2755}; 2755};
2756 2756
2757struct mlx5_ifc_set_roce_address_out_bits { 2757struct mlx5_ifc_set_roce_address_out_bits {
2758 u8 status[0x8]; 2758 u8 status[0x8];
2759 u8 reserved_0[0x18]; 2759 u8 reserved_at_8[0x18];
2760 2760
2761 u8 syndrome[0x20]; 2761 u8 syndrome[0x20];
2762 2762
2763 u8 reserved_1[0x40]; 2763 u8 reserved_at_40[0x40];
2764}; 2764};
2765 2765
2766struct mlx5_ifc_set_roce_address_in_bits { 2766struct mlx5_ifc_set_roce_address_in_bits {
2767 u8 opcode[0x10]; 2767 u8 opcode[0x10];
2768 u8 reserved_0[0x10]; 2768 u8 reserved_at_10[0x10];
2769 2769
2770 u8 reserved_1[0x10]; 2770 u8 reserved_at_20[0x10];
2771 u8 op_mod[0x10]; 2771 u8 op_mod[0x10];
2772 2772
2773 u8 roce_address_index[0x10]; 2773 u8 roce_address_index[0x10];
2774 u8 reserved_2[0x10]; 2774 u8 reserved_at_50[0x10];
2775 2775
2776 u8 reserved_3[0x20]; 2776 u8 reserved_at_60[0x20];
2777 2777
2778 struct mlx5_ifc_roce_addr_layout_bits roce_address; 2778 struct mlx5_ifc_roce_addr_layout_bits roce_address;
2779}; 2779};
2780 2780
2781struct mlx5_ifc_set_mad_demux_out_bits { 2781struct mlx5_ifc_set_mad_demux_out_bits {
2782 u8 status[0x8]; 2782 u8 status[0x8];
2783 u8 reserved_0[0x18]; 2783 u8 reserved_at_8[0x18];
2784 2784
2785 u8 syndrome[0x20]; 2785 u8 syndrome[0x20];
2786 2786
2787 u8 reserved_1[0x40]; 2787 u8 reserved_at_40[0x40];
2788}; 2788};
2789 2789
2790enum { 2790enum {
@@ -2794,89 +2794,89 @@ enum {
2794 2794
2795struct mlx5_ifc_set_mad_demux_in_bits { 2795struct mlx5_ifc_set_mad_demux_in_bits {
2796 u8 opcode[0x10]; 2796 u8 opcode[0x10];
2797 u8 reserved_0[0x10]; 2797 u8 reserved_at_10[0x10];
2798 2798
2799 u8 reserved_1[0x10]; 2799 u8 reserved_at_20[0x10];
2800 u8 op_mod[0x10]; 2800 u8 op_mod[0x10];
2801 2801
2802 u8 reserved_2[0x20]; 2802 u8 reserved_at_40[0x20];
2803 2803
2804 u8 reserved_3[0x6]; 2804 u8 reserved_at_60[0x6];
2805 u8 demux_mode[0x2]; 2805 u8 demux_mode[0x2];
2806 u8 reserved_4[0x18]; 2806 u8 reserved_at_68[0x18];
2807}; 2807};
2808 2808
2809struct mlx5_ifc_set_l2_table_entry_out_bits { 2809struct mlx5_ifc_set_l2_table_entry_out_bits {
2810 u8 status[0x8]; 2810 u8 status[0x8];
2811 u8 reserved_0[0x18]; 2811 u8 reserved_at_8[0x18];
2812 2812
2813 u8 syndrome[0x20]; 2813 u8 syndrome[0x20];
2814 2814
2815 u8 reserved_1[0x40]; 2815 u8 reserved_at_40[0x40];
2816}; 2816};
2817 2817
2818struct mlx5_ifc_set_l2_table_entry_in_bits { 2818struct mlx5_ifc_set_l2_table_entry_in_bits {
2819 u8 opcode[0x10]; 2819 u8 opcode[0x10];
2820 u8 reserved_0[0x10]; 2820 u8 reserved_at_10[0x10];
2821 2821
2822 u8 reserved_1[0x10]; 2822 u8 reserved_at_20[0x10];
2823 u8 op_mod[0x10]; 2823 u8 op_mod[0x10];
2824 2824
2825 u8 reserved_2[0x60]; 2825 u8 reserved_at_40[0x60];
2826 2826
2827 u8 reserved_3[0x8]; 2827 u8 reserved_at_a0[0x8];
2828 u8 table_index[0x18]; 2828 u8 table_index[0x18];
2829 2829
2830 u8 reserved_4[0x20]; 2830 u8 reserved_at_c0[0x20];
2831 2831
2832 u8 reserved_5[0x13]; 2832 u8 reserved_at_e0[0x13];
2833 u8 vlan_valid[0x1]; 2833 u8 vlan_valid[0x1];
2834 u8 vlan[0xc]; 2834 u8 vlan[0xc];
2835 2835
2836 struct mlx5_ifc_mac_address_layout_bits mac_address; 2836 struct mlx5_ifc_mac_address_layout_bits mac_address;
2837 2837
2838 u8 reserved_6[0xc0]; 2838 u8 reserved_at_140[0xc0];
2839}; 2839};
2840 2840
2841struct mlx5_ifc_set_issi_out_bits { 2841struct mlx5_ifc_set_issi_out_bits {
2842 u8 status[0x8]; 2842 u8 status[0x8];
2843 u8 reserved_0[0x18]; 2843 u8 reserved_at_8[0x18];
2844 2844
2845 u8 syndrome[0x20]; 2845 u8 syndrome[0x20];
2846 2846
2847 u8 reserved_1[0x40]; 2847 u8 reserved_at_40[0x40];
2848}; 2848};
2849 2849
2850struct mlx5_ifc_set_issi_in_bits { 2850struct mlx5_ifc_set_issi_in_bits {
2851 u8 opcode[0x10]; 2851 u8 opcode[0x10];
2852 u8 reserved_0[0x10]; 2852 u8 reserved_at_10[0x10];
2853 2853
2854 u8 reserved_1[0x10]; 2854 u8 reserved_at_20[0x10];
2855 u8 op_mod[0x10]; 2855 u8 op_mod[0x10];
2856 2856
2857 u8 reserved_2[0x10]; 2857 u8 reserved_at_40[0x10];
2858 u8 current_issi[0x10]; 2858 u8 current_issi[0x10];
2859 2859
2860 u8 reserved_3[0x20]; 2860 u8 reserved_at_60[0x20];
2861}; 2861};
2862 2862
2863struct mlx5_ifc_set_hca_cap_out_bits { 2863struct mlx5_ifc_set_hca_cap_out_bits {
2864 u8 status[0x8]; 2864 u8 status[0x8];
2865 u8 reserved_0[0x18]; 2865 u8 reserved_at_8[0x18];
2866 2866
2867 u8 syndrome[0x20]; 2867 u8 syndrome[0x20];
2868 2868
2869 u8 reserved_1[0x40]; 2869 u8 reserved_at_40[0x40];
2870}; 2870};
2871 2871
2872struct mlx5_ifc_set_hca_cap_in_bits { 2872struct mlx5_ifc_set_hca_cap_in_bits {
2873 u8 opcode[0x10]; 2873 u8 opcode[0x10];
2874 u8 reserved_0[0x10]; 2874 u8 reserved_at_10[0x10];
2875 2875
2876 u8 reserved_1[0x10]; 2876 u8 reserved_at_20[0x10];
2877 u8 op_mod[0x10]; 2877 u8 op_mod[0x10];
2878 2878
2879 u8 reserved_2[0x40]; 2879 u8 reserved_at_40[0x40];
2880 2880
2881 union mlx5_ifc_hca_cap_union_bits capability; 2881 union mlx5_ifc_hca_cap_union_bits capability;
2882}; 2882};
@@ -2890,156 +2890,156 @@ enum {
2890 2890
2891struct mlx5_ifc_set_fte_out_bits { 2891struct mlx5_ifc_set_fte_out_bits {
2892 u8 status[0x8]; 2892 u8 status[0x8];
2893 u8 reserved_0[0x18]; 2893 u8 reserved_at_8[0x18];
2894 2894
2895 u8 syndrome[0x20]; 2895 u8 syndrome[0x20];
2896 2896
2897 u8 reserved_1[0x40]; 2897 u8 reserved_at_40[0x40];
2898}; 2898};
2899 2899
2900struct mlx5_ifc_set_fte_in_bits { 2900struct mlx5_ifc_set_fte_in_bits {
2901 u8 opcode[0x10]; 2901 u8 opcode[0x10];
2902 u8 reserved_0[0x10]; 2902 u8 reserved_at_10[0x10];
2903 2903
2904 u8 reserved_1[0x10]; 2904 u8 reserved_at_20[0x10];
2905 u8 op_mod[0x10]; 2905 u8 op_mod[0x10];
2906 2906
2907 u8 reserved_2[0x40]; 2907 u8 reserved_at_40[0x40];
2908 2908
2909 u8 table_type[0x8]; 2909 u8 table_type[0x8];
2910 u8 reserved_3[0x18]; 2910 u8 reserved_at_88[0x18];
2911 2911
2912 u8 reserved_4[0x8]; 2912 u8 reserved_at_a0[0x8];
2913 u8 table_id[0x18]; 2913 u8 table_id[0x18];
2914 2914
2915 u8 reserved_5[0x18]; 2915 u8 reserved_at_c0[0x18];
2916 u8 modify_enable_mask[0x8]; 2916 u8 modify_enable_mask[0x8];
2917 2917
2918 u8 reserved_6[0x20]; 2918 u8 reserved_at_e0[0x20];
2919 2919
2920 u8 flow_index[0x20]; 2920 u8 flow_index[0x20];
2921 2921
2922 u8 reserved_7[0xe0]; 2922 u8 reserved_at_120[0xe0];
2923 2923
2924 struct mlx5_ifc_flow_context_bits flow_context; 2924 struct mlx5_ifc_flow_context_bits flow_context;
2925}; 2925};
2926 2926
2927struct mlx5_ifc_rts2rts_qp_out_bits { 2927struct mlx5_ifc_rts2rts_qp_out_bits {
2928 u8 status[0x8]; 2928 u8 status[0x8];
2929 u8 reserved_0[0x18]; 2929 u8 reserved_at_8[0x18];
2930 2930
2931 u8 syndrome[0x20]; 2931 u8 syndrome[0x20];
2932 2932
2933 u8 reserved_1[0x40]; 2933 u8 reserved_at_40[0x40];
2934}; 2934};
2935 2935
2936struct mlx5_ifc_rts2rts_qp_in_bits { 2936struct mlx5_ifc_rts2rts_qp_in_bits {
2937 u8 opcode[0x10]; 2937 u8 opcode[0x10];
2938 u8 reserved_0[0x10]; 2938 u8 reserved_at_10[0x10];
2939 2939
2940 u8 reserved_1[0x10]; 2940 u8 reserved_at_20[0x10];
2941 u8 op_mod[0x10]; 2941 u8 op_mod[0x10];
2942 2942
2943 u8 reserved_2[0x8]; 2943 u8 reserved_at_40[0x8];
2944 u8 qpn[0x18]; 2944 u8 qpn[0x18];
2945 2945
2946 u8 reserved_3[0x20]; 2946 u8 reserved_at_60[0x20];
2947 2947
2948 u8 opt_param_mask[0x20]; 2948 u8 opt_param_mask[0x20];
2949 2949
2950 u8 reserved_4[0x20]; 2950 u8 reserved_at_a0[0x20];
2951 2951
2952 struct mlx5_ifc_qpc_bits qpc; 2952 struct mlx5_ifc_qpc_bits qpc;
2953 2953
2954 u8 reserved_5[0x80]; 2954 u8 reserved_at_800[0x80];
2955}; 2955};
2956 2956
2957struct mlx5_ifc_rtr2rts_qp_out_bits { 2957struct mlx5_ifc_rtr2rts_qp_out_bits {
2958 u8 status[0x8]; 2958 u8 status[0x8];
2959 u8 reserved_0[0x18]; 2959 u8 reserved_at_8[0x18];
2960 2960
2961 u8 syndrome[0x20]; 2961 u8 syndrome[0x20];
2962 2962
2963 u8 reserved_1[0x40]; 2963 u8 reserved_at_40[0x40];
2964}; 2964};
2965 2965
2966struct mlx5_ifc_rtr2rts_qp_in_bits { 2966struct mlx5_ifc_rtr2rts_qp_in_bits {
2967 u8 opcode[0x10]; 2967 u8 opcode[0x10];
2968 u8 reserved_0[0x10]; 2968 u8 reserved_at_10[0x10];
2969 2969
2970 u8 reserved_1[0x10]; 2970 u8 reserved_at_20[0x10];
2971 u8 op_mod[0x10]; 2971 u8 op_mod[0x10];
2972 2972
2973 u8 reserved_2[0x8]; 2973 u8 reserved_at_40[0x8];
2974 u8 qpn[0x18]; 2974 u8 qpn[0x18];
2975 2975
2976 u8 reserved_3[0x20]; 2976 u8 reserved_at_60[0x20];
2977 2977
2978 u8 opt_param_mask[0x20]; 2978 u8 opt_param_mask[0x20];
2979 2979
2980 u8 reserved_4[0x20]; 2980 u8 reserved_at_a0[0x20];
2981 2981
2982 struct mlx5_ifc_qpc_bits qpc; 2982 struct mlx5_ifc_qpc_bits qpc;
2983 2983
2984 u8 reserved_5[0x80]; 2984 u8 reserved_at_800[0x80];
2985}; 2985};
2986 2986
2987struct mlx5_ifc_rst2init_qp_out_bits { 2987struct mlx5_ifc_rst2init_qp_out_bits {
2988 u8 status[0x8]; 2988 u8 status[0x8];
2989 u8 reserved_0[0x18]; 2989 u8 reserved_at_8[0x18];
2990 2990
2991 u8 syndrome[0x20]; 2991 u8 syndrome[0x20];
2992 2992
2993 u8 reserved_1[0x40]; 2993 u8 reserved_at_40[0x40];
2994}; 2994};
2995 2995
2996struct mlx5_ifc_rst2init_qp_in_bits { 2996struct mlx5_ifc_rst2init_qp_in_bits {
2997 u8 opcode[0x10]; 2997 u8 opcode[0x10];
2998 u8 reserved_0[0x10]; 2998 u8 reserved_at_10[0x10];
2999 2999
3000 u8 reserved_1[0x10]; 3000 u8 reserved_at_20[0x10];
3001 u8 op_mod[0x10]; 3001 u8 op_mod[0x10];
3002 3002
3003 u8 reserved_2[0x8]; 3003 u8 reserved_at_40[0x8];
3004 u8 qpn[0x18]; 3004 u8 qpn[0x18];
3005 3005
3006 u8 reserved_3[0x20]; 3006 u8 reserved_at_60[0x20];
3007 3007
3008 u8 opt_param_mask[0x20]; 3008 u8 opt_param_mask[0x20];
3009 3009
3010 u8 reserved_4[0x20]; 3010 u8 reserved_at_a0[0x20];
3011 3011
3012 struct mlx5_ifc_qpc_bits qpc; 3012 struct mlx5_ifc_qpc_bits qpc;
3013 3013
3014 u8 reserved_5[0x80]; 3014 u8 reserved_at_800[0x80];
3015}; 3015};
3016 3016
3017struct mlx5_ifc_query_xrc_srq_out_bits { 3017struct mlx5_ifc_query_xrc_srq_out_bits {
3018 u8 status[0x8]; 3018 u8 status[0x8];
3019 u8 reserved_0[0x18]; 3019 u8 reserved_at_8[0x18];
3020 3020
3021 u8 syndrome[0x20]; 3021 u8 syndrome[0x20];
3022 3022
3023 u8 reserved_1[0x40]; 3023 u8 reserved_at_40[0x40];
3024 3024
3025 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; 3025 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
3026 3026
3027 u8 reserved_2[0x600]; 3027 u8 reserved_at_280[0x600];
3028 3028
3029 u8 pas[0][0x40]; 3029 u8 pas[0][0x40];
3030}; 3030};
3031 3031
3032struct mlx5_ifc_query_xrc_srq_in_bits { 3032struct mlx5_ifc_query_xrc_srq_in_bits {
3033 u8 opcode[0x10]; 3033 u8 opcode[0x10];
3034 u8 reserved_0[0x10]; 3034 u8 reserved_at_10[0x10];
3035 3035
3036 u8 reserved_1[0x10]; 3036 u8 reserved_at_20[0x10];
3037 u8 op_mod[0x10]; 3037 u8 op_mod[0x10];
3038 3038
3039 u8 reserved_2[0x8]; 3039 u8 reserved_at_40[0x8];
3040 u8 xrc_srqn[0x18]; 3040 u8 xrc_srqn[0x18];
3041 3041
3042 u8 reserved_3[0x20]; 3042 u8 reserved_at_60[0x20];
3043}; 3043};
3044 3044
3045enum { 3045enum {
@@ -3049,13 +3049,13 @@ enum {
3049 3049
3050struct mlx5_ifc_query_vport_state_out_bits { 3050struct mlx5_ifc_query_vport_state_out_bits {
3051 u8 status[0x8]; 3051 u8 status[0x8];
3052 u8 reserved_0[0x18]; 3052 u8 reserved_at_8[0x18];
3053 3053
3054 u8 syndrome[0x20]; 3054 u8 syndrome[0x20];
3055 3055
3056 u8 reserved_1[0x20]; 3056 u8 reserved_at_40[0x20];
3057 3057
3058 u8 reserved_2[0x18]; 3058 u8 reserved_at_60[0x18];
3059 u8 admin_state[0x4]; 3059 u8 admin_state[0x4];
3060 u8 state[0x4]; 3060 u8 state[0x4];
3061}; 3061};
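The hunks above and below all apply the same mechanical rename: each reserved placeholder keeps its width, and only its name changes, from an ordinal suffix on the left (reserved_0, reserved_1, ...) to the field's starting bit offset within the structure, written in hex, on the right (reserved_at_8, reserved_at_40, and so on). The sketch below is a minimal illustration of that convention, not code from this patch or from the kernel tree; struct field, hdr and the printing loop are hypothetical helpers. It recomputes the offsets for the common command-output header seen in most of the *_out_bits structures, assuming only that each u8 name[0xNN] declaration in mlx5_ifc.h denotes a field 0xNN bits wide.

    /* Illustrative only, not from the kernel tree.  Shows how the new
     * reserved_at_<hex offset> names in this diff are derived: the suffix
     * is the running bit offset of the gap inside the structure.
     */
    #include <stdio.h>

    struct field {
    	const char *name;	/* NULL marks an unnamed (reserved) gap  */
    	unsigned int bits;	/* width in bits, as in u8 name[0xNN]    */
    };

    int main(void)
    {
    	/* Layout of a typical *_out_bits command header from the hunks:
    	 * status[0x8], reserved[0x18], syndrome[0x20], reserved[0x40].
    	 */
    	static const struct field hdr[] = {
    		{ "status",   0x08 },
    		{ NULL,       0x18 },
    		{ "syndrome", 0x20 },
    		{ NULL,       0x40 },
    	};
    	unsigned int off = 0;
    	unsigned int i;

    	for (i = 0; i < sizeof(hdr) / sizeof(hdr[0]); i++) {
    		if (hdr[i].name)
    			printf("u8 %s[0x%x];\n", hdr[i].name, hdr[i].bits);
    		else
    			printf("u8 reserved_at_%x[0x%x];\n", off, hdr[i].bits);
    		off += hdr[i].bits;
    	}
    	/* The two unnamed gaps print as reserved_at_8[0x18] and
    	 * reserved_at_40[0x40], matching the right-hand column above.
    	 */
    	return 0;
    }

The same rule covers the few placeholders whose old name was not an ordinal (for example supported_issi_reserved becomes reserved_at_100), and it makes a reserved field's name depend only on its own position, not on how many other gaps happen to precede it in the structure.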
@@ -3067,25 +3067,25 @@ enum {
3067 3067
3068struct mlx5_ifc_query_vport_state_in_bits { 3068struct mlx5_ifc_query_vport_state_in_bits {
3069 u8 opcode[0x10]; 3069 u8 opcode[0x10];
3070 u8 reserved_0[0x10]; 3070 u8 reserved_at_10[0x10];
3071 3071
3072 u8 reserved_1[0x10]; 3072 u8 reserved_at_20[0x10];
3073 u8 op_mod[0x10]; 3073 u8 op_mod[0x10];
3074 3074
3075 u8 other_vport[0x1]; 3075 u8 other_vport[0x1];
3076 u8 reserved_2[0xf]; 3076 u8 reserved_at_41[0xf];
3077 u8 vport_number[0x10]; 3077 u8 vport_number[0x10];
3078 3078
3079 u8 reserved_3[0x20]; 3079 u8 reserved_at_60[0x20];
3080}; 3080};
3081 3081
3082struct mlx5_ifc_query_vport_counter_out_bits { 3082struct mlx5_ifc_query_vport_counter_out_bits {
3083 u8 status[0x8]; 3083 u8 status[0x8];
3084 u8 reserved_0[0x18]; 3084 u8 reserved_at_8[0x18];
3085 3085
3086 u8 syndrome[0x20]; 3086 u8 syndrome[0x20];
3087 3087
3088 u8 reserved_1[0x40]; 3088 u8 reserved_at_40[0x40];
3089 3089
3090 struct mlx5_ifc_traffic_counter_bits received_errors; 3090 struct mlx5_ifc_traffic_counter_bits received_errors;
3091 3091
@@ -3111,7 +3111,7 @@ struct mlx5_ifc_query_vport_counter_out_bits {
3111 3111
3112 struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast; 3112 struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
3113 3113
3114 u8 reserved_2[0xa00]; 3114 u8 reserved_at_680[0xa00];
3115}; 3115};
3116 3116
3117enum { 3117enum {
@@ -3120,328 +3120,328 @@ enum {
3120 3120
3121struct mlx5_ifc_query_vport_counter_in_bits { 3121struct mlx5_ifc_query_vport_counter_in_bits {
3122 u8 opcode[0x10]; 3122 u8 opcode[0x10];
3123 u8 reserved_0[0x10]; 3123 u8 reserved_at_10[0x10];
3124 3124
3125 u8 reserved_1[0x10]; 3125 u8 reserved_at_20[0x10];
3126 u8 op_mod[0x10]; 3126 u8 op_mod[0x10];
3127 3127
3128 u8 other_vport[0x1]; 3128 u8 other_vport[0x1];
3129 u8 reserved_2[0xf]; 3129 u8 reserved_at_41[0xf];
3130 u8 vport_number[0x10]; 3130 u8 vport_number[0x10];
3131 3131
3132 u8 reserved_3[0x60]; 3132 u8 reserved_at_60[0x60];
3133 3133
3134 u8 clear[0x1]; 3134 u8 clear[0x1];
3135 u8 reserved_4[0x1f]; 3135 u8 reserved_at_c1[0x1f];
3136 3136
3137 u8 reserved_5[0x20]; 3137 u8 reserved_at_e0[0x20];
3138}; 3138};
3139 3139
3140struct mlx5_ifc_query_tis_out_bits { 3140struct mlx5_ifc_query_tis_out_bits {
3141 u8 status[0x8]; 3141 u8 status[0x8];
3142 u8 reserved_0[0x18]; 3142 u8 reserved_at_8[0x18];
3143 3143
3144 u8 syndrome[0x20]; 3144 u8 syndrome[0x20];
3145 3145
3146 u8 reserved_1[0x40]; 3146 u8 reserved_at_40[0x40];
3147 3147
3148 struct mlx5_ifc_tisc_bits tis_context; 3148 struct mlx5_ifc_tisc_bits tis_context;
3149}; 3149};
3150 3150
3151struct mlx5_ifc_query_tis_in_bits { 3151struct mlx5_ifc_query_tis_in_bits {
3152 u8 opcode[0x10]; 3152 u8 opcode[0x10];
3153 u8 reserved_0[0x10]; 3153 u8 reserved_at_10[0x10];
3154 3154
3155 u8 reserved_1[0x10]; 3155 u8 reserved_at_20[0x10];
3156 u8 op_mod[0x10]; 3156 u8 op_mod[0x10];
3157 3157
3158 u8 reserved_2[0x8]; 3158 u8 reserved_at_40[0x8];
3159 u8 tisn[0x18]; 3159 u8 tisn[0x18];
3160 3160
3161 u8 reserved_3[0x20]; 3161 u8 reserved_at_60[0x20];
3162}; 3162};
3163 3163
3164struct mlx5_ifc_query_tir_out_bits { 3164struct mlx5_ifc_query_tir_out_bits {
3165 u8 status[0x8]; 3165 u8 status[0x8];
3166 u8 reserved_0[0x18]; 3166 u8 reserved_at_8[0x18];
3167 3167
3168 u8 syndrome[0x20]; 3168 u8 syndrome[0x20];
3169 3169
3170 u8 reserved_1[0xc0]; 3170 u8 reserved_at_40[0xc0];
3171 3171
3172 struct mlx5_ifc_tirc_bits tir_context; 3172 struct mlx5_ifc_tirc_bits tir_context;
3173}; 3173};
3174 3174
3175struct mlx5_ifc_query_tir_in_bits { 3175struct mlx5_ifc_query_tir_in_bits {
3176 u8 opcode[0x10]; 3176 u8 opcode[0x10];
3177 u8 reserved_0[0x10]; 3177 u8 reserved_at_10[0x10];
3178 3178
3179 u8 reserved_1[0x10]; 3179 u8 reserved_at_20[0x10];
3180 u8 op_mod[0x10]; 3180 u8 op_mod[0x10];
3181 3181
3182 u8 reserved_2[0x8]; 3182 u8 reserved_at_40[0x8];
3183 u8 tirn[0x18]; 3183 u8 tirn[0x18];
3184 3184
3185 u8 reserved_3[0x20]; 3185 u8 reserved_at_60[0x20];
3186}; 3186};
3187 3187
3188struct mlx5_ifc_query_srq_out_bits { 3188struct mlx5_ifc_query_srq_out_bits {
3189 u8 status[0x8]; 3189 u8 status[0x8];
3190 u8 reserved_0[0x18]; 3190 u8 reserved_at_8[0x18];
3191 3191
3192 u8 syndrome[0x20]; 3192 u8 syndrome[0x20];
3193 3193
3194 u8 reserved_1[0x40]; 3194 u8 reserved_at_40[0x40];
3195 3195
3196 struct mlx5_ifc_srqc_bits srq_context_entry; 3196 struct mlx5_ifc_srqc_bits srq_context_entry;
3197 3197
3198 u8 reserved_2[0x600]; 3198 u8 reserved_at_280[0x600];
3199 3199
3200 u8 pas[0][0x40]; 3200 u8 pas[0][0x40];
3201}; 3201};
3202 3202
3203struct mlx5_ifc_query_srq_in_bits { 3203struct mlx5_ifc_query_srq_in_bits {
3204 u8 opcode[0x10]; 3204 u8 opcode[0x10];
3205 u8 reserved_0[0x10]; 3205 u8 reserved_at_10[0x10];
3206 3206
3207 u8 reserved_1[0x10]; 3207 u8 reserved_at_20[0x10];
3208 u8 op_mod[0x10]; 3208 u8 op_mod[0x10];
3209 3209
3210 u8 reserved_2[0x8]; 3210 u8 reserved_at_40[0x8];
3211 u8 srqn[0x18]; 3211 u8 srqn[0x18];
3212 3212
3213 u8 reserved_3[0x20]; 3213 u8 reserved_at_60[0x20];
3214}; 3214};
3215 3215
3216struct mlx5_ifc_query_sq_out_bits { 3216struct mlx5_ifc_query_sq_out_bits {
3217 u8 status[0x8]; 3217 u8 status[0x8];
3218 u8 reserved_0[0x18]; 3218 u8 reserved_at_8[0x18];
3219 3219
3220 u8 syndrome[0x20]; 3220 u8 syndrome[0x20];
3221 3221
3222 u8 reserved_1[0xc0]; 3222 u8 reserved_at_40[0xc0];
3223 3223
3224 struct mlx5_ifc_sqc_bits sq_context; 3224 struct mlx5_ifc_sqc_bits sq_context;
3225}; 3225};
3226 3226
3227struct mlx5_ifc_query_sq_in_bits { 3227struct mlx5_ifc_query_sq_in_bits {
3228 u8 opcode[0x10]; 3228 u8 opcode[0x10];
3229 u8 reserved_0[0x10]; 3229 u8 reserved_at_10[0x10];
3230 3230
3231 u8 reserved_1[0x10]; 3231 u8 reserved_at_20[0x10];
3232 u8 op_mod[0x10]; 3232 u8 op_mod[0x10];
3233 3233
3234 u8 reserved_2[0x8]; 3234 u8 reserved_at_40[0x8];
3235 u8 sqn[0x18]; 3235 u8 sqn[0x18];
3236 3236
3237 u8 reserved_3[0x20]; 3237 u8 reserved_at_60[0x20];
3238}; 3238};
3239 3239
3240struct mlx5_ifc_query_special_contexts_out_bits { 3240struct mlx5_ifc_query_special_contexts_out_bits {
3241 u8 status[0x8]; 3241 u8 status[0x8];
3242 u8 reserved_0[0x18]; 3242 u8 reserved_at_8[0x18];
3243 3243
3244 u8 syndrome[0x20]; 3244 u8 syndrome[0x20];
3245 3245
3246 u8 reserved_1[0x20]; 3246 u8 reserved_at_40[0x20];
3247 3247
3248 u8 resd_lkey[0x20]; 3248 u8 resd_lkey[0x20];
3249}; 3249};
3250 3250
3251struct mlx5_ifc_query_special_contexts_in_bits { 3251struct mlx5_ifc_query_special_contexts_in_bits {
3252 u8 opcode[0x10]; 3252 u8 opcode[0x10];
3253 u8 reserved_0[0x10]; 3253 u8 reserved_at_10[0x10];
3254 3254
3255 u8 reserved_1[0x10]; 3255 u8 reserved_at_20[0x10];
3256 u8 op_mod[0x10]; 3256 u8 op_mod[0x10];
3257 3257
3258 u8 reserved_2[0x40]; 3258 u8 reserved_at_40[0x40];
3259}; 3259};
3260 3260
3261struct mlx5_ifc_query_rqt_out_bits { 3261struct mlx5_ifc_query_rqt_out_bits {
3262 u8 status[0x8]; 3262 u8 status[0x8];
3263 u8 reserved_0[0x18]; 3263 u8 reserved_at_8[0x18];
3264 3264
3265 u8 syndrome[0x20]; 3265 u8 syndrome[0x20];
3266 3266
3267 u8 reserved_1[0xc0]; 3267 u8 reserved_at_40[0xc0];
3268 3268
3269 struct mlx5_ifc_rqtc_bits rqt_context; 3269 struct mlx5_ifc_rqtc_bits rqt_context;
3270}; 3270};
3271 3271
3272struct mlx5_ifc_query_rqt_in_bits { 3272struct mlx5_ifc_query_rqt_in_bits {
3273 u8 opcode[0x10]; 3273 u8 opcode[0x10];
3274 u8 reserved_0[0x10]; 3274 u8 reserved_at_10[0x10];
3275 3275
3276 u8 reserved_1[0x10]; 3276 u8 reserved_at_20[0x10];
3277 u8 op_mod[0x10]; 3277 u8 op_mod[0x10];
3278 3278
3279 u8 reserved_2[0x8]; 3279 u8 reserved_at_40[0x8];
3280 u8 rqtn[0x18]; 3280 u8 rqtn[0x18];
3281 3281
3282 u8 reserved_3[0x20]; 3282 u8 reserved_at_60[0x20];
3283}; 3283};
3284 3284
3285struct mlx5_ifc_query_rq_out_bits { 3285struct mlx5_ifc_query_rq_out_bits {
3286 u8 status[0x8]; 3286 u8 status[0x8];
3287 u8 reserved_0[0x18]; 3287 u8 reserved_at_8[0x18];
3288 3288
3289 u8 syndrome[0x20]; 3289 u8 syndrome[0x20];
3290 3290
3291 u8 reserved_1[0xc0]; 3291 u8 reserved_at_40[0xc0];
3292 3292
3293 struct mlx5_ifc_rqc_bits rq_context; 3293 struct mlx5_ifc_rqc_bits rq_context;
3294}; 3294};
3295 3295
3296struct mlx5_ifc_query_rq_in_bits { 3296struct mlx5_ifc_query_rq_in_bits {
3297 u8 opcode[0x10]; 3297 u8 opcode[0x10];
3298 u8 reserved_0[0x10]; 3298 u8 reserved_at_10[0x10];
3299 3299
3300 u8 reserved_1[0x10]; 3300 u8 reserved_at_20[0x10];
3301 u8 op_mod[0x10]; 3301 u8 op_mod[0x10];
3302 3302
3303 u8 reserved_2[0x8]; 3303 u8 reserved_at_40[0x8];
3304 u8 rqn[0x18]; 3304 u8 rqn[0x18];
3305 3305
3306 u8 reserved_3[0x20]; 3306 u8 reserved_at_60[0x20];
3307}; 3307};
3308 3308
3309struct mlx5_ifc_query_roce_address_out_bits { 3309struct mlx5_ifc_query_roce_address_out_bits {
3310 u8 status[0x8]; 3310 u8 status[0x8];
3311 u8 reserved_0[0x18]; 3311 u8 reserved_at_8[0x18];
3312 3312
3313 u8 syndrome[0x20]; 3313 u8 syndrome[0x20];
3314 3314
3315 u8 reserved_1[0x40]; 3315 u8 reserved_at_40[0x40];
3316 3316
3317 struct mlx5_ifc_roce_addr_layout_bits roce_address; 3317 struct mlx5_ifc_roce_addr_layout_bits roce_address;
3318}; 3318};
3319 3319
3320struct mlx5_ifc_query_roce_address_in_bits { 3320struct mlx5_ifc_query_roce_address_in_bits {
3321 u8 opcode[0x10]; 3321 u8 opcode[0x10];
3322 u8 reserved_0[0x10]; 3322 u8 reserved_at_10[0x10];
3323 3323
3324 u8 reserved_1[0x10]; 3324 u8 reserved_at_20[0x10];
3325 u8 op_mod[0x10]; 3325 u8 op_mod[0x10];
3326 3326
3327 u8 roce_address_index[0x10]; 3327 u8 roce_address_index[0x10];
3328 u8 reserved_2[0x10]; 3328 u8 reserved_at_50[0x10];
3329 3329
3330 u8 reserved_3[0x20]; 3330 u8 reserved_at_60[0x20];
3331}; 3331};
3332 3332
3333struct mlx5_ifc_query_rmp_out_bits { 3333struct mlx5_ifc_query_rmp_out_bits {
3334 u8 status[0x8]; 3334 u8 status[0x8];
3335 u8 reserved_0[0x18]; 3335 u8 reserved_at_8[0x18];
3336 3336
3337 u8 syndrome[0x20]; 3337 u8 syndrome[0x20];
3338 3338
3339 u8 reserved_1[0xc0]; 3339 u8 reserved_at_40[0xc0];
3340 3340
3341 struct mlx5_ifc_rmpc_bits rmp_context; 3341 struct mlx5_ifc_rmpc_bits rmp_context;
3342}; 3342};
3343 3343
3344struct mlx5_ifc_query_rmp_in_bits { 3344struct mlx5_ifc_query_rmp_in_bits {
3345 u8 opcode[0x10]; 3345 u8 opcode[0x10];
3346 u8 reserved_0[0x10]; 3346 u8 reserved_at_10[0x10];
3347 3347
3348 u8 reserved_1[0x10]; 3348 u8 reserved_at_20[0x10];
3349 u8 op_mod[0x10]; 3349 u8 op_mod[0x10];
3350 3350
3351 u8 reserved_2[0x8]; 3351 u8 reserved_at_40[0x8];
3352 u8 rmpn[0x18]; 3352 u8 rmpn[0x18];
3353 3353
3354 u8 reserved_3[0x20]; 3354 u8 reserved_at_60[0x20];
3355}; 3355};
3356 3356
3357struct mlx5_ifc_query_qp_out_bits { 3357struct mlx5_ifc_query_qp_out_bits {
3358 u8 status[0x8]; 3358 u8 status[0x8];
3359 u8 reserved_0[0x18]; 3359 u8 reserved_at_8[0x18];
3360 3360
3361 u8 syndrome[0x20]; 3361 u8 syndrome[0x20];
3362 3362
3363 u8 reserved_1[0x40]; 3363 u8 reserved_at_40[0x40];
3364 3364
3365 u8 opt_param_mask[0x20]; 3365 u8 opt_param_mask[0x20];
3366 3366
3367 u8 reserved_2[0x20]; 3367 u8 reserved_at_a0[0x20];
3368 3368
3369 struct mlx5_ifc_qpc_bits qpc; 3369 struct mlx5_ifc_qpc_bits qpc;
3370 3370
3371 u8 reserved_3[0x80]; 3371 u8 reserved_at_800[0x80];
3372 3372
3373 u8 pas[0][0x40]; 3373 u8 pas[0][0x40];
3374}; 3374};
3375 3375
3376struct mlx5_ifc_query_qp_in_bits { 3376struct mlx5_ifc_query_qp_in_bits {
3377 u8 opcode[0x10]; 3377 u8 opcode[0x10];
3378 u8 reserved_0[0x10]; 3378 u8 reserved_at_10[0x10];
3379 3379
3380 u8 reserved_1[0x10]; 3380 u8 reserved_at_20[0x10];
3381 u8 op_mod[0x10]; 3381 u8 op_mod[0x10];
3382 3382
3383 u8 reserved_2[0x8]; 3383 u8 reserved_at_40[0x8];
3384 u8 qpn[0x18]; 3384 u8 qpn[0x18];
3385 3385
3386 u8 reserved_3[0x20]; 3386 u8 reserved_at_60[0x20];
3387}; 3387};
3388 3388
3389struct mlx5_ifc_query_q_counter_out_bits { 3389struct mlx5_ifc_query_q_counter_out_bits {
3390 u8 status[0x8]; 3390 u8 status[0x8];
3391 u8 reserved_0[0x18]; 3391 u8 reserved_at_8[0x18];
3392 3392
3393 u8 syndrome[0x20]; 3393 u8 syndrome[0x20];
3394 3394
3395 u8 reserved_1[0x40]; 3395 u8 reserved_at_40[0x40];
3396 3396
3397 u8 rx_write_requests[0x20]; 3397 u8 rx_write_requests[0x20];
3398 3398
3399 u8 reserved_2[0x20]; 3399 u8 reserved_at_a0[0x20];
3400 3400
3401 u8 rx_read_requests[0x20]; 3401 u8 rx_read_requests[0x20];
3402 3402
3403 u8 reserved_3[0x20]; 3403 u8 reserved_at_e0[0x20];
3404 3404
3405 u8 rx_atomic_requests[0x20]; 3405 u8 rx_atomic_requests[0x20];
3406 3406
3407 u8 reserved_4[0x20]; 3407 u8 reserved_at_120[0x20];
3408 3408
3409 u8 rx_dct_connect[0x20]; 3409 u8 rx_dct_connect[0x20];
3410 3410
3411 u8 reserved_5[0x20]; 3411 u8 reserved_at_160[0x20];
3412 3412
3413 u8 out_of_buffer[0x20]; 3413 u8 out_of_buffer[0x20];
3414 3414
3415 u8 reserved_6[0x20]; 3415 u8 reserved_at_1a0[0x20];
3416 3416
3417 u8 out_of_sequence[0x20]; 3417 u8 out_of_sequence[0x20];
3418 3418
3419 u8 reserved_7[0x620]; 3419 u8 reserved_at_1e0[0x620];
3420}; 3420};
3421 3421
3422struct mlx5_ifc_query_q_counter_in_bits { 3422struct mlx5_ifc_query_q_counter_in_bits {
3423 u8 opcode[0x10]; 3423 u8 opcode[0x10];
3424 u8 reserved_0[0x10]; 3424 u8 reserved_at_10[0x10];
3425 3425
3426 u8 reserved_1[0x10]; 3426 u8 reserved_at_20[0x10];
3427 u8 op_mod[0x10]; 3427 u8 op_mod[0x10];
3428 3428
3429 u8 reserved_2[0x80]; 3429 u8 reserved_at_40[0x80];
3430 3430
3431 u8 clear[0x1]; 3431 u8 clear[0x1];
3432 u8 reserved_3[0x1f]; 3432 u8 reserved_at_c1[0x1f];
3433 3433
3434 u8 reserved_4[0x18]; 3434 u8 reserved_at_e0[0x18];
3435 u8 counter_set_id[0x8]; 3435 u8 counter_set_id[0x8];
3436}; 3436};
3437 3437
3438struct mlx5_ifc_query_pages_out_bits { 3438struct mlx5_ifc_query_pages_out_bits {
3439 u8 status[0x8]; 3439 u8 status[0x8];
3440 u8 reserved_0[0x18]; 3440 u8 reserved_at_8[0x18];
3441 3441
3442 u8 syndrome[0x20]; 3442 u8 syndrome[0x20];
3443 3443
3444 u8 reserved_1[0x10]; 3444 u8 reserved_at_40[0x10];
3445 u8 function_id[0x10]; 3445 u8 function_id[0x10];
3446 3446
3447 u8 num_pages[0x20]; 3447 u8 num_pages[0x20];
@@ -3455,55 +3455,55 @@ enum {
3455 3455
3456struct mlx5_ifc_query_pages_in_bits { 3456struct mlx5_ifc_query_pages_in_bits {
3457 u8 opcode[0x10]; 3457 u8 opcode[0x10];
3458 u8 reserved_0[0x10]; 3458 u8 reserved_at_10[0x10];
3459 3459
3460 u8 reserved_1[0x10]; 3460 u8 reserved_at_20[0x10];
3461 u8 op_mod[0x10]; 3461 u8 op_mod[0x10];
3462 3462
3463 u8 reserved_2[0x10]; 3463 u8 reserved_at_40[0x10];
3464 u8 function_id[0x10]; 3464 u8 function_id[0x10];
3465 3465
3466 u8 reserved_3[0x20]; 3466 u8 reserved_at_60[0x20];
3467}; 3467};
3468 3468
3469struct mlx5_ifc_query_nic_vport_context_out_bits { 3469struct mlx5_ifc_query_nic_vport_context_out_bits {
3470 u8 status[0x8]; 3470 u8 status[0x8];
3471 u8 reserved_0[0x18]; 3471 u8 reserved_at_8[0x18];
3472 3472
3473 u8 syndrome[0x20]; 3473 u8 syndrome[0x20];
3474 3474
3475 u8 reserved_1[0x40]; 3475 u8 reserved_at_40[0x40];
3476 3476
3477 struct mlx5_ifc_nic_vport_context_bits nic_vport_context; 3477 struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
3478}; 3478};
3479 3479
3480struct mlx5_ifc_query_nic_vport_context_in_bits { 3480struct mlx5_ifc_query_nic_vport_context_in_bits {
3481 u8 opcode[0x10]; 3481 u8 opcode[0x10];
3482 u8 reserved_0[0x10]; 3482 u8 reserved_at_10[0x10];
3483 3483
3484 u8 reserved_1[0x10]; 3484 u8 reserved_at_20[0x10];
3485 u8 op_mod[0x10]; 3485 u8 op_mod[0x10];
3486 3486
3487 u8 other_vport[0x1]; 3487 u8 other_vport[0x1];
3488 u8 reserved_2[0xf]; 3488 u8 reserved_at_41[0xf];
3489 u8 vport_number[0x10]; 3489 u8 vport_number[0x10];
3490 3490
3491 u8 reserved_3[0x5]; 3491 u8 reserved_at_60[0x5];
3492 u8 allowed_list_type[0x3]; 3492 u8 allowed_list_type[0x3];
3493 u8 reserved_4[0x18]; 3493 u8 reserved_at_68[0x18];
3494}; 3494};
3495 3495
3496struct mlx5_ifc_query_mkey_out_bits { 3496struct mlx5_ifc_query_mkey_out_bits {
3497 u8 status[0x8]; 3497 u8 status[0x8];
3498 u8 reserved_0[0x18]; 3498 u8 reserved_at_8[0x18];
3499 3499
3500 u8 syndrome[0x20]; 3500 u8 syndrome[0x20];
3501 3501
3502 u8 reserved_1[0x40]; 3502 u8 reserved_at_40[0x40];
3503 3503
3504 struct mlx5_ifc_mkc_bits memory_key_mkey_entry; 3504 struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
3505 3505
3506 u8 reserved_2[0x600]; 3506 u8 reserved_at_280[0x600];
3507 3507
3508 u8 bsf0_klm0_pas_mtt0_1[16][0x8]; 3508 u8 bsf0_klm0_pas_mtt0_1[16][0x8];
3509 3509
@@ -3512,265 +3512,265 @@ struct mlx5_ifc_query_mkey_out_bits {
3512 3512
3513struct mlx5_ifc_query_mkey_in_bits { 3513struct mlx5_ifc_query_mkey_in_bits {
3514 u8 opcode[0x10]; 3514 u8 opcode[0x10];
3515 u8 reserved_0[0x10]; 3515 u8 reserved_at_10[0x10];
3516 3516
3517 u8 reserved_1[0x10]; 3517 u8 reserved_at_20[0x10];
3518 u8 op_mod[0x10]; 3518 u8 op_mod[0x10];
3519 3519
3520 u8 reserved_2[0x8]; 3520 u8 reserved_at_40[0x8];
3521 u8 mkey_index[0x18]; 3521 u8 mkey_index[0x18];
3522 3522
3523 u8 pg_access[0x1]; 3523 u8 pg_access[0x1];
3524 u8 reserved_3[0x1f]; 3524 u8 reserved_at_61[0x1f];
3525}; 3525};
3526 3526
3527struct mlx5_ifc_query_mad_demux_out_bits { 3527struct mlx5_ifc_query_mad_demux_out_bits {
3528 u8 status[0x8]; 3528 u8 status[0x8];
3529 u8 reserved_0[0x18]; 3529 u8 reserved_at_8[0x18];
3530 3530
3531 u8 syndrome[0x20]; 3531 u8 syndrome[0x20];
3532 3532
3533 u8 reserved_1[0x40]; 3533 u8 reserved_at_40[0x40];
3534 3534
3535 u8 mad_dumux_parameters_block[0x20]; 3535 u8 mad_dumux_parameters_block[0x20];
3536}; 3536};
3537 3537
3538struct mlx5_ifc_query_mad_demux_in_bits { 3538struct mlx5_ifc_query_mad_demux_in_bits {
3539 u8 opcode[0x10]; 3539 u8 opcode[0x10];
3540 u8 reserved_0[0x10]; 3540 u8 reserved_at_10[0x10];
3541 3541
3542 u8 reserved_1[0x10]; 3542 u8 reserved_at_20[0x10];
3543 u8 op_mod[0x10]; 3543 u8 op_mod[0x10];
3544 3544
3545 u8 reserved_2[0x40]; 3545 u8 reserved_at_40[0x40];
3546}; 3546};
3547 3547
3548struct mlx5_ifc_query_l2_table_entry_out_bits { 3548struct mlx5_ifc_query_l2_table_entry_out_bits {
3549 u8 status[0x8]; 3549 u8 status[0x8];
3550 u8 reserved_0[0x18]; 3550 u8 reserved_at_8[0x18];
3551 3551
3552 u8 syndrome[0x20]; 3552 u8 syndrome[0x20];
3553 3553
3554 u8 reserved_1[0xa0]; 3554 u8 reserved_at_40[0xa0];
3555 3555
3556 u8 reserved_2[0x13]; 3556 u8 reserved_at_e0[0x13];
3557 u8 vlan_valid[0x1]; 3557 u8 vlan_valid[0x1];
3558 u8 vlan[0xc]; 3558 u8 vlan[0xc];
3559 3559
3560 struct mlx5_ifc_mac_address_layout_bits mac_address; 3560 struct mlx5_ifc_mac_address_layout_bits mac_address;
3561 3561
3562 u8 reserved_3[0xc0]; 3562 u8 reserved_at_140[0xc0];
3563}; 3563};
3564 3564
3565struct mlx5_ifc_query_l2_table_entry_in_bits { 3565struct mlx5_ifc_query_l2_table_entry_in_bits {
3566 u8 opcode[0x10]; 3566 u8 opcode[0x10];
3567 u8 reserved_0[0x10]; 3567 u8 reserved_at_10[0x10];
3568 3568
3569 u8 reserved_1[0x10]; 3569 u8 reserved_at_20[0x10];
3570 u8 op_mod[0x10]; 3570 u8 op_mod[0x10];
3571 3571
3572 u8 reserved_2[0x60]; 3572 u8 reserved_at_40[0x60];
3573 3573
3574 u8 reserved_3[0x8]; 3574 u8 reserved_at_a0[0x8];
3575 u8 table_index[0x18]; 3575 u8 table_index[0x18];
3576 3576
3577 u8 reserved_4[0x140]; 3577 u8 reserved_at_c0[0x140];
3578}; 3578};
3579 3579
3580struct mlx5_ifc_query_issi_out_bits { 3580struct mlx5_ifc_query_issi_out_bits {
3581 u8 status[0x8]; 3581 u8 status[0x8];
3582 u8 reserved_0[0x18]; 3582 u8 reserved_at_8[0x18];
3583 3583
3584 u8 syndrome[0x20]; 3584 u8 syndrome[0x20];
3585 3585
3586 u8 reserved_1[0x10]; 3586 u8 reserved_at_40[0x10];
3587 u8 current_issi[0x10]; 3587 u8 current_issi[0x10];
3588 3588
3589 u8 reserved_2[0xa0]; 3589 u8 reserved_at_60[0xa0];
3590 3590
3591 u8 supported_issi_reserved[76][0x8]; 3591 u8 reserved_at_100[76][0x8];
3592 u8 supported_issi_dw0[0x20]; 3592 u8 supported_issi_dw0[0x20];
3593}; 3593};
3594 3594
3595struct mlx5_ifc_query_issi_in_bits { 3595struct mlx5_ifc_query_issi_in_bits {
3596 u8 opcode[0x10]; 3596 u8 opcode[0x10];
3597 u8 reserved_0[0x10]; 3597 u8 reserved_at_10[0x10];
3598 3598
3599 u8 reserved_1[0x10]; 3599 u8 reserved_at_20[0x10];
3600 u8 op_mod[0x10]; 3600 u8 op_mod[0x10];
3601 3601
3602 u8 reserved_2[0x40]; 3602 u8 reserved_at_40[0x40];
3603}; 3603};
3604 3604
3605struct mlx5_ifc_query_hca_vport_pkey_out_bits { 3605struct mlx5_ifc_query_hca_vport_pkey_out_bits {
3606 u8 status[0x8]; 3606 u8 status[0x8];
3607 u8 reserved_0[0x18]; 3607 u8 reserved_at_8[0x18];
3608 3608
3609 u8 syndrome[0x20]; 3609 u8 syndrome[0x20];
3610 3610
3611 u8 reserved_1[0x40]; 3611 u8 reserved_at_40[0x40];
3612 3612
3613 struct mlx5_ifc_pkey_bits pkey[0]; 3613 struct mlx5_ifc_pkey_bits pkey[0];
3614}; 3614};
3615 3615
3616struct mlx5_ifc_query_hca_vport_pkey_in_bits { 3616struct mlx5_ifc_query_hca_vport_pkey_in_bits {
3617 u8 opcode[0x10]; 3617 u8 opcode[0x10];
3618 u8 reserved_0[0x10]; 3618 u8 reserved_at_10[0x10];
3619 3619
3620 u8 reserved_1[0x10]; 3620 u8 reserved_at_20[0x10];
3621 u8 op_mod[0x10]; 3621 u8 op_mod[0x10];
3622 3622
3623 u8 other_vport[0x1]; 3623 u8 other_vport[0x1];
3624 u8 reserved_2[0xb]; 3624 u8 reserved_at_41[0xb];
3625 u8 port_num[0x4]; 3625 u8 port_num[0x4];
3626 u8 vport_number[0x10]; 3626 u8 vport_number[0x10];
3627 3627
3628 u8 reserved_3[0x10]; 3628 u8 reserved_at_60[0x10];
3629 u8 pkey_index[0x10]; 3629 u8 pkey_index[0x10];
3630}; 3630};
3631 3631
3632struct mlx5_ifc_query_hca_vport_gid_out_bits { 3632struct mlx5_ifc_query_hca_vport_gid_out_bits {
3633 u8 status[0x8]; 3633 u8 status[0x8];
3634 u8 reserved_0[0x18]; 3634 u8 reserved_at_8[0x18];
3635 3635
3636 u8 syndrome[0x20]; 3636 u8 syndrome[0x20];
3637 3637
3638 u8 reserved_1[0x20]; 3638 u8 reserved_at_40[0x20];
3639 3639
3640 u8 gids_num[0x10]; 3640 u8 gids_num[0x10];
3641 u8 reserved_2[0x10]; 3641 u8 reserved_at_70[0x10];
3642 3642
3643 struct mlx5_ifc_array128_auto_bits gid[0]; 3643 struct mlx5_ifc_array128_auto_bits gid[0];
3644}; 3644};
3645 3645
3646struct mlx5_ifc_query_hca_vport_gid_in_bits { 3646struct mlx5_ifc_query_hca_vport_gid_in_bits {
3647 u8 opcode[0x10]; 3647 u8 opcode[0x10];
3648 u8 reserved_0[0x10]; 3648 u8 reserved_at_10[0x10];
3649 3649
3650 u8 reserved_1[0x10]; 3650 u8 reserved_at_20[0x10];
3651 u8 op_mod[0x10]; 3651 u8 op_mod[0x10];
3652 3652
3653 u8 other_vport[0x1]; 3653 u8 other_vport[0x1];
3654 u8 reserved_2[0xb]; 3654 u8 reserved_at_41[0xb];
3655 u8 port_num[0x4]; 3655 u8 port_num[0x4];
3656 u8 vport_number[0x10]; 3656 u8 vport_number[0x10];
3657 3657
3658 u8 reserved_3[0x10]; 3658 u8 reserved_at_60[0x10];
3659 u8 gid_index[0x10]; 3659 u8 gid_index[0x10];
3660}; 3660};
3661 3661
3662struct mlx5_ifc_query_hca_vport_context_out_bits { 3662struct mlx5_ifc_query_hca_vport_context_out_bits {
3663 u8 status[0x8]; 3663 u8 status[0x8];
3664 u8 reserved_0[0x18]; 3664 u8 reserved_at_8[0x18];
3665 3665
3666 u8 syndrome[0x20]; 3666 u8 syndrome[0x20];
3667 3667
3668 u8 reserved_1[0x40]; 3668 u8 reserved_at_40[0x40];
3669 3669
3670 struct mlx5_ifc_hca_vport_context_bits hca_vport_context; 3670 struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
3671}; 3671};
3672 3672
3673struct mlx5_ifc_query_hca_vport_context_in_bits { 3673struct mlx5_ifc_query_hca_vport_context_in_bits {
3674 u8 opcode[0x10]; 3674 u8 opcode[0x10];
3675 u8 reserved_0[0x10]; 3675 u8 reserved_at_10[0x10];
3676 3676
3677 u8 reserved_1[0x10]; 3677 u8 reserved_at_20[0x10];
3678 u8 op_mod[0x10]; 3678 u8 op_mod[0x10];
3679 3679
3680 u8 other_vport[0x1]; 3680 u8 other_vport[0x1];
3681 u8 reserved_2[0xb]; 3681 u8 reserved_at_41[0xb];
3682 u8 port_num[0x4]; 3682 u8 port_num[0x4];
3683 u8 vport_number[0x10]; 3683 u8 vport_number[0x10];
3684 3684
3685 u8 reserved_3[0x20]; 3685 u8 reserved_at_60[0x20];
3686}; 3686};
3687 3687
3688struct mlx5_ifc_query_hca_cap_out_bits { 3688struct mlx5_ifc_query_hca_cap_out_bits {
3689 u8 status[0x8]; 3689 u8 status[0x8];
3690 u8 reserved_0[0x18]; 3690 u8 reserved_at_8[0x18];
3691 3691
3692 u8 syndrome[0x20]; 3692 u8 syndrome[0x20];
3693 3693
3694 u8 reserved_1[0x40]; 3694 u8 reserved_at_40[0x40];
3695 3695
3696 union mlx5_ifc_hca_cap_union_bits capability; 3696 union mlx5_ifc_hca_cap_union_bits capability;
3697}; 3697};
3698 3698
3699struct mlx5_ifc_query_hca_cap_in_bits { 3699struct mlx5_ifc_query_hca_cap_in_bits {
3700 u8 opcode[0x10]; 3700 u8 opcode[0x10];
3701 u8 reserved_0[0x10]; 3701 u8 reserved_at_10[0x10];
3702 3702
3703 u8 reserved_1[0x10]; 3703 u8 reserved_at_20[0x10];
3704 u8 op_mod[0x10]; 3704 u8 op_mod[0x10];
3705 3705
3706 u8 reserved_2[0x40]; 3706 u8 reserved_at_40[0x40];
3707}; 3707};
3708 3708
3709struct mlx5_ifc_query_flow_table_out_bits { 3709struct mlx5_ifc_query_flow_table_out_bits {
3710 u8 status[0x8]; 3710 u8 status[0x8];
3711 u8 reserved_0[0x18]; 3711 u8 reserved_at_8[0x18];
3712 3712
3713 u8 syndrome[0x20]; 3713 u8 syndrome[0x20];
3714 3714
3715 u8 reserved_1[0x80]; 3715 u8 reserved_at_40[0x80];
3716 3716
3717 u8 reserved_2[0x8]; 3717 u8 reserved_at_c0[0x8];
3718 u8 level[0x8]; 3718 u8 level[0x8];
3719 u8 reserved_3[0x8]; 3719 u8 reserved_at_d0[0x8];
3720 u8 log_size[0x8]; 3720 u8 log_size[0x8];
3721 3721
3722 u8 reserved_4[0x120]; 3722 u8 reserved_at_e0[0x120];
3723}; 3723};
3724 3724
3725struct mlx5_ifc_query_flow_table_in_bits { 3725struct mlx5_ifc_query_flow_table_in_bits {
3726 u8 opcode[0x10]; 3726 u8 opcode[0x10];
3727 u8 reserved_0[0x10]; 3727 u8 reserved_at_10[0x10];
3728 3728
3729 u8 reserved_1[0x10]; 3729 u8 reserved_at_20[0x10];
3730 u8 op_mod[0x10]; 3730 u8 op_mod[0x10];
3731 3731
3732 u8 reserved_2[0x40]; 3732 u8 reserved_at_40[0x40];
3733 3733
3734 u8 table_type[0x8]; 3734 u8 table_type[0x8];
3735 u8 reserved_3[0x18]; 3735 u8 reserved_at_88[0x18];
3736 3736
3737 u8 reserved_4[0x8]; 3737 u8 reserved_at_a0[0x8];
3738 u8 table_id[0x18]; 3738 u8 table_id[0x18];
3739 3739
3740 u8 reserved_5[0x140]; 3740 u8 reserved_at_c0[0x140];
3741}; 3741};
3742 3742
3743struct mlx5_ifc_query_fte_out_bits { 3743struct mlx5_ifc_query_fte_out_bits {
3744 u8 status[0x8]; 3744 u8 status[0x8];
3745 u8 reserved_0[0x18]; 3745 u8 reserved_at_8[0x18];
3746 3746
3747 u8 syndrome[0x20]; 3747 u8 syndrome[0x20];
3748 3748
3749 u8 reserved_1[0x1c0]; 3749 u8 reserved_at_40[0x1c0];
3750 3750
3751 struct mlx5_ifc_flow_context_bits flow_context; 3751 struct mlx5_ifc_flow_context_bits flow_context;
3752}; 3752};
3753 3753
3754struct mlx5_ifc_query_fte_in_bits { 3754struct mlx5_ifc_query_fte_in_bits {
3755 u8 opcode[0x10]; 3755 u8 opcode[0x10];
3756 u8 reserved_0[0x10]; 3756 u8 reserved_at_10[0x10];
3757 3757
3758 u8 reserved_1[0x10]; 3758 u8 reserved_at_20[0x10];
3759 u8 op_mod[0x10]; 3759 u8 op_mod[0x10];
3760 3760
3761 u8 reserved_2[0x40]; 3761 u8 reserved_at_40[0x40];
3762 3762
3763 u8 table_type[0x8]; 3763 u8 table_type[0x8];
3764 u8 reserved_3[0x18]; 3764 u8 reserved_at_88[0x18];
3765 3765
3766 u8 reserved_4[0x8]; 3766 u8 reserved_at_a0[0x8];
3767 u8 table_id[0x18]; 3767 u8 table_id[0x18];
3768 3768
3769 u8 reserved_5[0x40]; 3769 u8 reserved_at_c0[0x40];
3770 3770
3771 u8 flow_index[0x20]; 3771 u8 flow_index[0x20];
3772 3772
3773 u8 reserved_6[0xe0]; 3773 u8 reserved_at_120[0xe0];
3774}; 3774};
3775 3775
3776enum { 3776enum {
@@ -3781,84 +3781,84 @@ enum {
3781 3781
3782struct mlx5_ifc_query_flow_group_out_bits { 3782struct mlx5_ifc_query_flow_group_out_bits {
3783 u8 status[0x8]; 3783 u8 status[0x8];
3784 u8 reserved_0[0x18]; 3784 u8 reserved_at_8[0x18];
3785 3785
3786 u8 syndrome[0x20]; 3786 u8 syndrome[0x20];
3787 3787
3788 u8 reserved_1[0xa0]; 3788 u8 reserved_at_40[0xa0];
3789 3789
3790 u8 start_flow_index[0x20]; 3790 u8 start_flow_index[0x20];
3791 3791
3792 u8 reserved_2[0x20]; 3792 u8 reserved_at_100[0x20];
3793 3793
3794 u8 end_flow_index[0x20]; 3794 u8 end_flow_index[0x20];
3795 3795
3796 u8 reserved_3[0xa0]; 3796 u8 reserved_at_140[0xa0];
3797 3797
3798 u8 reserved_4[0x18]; 3798 u8 reserved_at_1e0[0x18];
3799 u8 match_criteria_enable[0x8]; 3799 u8 match_criteria_enable[0x8];
3800 3800
3801 struct mlx5_ifc_fte_match_param_bits match_criteria; 3801 struct mlx5_ifc_fte_match_param_bits match_criteria;
3802 3802
3803 u8 reserved_5[0xe00]; 3803 u8 reserved_at_1200[0xe00];
3804}; 3804};
3805 3805
3806struct mlx5_ifc_query_flow_group_in_bits { 3806struct mlx5_ifc_query_flow_group_in_bits {
3807 u8 opcode[0x10]; 3807 u8 opcode[0x10];
3808 u8 reserved_0[0x10]; 3808 u8 reserved_at_10[0x10];
3809 3809
3810 u8 reserved_1[0x10]; 3810 u8 reserved_at_20[0x10];
3811 u8 op_mod[0x10]; 3811 u8 op_mod[0x10];
3812 3812
3813 u8 reserved_2[0x40]; 3813 u8 reserved_at_40[0x40];
3814 3814
3815 u8 table_type[0x8]; 3815 u8 table_type[0x8];
3816 u8 reserved_3[0x18]; 3816 u8 reserved_at_88[0x18];
3817 3817
3818 u8 reserved_4[0x8]; 3818 u8 reserved_at_a0[0x8];
3819 u8 table_id[0x18]; 3819 u8 table_id[0x18];
3820 3820
3821 u8 group_id[0x20]; 3821 u8 group_id[0x20];
3822 3822
3823 u8 reserved_5[0x120]; 3823 u8 reserved_at_e0[0x120];
3824}; 3824};
3825 3825
3826struct mlx5_ifc_query_esw_vport_context_out_bits { 3826struct mlx5_ifc_query_esw_vport_context_out_bits {
3827 u8 status[0x8]; 3827 u8 status[0x8];
3828 u8 reserved_0[0x18]; 3828 u8 reserved_at_8[0x18];
3829 3829
3830 u8 syndrome[0x20]; 3830 u8 syndrome[0x20];
3831 3831
3832 u8 reserved_1[0x40]; 3832 u8 reserved_at_40[0x40];
3833 3833
3834 struct mlx5_ifc_esw_vport_context_bits esw_vport_context; 3834 struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
3835}; 3835};
3836 3836
3837struct mlx5_ifc_query_esw_vport_context_in_bits { 3837struct mlx5_ifc_query_esw_vport_context_in_bits {
3838 u8 opcode[0x10]; 3838 u8 opcode[0x10];
3839 u8 reserved_0[0x10]; 3839 u8 reserved_at_10[0x10];
3840 3840
3841 u8 reserved_1[0x10]; 3841 u8 reserved_at_20[0x10];
3842 u8 op_mod[0x10]; 3842 u8 op_mod[0x10];
3843 3843
3844 u8 other_vport[0x1]; 3844 u8 other_vport[0x1];
3845 u8 reserved_2[0xf]; 3845 u8 reserved_at_41[0xf];
3846 u8 vport_number[0x10]; 3846 u8 vport_number[0x10];
3847 3847
3848 u8 reserved_3[0x20]; 3848 u8 reserved_at_60[0x20];
3849}; 3849};
3850 3850
3851struct mlx5_ifc_modify_esw_vport_context_out_bits { 3851struct mlx5_ifc_modify_esw_vport_context_out_bits {
3852 u8 status[0x8]; 3852 u8 status[0x8];
3853 u8 reserved_0[0x18]; 3853 u8 reserved_at_8[0x18];
3854 3854
3855 u8 syndrome[0x20]; 3855 u8 syndrome[0x20];
3856 3856
3857 u8 reserved_1[0x40]; 3857 u8 reserved_at_40[0x40];
3858}; 3858};
3859 3859
3860struct mlx5_ifc_esw_vport_context_fields_select_bits { 3860struct mlx5_ifc_esw_vport_context_fields_select_bits {
3861 u8 reserved[0x1c]; 3861 u8 reserved_at_0[0x1c];
3862 u8 vport_cvlan_insert[0x1]; 3862 u8 vport_cvlan_insert[0x1];
3863 u8 vport_svlan_insert[0x1]; 3863 u8 vport_svlan_insert[0x1];
3864 u8 vport_cvlan_strip[0x1]; 3864 u8 vport_cvlan_strip[0x1];
@@ -3867,13 +3867,13 @@ struct mlx5_ifc_esw_vport_context_fields_select_bits {
3867 3867
3868struct mlx5_ifc_modify_esw_vport_context_in_bits { 3868struct mlx5_ifc_modify_esw_vport_context_in_bits {
3869 u8 opcode[0x10]; 3869 u8 opcode[0x10];
3870 u8 reserved_0[0x10]; 3870 u8 reserved_at_10[0x10];
3871 3871
3872 u8 reserved_1[0x10]; 3872 u8 reserved_at_20[0x10];
3873 u8 op_mod[0x10]; 3873 u8 op_mod[0x10];
3874 3874
3875 u8 other_vport[0x1]; 3875 u8 other_vport[0x1];
3876 u8 reserved_2[0xf]; 3876 u8 reserved_at_41[0xf];
3877 u8 vport_number[0x10]; 3877 u8 vport_number[0x10];
3878 3878
3879 struct mlx5_ifc_esw_vport_context_fields_select_bits field_select; 3879 struct mlx5_ifc_esw_vport_context_fields_select_bits field_select;
@@ -3883,124 +3883,124 @@ struct mlx5_ifc_modify_esw_vport_context_in_bits {
3883 3883
3884struct mlx5_ifc_query_eq_out_bits { 3884struct mlx5_ifc_query_eq_out_bits {
3885 u8 status[0x8]; 3885 u8 status[0x8];
3886 u8 reserved_0[0x18]; 3886 u8 reserved_at_8[0x18];
3887 3887
3888 u8 syndrome[0x20]; 3888 u8 syndrome[0x20];
3889 3889
3890 u8 reserved_1[0x40]; 3890 u8 reserved_at_40[0x40];
3891 3891
3892 struct mlx5_ifc_eqc_bits eq_context_entry; 3892 struct mlx5_ifc_eqc_bits eq_context_entry;
3893 3893
3894 u8 reserved_2[0x40]; 3894 u8 reserved_at_280[0x40];
3895 3895
3896 u8 event_bitmask[0x40]; 3896 u8 event_bitmask[0x40];
3897 3897
3898 u8 reserved_3[0x580]; 3898 u8 reserved_at_300[0x580];
3899 3899
3900 u8 pas[0][0x40]; 3900 u8 pas[0][0x40];
3901}; 3901};
3902 3902
3903struct mlx5_ifc_query_eq_in_bits { 3903struct mlx5_ifc_query_eq_in_bits {
3904 u8 opcode[0x10]; 3904 u8 opcode[0x10];
3905 u8 reserved_0[0x10]; 3905 u8 reserved_at_10[0x10];
3906 3906
3907 u8 reserved_1[0x10]; 3907 u8 reserved_at_20[0x10];
3908 u8 op_mod[0x10]; 3908 u8 op_mod[0x10];
3909 3909
3910 u8 reserved_2[0x18]; 3910 u8 reserved_at_40[0x18];
3911 u8 eq_number[0x8]; 3911 u8 eq_number[0x8];
3912 3912
3913 u8 reserved_3[0x20]; 3913 u8 reserved_at_60[0x20];
3914}; 3914};
3915 3915
3916struct mlx5_ifc_query_dct_out_bits { 3916struct mlx5_ifc_query_dct_out_bits {
3917 u8 status[0x8]; 3917 u8 status[0x8];
3918 u8 reserved_0[0x18]; 3918 u8 reserved_at_8[0x18];
3919 3919
3920 u8 syndrome[0x20]; 3920 u8 syndrome[0x20];
3921 3921
3922 u8 reserved_1[0x40]; 3922 u8 reserved_at_40[0x40];
3923 3923
3924 struct mlx5_ifc_dctc_bits dct_context_entry; 3924 struct mlx5_ifc_dctc_bits dct_context_entry;
3925 3925
3926 u8 reserved_2[0x180]; 3926 u8 reserved_at_280[0x180];
3927}; 3927};
3928 3928
3929struct mlx5_ifc_query_dct_in_bits { 3929struct mlx5_ifc_query_dct_in_bits {
3930 u8 opcode[0x10]; 3930 u8 opcode[0x10];
3931 u8 reserved_0[0x10]; 3931 u8 reserved_at_10[0x10];
3932 3932
3933 u8 reserved_1[0x10]; 3933 u8 reserved_at_20[0x10];
3934 u8 op_mod[0x10]; 3934 u8 op_mod[0x10];
3935 3935
3936 u8 reserved_2[0x8]; 3936 u8 reserved_at_40[0x8];
3937 u8 dctn[0x18]; 3937 u8 dctn[0x18];
3938 3938
3939 u8 reserved_3[0x20]; 3939 u8 reserved_at_60[0x20];
3940}; 3940};
3941 3941
3942struct mlx5_ifc_query_cq_out_bits { 3942struct mlx5_ifc_query_cq_out_bits {
3943 u8 status[0x8]; 3943 u8 status[0x8];
3944 u8 reserved_0[0x18]; 3944 u8 reserved_at_8[0x18];
3945 3945
3946 u8 syndrome[0x20]; 3946 u8 syndrome[0x20];
3947 3947
3948 u8 reserved_1[0x40]; 3948 u8 reserved_at_40[0x40];
3949 3949
3950 struct mlx5_ifc_cqc_bits cq_context; 3950 struct mlx5_ifc_cqc_bits cq_context;
3951 3951
3952 u8 reserved_2[0x600]; 3952 u8 reserved_at_280[0x600];
3953 3953
3954 u8 pas[0][0x40]; 3954 u8 pas[0][0x40];
3955}; 3955};
3956 3956
3957struct mlx5_ifc_query_cq_in_bits { 3957struct mlx5_ifc_query_cq_in_bits {
3958 u8 opcode[0x10]; 3958 u8 opcode[0x10];
3959 u8 reserved_0[0x10]; 3959 u8 reserved_at_10[0x10];
3960 3960
3961 u8 reserved_1[0x10]; 3961 u8 reserved_at_20[0x10];
3962 u8 op_mod[0x10]; 3962 u8 op_mod[0x10];
3963 3963
3964 u8 reserved_2[0x8]; 3964 u8 reserved_at_40[0x8];
3965 u8 cqn[0x18]; 3965 u8 cqn[0x18];
3966 3966
3967 u8 reserved_3[0x20]; 3967 u8 reserved_at_60[0x20];
3968}; 3968};
3969 3969
3970struct mlx5_ifc_query_cong_status_out_bits { 3970struct mlx5_ifc_query_cong_status_out_bits {
3971 u8 status[0x8]; 3971 u8 status[0x8];
3972 u8 reserved_0[0x18]; 3972 u8 reserved_at_8[0x18];
3973 3973
3974 u8 syndrome[0x20]; 3974 u8 syndrome[0x20];
3975 3975
3976 u8 reserved_1[0x20]; 3976 u8 reserved_at_40[0x20];
3977 3977
3978 u8 enable[0x1]; 3978 u8 enable[0x1];
3979 u8 tag_enable[0x1]; 3979 u8 tag_enable[0x1];
3980 u8 reserved_2[0x1e]; 3980 u8 reserved_at_62[0x1e];
3981}; 3981};
3982 3982
3983struct mlx5_ifc_query_cong_status_in_bits { 3983struct mlx5_ifc_query_cong_status_in_bits {
3984 u8 opcode[0x10]; 3984 u8 opcode[0x10];
3985 u8 reserved_0[0x10]; 3985 u8 reserved_at_10[0x10];
3986 3986
3987 u8 reserved_1[0x10]; 3987 u8 reserved_at_20[0x10];
3988 u8 op_mod[0x10]; 3988 u8 op_mod[0x10];
3989 3989
3990 u8 reserved_2[0x18]; 3990 u8 reserved_at_40[0x18];
3991 u8 priority[0x4]; 3991 u8 priority[0x4];
3992 u8 cong_protocol[0x4]; 3992 u8 cong_protocol[0x4];
3993 3993
3994 u8 reserved_3[0x20]; 3994 u8 reserved_at_60[0x20];
3995}; 3995};
3996 3996
3997struct mlx5_ifc_query_cong_statistics_out_bits { 3997struct mlx5_ifc_query_cong_statistics_out_bits {
3998 u8 status[0x8]; 3998 u8 status[0x8];
3999 u8 reserved_0[0x18]; 3999 u8 reserved_at_8[0x18];
4000 4000
4001 u8 syndrome[0x20]; 4001 u8 syndrome[0x20];
4002 4002
4003 u8 reserved_1[0x40]; 4003 u8 reserved_at_40[0x40];
4004 4004
4005 u8 cur_flows[0x20]; 4005 u8 cur_flows[0x20];
4006 4006
@@ -4014,7 +4014,7 @@ struct mlx5_ifc_query_cong_statistics_out_bits {
4014 4014
4015 u8 cnp_handled_low[0x20]; 4015 u8 cnp_handled_low[0x20];
4016 4016
4017 u8 reserved_2[0x100]; 4017 u8 reserved_at_140[0x100];
4018 4018
4019 u8 time_stamp_high[0x20]; 4019 u8 time_stamp_high[0x20];
4020 4020
@@ -4030,453 +4030,453 @@ struct mlx5_ifc_query_cong_statistics_out_bits {
4030 4030
4031 u8 cnps_sent_low[0x20]; 4031 u8 cnps_sent_low[0x20];
4032 4032
4033 u8 reserved_3[0x560]; 4033 u8 reserved_at_320[0x560];
4034}; 4034};
4035 4035
4036struct mlx5_ifc_query_cong_statistics_in_bits { 4036struct mlx5_ifc_query_cong_statistics_in_bits {
4037 u8 opcode[0x10]; 4037 u8 opcode[0x10];
4038 u8 reserved_0[0x10]; 4038 u8 reserved_at_10[0x10];
4039 4039
4040 u8 reserved_1[0x10]; 4040 u8 reserved_at_20[0x10];
4041 u8 op_mod[0x10]; 4041 u8 op_mod[0x10];
4042 4042
4043 u8 clear[0x1]; 4043 u8 clear[0x1];
4044 u8 reserved_2[0x1f]; 4044 u8 reserved_at_41[0x1f];
4045 4045
4046 u8 reserved_3[0x20]; 4046 u8 reserved_at_60[0x20];
4047}; 4047};
4048 4048
4049struct mlx5_ifc_query_cong_params_out_bits { 4049struct mlx5_ifc_query_cong_params_out_bits {
4050 u8 status[0x8]; 4050 u8 status[0x8];
4051 u8 reserved_0[0x18]; 4051 u8 reserved_at_8[0x18];
4052 4052
4053 u8 syndrome[0x20]; 4053 u8 syndrome[0x20];
4054 4054
4055 u8 reserved_1[0x40]; 4055 u8 reserved_at_40[0x40];
4056 4056
4057 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; 4057 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
4058}; 4058};
4059 4059
4060struct mlx5_ifc_query_cong_params_in_bits { 4060struct mlx5_ifc_query_cong_params_in_bits {
4061 u8 opcode[0x10]; 4061 u8 opcode[0x10];
4062 u8 reserved_0[0x10]; 4062 u8 reserved_at_10[0x10];
4063 4063
4064 u8 reserved_1[0x10]; 4064 u8 reserved_at_20[0x10];
4065 u8 op_mod[0x10]; 4065 u8 op_mod[0x10];
4066 4066
4067 u8 reserved_2[0x1c]; 4067 u8 reserved_at_40[0x1c];
4068 u8 cong_protocol[0x4]; 4068 u8 cong_protocol[0x4];
4069 4069
4070 u8 reserved_3[0x20]; 4070 u8 reserved_at_60[0x20];
4071}; 4071};
4072 4072
4073struct mlx5_ifc_query_adapter_out_bits { 4073struct mlx5_ifc_query_adapter_out_bits {
4074 u8 status[0x8]; 4074 u8 status[0x8];
4075 u8 reserved_0[0x18]; 4075 u8 reserved_at_8[0x18];
4076 4076
4077 u8 syndrome[0x20]; 4077 u8 syndrome[0x20];
4078 4078
4079 u8 reserved_1[0x40]; 4079 u8 reserved_at_40[0x40];
4080 4080
4081 struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct; 4081 struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
4082}; 4082};
4083 4083
4084struct mlx5_ifc_query_adapter_in_bits { 4084struct mlx5_ifc_query_adapter_in_bits {
4085 u8 opcode[0x10]; 4085 u8 opcode[0x10];
4086 u8 reserved_0[0x10]; 4086 u8 reserved_at_10[0x10];
4087 4087
4088 u8 reserved_1[0x10]; 4088 u8 reserved_at_20[0x10];
4089 u8 op_mod[0x10]; 4089 u8 op_mod[0x10];
4090 4090
4091 u8 reserved_2[0x40]; 4091 u8 reserved_at_40[0x40];
4092}; 4092};
4093 4093
4094struct mlx5_ifc_qp_2rst_out_bits { 4094struct mlx5_ifc_qp_2rst_out_bits {
4095 u8 status[0x8]; 4095 u8 status[0x8];
4096 u8 reserved_0[0x18]; 4096 u8 reserved_at_8[0x18];
4097 4097
4098 u8 syndrome[0x20]; 4098 u8 syndrome[0x20];
4099 4099
4100 u8 reserved_1[0x40]; 4100 u8 reserved_at_40[0x40];
4101}; 4101};
4102 4102
4103struct mlx5_ifc_qp_2rst_in_bits { 4103struct mlx5_ifc_qp_2rst_in_bits {
4104 u8 opcode[0x10]; 4104 u8 opcode[0x10];
4105 u8 reserved_0[0x10]; 4105 u8 reserved_at_10[0x10];
4106 4106
4107 u8 reserved_1[0x10]; 4107 u8 reserved_at_20[0x10];
4108 u8 op_mod[0x10]; 4108 u8 op_mod[0x10];
4109 4109
4110 u8 reserved_2[0x8]; 4110 u8 reserved_at_40[0x8];
4111 u8 qpn[0x18]; 4111 u8 qpn[0x18];
4112 4112
4113 u8 reserved_3[0x20]; 4113 u8 reserved_at_60[0x20];
4114}; 4114};
4115 4115
4116struct mlx5_ifc_qp_2err_out_bits { 4116struct mlx5_ifc_qp_2err_out_bits {
4117 u8 status[0x8]; 4117 u8 status[0x8];
4118 u8 reserved_0[0x18]; 4118 u8 reserved_at_8[0x18];
4119 4119
4120 u8 syndrome[0x20]; 4120 u8 syndrome[0x20];
4121 4121
4122 u8 reserved_1[0x40]; 4122 u8 reserved_at_40[0x40];
4123}; 4123};
4124 4124
4125struct mlx5_ifc_qp_2err_in_bits { 4125struct mlx5_ifc_qp_2err_in_bits {
4126 u8 opcode[0x10]; 4126 u8 opcode[0x10];
4127 u8 reserved_0[0x10]; 4127 u8 reserved_at_10[0x10];
4128 4128
4129 u8 reserved_1[0x10]; 4129 u8 reserved_at_20[0x10];
4130 u8 op_mod[0x10]; 4130 u8 op_mod[0x10];
4131 4131
4132 u8 reserved_2[0x8]; 4132 u8 reserved_at_40[0x8];
4133 u8 qpn[0x18]; 4133 u8 qpn[0x18];
4134 4134
4135 u8 reserved_3[0x20]; 4135 u8 reserved_at_60[0x20];
4136}; 4136};
4137 4137
4138struct mlx5_ifc_page_fault_resume_out_bits { 4138struct mlx5_ifc_page_fault_resume_out_bits {
4139 u8 status[0x8]; 4139 u8 status[0x8];
4140 u8 reserved_0[0x18]; 4140 u8 reserved_at_8[0x18];
4141 4141
4142 u8 syndrome[0x20]; 4142 u8 syndrome[0x20];
4143 4143
4144 u8 reserved_1[0x40]; 4144 u8 reserved_at_40[0x40];
4145}; 4145};
4146 4146
4147struct mlx5_ifc_page_fault_resume_in_bits { 4147struct mlx5_ifc_page_fault_resume_in_bits {
4148 u8 opcode[0x10]; 4148 u8 opcode[0x10];
4149 u8 reserved_0[0x10]; 4149 u8 reserved_at_10[0x10];
4150 4150
4151 u8 reserved_1[0x10]; 4151 u8 reserved_at_20[0x10];
4152 u8 op_mod[0x10]; 4152 u8 op_mod[0x10];
4153 4153
4154 u8 error[0x1]; 4154 u8 error[0x1];
4155 u8 reserved_2[0x4]; 4155 u8 reserved_at_41[0x4];
4156 u8 rdma[0x1]; 4156 u8 rdma[0x1];
4157 u8 read_write[0x1]; 4157 u8 read_write[0x1];
4158 u8 req_res[0x1]; 4158 u8 req_res[0x1];
4159 u8 qpn[0x18]; 4159 u8 qpn[0x18];
4160 4160
4161 u8 reserved_3[0x20]; 4161 u8 reserved_at_60[0x20];
4162}; 4162};
4163 4163
4164struct mlx5_ifc_nop_out_bits { 4164struct mlx5_ifc_nop_out_bits {
4165 u8 status[0x8]; 4165 u8 status[0x8];
4166 u8 reserved_0[0x18]; 4166 u8 reserved_at_8[0x18];
4167 4167
4168 u8 syndrome[0x20]; 4168 u8 syndrome[0x20];
4169 4169
4170 u8 reserved_1[0x40]; 4170 u8 reserved_at_40[0x40];
4171}; 4171};
4172 4172
4173struct mlx5_ifc_nop_in_bits { 4173struct mlx5_ifc_nop_in_bits {
4174 u8 opcode[0x10]; 4174 u8 opcode[0x10];
4175 u8 reserved_0[0x10]; 4175 u8 reserved_at_10[0x10];
4176 4176
4177 u8 reserved_1[0x10]; 4177 u8 reserved_at_20[0x10];
4178 u8 op_mod[0x10]; 4178 u8 op_mod[0x10];
4179 4179
4180 u8 reserved_2[0x40]; 4180 u8 reserved_at_40[0x40];
4181}; 4181};
4182 4182
4183struct mlx5_ifc_modify_vport_state_out_bits { 4183struct mlx5_ifc_modify_vport_state_out_bits {
4184 u8 status[0x8]; 4184 u8 status[0x8];
4185 u8 reserved_0[0x18]; 4185 u8 reserved_at_8[0x18];
4186 4186
4187 u8 syndrome[0x20]; 4187 u8 syndrome[0x20];
4188 4188
4189 u8 reserved_1[0x40]; 4189 u8 reserved_at_40[0x40];
4190}; 4190};
4191 4191
4192struct mlx5_ifc_modify_vport_state_in_bits { 4192struct mlx5_ifc_modify_vport_state_in_bits {
4193 u8 opcode[0x10]; 4193 u8 opcode[0x10];
4194 u8 reserved_0[0x10]; 4194 u8 reserved_at_10[0x10];
4195 4195
4196 u8 reserved_1[0x10]; 4196 u8 reserved_at_20[0x10];
4197 u8 op_mod[0x10]; 4197 u8 op_mod[0x10];
4198 4198
4199 u8 other_vport[0x1]; 4199 u8 other_vport[0x1];
4200 u8 reserved_2[0xf]; 4200 u8 reserved_at_41[0xf];
4201 u8 vport_number[0x10]; 4201 u8 vport_number[0x10];
4202 4202
4203 u8 reserved_3[0x18]; 4203 u8 reserved_at_60[0x18];
4204 u8 admin_state[0x4]; 4204 u8 admin_state[0x4];
4205 u8 reserved_4[0x4]; 4205 u8 reserved_at_7c[0x4];
4206}; 4206};
4207 4207
4208struct mlx5_ifc_modify_tis_out_bits { 4208struct mlx5_ifc_modify_tis_out_bits {
4209 u8 status[0x8]; 4209 u8 status[0x8];
4210 u8 reserved_0[0x18]; 4210 u8 reserved_at_8[0x18];
4211 4211
4212 u8 syndrome[0x20]; 4212 u8 syndrome[0x20];
4213 4213
4214 u8 reserved_1[0x40]; 4214 u8 reserved_at_40[0x40];
4215}; 4215};
4216 4216
4217struct mlx5_ifc_modify_tis_bitmask_bits { 4217struct mlx5_ifc_modify_tis_bitmask_bits {
4218 u8 reserved_0[0x20]; 4218 u8 reserved_at_0[0x20];
4219 4219
4220 u8 reserved_1[0x1f]; 4220 u8 reserved_at_20[0x1f];
4221 u8 prio[0x1]; 4221 u8 prio[0x1];
4222}; 4222};
4223 4223
4224struct mlx5_ifc_modify_tis_in_bits { 4224struct mlx5_ifc_modify_tis_in_bits {
4225 u8 opcode[0x10]; 4225 u8 opcode[0x10];
4226 u8 reserved_0[0x10]; 4226 u8 reserved_at_10[0x10];
4227 4227
4228 u8 reserved_1[0x10]; 4228 u8 reserved_at_20[0x10];
4229 u8 op_mod[0x10]; 4229 u8 op_mod[0x10];
4230 4230
4231 u8 reserved_2[0x8]; 4231 u8 reserved_at_40[0x8];
4232 u8 tisn[0x18]; 4232 u8 tisn[0x18];
4233 4233
4234 u8 reserved_3[0x20]; 4234 u8 reserved_at_60[0x20];
4235 4235
4236 struct mlx5_ifc_modify_tis_bitmask_bits bitmask; 4236 struct mlx5_ifc_modify_tis_bitmask_bits bitmask;
4237 4237
4238 u8 reserved_4[0x40]; 4238 u8 reserved_at_c0[0x40];
4239 4239
4240 struct mlx5_ifc_tisc_bits ctx; 4240 struct mlx5_ifc_tisc_bits ctx;
4241}; 4241};
4242 4242
4243struct mlx5_ifc_modify_tir_bitmask_bits { 4243struct mlx5_ifc_modify_tir_bitmask_bits {
4244 u8 reserved_0[0x20]; 4244 u8 reserved_at_0[0x20];
4245 4245
4246 u8 reserved_1[0x1b]; 4246 u8 reserved_at_20[0x1b];
4247 u8 self_lb_en[0x1]; 4247 u8 self_lb_en[0x1];
4248 u8 reserved_2[0x3]; 4248 u8 reserved_at_3c[0x3];
4249 u8 lro[0x1]; 4249 u8 lro[0x1];
4250}; 4250};
4251 4251
4252struct mlx5_ifc_modify_tir_out_bits { 4252struct mlx5_ifc_modify_tir_out_bits {
4253 u8 status[0x8]; 4253 u8 status[0x8];
4254 u8 reserved_0[0x18]; 4254 u8 reserved_at_8[0x18];
4255 4255
4256 u8 syndrome[0x20]; 4256 u8 syndrome[0x20];
4257 4257
4258 u8 reserved_1[0x40]; 4258 u8 reserved_at_40[0x40];
4259}; 4259};
4260 4260
4261struct mlx5_ifc_modify_tir_in_bits { 4261struct mlx5_ifc_modify_tir_in_bits {
4262 u8 opcode[0x10]; 4262 u8 opcode[0x10];
4263 u8 reserved_0[0x10]; 4263 u8 reserved_at_10[0x10];
4264 4264
4265 u8 reserved_1[0x10]; 4265 u8 reserved_at_20[0x10];
4266 u8 op_mod[0x10]; 4266 u8 op_mod[0x10];
4267 4267
4268 u8 reserved_2[0x8]; 4268 u8 reserved_at_40[0x8];
4269 u8 tirn[0x18]; 4269 u8 tirn[0x18];
4270 4270
4271 u8 reserved_3[0x20]; 4271 u8 reserved_at_60[0x20];
4272 4272
4273 struct mlx5_ifc_modify_tir_bitmask_bits bitmask; 4273 struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
4274 4274
4275 u8 reserved_4[0x40]; 4275 u8 reserved_at_c0[0x40];
4276 4276
4277 struct mlx5_ifc_tirc_bits ctx; 4277 struct mlx5_ifc_tirc_bits ctx;
4278}; 4278};
4279 4279
4280struct mlx5_ifc_modify_sq_out_bits { 4280struct mlx5_ifc_modify_sq_out_bits {
4281 u8 status[0x8]; 4281 u8 status[0x8];
4282 u8 reserved_0[0x18]; 4282 u8 reserved_at_8[0x18];
4283 4283
4284 u8 syndrome[0x20]; 4284 u8 syndrome[0x20];
4285 4285
4286 u8 reserved_1[0x40]; 4286 u8 reserved_at_40[0x40];
4287}; 4287};
4288 4288
4289struct mlx5_ifc_modify_sq_in_bits { 4289struct mlx5_ifc_modify_sq_in_bits {
4290 u8 opcode[0x10]; 4290 u8 opcode[0x10];
4291 u8 reserved_0[0x10]; 4291 u8 reserved_at_10[0x10];
4292 4292
4293 u8 reserved_1[0x10]; 4293 u8 reserved_at_20[0x10];
4294 u8 op_mod[0x10]; 4294 u8 op_mod[0x10];
4295 4295
4296 u8 sq_state[0x4]; 4296 u8 sq_state[0x4];
4297 u8 reserved_2[0x4]; 4297 u8 reserved_at_44[0x4];
4298 u8 sqn[0x18]; 4298 u8 sqn[0x18];
4299 4299
4300 u8 reserved_3[0x20]; 4300 u8 reserved_at_60[0x20];
4301 4301
4302 u8 modify_bitmask[0x40]; 4302 u8 modify_bitmask[0x40];
4303 4303
4304 u8 reserved_4[0x40]; 4304 u8 reserved_at_c0[0x40];
4305 4305
4306 struct mlx5_ifc_sqc_bits ctx; 4306 struct mlx5_ifc_sqc_bits ctx;
4307}; 4307};
4308 4308
4309struct mlx5_ifc_modify_rqt_out_bits { 4309struct mlx5_ifc_modify_rqt_out_bits {
4310 u8 status[0x8]; 4310 u8 status[0x8];
4311 u8 reserved_0[0x18]; 4311 u8 reserved_at_8[0x18];
4312 4312
4313 u8 syndrome[0x20]; 4313 u8 syndrome[0x20];
4314 4314
4315 u8 reserved_1[0x40]; 4315 u8 reserved_at_40[0x40];
4316}; 4316};
4317 4317
4318struct mlx5_ifc_rqt_bitmask_bits { 4318struct mlx5_ifc_rqt_bitmask_bits {
4319 u8 reserved[0x20]; 4319 u8 reserved_at_0[0x20];
4320 4320
4321 u8 reserved1[0x1f]; 4321 u8 reserved_at_20[0x1f];
4322 u8 rqn_list[0x1]; 4322 u8 rqn_list[0x1];
4323}; 4323};
4324 4324
4325struct mlx5_ifc_modify_rqt_in_bits { 4325struct mlx5_ifc_modify_rqt_in_bits {
4326 u8 opcode[0x10]; 4326 u8 opcode[0x10];
4327 u8 reserved_0[0x10]; 4327 u8 reserved_at_10[0x10];
4328 4328
4329 u8 reserved_1[0x10]; 4329 u8 reserved_at_20[0x10];
4330 u8 op_mod[0x10]; 4330 u8 op_mod[0x10];
4331 4331
4332 u8 reserved_2[0x8]; 4332 u8 reserved_at_40[0x8];
4333 u8 rqtn[0x18]; 4333 u8 rqtn[0x18];
4334 4334
4335 u8 reserved_3[0x20]; 4335 u8 reserved_at_60[0x20];
4336 4336
4337 struct mlx5_ifc_rqt_bitmask_bits bitmask; 4337 struct mlx5_ifc_rqt_bitmask_bits bitmask;
4338 4338
4339 u8 reserved_4[0x40]; 4339 u8 reserved_at_c0[0x40];
4340 4340
4341 struct mlx5_ifc_rqtc_bits ctx; 4341 struct mlx5_ifc_rqtc_bits ctx;
4342}; 4342};
4343 4343
4344struct mlx5_ifc_modify_rq_out_bits { 4344struct mlx5_ifc_modify_rq_out_bits {
4345 u8 status[0x8]; 4345 u8 status[0x8];
4346 u8 reserved_0[0x18]; 4346 u8 reserved_at_8[0x18];
4347 4347
4348 u8 syndrome[0x20]; 4348 u8 syndrome[0x20];
4349 4349
4350 u8 reserved_1[0x40]; 4350 u8 reserved_at_40[0x40];
4351}; 4351};
4352 4352
4353struct mlx5_ifc_modify_rq_in_bits { 4353struct mlx5_ifc_modify_rq_in_bits {
4354 u8 opcode[0x10]; 4354 u8 opcode[0x10];
4355 u8 reserved_0[0x10]; 4355 u8 reserved_at_10[0x10];
4356 4356
4357 u8 reserved_1[0x10]; 4357 u8 reserved_at_20[0x10];
4358 u8 op_mod[0x10]; 4358 u8 op_mod[0x10];
4359 4359
4360 u8 rq_state[0x4]; 4360 u8 rq_state[0x4];
4361 u8 reserved_2[0x4]; 4361 u8 reserved_at_44[0x4];
4362 u8 rqn[0x18]; 4362 u8 rqn[0x18];
4363 4363
4364 u8 reserved_3[0x20]; 4364 u8 reserved_at_60[0x20];
4365 4365
4366 u8 modify_bitmask[0x40]; 4366 u8 modify_bitmask[0x40];
4367 4367
4368 u8 reserved_4[0x40]; 4368 u8 reserved_at_c0[0x40];
4369 4369
4370 struct mlx5_ifc_rqc_bits ctx; 4370 struct mlx5_ifc_rqc_bits ctx;
4371}; 4371};
4372 4372
4373struct mlx5_ifc_modify_rmp_out_bits { 4373struct mlx5_ifc_modify_rmp_out_bits {
4374 u8 status[0x8]; 4374 u8 status[0x8];
4375 u8 reserved_0[0x18]; 4375 u8 reserved_at_8[0x18];
4376 4376
4377 u8 syndrome[0x20]; 4377 u8 syndrome[0x20];
4378 4378
4379 u8 reserved_1[0x40]; 4379 u8 reserved_at_40[0x40];
4380}; 4380};
4381 4381
4382struct mlx5_ifc_rmp_bitmask_bits { 4382struct mlx5_ifc_rmp_bitmask_bits {
4383 u8 reserved[0x20]; 4383 u8 reserved_at_0[0x20];
4384 4384
4385 u8 reserved1[0x1f]; 4385 u8 reserved_at_20[0x1f];
4386 u8 lwm[0x1]; 4386 u8 lwm[0x1];
4387}; 4387};
4388 4388
4389struct mlx5_ifc_modify_rmp_in_bits { 4389struct mlx5_ifc_modify_rmp_in_bits {
4390 u8 opcode[0x10]; 4390 u8 opcode[0x10];
4391 u8 reserved_0[0x10]; 4391 u8 reserved_at_10[0x10];
4392 4392
4393 u8 reserved_1[0x10]; 4393 u8 reserved_at_20[0x10];
4394 u8 op_mod[0x10]; 4394 u8 op_mod[0x10];
4395 4395
4396 u8 rmp_state[0x4]; 4396 u8 rmp_state[0x4];
4397 u8 reserved_2[0x4]; 4397 u8 reserved_at_44[0x4];
4398 u8 rmpn[0x18]; 4398 u8 rmpn[0x18];
4399 4399
4400 u8 reserved_3[0x20]; 4400 u8 reserved_at_60[0x20];
4401 4401
4402 struct mlx5_ifc_rmp_bitmask_bits bitmask; 4402 struct mlx5_ifc_rmp_bitmask_bits bitmask;
4403 4403
4404 u8 reserved_4[0x40]; 4404 u8 reserved_at_c0[0x40];
4405 4405
4406 struct mlx5_ifc_rmpc_bits ctx; 4406 struct mlx5_ifc_rmpc_bits ctx;
4407}; 4407};
4408 4408
4409struct mlx5_ifc_modify_nic_vport_context_out_bits { 4409struct mlx5_ifc_modify_nic_vport_context_out_bits {
4410 u8 status[0x8]; 4410 u8 status[0x8];
4411 u8 reserved_0[0x18]; 4411 u8 reserved_at_8[0x18];
4412 4412
4413 u8 syndrome[0x20]; 4413 u8 syndrome[0x20];
4414 4414
4415 u8 reserved_1[0x40]; 4415 u8 reserved_at_40[0x40];
4416}; 4416};
4417 4417
4418struct mlx5_ifc_modify_nic_vport_field_select_bits { 4418struct mlx5_ifc_modify_nic_vport_field_select_bits {
4419 u8 reserved_0[0x19]; 4419 u8 reserved_at_0[0x19];
4420 u8 mtu[0x1]; 4420 u8 mtu[0x1];
4421 u8 change_event[0x1]; 4421 u8 change_event[0x1];
4422 u8 promisc[0x1]; 4422 u8 promisc[0x1];
4423 u8 permanent_address[0x1]; 4423 u8 permanent_address[0x1];
4424 u8 addresses_list[0x1]; 4424 u8 addresses_list[0x1];
4425 u8 roce_en[0x1]; 4425 u8 roce_en[0x1];
4426 u8 reserved_1[0x1]; 4426 u8 reserved_at_1f[0x1];
4427}; 4427};
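
The field_select bitmask above is a single dword, and with the padding renamed to reserved_at_0 and reserved_at_1f the accounting becomes explicit: 0x19 reserved bits, six feature-select bits and one trailing reserved bit. A compile-time sketch of that check; the FS_* constants are purely illustrative and do not exist in the driver:

        /* Sketch only: verify the field_select layout adds up to one dword. */
        #include <assert.h>

        enum {
                FS_RESERVED_AT_0  = 0x19,
                FS_MTU            = 0x1,
                FS_CHANGE_EVENT   = 0x1,
                FS_PROMISC        = 0x1,
                FS_PERMANENT_ADDR = 0x1,
                FS_ADDRESSES_LIST = 0x1,
                FS_ROCE_EN        = 0x1,
                FS_RESERVED_AT_1F = 0x1,
        };

        static_assert(FS_RESERVED_AT_0 + FS_MTU + FS_CHANGE_EVENT + FS_PROMISC +
                      FS_PERMANENT_ADDR + FS_ADDRESSES_LIST + FS_ROCE_EN +
                      FS_RESERVED_AT_1F == 0x20,
                      "modify_nic_vport field_select must be exactly one dword");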
4428 4428
4429struct mlx5_ifc_modify_nic_vport_context_in_bits { 4429struct mlx5_ifc_modify_nic_vport_context_in_bits {
4430 u8 opcode[0x10]; 4430 u8 opcode[0x10];
4431 u8 reserved_0[0x10]; 4431 u8 reserved_at_10[0x10];
4432 4432
4433 u8 reserved_1[0x10]; 4433 u8 reserved_at_20[0x10];
4434 u8 op_mod[0x10]; 4434 u8 op_mod[0x10];
4435 4435
4436 u8 other_vport[0x1]; 4436 u8 other_vport[0x1];
4437 u8 reserved_2[0xf]; 4437 u8 reserved_at_41[0xf];
4438 u8 vport_number[0x10]; 4438 u8 vport_number[0x10];
4439 4439
4440 struct mlx5_ifc_modify_nic_vport_field_select_bits field_select; 4440 struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
4441 4441
4442 u8 reserved_3[0x780]; 4442 u8 reserved_at_80[0x780];
4443 4443
4444 struct mlx5_ifc_nic_vport_context_bits nic_vport_context; 4444 struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
4445}; 4445};
4446 4446
4447struct mlx5_ifc_modify_hca_vport_context_out_bits { 4447struct mlx5_ifc_modify_hca_vport_context_out_bits {
4448 u8 status[0x8]; 4448 u8 status[0x8];
4449 u8 reserved_0[0x18]; 4449 u8 reserved_at_8[0x18];
4450 4450
4451 u8 syndrome[0x20]; 4451 u8 syndrome[0x20];
4452 4452
4453 u8 reserved_1[0x40]; 4453 u8 reserved_at_40[0x40];
4454}; 4454};
4455 4455
4456struct mlx5_ifc_modify_hca_vport_context_in_bits { 4456struct mlx5_ifc_modify_hca_vport_context_in_bits {
4457 u8 opcode[0x10]; 4457 u8 opcode[0x10];
4458 u8 reserved_0[0x10]; 4458 u8 reserved_at_10[0x10];
4459 4459
4460 u8 reserved_1[0x10]; 4460 u8 reserved_at_20[0x10];
4461 u8 op_mod[0x10]; 4461 u8 op_mod[0x10];
4462 4462
4463 u8 other_vport[0x1]; 4463 u8 other_vport[0x1];
4464 u8 reserved_2[0xb]; 4464 u8 reserved_at_41[0xb];
4465 u8 port_num[0x4]; 4465 u8 port_num[0x4];
4466 u8 vport_number[0x10]; 4466 u8 vport_number[0x10];
4467 4467
4468 u8 reserved_3[0x20]; 4468 u8 reserved_at_60[0x20];
4469 4469
4470 struct mlx5_ifc_hca_vport_context_bits hca_vport_context; 4470 struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
4471}; 4471};
4472 4472
4473struct mlx5_ifc_modify_cq_out_bits { 4473struct mlx5_ifc_modify_cq_out_bits {
4474 u8 status[0x8]; 4474 u8 status[0x8];
4475 u8 reserved_0[0x18]; 4475 u8 reserved_at_8[0x18];
4476 4476
4477 u8 syndrome[0x20]; 4477 u8 syndrome[0x20];
4478 4478
4479 u8 reserved_1[0x40]; 4479 u8 reserved_at_40[0x40];
4480}; 4480};
4481 4481
4482enum { 4482enum {
@@ -4486,83 +4486,83 @@ enum {
4486 4486
4487struct mlx5_ifc_modify_cq_in_bits { 4487struct mlx5_ifc_modify_cq_in_bits {
4488 u8 opcode[0x10]; 4488 u8 opcode[0x10];
4489 u8 reserved_0[0x10]; 4489 u8 reserved_at_10[0x10];
4490 4490
4491 u8 reserved_1[0x10]; 4491 u8 reserved_at_20[0x10];
4492 u8 op_mod[0x10]; 4492 u8 op_mod[0x10];
4493 4493
4494 u8 reserved_2[0x8]; 4494 u8 reserved_at_40[0x8];
4495 u8 cqn[0x18]; 4495 u8 cqn[0x18];
4496 4496
4497 union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select; 4497 union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
4498 4498
4499 struct mlx5_ifc_cqc_bits cq_context; 4499 struct mlx5_ifc_cqc_bits cq_context;
4500 4500
4501 u8 reserved_3[0x600]; 4501 u8 reserved_at_280[0x600];
4502 4502
4503 u8 pas[0][0x40]; 4503 u8 pas[0][0x40];
4504}; 4504};
4505 4505
4506struct mlx5_ifc_modify_cong_status_out_bits { 4506struct mlx5_ifc_modify_cong_status_out_bits {
4507 u8 status[0x8]; 4507 u8 status[0x8];
4508 u8 reserved_0[0x18]; 4508 u8 reserved_at_8[0x18];
4509 4509
4510 u8 syndrome[0x20]; 4510 u8 syndrome[0x20];
4511 4511
4512 u8 reserved_1[0x40]; 4512 u8 reserved_at_40[0x40];
4513}; 4513};
4514 4514
4515struct mlx5_ifc_modify_cong_status_in_bits { 4515struct mlx5_ifc_modify_cong_status_in_bits {
4516 u8 opcode[0x10]; 4516 u8 opcode[0x10];
4517 u8 reserved_0[0x10]; 4517 u8 reserved_at_10[0x10];
4518 4518
4519 u8 reserved_1[0x10]; 4519 u8 reserved_at_20[0x10];
4520 u8 op_mod[0x10]; 4520 u8 op_mod[0x10];
4521 4521
4522 u8 reserved_2[0x18]; 4522 u8 reserved_at_40[0x18];
4523 u8 priority[0x4]; 4523 u8 priority[0x4];
4524 u8 cong_protocol[0x4]; 4524 u8 cong_protocol[0x4];
4525 4525
4526 u8 enable[0x1]; 4526 u8 enable[0x1];
4527 u8 tag_enable[0x1]; 4527 u8 tag_enable[0x1];
4528 u8 reserved_3[0x1e]; 4528 u8 reserved_at_62[0x1e];
4529}; 4529};
4530 4530
4531struct mlx5_ifc_modify_cong_params_out_bits { 4531struct mlx5_ifc_modify_cong_params_out_bits {
4532 u8 status[0x8]; 4532 u8 status[0x8];
4533 u8 reserved_0[0x18]; 4533 u8 reserved_at_8[0x18];
4534 4534
4535 u8 syndrome[0x20]; 4535 u8 syndrome[0x20];
4536 4536
4537 u8 reserved_1[0x40]; 4537 u8 reserved_at_40[0x40];
4538}; 4538};
4539 4539
4540struct mlx5_ifc_modify_cong_params_in_bits { 4540struct mlx5_ifc_modify_cong_params_in_bits {
4541 u8 opcode[0x10]; 4541 u8 opcode[0x10];
4542 u8 reserved_0[0x10]; 4542 u8 reserved_at_10[0x10];
4543 4543
4544 u8 reserved_1[0x10]; 4544 u8 reserved_at_20[0x10];
4545 u8 op_mod[0x10]; 4545 u8 op_mod[0x10];
4546 4546
4547 u8 reserved_2[0x1c]; 4547 u8 reserved_at_40[0x1c];
4548 u8 cong_protocol[0x4]; 4548 u8 cong_protocol[0x4];
4549 4549
4550 union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select; 4550 union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
4551 4551
4552 u8 reserved_3[0x80]; 4552 u8 reserved_at_80[0x80];
4553 4553
4554 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; 4554 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
4555}; 4555};
4556 4556
4557struct mlx5_ifc_manage_pages_out_bits { 4557struct mlx5_ifc_manage_pages_out_bits {
4558 u8 status[0x8]; 4558 u8 status[0x8];
4559 u8 reserved_0[0x18]; 4559 u8 reserved_at_8[0x18];
4560 4560
4561 u8 syndrome[0x20]; 4561 u8 syndrome[0x20];
4562 4562
4563 u8 output_num_entries[0x20]; 4563 u8 output_num_entries[0x20];
4564 4564
4565 u8 reserved_1[0x20]; 4565 u8 reserved_at_60[0x20];
4566 4566
4567 u8 pas[0][0x40]; 4567 u8 pas[0][0x40];
4568}; 4568};
@@ -4575,12 +4575,12 @@ enum {
4575 4575
4576struct mlx5_ifc_manage_pages_in_bits { 4576struct mlx5_ifc_manage_pages_in_bits {
4577 u8 opcode[0x10]; 4577 u8 opcode[0x10];
4578 u8 reserved_0[0x10]; 4578 u8 reserved_at_10[0x10];
4579 4579
4580 u8 reserved_1[0x10]; 4580 u8 reserved_at_20[0x10];
4581 u8 op_mod[0x10]; 4581 u8 op_mod[0x10];
4582 4582
4583 u8 reserved_2[0x10]; 4583 u8 reserved_at_40[0x10];
4584 u8 function_id[0x10]; 4584 u8 function_id[0x10];
4585 4585
4586 u8 input_num_entries[0x20]; 4586 u8 input_num_entries[0x20];
@@ -4590,117 +4590,117 @@ struct mlx5_ifc_manage_pages_in_bits {
4590 4590
4591struct mlx5_ifc_mad_ifc_out_bits { 4591struct mlx5_ifc_mad_ifc_out_bits {
4592 u8 status[0x8]; 4592 u8 status[0x8];
4593 u8 reserved_0[0x18]; 4593 u8 reserved_at_8[0x18];
4594 4594
4595 u8 syndrome[0x20]; 4595 u8 syndrome[0x20];
4596 4596
4597 u8 reserved_1[0x40]; 4597 u8 reserved_at_40[0x40];
4598 4598
4599 u8 response_mad_packet[256][0x8]; 4599 u8 response_mad_packet[256][0x8];
4600}; 4600};
4601 4601
4602struct mlx5_ifc_mad_ifc_in_bits { 4602struct mlx5_ifc_mad_ifc_in_bits {
4603 u8 opcode[0x10]; 4603 u8 opcode[0x10];
4604 u8 reserved_0[0x10]; 4604 u8 reserved_at_10[0x10];
4605 4605
4606 u8 reserved_1[0x10]; 4606 u8 reserved_at_20[0x10];
4607 u8 op_mod[0x10]; 4607 u8 op_mod[0x10];
4608 4608
4609 u8 remote_lid[0x10]; 4609 u8 remote_lid[0x10];
4610 u8 reserved_2[0x8]; 4610 u8 reserved_at_50[0x8];
4611 u8 port[0x8]; 4611 u8 port[0x8];
4612 4612
4613 u8 reserved_3[0x20]; 4613 u8 reserved_at_60[0x20];
4614 4614
4615 u8 mad[256][0x8]; 4615 u8 mad[256][0x8];
4616}; 4616};
4617 4617
4618struct mlx5_ifc_init_hca_out_bits { 4618struct mlx5_ifc_init_hca_out_bits {
4619 u8 status[0x8]; 4619 u8 status[0x8];
4620 u8 reserved_0[0x18]; 4620 u8 reserved_at_8[0x18];
4621 4621
4622 u8 syndrome[0x20]; 4622 u8 syndrome[0x20];
4623 4623
4624 u8 reserved_1[0x40]; 4624 u8 reserved_at_40[0x40];
4625}; 4625};
4626 4626
4627struct mlx5_ifc_init_hca_in_bits { 4627struct mlx5_ifc_init_hca_in_bits {
4628 u8 opcode[0x10]; 4628 u8 opcode[0x10];
4629 u8 reserved_0[0x10]; 4629 u8 reserved_at_10[0x10];
4630 4630
4631 u8 reserved_1[0x10]; 4631 u8 reserved_at_20[0x10];
4632 u8 op_mod[0x10]; 4632 u8 op_mod[0x10];
4633 4633
4634 u8 reserved_2[0x40]; 4634 u8 reserved_at_40[0x40];
4635}; 4635};
4636 4636
4637struct mlx5_ifc_init2rtr_qp_out_bits { 4637struct mlx5_ifc_init2rtr_qp_out_bits {
4638 u8 status[0x8]; 4638 u8 status[0x8];
4639 u8 reserved_0[0x18]; 4639 u8 reserved_at_8[0x18];
4640 4640
4641 u8 syndrome[0x20]; 4641 u8 syndrome[0x20];
4642 4642
4643 u8 reserved_1[0x40]; 4643 u8 reserved_at_40[0x40];
4644}; 4644};
4645 4645
4646struct mlx5_ifc_init2rtr_qp_in_bits { 4646struct mlx5_ifc_init2rtr_qp_in_bits {
4647 u8 opcode[0x10]; 4647 u8 opcode[0x10];
4648 u8 reserved_0[0x10]; 4648 u8 reserved_at_10[0x10];
4649 4649
4650 u8 reserved_1[0x10]; 4650 u8 reserved_at_20[0x10];
4651 u8 op_mod[0x10]; 4651 u8 op_mod[0x10];
4652 4652
4653 u8 reserved_2[0x8]; 4653 u8 reserved_at_40[0x8];
4654 u8 qpn[0x18]; 4654 u8 qpn[0x18];
4655 4655
4656 u8 reserved_3[0x20]; 4656 u8 reserved_at_60[0x20];
4657 4657
4658 u8 opt_param_mask[0x20]; 4658 u8 opt_param_mask[0x20];
4659 4659
4660 u8 reserved_4[0x20]; 4660 u8 reserved_at_a0[0x20];
4661 4661
4662 struct mlx5_ifc_qpc_bits qpc; 4662 struct mlx5_ifc_qpc_bits qpc;
4663 4663
4664 u8 reserved_5[0x80]; 4664 u8 reserved_at_800[0x80];
4665}; 4665};
4666 4666
4667struct mlx5_ifc_init2init_qp_out_bits { 4667struct mlx5_ifc_init2init_qp_out_bits {
4668 u8 status[0x8]; 4668 u8 status[0x8];
4669 u8 reserved_0[0x18]; 4669 u8 reserved_at_8[0x18];
4670 4670
4671 u8 syndrome[0x20]; 4671 u8 syndrome[0x20];
4672 4672
4673 u8 reserved_1[0x40]; 4673 u8 reserved_at_40[0x40];
4674}; 4674};
4675 4675
4676struct mlx5_ifc_init2init_qp_in_bits { 4676struct mlx5_ifc_init2init_qp_in_bits {
4677 u8 opcode[0x10]; 4677 u8 opcode[0x10];
4678 u8 reserved_0[0x10]; 4678 u8 reserved_at_10[0x10];
4679 4679
4680 u8 reserved_1[0x10]; 4680 u8 reserved_at_20[0x10];
4681 u8 op_mod[0x10]; 4681 u8 op_mod[0x10];
4682 4682
4683 u8 reserved_2[0x8]; 4683 u8 reserved_at_40[0x8];
4684 u8 qpn[0x18]; 4684 u8 qpn[0x18];
4685 4685
4686 u8 reserved_3[0x20]; 4686 u8 reserved_at_60[0x20];
4687 4687
4688 u8 opt_param_mask[0x20]; 4688 u8 opt_param_mask[0x20];
4689 4689
4690 u8 reserved_4[0x20]; 4690 u8 reserved_at_a0[0x20];
4691 4691
4692 struct mlx5_ifc_qpc_bits qpc; 4692 struct mlx5_ifc_qpc_bits qpc;
4693 4693
4694 u8 reserved_5[0x80]; 4694 u8 reserved_at_800[0x80];
4695}; 4695};
4696 4696
4697struct mlx5_ifc_get_dropped_packet_log_out_bits { 4697struct mlx5_ifc_get_dropped_packet_log_out_bits {
4698 u8 status[0x8]; 4698 u8 status[0x8];
4699 u8 reserved_0[0x18]; 4699 u8 reserved_at_8[0x18];
4700 4700
4701 u8 syndrome[0x20]; 4701 u8 syndrome[0x20];
4702 4702
4703 u8 reserved_1[0x40]; 4703 u8 reserved_at_40[0x40];
4704 4704
4705 u8 packet_headers_log[128][0x8]; 4705 u8 packet_headers_log[128][0x8];
4706 4706
@@ -4709,1029 +4709,1029 @@ struct mlx5_ifc_get_dropped_packet_log_out_bits {
4709 4709
4710struct mlx5_ifc_get_dropped_packet_log_in_bits { 4710struct mlx5_ifc_get_dropped_packet_log_in_bits {
4711 u8 opcode[0x10]; 4711 u8 opcode[0x10];
4712 u8 reserved_0[0x10]; 4712 u8 reserved_at_10[0x10];
4713 4713
4714 u8 reserved_1[0x10]; 4714 u8 reserved_at_20[0x10];
4715 u8 op_mod[0x10]; 4715 u8 op_mod[0x10];
4716 4716
4717 u8 reserved_2[0x40]; 4717 u8 reserved_at_40[0x40];
4718}; 4718};
4719 4719
4720struct mlx5_ifc_gen_eqe_in_bits { 4720struct mlx5_ifc_gen_eqe_in_bits {
4721 u8 opcode[0x10]; 4721 u8 opcode[0x10];
4722 u8 reserved_0[0x10]; 4722 u8 reserved_at_10[0x10];
4723 4723
4724 u8 reserved_1[0x10]; 4724 u8 reserved_at_20[0x10];
4725 u8 op_mod[0x10]; 4725 u8 op_mod[0x10];
4726 4726
4727 u8 reserved_2[0x18]; 4727 u8 reserved_at_40[0x18];
4728 u8 eq_number[0x8]; 4728 u8 eq_number[0x8];
4729 4729
4730 u8 reserved_3[0x20]; 4730 u8 reserved_at_60[0x20];
4731 4731
4732 u8 eqe[64][0x8]; 4732 u8 eqe[64][0x8];
4733}; 4733};
4734 4734
4735struct mlx5_ifc_gen_eq_out_bits { 4735struct mlx5_ifc_gen_eq_out_bits {
4736 u8 status[0x8]; 4736 u8 status[0x8];
4737 u8 reserved_0[0x18]; 4737 u8 reserved_at_8[0x18];
4738 4738
4739 u8 syndrome[0x20]; 4739 u8 syndrome[0x20];
4740 4740
4741 u8 reserved_1[0x40]; 4741 u8 reserved_at_40[0x40];
4742}; 4742};
4743 4743
4744struct mlx5_ifc_enable_hca_out_bits { 4744struct mlx5_ifc_enable_hca_out_bits {
4745 u8 status[0x8]; 4745 u8 status[0x8];
4746 u8 reserved_0[0x18]; 4746 u8 reserved_at_8[0x18];
4747 4747
4748 u8 syndrome[0x20]; 4748 u8 syndrome[0x20];
4749 4749
4750 u8 reserved_1[0x20]; 4750 u8 reserved_at_40[0x20];
4751}; 4751};
4752 4752
4753struct mlx5_ifc_enable_hca_in_bits { 4753struct mlx5_ifc_enable_hca_in_bits {
4754 u8 opcode[0x10]; 4754 u8 opcode[0x10];
4755 u8 reserved_0[0x10]; 4755 u8 reserved_at_10[0x10];
4756 4756
4757 u8 reserved_1[0x10]; 4757 u8 reserved_at_20[0x10];
4758 u8 op_mod[0x10]; 4758 u8 op_mod[0x10];
4759 4759
4760 u8 reserved_2[0x10]; 4760 u8 reserved_at_40[0x10];
4761 u8 function_id[0x10]; 4761 u8 function_id[0x10];
4762 4762
4763 u8 reserved_3[0x20]; 4763 u8 reserved_at_60[0x20];
4764}; 4764};
4765 4765
4766struct mlx5_ifc_drain_dct_out_bits { 4766struct mlx5_ifc_drain_dct_out_bits {
4767 u8 status[0x8]; 4767 u8 status[0x8];
4768 u8 reserved_0[0x18]; 4768 u8 reserved_at_8[0x18];
4769 4769
4770 u8 syndrome[0x20]; 4770 u8 syndrome[0x20];
4771 4771
4772 u8 reserved_1[0x40]; 4772 u8 reserved_at_40[0x40];
4773}; 4773};
4774 4774
4775struct mlx5_ifc_drain_dct_in_bits { 4775struct mlx5_ifc_drain_dct_in_bits {
4776 u8 opcode[0x10]; 4776 u8 opcode[0x10];
4777 u8 reserved_0[0x10]; 4777 u8 reserved_at_10[0x10];
4778 4778
4779 u8 reserved_1[0x10]; 4779 u8 reserved_at_20[0x10];
4780 u8 op_mod[0x10]; 4780 u8 op_mod[0x10];
4781 4781
4782 u8 reserved_2[0x8]; 4782 u8 reserved_at_40[0x8];
4783 u8 dctn[0x18]; 4783 u8 dctn[0x18];
4784 4784
4785 u8 reserved_3[0x20]; 4785 u8 reserved_at_60[0x20];
4786}; 4786};
4787 4787
4788struct mlx5_ifc_disable_hca_out_bits { 4788struct mlx5_ifc_disable_hca_out_bits {
4789 u8 status[0x8]; 4789 u8 status[0x8];
4790 u8 reserved_0[0x18]; 4790 u8 reserved_at_8[0x18];
4791 4791
4792 u8 syndrome[0x20]; 4792 u8 syndrome[0x20];
4793 4793
4794 u8 reserved_1[0x20]; 4794 u8 reserved_at_40[0x20];
4795}; 4795};
4796 4796
4797struct mlx5_ifc_disable_hca_in_bits { 4797struct mlx5_ifc_disable_hca_in_bits {
4798 u8 opcode[0x10]; 4798 u8 opcode[0x10];
4799 u8 reserved_0[0x10]; 4799 u8 reserved_at_10[0x10];
4800 4800
4801 u8 reserved_1[0x10]; 4801 u8 reserved_at_20[0x10];
4802 u8 op_mod[0x10]; 4802 u8 op_mod[0x10];
4803 4803
4804 u8 reserved_2[0x10]; 4804 u8 reserved_at_40[0x10];
4805 u8 function_id[0x10]; 4805 u8 function_id[0x10];
4806 4806
4807 u8 reserved_3[0x20]; 4807 u8 reserved_at_60[0x20];
4808}; 4808};
4809 4809
4810struct mlx5_ifc_detach_from_mcg_out_bits { 4810struct mlx5_ifc_detach_from_mcg_out_bits {
4811 u8 status[0x8]; 4811 u8 status[0x8];
4812 u8 reserved_0[0x18]; 4812 u8 reserved_at_8[0x18];
4813 4813
4814 u8 syndrome[0x20]; 4814 u8 syndrome[0x20];
4815 4815
4816 u8 reserved_1[0x40]; 4816 u8 reserved_at_40[0x40];
4817}; 4817};
4818 4818
4819struct mlx5_ifc_detach_from_mcg_in_bits { 4819struct mlx5_ifc_detach_from_mcg_in_bits {
4820 u8 opcode[0x10]; 4820 u8 opcode[0x10];
4821 u8 reserved_0[0x10]; 4821 u8 reserved_at_10[0x10];
4822 4822
4823 u8 reserved_1[0x10]; 4823 u8 reserved_at_20[0x10];
4824 u8 op_mod[0x10]; 4824 u8 op_mod[0x10];
4825 4825
4826 u8 reserved_2[0x8]; 4826 u8 reserved_at_40[0x8];
4827 u8 qpn[0x18]; 4827 u8 qpn[0x18];
4828 4828
4829 u8 reserved_3[0x20]; 4829 u8 reserved_at_60[0x20];
4830 4830
4831 u8 multicast_gid[16][0x8]; 4831 u8 multicast_gid[16][0x8];
4832}; 4832};
4833 4833
4834struct mlx5_ifc_destroy_xrc_srq_out_bits { 4834struct mlx5_ifc_destroy_xrc_srq_out_bits {
4835 u8 status[0x8]; 4835 u8 status[0x8];
4836 u8 reserved_0[0x18]; 4836 u8 reserved_at_8[0x18];
4837 4837
4838 u8 syndrome[0x20]; 4838 u8 syndrome[0x20];
4839 4839
4840 u8 reserved_1[0x40]; 4840 u8 reserved_at_40[0x40];
4841}; 4841};
4842 4842
4843struct mlx5_ifc_destroy_xrc_srq_in_bits { 4843struct mlx5_ifc_destroy_xrc_srq_in_bits {
4844 u8 opcode[0x10]; 4844 u8 opcode[0x10];
4845 u8 reserved_0[0x10]; 4845 u8 reserved_at_10[0x10];
4846 4846
4847 u8 reserved_1[0x10]; 4847 u8 reserved_at_20[0x10];
4848 u8 op_mod[0x10]; 4848 u8 op_mod[0x10];
4849 4849
4850 u8 reserved_2[0x8]; 4850 u8 reserved_at_40[0x8];
4851 u8 xrc_srqn[0x18]; 4851 u8 xrc_srqn[0x18];
4852 4852
4853 u8 reserved_3[0x20]; 4853 u8 reserved_at_60[0x20];
4854}; 4854};
4855 4855
4856struct mlx5_ifc_destroy_tis_out_bits { 4856struct mlx5_ifc_destroy_tis_out_bits {
4857 u8 status[0x8]; 4857 u8 status[0x8];
4858 u8 reserved_0[0x18]; 4858 u8 reserved_at_8[0x18];
4859 4859
4860 u8 syndrome[0x20]; 4860 u8 syndrome[0x20];
4861 4861
4862 u8 reserved_1[0x40]; 4862 u8 reserved_at_40[0x40];
4863}; 4863};
4864 4864
4865struct mlx5_ifc_destroy_tis_in_bits { 4865struct mlx5_ifc_destroy_tis_in_bits {
4866 u8 opcode[0x10]; 4866 u8 opcode[0x10];
4867 u8 reserved_0[0x10]; 4867 u8 reserved_at_10[0x10];
4868 4868
4869 u8 reserved_1[0x10]; 4869 u8 reserved_at_20[0x10];
4870 u8 op_mod[0x10]; 4870 u8 op_mod[0x10];
4871 4871
4872 u8 reserved_2[0x8]; 4872 u8 reserved_at_40[0x8];
4873 u8 tisn[0x18]; 4873 u8 tisn[0x18];
4874 4874
4875 u8 reserved_3[0x20]; 4875 u8 reserved_at_60[0x20];
4876}; 4876};
4877 4877
4878struct mlx5_ifc_destroy_tir_out_bits { 4878struct mlx5_ifc_destroy_tir_out_bits {
4879 u8 status[0x8]; 4879 u8 status[0x8];
4880 u8 reserved_0[0x18]; 4880 u8 reserved_at_8[0x18];
4881 4881
4882 u8 syndrome[0x20]; 4882 u8 syndrome[0x20];
4883 4883
4884 u8 reserved_1[0x40]; 4884 u8 reserved_at_40[0x40];
4885}; 4885};
4886 4886
4887struct mlx5_ifc_destroy_tir_in_bits { 4887struct mlx5_ifc_destroy_tir_in_bits {
4888 u8 opcode[0x10]; 4888 u8 opcode[0x10];
4889 u8 reserved_0[0x10]; 4889 u8 reserved_at_10[0x10];
4890 4890
4891 u8 reserved_1[0x10]; 4891 u8 reserved_at_20[0x10];
4892 u8 op_mod[0x10]; 4892 u8 op_mod[0x10];
4893 4893
4894 u8 reserved_2[0x8]; 4894 u8 reserved_at_40[0x8];
4895 u8 tirn[0x18]; 4895 u8 tirn[0x18];
4896 4896
4897 u8 reserved_3[0x20]; 4897 u8 reserved_at_60[0x20];
4898}; 4898};
4899 4899
4900struct mlx5_ifc_destroy_srq_out_bits { 4900struct mlx5_ifc_destroy_srq_out_bits {
4901 u8 status[0x8]; 4901 u8 status[0x8];
4902 u8 reserved_0[0x18]; 4902 u8 reserved_at_8[0x18];
4903 4903
4904 u8 syndrome[0x20]; 4904 u8 syndrome[0x20];
4905 4905
4906 u8 reserved_1[0x40]; 4906 u8 reserved_at_40[0x40];
4907}; 4907};
4908 4908
4909struct mlx5_ifc_destroy_srq_in_bits { 4909struct mlx5_ifc_destroy_srq_in_bits {
4910 u8 opcode[0x10]; 4910 u8 opcode[0x10];
4911 u8 reserved_0[0x10]; 4911 u8 reserved_at_10[0x10];
4912 4912
4913 u8 reserved_1[0x10]; 4913 u8 reserved_at_20[0x10];
4914 u8 op_mod[0x10]; 4914 u8 op_mod[0x10];
4915 4915
4916 u8 reserved_2[0x8]; 4916 u8 reserved_at_40[0x8];
4917 u8 srqn[0x18]; 4917 u8 srqn[0x18];
4918 4918
4919 u8 reserved_3[0x20]; 4919 u8 reserved_at_60[0x20];
4920}; 4920};
4921 4921
4922struct mlx5_ifc_destroy_sq_out_bits { 4922struct mlx5_ifc_destroy_sq_out_bits {
4923 u8 status[0x8]; 4923 u8 status[0x8];
4924 u8 reserved_0[0x18]; 4924 u8 reserved_at_8[0x18];
4925 4925
4926 u8 syndrome[0x20]; 4926 u8 syndrome[0x20];
4927 4927
4928 u8 reserved_1[0x40]; 4928 u8 reserved_at_40[0x40];
4929}; 4929};
4930 4930
4931struct mlx5_ifc_destroy_sq_in_bits { 4931struct mlx5_ifc_destroy_sq_in_bits {
4932 u8 opcode[0x10]; 4932 u8 opcode[0x10];
4933 u8 reserved_0[0x10]; 4933 u8 reserved_at_10[0x10];
4934 4934
4935 u8 reserved_1[0x10]; 4935 u8 reserved_at_20[0x10];
4936 u8 op_mod[0x10]; 4936 u8 op_mod[0x10];
4937 4937
4938 u8 reserved_2[0x8]; 4938 u8 reserved_at_40[0x8];
4939 u8 sqn[0x18]; 4939 u8 sqn[0x18];
4940 4940
4941 u8 reserved_3[0x20]; 4941 u8 reserved_at_60[0x20];
4942}; 4942};
4943 4943
4944struct mlx5_ifc_destroy_rqt_out_bits { 4944struct mlx5_ifc_destroy_rqt_out_bits {
4945 u8 status[0x8]; 4945 u8 status[0x8];
4946 u8 reserved_0[0x18]; 4946 u8 reserved_at_8[0x18];
4947 4947
4948 u8 syndrome[0x20]; 4948 u8 syndrome[0x20];
4949 4949
4950 u8 reserved_1[0x40]; 4950 u8 reserved_at_40[0x40];
4951}; 4951};
4952 4952
4953struct mlx5_ifc_destroy_rqt_in_bits { 4953struct mlx5_ifc_destroy_rqt_in_bits {
4954 u8 opcode[0x10]; 4954 u8 opcode[0x10];
4955 u8 reserved_0[0x10]; 4955 u8 reserved_at_10[0x10];
4956 4956
4957 u8 reserved_1[0x10]; 4957 u8 reserved_at_20[0x10];
4958 u8 op_mod[0x10]; 4958 u8 op_mod[0x10];
4959 4959
4960 u8 reserved_2[0x8]; 4960 u8 reserved_at_40[0x8];
4961 u8 rqtn[0x18]; 4961 u8 rqtn[0x18];
4962 4962
4963 u8 reserved_3[0x20]; 4963 u8 reserved_at_60[0x20];
4964}; 4964};
4965 4965
4966struct mlx5_ifc_destroy_rq_out_bits { 4966struct mlx5_ifc_destroy_rq_out_bits {
4967 u8 status[0x8]; 4967 u8 status[0x8];
4968 u8 reserved_0[0x18]; 4968 u8 reserved_at_8[0x18];
4969 4969
4970 u8 syndrome[0x20]; 4970 u8 syndrome[0x20];
4971 4971
4972 u8 reserved_1[0x40]; 4972 u8 reserved_at_40[0x40];
4973}; 4973};
4974 4974
4975struct mlx5_ifc_destroy_rq_in_bits { 4975struct mlx5_ifc_destroy_rq_in_bits {
4976 u8 opcode[0x10]; 4976 u8 opcode[0x10];
4977 u8 reserved_0[0x10]; 4977 u8 reserved_at_10[0x10];
4978 4978
4979 u8 reserved_1[0x10]; 4979 u8 reserved_at_20[0x10];
4980 u8 op_mod[0x10]; 4980 u8 op_mod[0x10];
4981 4981
4982 u8 reserved_2[0x8]; 4982 u8 reserved_at_40[0x8];
4983 u8 rqn[0x18]; 4983 u8 rqn[0x18];
4984 4984
4985 u8 reserved_3[0x20]; 4985 u8 reserved_at_60[0x20];
4986}; 4986};
4987 4987
4988struct mlx5_ifc_destroy_rmp_out_bits { 4988struct mlx5_ifc_destroy_rmp_out_bits {
4989 u8 status[0x8]; 4989 u8 status[0x8];
4990 u8 reserved_0[0x18]; 4990 u8 reserved_at_8[0x18];
4991 4991
4992 u8 syndrome[0x20]; 4992 u8 syndrome[0x20];
4993 4993
4994 u8 reserved_1[0x40]; 4994 u8 reserved_at_40[0x40];
4995}; 4995};
4996 4996
4997struct mlx5_ifc_destroy_rmp_in_bits { 4997struct mlx5_ifc_destroy_rmp_in_bits {
4998 u8 opcode[0x10]; 4998 u8 opcode[0x10];
4999 u8 reserved_0[0x10]; 4999 u8 reserved_at_10[0x10];
5000 5000
5001 u8 reserved_1[0x10]; 5001 u8 reserved_at_20[0x10];
5002 u8 op_mod[0x10]; 5002 u8 op_mod[0x10];
5003 5003
5004 u8 reserved_2[0x8]; 5004 u8 reserved_at_40[0x8];
5005 u8 rmpn[0x18]; 5005 u8 rmpn[0x18];
5006 5006
5007 u8 reserved_3[0x20]; 5007 u8 reserved_at_60[0x20];
5008}; 5008};
5009 5009
5010struct mlx5_ifc_destroy_qp_out_bits { 5010struct mlx5_ifc_destroy_qp_out_bits {
5011 u8 status[0x8]; 5011 u8 status[0x8];
5012 u8 reserved_0[0x18]; 5012 u8 reserved_at_8[0x18];
5013 5013
5014 u8 syndrome[0x20]; 5014 u8 syndrome[0x20];
5015 5015
5016 u8 reserved_1[0x40]; 5016 u8 reserved_at_40[0x40];
5017}; 5017};
5018 5018
5019struct mlx5_ifc_destroy_qp_in_bits { 5019struct mlx5_ifc_destroy_qp_in_bits {
5020 u8 opcode[0x10]; 5020 u8 opcode[0x10];
5021 u8 reserved_0[0x10]; 5021 u8 reserved_at_10[0x10];
5022 5022
5023 u8 reserved_1[0x10]; 5023 u8 reserved_at_20[0x10];
5024 u8 op_mod[0x10]; 5024 u8 op_mod[0x10];
5025 5025
5026 u8 reserved_2[0x8]; 5026 u8 reserved_at_40[0x8];
5027 u8 qpn[0x18]; 5027 u8 qpn[0x18];
5028 5028
5029 u8 reserved_3[0x20]; 5029 u8 reserved_at_60[0x20];
5030}; 5030};
5031 5031
5032struct mlx5_ifc_destroy_psv_out_bits { 5032struct mlx5_ifc_destroy_psv_out_bits {
5033 u8 status[0x8]; 5033 u8 status[0x8];
5034 u8 reserved_0[0x18]; 5034 u8 reserved_at_8[0x18];
5035 5035
5036 u8 syndrome[0x20]; 5036 u8 syndrome[0x20];
5037 5037
5038 u8 reserved_1[0x40]; 5038 u8 reserved_at_40[0x40];
5039}; 5039};
5040 5040
5041struct mlx5_ifc_destroy_psv_in_bits { 5041struct mlx5_ifc_destroy_psv_in_bits {
5042 u8 opcode[0x10]; 5042 u8 opcode[0x10];
5043 u8 reserved_0[0x10]; 5043 u8 reserved_at_10[0x10];
5044 5044
5045 u8 reserved_1[0x10]; 5045 u8 reserved_at_20[0x10];
5046 u8 op_mod[0x10]; 5046 u8 op_mod[0x10];
5047 5047
5048 u8 reserved_2[0x8]; 5048 u8 reserved_at_40[0x8];
5049 u8 psvn[0x18]; 5049 u8 psvn[0x18];
5050 5050
5051 u8 reserved_3[0x20]; 5051 u8 reserved_at_60[0x20];
5052}; 5052};
5053 5053
5054struct mlx5_ifc_destroy_mkey_out_bits { 5054struct mlx5_ifc_destroy_mkey_out_bits {
5055 u8 status[0x8]; 5055 u8 status[0x8];
5056 u8 reserved_0[0x18]; 5056 u8 reserved_at_8[0x18];
5057 5057
5058 u8 syndrome[0x20]; 5058 u8 syndrome[0x20];
5059 5059
5060 u8 reserved_1[0x40]; 5060 u8 reserved_at_40[0x40];
5061}; 5061};
5062 5062
5063struct mlx5_ifc_destroy_mkey_in_bits { 5063struct mlx5_ifc_destroy_mkey_in_bits {
5064 u8 opcode[0x10]; 5064 u8 opcode[0x10];
5065 u8 reserved_0[0x10]; 5065 u8 reserved_at_10[0x10];
5066 5066
5067 u8 reserved_1[0x10]; 5067 u8 reserved_at_20[0x10];
5068 u8 op_mod[0x10]; 5068 u8 op_mod[0x10];
5069 5069
5070 u8 reserved_2[0x8]; 5070 u8 reserved_at_40[0x8];
5071 u8 mkey_index[0x18]; 5071 u8 mkey_index[0x18];
5072 5072
5073 u8 reserved_3[0x20]; 5073 u8 reserved_at_60[0x20];
5074}; 5074};
5075 5075
5076struct mlx5_ifc_destroy_flow_table_out_bits { 5076struct mlx5_ifc_destroy_flow_table_out_bits {
5077 u8 status[0x8]; 5077 u8 status[0x8];
5078 u8 reserved_0[0x18]; 5078 u8 reserved_at_8[0x18];
5079 5079
5080 u8 syndrome[0x20]; 5080 u8 syndrome[0x20];
5081 5081
5082 u8 reserved_1[0x40]; 5082 u8 reserved_at_40[0x40];
5083}; 5083};
5084 5084
5085struct mlx5_ifc_destroy_flow_table_in_bits { 5085struct mlx5_ifc_destroy_flow_table_in_bits {
5086 u8 opcode[0x10]; 5086 u8 opcode[0x10];
5087 u8 reserved_0[0x10]; 5087 u8 reserved_at_10[0x10];
5088 5088
5089 u8 reserved_1[0x10]; 5089 u8 reserved_at_20[0x10];
5090 u8 op_mod[0x10]; 5090 u8 op_mod[0x10];
5091 5091
5092 u8 reserved_2[0x40]; 5092 u8 reserved_at_40[0x40];
5093 5093
5094 u8 table_type[0x8]; 5094 u8 table_type[0x8];
5095 u8 reserved_3[0x18]; 5095 u8 reserved_at_88[0x18];
5096 5096
5097 u8 reserved_4[0x8]; 5097 u8 reserved_at_a0[0x8];
5098 u8 table_id[0x18]; 5098 u8 table_id[0x18];
5099 5099
5100 u8 reserved_5[0x140]; 5100 u8 reserved_at_c0[0x140];
5101}; 5101};
5102 5102
5103struct mlx5_ifc_destroy_flow_group_out_bits { 5103struct mlx5_ifc_destroy_flow_group_out_bits {
5104 u8 status[0x8]; 5104 u8 status[0x8];
5105 u8 reserved_0[0x18]; 5105 u8 reserved_at_8[0x18];
5106 5106
5107 u8 syndrome[0x20]; 5107 u8 syndrome[0x20];
5108 5108
5109 u8 reserved_1[0x40]; 5109 u8 reserved_at_40[0x40];
5110}; 5110};
5111 5111
5112struct mlx5_ifc_destroy_flow_group_in_bits { 5112struct mlx5_ifc_destroy_flow_group_in_bits {
5113 u8 opcode[0x10]; 5113 u8 opcode[0x10];
5114 u8 reserved_0[0x10]; 5114 u8 reserved_at_10[0x10];
5115 5115
5116 u8 reserved_1[0x10]; 5116 u8 reserved_at_20[0x10];
5117 u8 op_mod[0x10]; 5117 u8 op_mod[0x10];
5118 5118
5119 u8 reserved_2[0x40]; 5119 u8 reserved_at_40[0x40];
5120 5120
5121 u8 table_type[0x8]; 5121 u8 table_type[0x8];
5122 u8 reserved_3[0x18]; 5122 u8 reserved_at_88[0x18];
5123 5123
5124 u8 reserved_4[0x8]; 5124 u8 reserved_at_a0[0x8];
5125 u8 table_id[0x18]; 5125 u8 table_id[0x18];
5126 5126
5127 u8 group_id[0x20]; 5127 u8 group_id[0x20];
5128 5128
5129 u8 reserved_5[0x120]; 5129 u8 reserved_at_e0[0x120];
5130}; 5130};
5131 5131
5132struct mlx5_ifc_destroy_eq_out_bits { 5132struct mlx5_ifc_destroy_eq_out_bits {
5133 u8 status[0x8]; 5133 u8 status[0x8];
5134 u8 reserved_0[0x18]; 5134 u8 reserved_at_8[0x18];
5135 5135
5136 u8 syndrome[0x20]; 5136 u8 syndrome[0x20];
5137 5137
5138 u8 reserved_1[0x40]; 5138 u8 reserved_at_40[0x40];
5139}; 5139};
5140 5140
5141struct mlx5_ifc_destroy_eq_in_bits { 5141struct mlx5_ifc_destroy_eq_in_bits {
5142 u8 opcode[0x10]; 5142 u8 opcode[0x10];
5143 u8 reserved_0[0x10]; 5143 u8 reserved_at_10[0x10];
5144 5144
5145 u8 reserved_1[0x10]; 5145 u8 reserved_at_20[0x10];
5146 u8 op_mod[0x10]; 5146 u8 op_mod[0x10];
5147 5147
5148 u8 reserved_2[0x18]; 5148 u8 reserved_at_40[0x18];
5149 u8 eq_number[0x8]; 5149 u8 eq_number[0x8];
5150 5150
5151 u8 reserved_3[0x20]; 5151 u8 reserved_at_60[0x20];
5152}; 5152};
5153 5153
5154struct mlx5_ifc_destroy_dct_out_bits { 5154struct mlx5_ifc_destroy_dct_out_bits {
5155 u8 status[0x8]; 5155 u8 status[0x8];
5156 u8 reserved_0[0x18]; 5156 u8 reserved_at_8[0x18];
5157 5157
5158 u8 syndrome[0x20]; 5158 u8 syndrome[0x20];
5159 5159
5160 u8 reserved_1[0x40]; 5160 u8 reserved_at_40[0x40];
5161}; 5161};
5162 5162
5163struct mlx5_ifc_destroy_dct_in_bits { 5163struct mlx5_ifc_destroy_dct_in_bits {
5164 u8 opcode[0x10]; 5164 u8 opcode[0x10];
5165 u8 reserved_0[0x10]; 5165 u8 reserved_at_10[0x10];
5166 5166
5167 u8 reserved_1[0x10]; 5167 u8 reserved_at_20[0x10];
5168 u8 op_mod[0x10]; 5168 u8 op_mod[0x10];
5169 5169
5170 u8 reserved_2[0x8]; 5170 u8 reserved_at_40[0x8];
5171 u8 dctn[0x18]; 5171 u8 dctn[0x18];
5172 5172
5173 u8 reserved_3[0x20]; 5173 u8 reserved_at_60[0x20];
5174}; 5174};
5175 5175
5176struct mlx5_ifc_destroy_cq_out_bits { 5176struct mlx5_ifc_destroy_cq_out_bits {
5177 u8 status[0x8]; 5177 u8 status[0x8];
5178 u8 reserved_0[0x18]; 5178 u8 reserved_at_8[0x18];
5179 5179
5180 u8 syndrome[0x20]; 5180 u8 syndrome[0x20];
5181 5181
5182 u8 reserved_1[0x40]; 5182 u8 reserved_at_40[0x40];
5183}; 5183};
5184 5184
5185struct mlx5_ifc_destroy_cq_in_bits { 5185struct mlx5_ifc_destroy_cq_in_bits {
5186 u8 opcode[0x10]; 5186 u8 opcode[0x10];
5187 u8 reserved_0[0x10]; 5187 u8 reserved_at_10[0x10];
5188 5188
5189 u8 reserved_1[0x10]; 5189 u8 reserved_at_20[0x10];
5190 u8 op_mod[0x10]; 5190 u8 op_mod[0x10];
5191 5191
5192 u8 reserved_2[0x8]; 5192 u8 reserved_at_40[0x8];
5193 u8 cqn[0x18]; 5193 u8 cqn[0x18];
5194 5194
5195 u8 reserved_3[0x20]; 5195 u8 reserved_at_60[0x20];
5196}; 5196};
5197 5197
5198struct mlx5_ifc_delete_vxlan_udp_dport_out_bits { 5198struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
5199 u8 status[0x8]; 5199 u8 status[0x8];
5200 u8 reserved_0[0x18]; 5200 u8 reserved_at_8[0x18];
5201 5201
5202 u8 syndrome[0x20]; 5202 u8 syndrome[0x20];
5203 5203
5204 u8 reserved_1[0x40]; 5204 u8 reserved_at_40[0x40];
5205}; 5205};
5206 5206
5207struct mlx5_ifc_delete_vxlan_udp_dport_in_bits { 5207struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
5208 u8 opcode[0x10]; 5208 u8 opcode[0x10];
5209 u8 reserved_0[0x10]; 5209 u8 reserved_at_10[0x10];
5210 5210
5211 u8 reserved_1[0x10]; 5211 u8 reserved_at_20[0x10];
5212 u8 op_mod[0x10]; 5212 u8 op_mod[0x10];
5213 5213
5214 u8 reserved_2[0x20]; 5214 u8 reserved_at_40[0x20];
5215 5215
5216 u8 reserved_3[0x10]; 5216 u8 reserved_at_60[0x10];
5217 u8 vxlan_udp_port[0x10]; 5217 u8 vxlan_udp_port[0x10];
5218}; 5218};
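
With the offsets spelled out in the names, a command layout such as delete_vxlan_udp_dport_in above reads as a 0x80-bit stream: opcode starts at bit 0 and vxlan_udp_port at bit 0x70, both 0x10 bits wide. A self-contained sketch of filling such a buffer, assuming MSB-first bit numbering as the layouts are written; the set_bits helper and the placeholder opcode value are illustrative, not the driver's real accessors:

        #include <stdint.h>
        #include <stddef.h>

        /* Illustrative helper: write the low <bits> bits of val at absolute
         * bit <off>, counting from the most significant bit of the buffer. */
        static void set_bits(uint8_t *buf, size_t off, unsigned int bits, uint32_t val)
        {
                unsigned int i;

                for (i = 0; i < bits; i++) {
                        size_t bit = off + i;
                        unsigned int shift = 7 - (unsigned int)(bit % 8);
                        uint8_t mask = (uint8_t)(1u << shift);
                        uint8_t b = (val >> (bits - 1 - i)) & 1u;

                        if (b)
                                buf[bit / 8] |= mask;
                        else
                                buf[bit / 8] &= (uint8_t)~mask;
                }
        }

        int main(void)
        {
                uint8_t in[0x80 / 8] = { 0 };   /* the layout above is 0x80 bits */
                uint32_t opcode = 0;            /* placeholder: real opcode value not shown here */
                uint32_t udp_port = 4789;       /* example VXLAN UDP port */

                set_bits(in, 0x00, 0x10, opcode);       /* opcode[0x10]         */
                set_bits(in, 0x70, 0x10, udp_port);     /* vxlan_udp_port[0x10] */
                return 0;
        }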
5219 5219
5220struct mlx5_ifc_delete_l2_table_entry_out_bits { 5220struct mlx5_ifc_delete_l2_table_entry_out_bits {
5221 u8 status[0x8]; 5221 u8 status[0x8];
5222 u8 reserved_0[0x18]; 5222 u8 reserved_at_8[0x18];
5223 5223
5224 u8 syndrome[0x20]; 5224 u8 syndrome[0x20];
5225 5225
5226 u8 reserved_1[0x40]; 5226 u8 reserved_at_40[0x40];
5227}; 5227};
5228 5228
5229struct mlx5_ifc_delete_l2_table_entry_in_bits { 5229struct mlx5_ifc_delete_l2_table_entry_in_bits {
5230 u8 opcode[0x10]; 5230 u8 opcode[0x10];
5231 u8 reserved_0[0x10]; 5231 u8 reserved_at_10[0x10];
5232 5232
5233 u8 reserved_1[0x10]; 5233 u8 reserved_at_20[0x10];
5234 u8 op_mod[0x10]; 5234 u8 op_mod[0x10];
5235 5235
5236 u8 reserved_2[0x60]; 5236 u8 reserved_at_40[0x60];
5237 5237
5238 u8 reserved_3[0x8]; 5238 u8 reserved_at_a0[0x8];
5239 u8 table_index[0x18]; 5239 u8 table_index[0x18];
5240 5240
5241 u8 reserved_4[0x140]; 5241 u8 reserved_at_c0[0x140];
5242}; 5242};
5243 5243
5244struct mlx5_ifc_delete_fte_out_bits { 5244struct mlx5_ifc_delete_fte_out_bits {
5245 u8 status[0x8]; 5245 u8 status[0x8];
5246 u8 reserved_0[0x18]; 5246 u8 reserved_at_8[0x18];
5247 5247
5248 u8 syndrome[0x20]; 5248 u8 syndrome[0x20];
5249 5249
5250 u8 reserved_1[0x40]; 5250 u8 reserved_at_40[0x40];
5251}; 5251};
5252 5252
5253struct mlx5_ifc_delete_fte_in_bits { 5253struct mlx5_ifc_delete_fte_in_bits {
5254 u8 opcode[0x10]; 5254 u8 opcode[0x10];
5255 u8 reserved_0[0x10]; 5255 u8 reserved_at_10[0x10];
5256 5256
5257 u8 reserved_1[0x10]; 5257 u8 reserved_at_20[0x10];
5258 u8 op_mod[0x10]; 5258 u8 op_mod[0x10];
5259 5259
5260 u8 reserved_2[0x40]; 5260 u8 reserved_at_40[0x40];
5261 5261
5262 u8 table_type[0x8]; 5262 u8 table_type[0x8];
5263 u8 reserved_3[0x18]; 5263 u8 reserved_at_88[0x18];
5264 5264
5265 u8 reserved_4[0x8]; 5265 u8 reserved_at_a0[0x8];
5266 u8 table_id[0x18]; 5266 u8 table_id[0x18];
5267 5267
5268 u8 reserved_5[0x40]; 5268 u8 reserved_at_c0[0x40];
5269 5269
5270 u8 flow_index[0x20]; 5270 u8 flow_index[0x20];
5271 5271
5272 u8 reserved_6[0xe0]; 5272 u8 reserved_at_120[0xe0];
5273}; 5273};
5274 5274
5275struct mlx5_ifc_dealloc_xrcd_out_bits { 5275struct mlx5_ifc_dealloc_xrcd_out_bits {
5276 u8 status[0x8]; 5276 u8 status[0x8];
5277 u8 reserved_0[0x18]; 5277 u8 reserved_at_8[0x18];
5278 5278
5279 u8 syndrome[0x20]; 5279 u8 syndrome[0x20];
5280 5280
5281 u8 reserved_1[0x40]; 5281 u8 reserved_at_40[0x40];
5282}; 5282};
5283 5283
5284struct mlx5_ifc_dealloc_xrcd_in_bits { 5284struct mlx5_ifc_dealloc_xrcd_in_bits {
5285 u8 opcode[0x10]; 5285 u8 opcode[0x10];
5286 u8 reserved_0[0x10]; 5286 u8 reserved_at_10[0x10];
5287 5287
5288 u8 reserved_1[0x10]; 5288 u8 reserved_at_20[0x10];
5289 u8 op_mod[0x10]; 5289 u8 op_mod[0x10];
5290 5290
5291 u8 reserved_2[0x8]; 5291 u8 reserved_at_40[0x8];
5292 u8 xrcd[0x18]; 5292 u8 xrcd[0x18];
5293 5293
5294 u8 reserved_3[0x20]; 5294 u8 reserved_at_60[0x20];
5295}; 5295};
5296 5296
5297struct mlx5_ifc_dealloc_uar_out_bits { 5297struct mlx5_ifc_dealloc_uar_out_bits {
5298 u8 status[0x8]; 5298 u8 status[0x8];
5299 u8 reserved_0[0x18]; 5299 u8 reserved_at_8[0x18];
5300 5300
5301 u8 syndrome[0x20]; 5301 u8 syndrome[0x20];
5302 5302
5303 u8 reserved_1[0x40]; 5303 u8 reserved_at_40[0x40];
5304}; 5304};
5305 5305
5306struct mlx5_ifc_dealloc_uar_in_bits { 5306struct mlx5_ifc_dealloc_uar_in_bits {
5307 u8 opcode[0x10]; 5307 u8 opcode[0x10];
5308 u8 reserved_0[0x10]; 5308 u8 reserved_at_10[0x10];
5309 5309
5310 u8 reserved_1[0x10]; 5310 u8 reserved_at_20[0x10];
5311 u8 op_mod[0x10]; 5311 u8 op_mod[0x10];
5312 5312
5313 u8 reserved_2[0x8]; 5313 u8 reserved_at_40[0x8];
5314 u8 uar[0x18]; 5314 u8 uar[0x18];
5315 5315
5316 u8 reserved_3[0x20]; 5316 u8 reserved_at_60[0x20];
5317}; 5317};
5318 5318
5319struct mlx5_ifc_dealloc_transport_domain_out_bits { 5319struct mlx5_ifc_dealloc_transport_domain_out_bits {
5320 u8 status[0x8]; 5320 u8 status[0x8];
5321 u8 reserved_0[0x18]; 5321 u8 reserved_at_8[0x18];
5322 5322
5323 u8 syndrome[0x20]; 5323 u8 syndrome[0x20];
5324 5324
5325 u8 reserved_1[0x40]; 5325 u8 reserved_at_40[0x40];
5326}; 5326};
5327 5327
5328struct mlx5_ifc_dealloc_transport_domain_in_bits { 5328struct mlx5_ifc_dealloc_transport_domain_in_bits {
5329 u8 opcode[0x10]; 5329 u8 opcode[0x10];
5330 u8 reserved_0[0x10]; 5330 u8 reserved_at_10[0x10];
5331 5331
5332 u8 reserved_1[0x10]; 5332 u8 reserved_at_20[0x10];
5333 u8 op_mod[0x10]; 5333 u8 op_mod[0x10];
5334 5334
5335 u8 reserved_2[0x8]; 5335 u8 reserved_at_40[0x8];
5336 u8 transport_domain[0x18]; 5336 u8 transport_domain[0x18];
5337 5337
5338 u8 reserved_3[0x20]; 5338 u8 reserved_at_60[0x20];
5339}; 5339};
5340 5340
5341struct mlx5_ifc_dealloc_q_counter_out_bits { 5341struct mlx5_ifc_dealloc_q_counter_out_bits {
5342 u8 status[0x8]; 5342 u8 status[0x8];
5343 u8 reserved_0[0x18]; 5343 u8 reserved_at_8[0x18];
5344 5344
5345 u8 syndrome[0x20]; 5345 u8 syndrome[0x20];
5346 5346
5347 u8 reserved_1[0x40]; 5347 u8 reserved_at_40[0x40];
5348}; 5348};
5349 5349
5350struct mlx5_ifc_dealloc_q_counter_in_bits { 5350struct mlx5_ifc_dealloc_q_counter_in_bits {
5351 u8 opcode[0x10]; 5351 u8 opcode[0x10];
5352 u8 reserved_0[0x10]; 5352 u8 reserved_at_10[0x10];
5353 5353
5354 u8 reserved_1[0x10]; 5354 u8 reserved_at_20[0x10];
5355 u8 op_mod[0x10]; 5355 u8 op_mod[0x10];
5356 5356
5357 u8 reserved_2[0x18]; 5357 u8 reserved_at_40[0x18];
5358 u8 counter_set_id[0x8]; 5358 u8 counter_set_id[0x8];
5359 5359
5360 u8 reserved_3[0x20]; 5360 u8 reserved_at_60[0x20];
5361}; 5361};
5362 5362
5363struct mlx5_ifc_dealloc_pd_out_bits { 5363struct mlx5_ifc_dealloc_pd_out_bits {
5364 u8 status[0x8]; 5364 u8 status[0x8];
5365 u8 reserved_0[0x18]; 5365 u8 reserved_at_8[0x18];
5366 5366
5367 u8 syndrome[0x20]; 5367 u8 syndrome[0x20];
5368 5368
5369 u8 reserved_1[0x40]; 5369 u8 reserved_at_40[0x40];
5370}; 5370};
5371 5371
5372struct mlx5_ifc_dealloc_pd_in_bits { 5372struct mlx5_ifc_dealloc_pd_in_bits {
5373 u8 opcode[0x10]; 5373 u8 opcode[0x10];
5374 u8 reserved_0[0x10]; 5374 u8 reserved_at_10[0x10];
5375 5375
5376 u8 reserved_1[0x10]; 5376 u8 reserved_at_20[0x10];
5377 u8 op_mod[0x10]; 5377 u8 op_mod[0x10];
5378 5378
5379 u8 reserved_2[0x8]; 5379 u8 reserved_at_40[0x8];
5380 u8 pd[0x18]; 5380 u8 pd[0x18];
5381 5381
5382 u8 reserved_3[0x20]; 5382 u8 reserved_at_60[0x20];
5383}; 5383};
5384 5384
5385struct mlx5_ifc_create_xrc_srq_out_bits { 5385struct mlx5_ifc_create_xrc_srq_out_bits {
5386 u8 status[0x8]; 5386 u8 status[0x8];
5387 u8 reserved_0[0x18]; 5387 u8 reserved_at_8[0x18];
5388 5388
5389 u8 syndrome[0x20]; 5389 u8 syndrome[0x20];
5390 5390
5391 u8 reserved_1[0x8]; 5391 u8 reserved_at_40[0x8];
5392 u8 xrc_srqn[0x18]; 5392 u8 xrc_srqn[0x18];
5393 5393
5394 u8 reserved_2[0x20]; 5394 u8 reserved_at_60[0x20];
5395}; 5395};
5396 5396
5397struct mlx5_ifc_create_xrc_srq_in_bits { 5397struct mlx5_ifc_create_xrc_srq_in_bits {
5398 u8 opcode[0x10]; 5398 u8 opcode[0x10];
5399 u8 reserved_0[0x10]; 5399 u8 reserved_at_10[0x10];
5400 5400
5401 u8 reserved_1[0x10]; 5401 u8 reserved_at_20[0x10];
5402 u8 op_mod[0x10]; 5402 u8 op_mod[0x10];
5403 5403
5404 u8 reserved_2[0x40]; 5404 u8 reserved_at_40[0x40];
5405 5405
5406 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; 5406 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
5407 5407
5408 u8 reserved_3[0x600]; 5408 u8 reserved_at_280[0x600];
5409 5409
5410 u8 pas[0][0x40]; 5410 u8 pas[0][0x40];
5411}; 5411};
5412 5412
5413struct mlx5_ifc_create_tis_out_bits { 5413struct mlx5_ifc_create_tis_out_bits {
5414 u8 status[0x8]; 5414 u8 status[0x8];
5415 u8 reserved_0[0x18]; 5415 u8 reserved_at_8[0x18];
5416 5416
5417 u8 syndrome[0x20]; 5417 u8 syndrome[0x20];
5418 5418
5419 u8 reserved_1[0x8]; 5419 u8 reserved_at_40[0x8];
5420 u8 tisn[0x18]; 5420 u8 tisn[0x18];
5421 5421
5422 u8 reserved_2[0x20]; 5422 u8 reserved_at_60[0x20];
5423}; 5423};
5424 5424
5425struct mlx5_ifc_create_tis_in_bits { 5425struct mlx5_ifc_create_tis_in_bits {
5426 u8 opcode[0x10]; 5426 u8 opcode[0x10];
5427 u8 reserved_0[0x10]; 5427 u8 reserved_at_10[0x10];
5428 5428
5429 u8 reserved_1[0x10]; 5429 u8 reserved_at_20[0x10];
5430 u8 op_mod[0x10]; 5430 u8 op_mod[0x10];
5431 5431
5432 u8 reserved_2[0xc0]; 5432 u8 reserved_at_40[0xc0];
5433 5433
5434 struct mlx5_ifc_tisc_bits ctx; 5434 struct mlx5_ifc_tisc_bits ctx;
5435}; 5435};
5436 5436
5437struct mlx5_ifc_create_tir_out_bits { 5437struct mlx5_ifc_create_tir_out_bits {
5438 u8 status[0x8]; 5438 u8 status[0x8];
5439 u8 reserved_0[0x18]; 5439 u8 reserved_at_8[0x18];
5440 5440
5441 u8 syndrome[0x20]; 5441 u8 syndrome[0x20];
5442 5442
5443 u8 reserved_1[0x8]; 5443 u8 reserved_at_40[0x8];
5444 u8 tirn[0x18]; 5444 u8 tirn[0x18];
5445 5445
5446 u8 reserved_2[0x20]; 5446 u8 reserved_at_60[0x20];
5447}; 5447};
5448 5448
5449struct mlx5_ifc_create_tir_in_bits { 5449struct mlx5_ifc_create_tir_in_bits {
5450 u8 opcode[0x10]; 5450 u8 opcode[0x10];
5451 u8 reserved_0[0x10]; 5451 u8 reserved_at_10[0x10];
5452 5452
5453 u8 reserved_1[0x10]; 5453 u8 reserved_at_20[0x10];
5454 u8 op_mod[0x10]; 5454 u8 op_mod[0x10];
5455 5455
5456 u8 reserved_2[0xc0]; 5456 u8 reserved_at_40[0xc0];
5457 5457
5458 struct mlx5_ifc_tirc_bits ctx; 5458 struct mlx5_ifc_tirc_bits ctx;
5459}; 5459};
5460 5460
5461struct mlx5_ifc_create_srq_out_bits { 5461struct mlx5_ifc_create_srq_out_bits {
5462 u8 status[0x8]; 5462 u8 status[0x8];
5463 u8 reserved_0[0x18]; 5463 u8 reserved_at_8[0x18];
5464 5464
5465 u8 syndrome[0x20]; 5465 u8 syndrome[0x20];
5466 5466
5467 u8 reserved_1[0x8]; 5467 u8 reserved_at_40[0x8];
5468 u8 srqn[0x18]; 5468 u8 srqn[0x18];
5469 5469
5470 u8 reserved_2[0x20]; 5470 u8 reserved_at_60[0x20];
5471}; 5471};
5472 5472
5473struct mlx5_ifc_create_srq_in_bits { 5473struct mlx5_ifc_create_srq_in_bits {
5474 u8 opcode[0x10]; 5474 u8 opcode[0x10];
5475 u8 reserved_0[0x10]; 5475 u8 reserved_at_10[0x10];
5476 5476
5477 u8 reserved_1[0x10]; 5477 u8 reserved_at_20[0x10];
5478 u8 op_mod[0x10]; 5478 u8 op_mod[0x10];
5479 5479
5480 u8 reserved_2[0x40]; 5480 u8 reserved_at_40[0x40];
5481 5481
5482 struct mlx5_ifc_srqc_bits srq_context_entry; 5482 struct mlx5_ifc_srqc_bits srq_context_entry;
5483 5483
5484 u8 reserved_3[0x600]; 5484 u8 reserved_at_280[0x600];
5485 5485
5486 u8 pas[0][0x40]; 5486 u8 pas[0][0x40];
5487}; 5487};
5488 5488
5489struct mlx5_ifc_create_sq_out_bits { 5489struct mlx5_ifc_create_sq_out_bits {
5490 u8 status[0x8]; 5490 u8 status[0x8];
5491 u8 reserved_0[0x18]; 5491 u8 reserved_at_8[0x18];
5492 5492
5493 u8 syndrome[0x20]; 5493 u8 syndrome[0x20];
5494 5494
5495 u8 reserved_1[0x8]; 5495 u8 reserved_at_40[0x8];
5496 u8 sqn[0x18]; 5496 u8 sqn[0x18];
5497 5497
5498 u8 reserved_2[0x20]; 5498 u8 reserved_at_60[0x20];
5499}; 5499};
5500 5500
5501struct mlx5_ifc_create_sq_in_bits { 5501struct mlx5_ifc_create_sq_in_bits {
5502 u8 opcode[0x10]; 5502 u8 opcode[0x10];
5503 u8 reserved_0[0x10]; 5503 u8 reserved_at_10[0x10];
5504 5504
5505 u8 reserved_1[0x10]; 5505 u8 reserved_at_20[0x10];
5506 u8 op_mod[0x10]; 5506 u8 op_mod[0x10];
5507 5507
5508 u8 reserved_2[0xc0]; 5508 u8 reserved_at_40[0xc0];
5509 5509
5510 struct mlx5_ifc_sqc_bits ctx; 5510 struct mlx5_ifc_sqc_bits ctx;
5511}; 5511};
5512 5512
5513struct mlx5_ifc_create_rqt_out_bits { 5513struct mlx5_ifc_create_rqt_out_bits {
5514 u8 status[0x8]; 5514 u8 status[0x8];
5515 u8 reserved_0[0x18]; 5515 u8 reserved_at_8[0x18];
5516 5516
5517 u8 syndrome[0x20]; 5517 u8 syndrome[0x20];
5518 5518
5519 u8 reserved_1[0x8]; 5519 u8 reserved_at_40[0x8];
5520 u8 rqtn[0x18]; 5520 u8 rqtn[0x18];
5521 5521
5522 u8 reserved_2[0x20]; 5522 u8 reserved_at_60[0x20];
5523}; 5523};
5524 5524
5525struct mlx5_ifc_create_rqt_in_bits { 5525struct mlx5_ifc_create_rqt_in_bits {
5526 u8 opcode[0x10]; 5526 u8 opcode[0x10];
5527 u8 reserved_0[0x10]; 5527 u8 reserved_at_10[0x10];
5528 5528
5529 u8 reserved_1[0x10]; 5529 u8 reserved_at_20[0x10];
5530 u8 op_mod[0x10]; 5530 u8 op_mod[0x10];
5531 5531
5532 u8 reserved_2[0xc0]; 5532 u8 reserved_at_40[0xc0];
5533 5533
5534 struct mlx5_ifc_rqtc_bits rqt_context; 5534 struct mlx5_ifc_rqtc_bits rqt_context;
5535}; 5535};
5536 5536
5537struct mlx5_ifc_create_rq_out_bits { 5537struct mlx5_ifc_create_rq_out_bits {
5538 u8 status[0x8]; 5538 u8 status[0x8];
5539 u8 reserved_0[0x18]; 5539 u8 reserved_at_8[0x18];
5540 5540
5541 u8 syndrome[0x20]; 5541 u8 syndrome[0x20];
5542 5542
5543 u8 reserved_1[0x8]; 5543 u8 reserved_at_40[0x8];
5544 u8 rqn[0x18]; 5544 u8 rqn[0x18];
5545 5545
5546 u8 reserved_2[0x20]; 5546 u8 reserved_at_60[0x20];
5547}; 5547};
5548 5548
5549struct mlx5_ifc_create_rq_in_bits { 5549struct mlx5_ifc_create_rq_in_bits {
5550 u8 opcode[0x10]; 5550 u8 opcode[0x10];
5551 u8 reserved_0[0x10]; 5551 u8 reserved_at_10[0x10];
5552 5552
5553 u8 reserved_1[0x10]; 5553 u8 reserved_at_20[0x10];
5554 u8 op_mod[0x10]; 5554 u8 op_mod[0x10];
5555 5555
5556 u8 reserved_2[0xc0]; 5556 u8 reserved_at_40[0xc0];
5557 5557
5558 struct mlx5_ifc_rqc_bits ctx; 5558 struct mlx5_ifc_rqc_bits ctx;
5559}; 5559};
5560 5560
5561struct mlx5_ifc_create_rmp_out_bits { 5561struct mlx5_ifc_create_rmp_out_bits {
5562 u8 status[0x8]; 5562 u8 status[0x8];
5563 u8 reserved_0[0x18]; 5563 u8 reserved_at_8[0x18];
5564 5564
5565 u8 syndrome[0x20]; 5565 u8 syndrome[0x20];
5566 5566
5567 u8 reserved_1[0x8]; 5567 u8 reserved_at_40[0x8];
5568 u8 rmpn[0x18]; 5568 u8 rmpn[0x18];
5569 5569
5570 u8 reserved_2[0x20]; 5570 u8 reserved_at_60[0x20];
5571}; 5571};
5572 5572
5573struct mlx5_ifc_create_rmp_in_bits { 5573struct mlx5_ifc_create_rmp_in_bits {
5574 u8 opcode[0x10]; 5574 u8 opcode[0x10];
5575 u8 reserved_0[0x10]; 5575 u8 reserved_at_10[0x10];
5576 5576
5577 u8 reserved_1[0x10]; 5577 u8 reserved_at_20[0x10];
5578 u8 op_mod[0x10]; 5578 u8 op_mod[0x10];
5579 5579
5580 u8 reserved_2[0xc0]; 5580 u8 reserved_at_40[0xc0];
5581 5581
5582 struct mlx5_ifc_rmpc_bits ctx; 5582 struct mlx5_ifc_rmpc_bits ctx;
5583}; 5583};
5584 5584
5585struct mlx5_ifc_create_qp_out_bits { 5585struct mlx5_ifc_create_qp_out_bits {
5586 u8 status[0x8]; 5586 u8 status[0x8];
5587 u8 reserved_0[0x18]; 5587 u8 reserved_at_8[0x18];
5588 5588
5589 u8 syndrome[0x20]; 5589 u8 syndrome[0x20];
5590 5590
5591 u8 reserved_1[0x8]; 5591 u8 reserved_at_40[0x8];
5592 u8 qpn[0x18]; 5592 u8 qpn[0x18];
5593 5593
5594 u8 reserved_2[0x20]; 5594 u8 reserved_at_60[0x20];
5595}; 5595};
5596 5596
5597struct mlx5_ifc_create_qp_in_bits { 5597struct mlx5_ifc_create_qp_in_bits {
5598 u8 opcode[0x10]; 5598 u8 opcode[0x10];
5599 u8 reserved_0[0x10]; 5599 u8 reserved_at_10[0x10];
5600 5600
5601 u8 reserved_1[0x10]; 5601 u8 reserved_at_20[0x10];
5602 u8 op_mod[0x10]; 5602 u8 op_mod[0x10];
5603 5603
5604 u8 reserved_2[0x40]; 5604 u8 reserved_at_40[0x40];
5605 5605
5606 u8 opt_param_mask[0x20]; 5606 u8 opt_param_mask[0x20];
5607 5607
5608 u8 reserved_3[0x20]; 5608 u8 reserved_at_a0[0x20];
5609 5609
5610 struct mlx5_ifc_qpc_bits qpc; 5610 struct mlx5_ifc_qpc_bits qpc;
5611 5611
5612 u8 reserved_4[0x80]; 5612 u8 reserved_at_800[0x80];
5613 5613
5614 u8 pas[0][0x40]; 5614 u8 pas[0][0x40];
5615}; 5615};
5616 5616
5617struct mlx5_ifc_create_psv_out_bits { 5617struct mlx5_ifc_create_psv_out_bits {
5618 u8 status[0x8]; 5618 u8 status[0x8];
5619 u8 reserved_0[0x18]; 5619 u8 reserved_at_8[0x18];
5620 5620
5621 u8 syndrome[0x20]; 5621 u8 syndrome[0x20];
5622 5622
5623 u8 reserved_1[0x40]; 5623 u8 reserved_at_40[0x40];
5624 5624
5625 u8 reserved_2[0x8]; 5625 u8 reserved_at_80[0x8];
5626 u8 psv0_index[0x18]; 5626 u8 psv0_index[0x18];
5627 5627
5628 u8 reserved_3[0x8]; 5628 u8 reserved_at_a0[0x8];
5629 u8 psv1_index[0x18]; 5629 u8 psv1_index[0x18];
5630 5630
5631 u8 reserved_4[0x8]; 5631 u8 reserved_at_c0[0x8];
5632 u8 psv2_index[0x18]; 5632 u8 psv2_index[0x18];
5633 5633
5634 u8 reserved_5[0x8]; 5634 u8 reserved_at_e0[0x8];
5635 u8 psv3_index[0x18]; 5635 u8 psv3_index[0x18];
5636}; 5636};
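
The create_psv output above packs four 0x18-bit PSV indices at bits 0x88, 0xa8, 0xc8 and 0xe8, each preceded by an 8-bit reserved_at_* member whose name now states that offset directly. A standalone sketch of pulling such a field out of a raw response buffer, again assuming MSB-first bit numbering; get_bits is illustrative and not a driver function, and a real buffer would come from the firmware response rather than being zeroed locally:

        #include <stdint.h>
        #include <stddef.h>
        #include <stdio.h>

        /* Illustrative helper: read <bits> bits starting at absolute bit <off>,
         * counting from the most significant bit of the buffer. */
        static uint32_t get_bits(const uint8_t *buf, size_t off, unsigned int bits)
        {
                uint32_t val = 0;
                unsigned int i;

                for (i = 0; i < bits; i++) {
                        size_t bit = off + i;
                        unsigned int shift = 7 - (unsigned int)(bit % 8);

                        val = (val << 1) | ((buf[bit / 8] >> shift) & 1u);
                }
                return val;
        }

        int main(void)
        {
                uint8_t out[0x100 / 8] = { 0 }; /* create_psv_out is 0x100 bits */

                /* syndrome at bit 0x20 (0x20 wide), psv0_index at 0x88 (0x18 wide) */
                printf("syndrome   = 0x%x\n", (unsigned)get_bits(out, 0x20, 0x20));
                printf("psv0_index = 0x%x\n", (unsigned)get_bits(out, 0x88, 0x18));
                return 0;
        }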
5637 5637
5638struct mlx5_ifc_create_psv_in_bits { 5638struct mlx5_ifc_create_psv_in_bits {
5639 u8 opcode[0x10]; 5639 u8 opcode[0x10];
5640 u8 reserved_0[0x10]; 5640 u8 reserved_at_10[0x10];
5641 5641
5642 u8 reserved_1[0x10]; 5642 u8 reserved_at_20[0x10];
5643 u8 op_mod[0x10]; 5643 u8 op_mod[0x10];
5644 5644
5645 u8 num_psv[0x4]; 5645 u8 num_psv[0x4];
5646 u8 reserved_2[0x4]; 5646 u8 reserved_at_44[0x4];
5647 u8 pd[0x18]; 5647 u8 pd[0x18];
5648 5648
5649 u8 reserved_3[0x20]; 5649 u8 reserved_at_60[0x20];
5650}; 5650};
5651 5651
5652struct mlx5_ifc_create_mkey_out_bits { 5652struct mlx5_ifc_create_mkey_out_bits {
5653 u8 status[0x8]; 5653 u8 status[0x8];
5654 u8 reserved_0[0x18]; 5654 u8 reserved_at_8[0x18];
5655 5655
5656 u8 syndrome[0x20]; 5656 u8 syndrome[0x20];
5657 5657
5658 u8 reserved_1[0x8]; 5658 u8 reserved_at_40[0x8];
5659 u8 mkey_index[0x18]; 5659 u8 mkey_index[0x18];
5660 5660
5661 u8 reserved_2[0x20]; 5661 u8 reserved_at_60[0x20];
5662}; 5662};
5663 5663
5664struct mlx5_ifc_create_mkey_in_bits { 5664struct mlx5_ifc_create_mkey_in_bits {
5665 u8 opcode[0x10]; 5665 u8 opcode[0x10];
5666 u8 reserved_0[0x10]; 5666 u8 reserved_at_10[0x10];
5667 5667
5668 u8 reserved_1[0x10]; 5668 u8 reserved_at_20[0x10];
5669 u8 op_mod[0x10]; 5669 u8 op_mod[0x10];
5670 5670
5671 u8 reserved_2[0x20]; 5671 u8 reserved_at_40[0x20];
5672 5672
5673 u8 pg_access[0x1]; 5673 u8 pg_access[0x1];
5674 u8 reserved_3[0x1f]; 5674 u8 reserved_at_61[0x1f];
5675 5675
5676 struct mlx5_ifc_mkc_bits memory_key_mkey_entry; 5676 struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
5677 5677
5678 u8 reserved_4[0x80]; 5678 u8 reserved_at_280[0x80];
5679 5679
5680 u8 translations_octword_actual_size[0x20]; 5680 u8 translations_octword_actual_size[0x20];
5681 5681
5682 u8 reserved_5[0x560]; 5682 u8 reserved_at_320[0x560];
5683 5683
5684 u8 klm_pas_mtt[0][0x20]; 5684 u8 klm_pas_mtt[0][0x20];
5685}; 5685};
5686 5686
5687struct mlx5_ifc_create_flow_table_out_bits { 5687struct mlx5_ifc_create_flow_table_out_bits {
5688 u8 status[0x8]; 5688 u8 status[0x8];
5689 u8 reserved_0[0x18]; 5689 u8 reserved_at_8[0x18];
5690 5690
5691 u8 syndrome[0x20]; 5691 u8 syndrome[0x20];
5692 5692
5693 u8 reserved_1[0x8]; 5693 u8 reserved_at_40[0x8];
5694 u8 table_id[0x18]; 5694 u8 table_id[0x18];
5695 5695
5696 u8 reserved_2[0x20]; 5696 u8 reserved_at_60[0x20];
5697}; 5697};
5698 5698
5699struct mlx5_ifc_create_flow_table_in_bits { 5699struct mlx5_ifc_create_flow_table_in_bits {
5700 u8 opcode[0x10]; 5700 u8 opcode[0x10];
5701 u8 reserved_0[0x10]; 5701 u8 reserved_at_10[0x10];
5702 5702
5703 u8 reserved_1[0x10]; 5703 u8 reserved_at_20[0x10];
5704 u8 op_mod[0x10]; 5704 u8 op_mod[0x10];
5705 5705
5706 u8 reserved_2[0x40]; 5706 u8 reserved_at_40[0x40];
5707 5707
5708 u8 table_type[0x8]; 5708 u8 table_type[0x8];
5709 u8 reserved_3[0x18]; 5709 u8 reserved_at_88[0x18];
5710 5710
5711 u8 reserved_4[0x20]; 5711 u8 reserved_at_a0[0x20];
5712 5712
5713 u8 reserved_5[0x4]; 5713 u8 reserved_at_c0[0x4];
5714 u8 table_miss_mode[0x4]; 5714 u8 table_miss_mode[0x4];
5715 u8 level[0x8]; 5715 u8 level[0x8];
5716 u8 reserved_6[0x8]; 5716 u8 reserved_at_d0[0x8];
5717 u8 log_size[0x8]; 5717 u8 log_size[0x8];
5718 5718
5719 u8 reserved_7[0x8]; 5719 u8 reserved_at_e0[0x8];
5720 u8 table_miss_id[0x18]; 5720 u8 table_miss_id[0x18];
5721 5721
5722 u8 reserved_8[0x100]; 5722 u8 reserved_at_100[0x100];
5723}; 5723};
5724 5724
5725struct mlx5_ifc_create_flow_group_out_bits { 5725struct mlx5_ifc_create_flow_group_out_bits {
5726 u8 status[0x8]; 5726 u8 status[0x8];
5727 u8 reserved_0[0x18]; 5727 u8 reserved_at_8[0x18];
5728 5728
5729 u8 syndrome[0x20]; 5729 u8 syndrome[0x20];
5730 5730
5731 u8 reserved_1[0x8]; 5731 u8 reserved_at_40[0x8];
5732 u8 group_id[0x18]; 5732 u8 group_id[0x18];
5733 5733
5734 u8 reserved_2[0x20]; 5734 u8 reserved_at_60[0x20];
5735}; 5735};
5736 5736
5737enum { 5737enum {
@@ -5742,134 +5742,134 @@ enum {
5742 5742
5743struct mlx5_ifc_create_flow_group_in_bits { 5743struct mlx5_ifc_create_flow_group_in_bits {
5744 u8 opcode[0x10]; 5744 u8 opcode[0x10];
5745 u8 reserved_0[0x10]; 5745 u8 reserved_at_10[0x10];
5746 5746
5747 u8 reserved_1[0x10]; 5747 u8 reserved_at_20[0x10];
5748 u8 op_mod[0x10]; 5748 u8 op_mod[0x10];
5749 5749
5750 u8 reserved_2[0x40]; 5750 u8 reserved_at_40[0x40];
5751 5751
5752 u8 table_type[0x8]; 5752 u8 table_type[0x8];
5753 u8 reserved_3[0x18]; 5753 u8 reserved_at_88[0x18];
5754 5754
5755 u8 reserved_4[0x8]; 5755 u8 reserved_at_a0[0x8];
5756 u8 table_id[0x18]; 5756 u8 table_id[0x18];
5757 5757
5758 u8 reserved_5[0x20]; 5758 u8 reserved_at_c0[0x20];
5759 5759
5760 u8 start_flow_index[0x20]; 5760 u8 start_flow_index[0x20];
5761 5761
5762 u8 reserved_6[0x20]; 5762 u8 reserved_at_100[0x20];
5763 5763
5764 u8 end_flow_index[0x20]; 5764 u8 end_flow_index[0x20];
5765 5765
5766 u8 reserved_7[0xa0]; 5766 u8 reserved_at_140[0xa0];
5767 5767
5768 u8 reserved_8[0x18]; 5768 u8 reserved_at_1e0[0x18];
5769 u8 match_criteria_enable[0x8]; 5769 u8 match_criteria_enable[0x8];
5770 5770
5771 struct mlx5_ifc_fte_match_param_bits match_criteria; 5771 struct mlx5_ifc_fte_match_param_bits match_criteria;
5772 5772
5773 u8 reserved_9[0xe00]; 5773 u8 reserved_at_1200[0xe00];
5774}; 5774};
5775 5775
5776struct mlx5_ifc_create_eq_out_bits { 5776struct mlx5_ifc_create_eq_out_bits {
5777 u8 status[0x8]; 5777 u8 status[0x8];
5778 u8 reserved_0[0x18]; 5778 u8 reserved_at_8[0x18];
5779 5779
5780 u8 syndrome[0x20]; 5780 u8 syndrome[0x20];
5781 5781
5782 u8 reserved_1[0x18]; 5782 u8 reserved_at_40[0x18];
5783 u8 eq_number[0x8]; 5783 u8 eq_number[0x8];
5784 5784
5785 u8 reserved_2[0x20]; 5785 u8 reserved_at_60[0x20];
5786}; 5786};
5787 5787
5788struct mlx5_ifc_create_eq_in_bits { 5788struct mlx5_ifc_create_eq_in_bits {
5789 u8 opcode[0x10]; 5789 u8 opcode[0x10];
5790 u8 reserved_0[0x10]; 5790 u8 reserved_at_10[0x10];
5791 5791
5792 u8 reserved_1[0x10]; 5792 u8 reserved_at_20[0x10];
5793 u8 op_mod[0x10]; 5793 u8 op_mod[0x10];
5794 5794
5795 u8 reserved_2[0x40]; 5795 u8 reserved_at_40[0x40];
5796 5796
5797 struct mlx5_ifc_eqc_bits eq_context_entry; 5797 struct mlx5_ifc_eqc_bits eq_context_entry;
5798 5798
5799 u8 reserved_3[0x40]; 5799 u8 reserved_at_280[0x40];
5800 5800
5801 u8 event_bitmask[0x40]; 5801 u8 event_bitmask[0x40];
5802 5802
5803 u8 reserved_4[0x580]; 5803 u8 reserved_at_300[0x580];
5804 5804
5805 u8 pas[0][0x40]; 5805 u8 pas[0][0x40];
5806}; 5806};
5807 5807
5808struct mlx5_ifc_create_dct_out_bits { 5808struct mlx5_ifc_create_dct_out_bits {
5809 u8 status[0x8]; 5809 u8 status[0x8];
5810 u8 reserved_0[0x18]; 5810 u8 reserved_at_8[0x18];
5811 5811
5812 u8 syndrome[0x20]; 5812 u8 syndrome[0x20];
5813 5813
5814 u8 reserved_1[0x8]; 5814 u8 reserved_at_40[0x8];
5815 u8 dctn[0x18]; 5815 u8 dctn[0x18];
5816 5816
5817 u8 reserved_2[0x20]; 5817 u8 reserved_at_60[0x20];
5818}; 5818};
5819 5819
5820struct mlx5_ifc_create_dct_in_bits { 5820struct mlx5_ifc_create_dct_in_bits {
5821 u8 opcode[0x10]; 5821 u8 opcode[0x10];
5822 u8 reserved_0[0x10]; 5822 u8 reserved_at_10[0x10];
5823 5823
5824 u8 reserved_1[0x10]; 5824 u8 reserved_at_20[0x10];
5825 u8 op_mod[0x10]; 5825 u8 op_mod[0x10];
5826 5826
5827 u8 reserved_2[0x40]; 5827 u8 reserved_at_40[0x40];
5828 5828
5829 struct mlx5_ifc_dctc_bits dct_context_entry; 5829 struct mlx5_ifc_dctc_bits dct_context_entry;
5830 5830
5831 u8 reserved_3[0x180]; 5831 u8 reserved_at_280[0x180];
5832}; 5832};
5833 5833
5834struct mlx5_ifc_create_cq_out_bits { 5834struct mlx5_ifc_create_cq_out_bits {
5835 u8 status[0x8]; 5835 u8 status[0x8];
5836 u8 reserved_0[0x18]; 5836 u8 reserved_at_8[0x18];
5837 5837
5838 u8 syndrome[0x20]; 5838 u8 syndrome[0x20];
5839 5839
5840 u8 reserved_1[0x8]; 5840 u8 reserved_at_40[0x8];
5841 u8 cqn[0x18]; 5841 u8 cqn[0x18];
5842 5842
5843 u8 reserved_2[0x20]; 5843 u8 reserved_at_60[0x20];
5844}; 5844};
5845 5845
5846struct mlx5_ifc_create_cq_in_bits { 5846struct mlx5_ifc_create_cq_in_bits {
5847 u8 opcode[0x10]; 5847 u8 opcode[0x10];
5848 u8 reserved_0[0x10]; 5848 u8 reserved_at_10[0x10];
5849 5849
5850 u8 reserved_1[0x10]; 5850 u8 reserved_at_20[0x10];
5851 u8 op_mod[0x10]; 5851 u8 op_mod[0x10];
5852 5852
5853 u8 reserved_2[0x40]; 5853 u8 reserved_at_40[0x40];
5854 5854
5855 struct mlx5_ifc_cqc_bits cq_context; 5855 struct mlx5_ifc_cqc_bits cq_context;
5856 5856
5857 u8 reserved_3[0x600]; 5857 u8 reserved_at_280[0x600];
5858 5858
5859 u8 pas[0][0x40]; 5859 u8 pas[0][0x40];
5860}; 5860};
5861 5861
5862struct mlx5_ifc_config_int_moderation_out_bits { 5862struct mlx5_ifc_config_int_moderation_out_bits {
5863 u8 status[0x8]; 5863 u8 status[0x8];
5864 u8 reserved_0[0x18]; 5864 u8 reserved_at_8[0x18];
5865 5865
5866 u8 syndrome[0x20]; 5866 u8 syndrome[0x20];
5867 5867
5868 u8 reserved_1[0x4]; 5868 u8 reserved_at_40[0x4];
5869 u8 min_delay[0xc]; 5869 u8 min_delay[0xc];
5870 u8 int_vector[0x10]; 5870 u8 int_vector[0x10];
5871 5871
5872 u8 reserved_2[0x20]; 5872 u8 reserved_at_60[0x20];
5873}; 5873};
5874 5874
5875enum { 5875enum {
@@ -5879,49 +5879,49 @@ enum {
5879 5879
5880struct mlx5_ifc_config_int_moderation_in_bits { 5880struct mlx5_ifc_config_int_moderation_in_bits {
5881 u8 opcode[0x10]; 5881 u8 opcode[0x10];
5882 u8 reserved_0[0x10]; 5882 u8 reserved_at_10[0x10];
5883 5883
5884 u8 reserved_1[0x10]; 5884 u8 reserved_at_20[0x10];
5885 u8 op_mod[0x10]; 5885 u8 op_mod[0x10];
5886 5886
5887 u8 reserved_2[0x4]; 5887 u8 reserved_at_40[0x4];
5888 u8 min_delay[0xc]; 5888 u8 min_delay[0xc];
5889 u8 int_vector[0x10]; 5889 u8 int_vector[0x10];
5890 5890
5891 u8 reserved_3[0x20]; 5891 u8 reserved_at_60[0x20];
5892}; 5892};
5893 5893
5894struct mlx5_ifc_attach_to_mcg_out_bits { 5894struct mlx5_ifc_attach_to_mcg_out_bits {
5895 u8 status[0x8]; 5895 u8 status[0x8];
5896 u8 reserved_0[0x18]; 5896 u8 reserved_at_8[0x18];
5897 5897
5898 u8 syndrome[0x20]; 5898 u8 syndrome[0x20];
5899 5899
5900 u8 reserved_1[0x40]; 5900 u8 reserved_at_40[0x40];
5901}; 5901};
5902 5902
5903struct mlx5_ifc_attach_to_mcg_in_bits { 5903struct mlx5_ifc_attach_to_mcg_in_bits {
5904 u8 opcode[0x10]; 5904 u8 opcode[0x10];
5905 u8 reserved_0[0x10]; 5905 u8 reserved_at_10[0x10];
5906 5906
5907 u8 reserved_1[0x10]; 5907 u8 reserved_at_20[0x10];
5908 u8 op_mod[0x10]; 5908 u8 op_mod[0x10];
5909 5909
5910 u8 reserved_2[0x8]; 5910 u8 reserved_at_40[0x8];
5911 u8 qpn[0x18]; 5911 u8 qpn[0x18];
5912 5912
5913 u8 reserved_3[0x20]; 5913 u8 reserved_at_60[0x20];
5914 5914
5915 u8 multicast_gid[16][0x8]; 5915 u8 multicast_gid[16][0x8];
5916}; 5916};
5917 5917
5918struct mlx5_ifc_arm_xrc_srq_out_bits { 5918struct mlx5_ifc_arm_xrc_srq_out_bits {
5919 u8 status[0x8]; 5919 u8 status[0x8];
5920 u8 reserved_0[0x18]; 5920 u8 reserved_at_8[0x18];
5921 5921
5922 u8 syndrome[0x20]; 5922 u8 syndrome[0x20];
5923 5923
5924 u8 reserved_1[0x40]; 5924 u8 reserved_at_40[0x40];
5925}; 5925};
5926 5926
5927enum { 5927enum {
@@ -5930,25 +5930,25 @@ enum {
5930 5930
5931struct mlx5_ifc_arm_xrc_srq_in_bits { 5931struct mlx5_ifc_arm_xrc_srq_in_bits {
5932 u8 opcode[0x10]; 5932 u8 opcode[0x10];
5933 u8 reserved_0[0x10]; 5933 u8 reserved_at_10[0x10];
5934 5934
5935 u8 reserved_1[0x10]; 5935 u8 reserved_at_20[0x10];
5936 u8 op_mod[0x10]; 5936 u8 op_mod[0x10];
5937 5937
5938 u8 reserved_2[0x8]; 5938 u8 reserved_at_40[0x8];
5939 u8 xrc_srqn[0x18]; 5939 u8 xrc_srqn[0x18];
5940 5940
5941 u8 reserved_3[0x10]; 5941 u8 reserved_at_60[0x10];
5942 u8 lwm[0x10]; 5942 u8 lwm[0x10];
5943}; 5943};
5944 5944
5945struct mlx5_ifc_arm_rq_out_bits { 5945struct mlx5_ifc_arm_rq_out_bits {
5946 u8 status[0x8]; 5946 u8 status[0x8];
5947 u8 reserved_0[0x18]; 5947 u8 reserved_at_8[0x18];
5948 5948
5949 u8 syndrome[0x20]; 5949 u8 syndrome[0x20];
5950 5950
5951 u8 reserved_1[0x40]; 5951 u8 reserved_at_40[0x40];
5952}; 5952};
5953 5953
5954enum { 5954enum {
@@ -5957,179 +5957,179 @@ enum {
5957 5957
5958struct mlx5_ifc_arm_rq_in_bits { 5958struct mlx5_ifc_arm_rq_in_bits {
5959 u8 opcode[0x10]; 5959 u8 opcode[0x10];
5960 u8 reserved_0[0x10]; 5960 u8 reserved_at_10[0x10];
5961 5961
5962 u8 reserved_1[0x10]; 5962 u8 reserved_at_20[0x10];
5963 u8 op_mod[0x10]; 5963 u8 op_mod[0x10];
5964 5964
5965 u8 reserved_2[0x8]; 5965 u8 reserved_at_40[0x8];
5966 u8 srq_number[0x18]; 5966 u8 srq_number[0x18];
5967 5967
5968 u8 reserved_3[0x10]; 5968 u8 reserved_at_60[0x10];
5969 u8 lwm[0x10]; 5969 u8 lwm[0x10];
5970}; 5970};
5971 5971
5972struct mlx5_ifc_arm_dct_out_bits { 5972struct mlx5_ifc_arm_dct_out_bits {
5973 u8 status[0x8]; 5973 u8 status[0x8];
5974 u8 reserved_0[0x18]; 5974 u8 reserved_at_8[0x18];
5975 5975
5976 u8 syndrome[0x20]; 5976 u8 syndrome[0x20];
5977 5977
5978 u8 reserved_1[0x40]; 5978 u8 reserved_at_40[0x40];
5979}; 5979};
5980 5980
5981struct mlx5_ifc_arm_dct_in_bits { 5981struct mlx5_ifc_arm_dct_in_bits {
5982 u8 opcode[0x10]; 5982 u8 opcode[0x10];
5983 u8 reserved_0[0x10]; 5983 u8 reserved_at_10[0x10];
5984 5984
5985 u8 reserved_1[0x10]; 5985 u8 reserved_at_20[0x10];
5986 u8 op_mod[0x10]; 5986 u8 op_mod[0x10];
5987 5987
5988 u8 reserved_2[0x8]; 5988 u8 reserved_at_40[0x8];
5989 u8 dct_number[0x18]; 5989 u8 dct_number[0x18];
5990 5990
5991 u8 reserved_3[0x20]; 5991 u8 reserved_at_60[0x20];
5992}; 5992};
5993 5993
5994struct mlx5_ifc_alloc_xrcd_out_bits { 5994struct mlx5_ifc_alloc_xrcd_out_bits {
5995 u8 status[0x8]; 5995 u8 status[0x8];
5996 u8 reserved_0[0x18]; 5996 u8 reserved_at_8[0x18];
5997 5997
5998 u8 syndrome[0x20]; 5998 u8 syndrome[0x20];
5999 5999
6000 u8 reserved_1[0x8]; 6000 u8 reserved_at_40[0x8];
6001 u8 xrcd[0x18]; 6001 u8 xrcd[0x18];
6002 6002
6003 u8 reserved_2[0x20]; 6003 u8 reserved_at_60[0x20];
6004}; 6004};
6005 6005
6006struct mlx5_ifc_alloc_xrcd_in_bits { 6006struct mlx5_ifc_alloc_xrcd_in_bits {
6007 u8 opcode[0x10]; 6007 u8 opcode[0x10];
6008 u8 reserved_0[0x10]; 6008 u8 reserved_at_10[0x10];
6009 6009
6010 u8 reserved_1[0x10]; 6010 u8 reserved_at_20[0x10];
6011 u8 op_mod[0x10]; 6011 u8 op_mod[0x10];
6012 6012
6013 u8 reserved_2[0x40]; 6013 u8 reserved_at_40[0x40];
6014}; 6014};
6015 6015
6016struct mlx5_ifc_alloc_uar_out_bits { 6016struct mlx5_ifc_alloc_uar_out_bits {
6017 u8 status[0x8]; 6017 u8 status[0x8];
6018 u8 reserved_0[0x18]; 6018 u8 reserved_at_8[0x18];
6019 6019
6020 u8 syndrome[0x20]; 6020 u8 syndrome[0x20];
6021 6021
6022 u8 reserved_1[0x8]; 6022 u8 reserved_at_40[0x8];
6023 u8 uar[0x18]; 6023 u8 uar[0x18];
6024 6024
6025 u8 reserved_2[0x20]; 6025 u8 reserved_at_60[0x20];
6026}; 6026};
6027 6027
6028struct mlx5_ifc_alloc_uar_in_bits { 6028struct mlx5_ifc_alloc_uar_in_bits {
6029 u8 opcode[0x10]; 6029 u8 opcode[0x10];
6030 u8 reserved_0[0x10]; 6030 u8 reserved_at_10[0x10];
6031 6031
6032 u8 reserved_1[0x10]; 6032 u8 reserved_at_20[0x10];
6033 u8 op_mod[0x10]; 6033 u8 op_mod[0x10];
6034 6034
6035 u8 reserved_2[0x40]; 6035 u8 reserved_at_40[0x40];
6036}; 6036};
6037 6037
6038struct mlx5_ifc_alloc_transport_domain_out_bits { 6038struct mlx5_ifc_alloc_transport_domain_out_bits {
6039 u8 status[0x8]; 6039 u8 status[0x8];
6040 u8 reserved_0[0x18]; 6040 u8 reserved_at_8[0x18];
6041 6041
6042 u8 syndrome[0x20]; 6042 u8 syndrome[0x20];
6043 6043
6044 u8 reserved_1[0x8]; 6044 u8 reserved_at_40[0x8];
6045 u8 transport_domain[0x18]; 6045 u8 transport_domain[0x18];
6046 6046
6047 u8 reserved_2[0x20]; 6047 u8 reserved_at_60[0x20];
6048}; 6048};
6049 6049
6050struct mlx5_ifc_alloc_transport_domain_in_bits { 6050struct mlx5_ifc_alloc_transport_domain_in_bits {
6051 u8 opcode[0x10]; 6051 u8 opcode[0x10];
6052 u8 reserved_0[0x10]; 6052 u8 reserved_at_10[0x10];
6053 6053
6054 u8 reserved_1[0x10]; 6054 u8 reserved_at_20[0x10];
6055 u8 op_mod[0x10]; 6055 u8 op_mod[0x10];
6056 6056
6057 u8 reserved_2[0x40]; 6057 u8 reserved_at_40[0x40];
6058}; 6058};
6059 6059
6060struct mlx5_ifc_alloc_q_counter_out_bits { 6060struct mlx5_ifc_alloc_q_counter_out_bits {
6061 u8 status[0x8]; 6061 u8 status[0x8];
6062 u8 reserved_0[0x18]; 6062 u8 reserved_at_8[0x18];
6063 6063
6064 u8 syndrome[0x20]; 6064 u8 syndrome[0x20];
6065 6065
6066 u8 reserved_1[0x18]; 6066 u8 reserved_at_40[0x18];
6067 u8 counter_set_id[0x8]; 6067 u8 counter_set_id[0x8];
6068 6068
6069 u8 reserved_2[0x20]; 6069 u8 reserved_at_60[0x20];
6070}; 6070};
6071 6071
6072struct mlx5_ifc_alloc_q_counter_in_bits { 6072struct mlx5_ifc_alloc_q_counter_in_bits {
6073 u8 opcode[0x10]; 6073 u8 opcode[0x10];
6074 u8 reserved_0[0x10]; 6074 u8 reserved_at_10[0x10];
6075 6075
6076 u8 reserved_1[0x10]; 6076 u8 reserved_at_20[0x10];
6077 u8 op_mod[0x10]; 6077 u8 op_mod[0x10];
6078 6078
6079 u8 reserved_2[0x40]; 6079 u8 reserved_at_40[0x40];
6080}; 6080};
6081 6081
6082struct mlx5_ifc_alloc_pd_out_bits { 6082struct mlx5_ifc_alloc_pd_out_bits {
6083 u8 status[0x8]; 6083 u8 status[0x8];
6084 u8 reserved_0[0x18]; 6084 u8 reserved_at_8[0x18];
6085 6085
6086 u8 syndrome[0x20]; 6086 u8 syndrome[0x20];
6087 6087
6088 u8 reserved_1[0x8]; 6088 u8 reserved_at_40[0x8];
6089 u8 pd[0x18]; 6089 u8 pd[0x18];
6090 6090
6091 u8 reserved_2[0x20]; 6091 u8 reserved_at_60[0x20];
6092}; 6092};
6093 6093
6094struct mlx5_ifc_alloc_pd_in_bits { 6094struct mlx5_ifc_alloc_pd_in_bits {
6095 u8 opcode[0x10]; 6095 u8 opcode[0x10];
6096 u8 reserved_0[0x10]; 6096 u8 reserved_at_10[0x10];
6097 6097
6098 u8 reserved_1[0x10]; 6098 u8 reserved_at_20[0x10];
6099 u8 op_mod[0x10]; 6099 u8 op_mod[0x10];
6100 6100
6101 u8 reserved_2[0x40]; 6101 u8 reserved_at_40[0x40];
6102}; 6102};
6103 6103
6104struct mlx5_ifc_add_vxlan_udp_dport_out_bits { 6104struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
6105 u8 status[0x8]; 6105 u8 status[0x8];
6106 u8 reserved_0[0x18]; 6106 u8 reserved_at_8[0x18];
6107 6107
6108 u8 syndrome[0x20]; 6108 u8 syndrome[0x20];
6109 6109
6110 u8 reserved_1[0x40]; 6110 u8 reserved_at_40[0x40];
6111}; 6111};
6112 6112
6113struct mlx5_ifc_add_vxlan_udp_dport_in_bits { 6113struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
6114 u8 opcode[0x10]; 6114 u8 opcode[0x10];
6115 u8 reserved_0[0x10]; 6115 u8 reserved_at_10[0x10];
6116 6116
6117 u8 reserved_1[0x10]; 6117 u8 reserved_at_20[0x10];
6118 u8 op_mod[0x10]; 6118 u8 op_mod[0x10];
6119 6119
6120 u8 reserved_2[0x20]; 6120 u8 reserved_at_40[0x20];
6121 6121
6122 u8 reserved_3[0x10]; 6122 u8 reserved_at_60[0x10];
6123 u8 vxlan_udp_port[0x10]; 6123 u8 vxlan_udp_port[0x10];
6124}; 6124};
6125 6125
6126struct mlx5_ifc_access_register_out_bits { 6126struct mlx5_ifc_access_register_out_bits {
6127 u8 status[0x8]; 6127 u8 status[0x8];
6128 u8 reserved_0[0x18]; 6128 u8 reserved_at_8[0x18];
6129 6129
6130 u8 syndrome[0x20]; 6130 u8 syndrome[0x20];
6131 6131
6132 u8 reserved_1[0x40]; 6132 u8 reserved_at_40[0x40];
6133 6133
6134 u8 register_data[0][0x20]; 6134 u8 register_data[0][0x20];
6135}; 6135};
@@ -6141,12 +6141,12 @@ enum {
6141 6141
6142struct mlx5_ifc_access_register_in_bits { 6142struct mlx5_ifc_access_register_in_bits {
6143 u8 opcode[0x10]; 6143 u8 opcode[0x10];
6144 u8 reserved_0[0x10]; 6144 u8 reserved_at_10[0x10];
6145 6145
6146 u8 reserved_1[0x10]; 6146 u8 reserved_at_20[0x10];
6147 u8 op_mod[0x10]; 6147 u8 op_mod[0x10];
6148 6148
6149 u8 reserved_2[0x10]; 6149 u8 reserved_at_40[0x10];
6150 u8 register_id[0x10]; 6150 u8 register_id[0x10];
6151 6151
6152 u8 argument[0x20]; 6152 u8 argument[0x20];
@@ -6159,24 +6159,24 @@ struct mlx5_ifc_sltp_reg_bits {
6159 u8 version[0x4]; 6159 u8 version[0x4];
6160 u8 local_port[0x8]; 6160 u8 local_port[0x8];
6161 u8 pnat[0x2]; 6161 u8 pnat[0x2];
6162 u8 reserved_0[0x2]; 6162 u8 reserved_at_12[0x2];
6163 u8 lane[0x4]; 6163 u8 lane[0x4];
6164 u8 reserved_1[0x8]; 6164 u8 reserved_at_18[0x8];
6165 6165
6166 u8 reserved_2[0x20]; 6166 u8 reserved_at_20[0x20];
6167 6167
6168 u8 reserved_3[0x7]; 6168 u8 reserved_at_40[0x7];
6169 u8 polarity[0x1]; 6169 u8 polarity[0x1];
6170 u8 ob_tap0[0x8]; 6170 u8 ob_tap0[0x8];
6171 u8 ob_tap1[0x8]; 6171 u8 ob_tap1[0x8];
6172 u8 ob_tap2[0x8]; 6172 u8 ob_tap2[0x8];
6173 6173
6174 u8 reserved_4[0xc]; 6174 u8 reserved_at_60[0xc];
6175 u8 ob_preemp_mode[0x4]; 6175 u8 ob_preemp_mode[0x4];
6176 u8 ob_reg[0x8]; 6176 u8 ob_reg[0x8];
6177 u8 ob_bias[0x8]; 6177 u8 ob_bias[0x8];
6178 6178
6179 u8 reserved_5[0x20]; 6179 u8 reserved_at_80[0x20];
6180}; 6180};
6181 6181
6182struct mlx5_ifc_slrg_reg_bits { 6182struct mlx5_ifc_slrg_reg_bits {
@@ -6184,36 +6184,36 @@ struct mlx5_ifc_slrg_reg_bits {
6184 u8 version[0x4]; 6184 u8 version[0x4];
6185 u8 local_port[0x8]; 6185 u8 local_port[0x8];
6186 u8 pnat[0x2]; 6186 u8 pnat[0x2];
6187 u8 reserved_0[0x2]; 6187 u8 reserved_at_12[0x2];
6188 u8 lane[0x4]; 6188 u8 lane[0x4];
6189 u8 reserved_1[0x8]; 6189 u8 reserved_at_18[0x8];
6190 6190
6191 u8 time_to_link_up[0x10]; 6191 u8 time_to_link_up[0x10];
6192 u8 reserved_2[0xc]; 6192 u8 reserved_at_30[0xc];
6193 u8 grade_lane_speed[0x4]; 6193 u8 grade_lane_speed[0x4];
6194 6194
6195 u8 grade_version[0x8]; 6195 u8 grade_version[0x8];
6196 u8 grade[0x18]; 6196 u8 grade[0x18];
6197 6197
6198 u8 reserved_3[0x4]; 6198 u8 reserved_at_60[0x4];
6199 u8 height_grade_type[0x4]; 6199 u8 height_grade_type[0x4];
6200 u8 height_grade[0x18]; 6200 u8 height_grade[0x18];
6201 6201
6202 u8 height_dz[0x10]; 6202 u8 height_dz[0x10];
6203 u8 height_dv[0x10]; 6203 u8 height_dv[0x10];
6204 6204
6205 u8 reserved_4[0x10]; 6205 u8 reserved_at_a0[0x10];
6206 u8 height_sigma[0x10]; 6206 u8 height_sigma[0x10];
6207 6207
6208 u8 reserved_5[0x20]; 6208 u8 reserved_at_c0[0x20];
6209 6209
6210 u8 reserved_6[0x4]; 6210 u8 reserved_at_e0[0x4];
6211 u8 phase_grade_type[0x4]; 6211 u8 phase_grade_type[0x4];
6212 u8 phase_grade[0x18]; 6212 u8 phase_grade[0x18];
6213 6213
6214 u8 reserved_7[0x8]; 6214 u8 reserved_at_100[0x8];
6215 u8 phase_eo_pos[0x8]; 6215 u8 phase_eo_pos[0x8];
6216 u8 reserved_8[0x8]; 6216 u8 reserved_at_110[0x8];
6217 u8 phase_eo_neg[0x8]; 6217 u8 phase_eo_neg[0x8];
6218 6218
6219 u8 ffe_set_tested[0x10]; 6219 u8 ffe_set_tested[0x10];
@@ -6221,70 +6221,70 @@ struct mlx5_ifc_slrg_reg_bits {
6221}; 6221};
6222 6222
6223struct mlx5_ifc_pvlc_reg_bits { 6223struct mlx5_ifc_pvlc_reg_bits {
6224 u8 reserved_0[0x8]; 6224 u8 reserved_at_0[0x8];
6225 u8 local_port[0x8]; 6225 u8 local_port[0x8];
6226 u8 reserved_1[0x10]; 6226 u8 reserved_at_10[0x10];
6227 6227
6228 u8 reserved_2[0x1c]; 6228 u8 reserved_at_20[0x1c];
6229 u8 vl_hw_cap[0x4]; 6229 u8 vl_hw_cap[0x4];
6230 6230
6231 u8 reserved_3[0x1c]; 6231 u8 reserved_at_40[0x1c];
6232 u8 vl_admin[0x4]; 6232 u8 vl_admin[0x4];
6233 6233
6234 u8 reserved_4[0x1c]; 6234 u8 reserved_at_60[0x1c];
6235 u8 vl_operational[0x4]; 6235 u8 vl_operational[0x4];
6236}; 6236};
6237 6237
6238struct mlx5_ifc_pude_reg_bits { 6238struct mlx5_ifc_pude_reg_bits {
6239 u8 swid[0x8]; 6239 u8 swid[0x8];
6240 u8 local_port[0x8]; 6240 u8 local_port[0x8];
6241 u8 reserved_0[0x4]; 6241 u8 reserved_at_10[0x4];
6242 u8 admin_status[0x4]; 6242 u8 admin_status[0x4];
6243 u8 reserved_1[0x4]; 6243 u8 reserved_at_18[0x4];
6244 u8 oper_status[0x4]; 6244 u8 oper_status[0x4];
6245 6245
6246 u8 reserved_2[0x60]; 6246 u8 reserved_at_20[0x60];
6247}; 6247};
6248 6248
6249struct mlx5_ifc_ptys_reg_bits { 6249struct mlx5_ifc_ptys_reg_bits {
6250 u8 reserved_0[0x8]; 6250 u8 reserved_at_0[0x8];
6251 u8 local_port[0x8]; 6251 u8 local_port[0x8];
6252 u8 reserved_1[0xd]; 6252 u8 reserved_at_10[0xd];
6253 u8 proto_mask[0x3]; 6253 u8 proto_mask[0x3];
6254 6254
6255 u8 reserved_2[0x40]; 6255 u8 reserved_at_20[0x40];
6256 6256
6257 u8 eth_proto_capability[0x20]; 6257 u8 eth_proto_capability[0x20];
6258 6258
6259 u8 ib_link_width_capability[0x10]; 6259 u8 ib_link_width_capability[0x10];
6260 u8 ib_proto_capability[0x10]; 6260 u8 ib_proto_capability[0x10];
6261 6261
6262 u8 reserved_3[0x20]; 6262 u8 reserved_at_a0[0x20];
6263 6263
6264 u8 eth_proto_admin[0x20]; 6264 u8 eth_proto_admin[0x20];
6265 6265
6266 u8 ib_link_width_admin[0x10]; 6266 u8 ib_link_width_admin[0x10];
6267 u8 ib_proto_admin[0x10]; 6267 u8 ib_proto_admin[0x10];
6268 6268
6269 u8 reserved_4[0x20]; 6269 u8 reserved_at_100[0x20];
6270 6270
6271 u8 eth_proto_oper[0x20]; 6271 u8 eth_proto_oper[0x20];
6272 6272
6273 u8 ib_link_width_oper[0x10]; 6273 u8 ib_link_width_oper[0x10];
6274 u8 ib_proto_oper[0x10]; 6274 u8 ib_proto_oper[0x10];
6275 6275
6276 u8 reserved_5[0x20]; 6276 u8 reserved_at_160[0x20];
6277 6277
6278 u8 eth_proto_lp_advertise[0x20]; 6278 u8 eth_proto_lp_advertise[0x20];
6279 6279
6280 u8 reserved_6[0x60]; 6280 u8 reserved_at_1a0[0x60];
6281}; 6281};
6282 6282
6283struct mlx5_ifc_ptas_reg_bits { 6283struct mlx5_ifc_ptas_reg_bits {
6284 u8 reserved_0[0x20]; 6284 u8 reserved_at_0[0x20];
6285 6285
6286 u8 algorithm_options[0x10]; 6286 u8 algorithm_options[0x10];
6287 u8 reserved_1[0x4]; 6287 u8 reserved_at_30[0x4];
6288 u8 repetitions_mode[0x4]; 6288 u8 repetitions_mode[0x4];
6289 u8 num_of_repetitions[0x8]; 6289 u8 num_of_repetitions[0x8];
6290 6290
@@ -6310,13 +6310,13 @@ struct mlx5_ifc_ptas_reg_bits {
6310 u8 ndeo_error_threshold[0x10]; 6310 u8 ndeo_error_threshold[0x10];
6311 6311
6312 u8 mixer_offset_step_size[0x10]; 6312 u8 mixer_offset_step_size[0x10];
6313 u8 reserved_2[0x8]; 6313 u8 reserved_at_110[0x8];
6314 u8 mix90_phase_for_voltage_bath[0x8]; 6314 u8 mix90_phase_for_voltage_bath[0x8];
6315 6315
6316 u8 mixer_offset_start[0x10]; 6316 u8 mixer_offset_start[0x10];
6317 u8 mixer_offset_end[0x10]; 6317 u8 mixer_offset_end[0x10];
6318 6318
6319 u8 reserved_3[0x15]; 6319 u8 reserved_at_140[0x15];
6320 u8 ber_test_time[0xb]; 6320 u8 ber_test_time[0xb];
6321}; 6321};
6322 6322
@@ -6324,154 +6324,154 @@ struct mlx5_ifc_pspa_reg_bits {
6324 u8 swid[0x8]; 6324 u8 swid[0x8];
6325 u8 local_port[0x8]; 6325 u8 local_port[0x8];
6326 u8 sub_port[0x8]; 6326 u8 sub_port[0x8];
6327 u8 reserved_0[0x8]; 6327 u8 reserved_at_18[0x8];
6328 6328
6329 u8 reserved_1[0x20]; 6329 u8 reserved_at_20[0x20];
6330}; 6330};
6331 6331
6332struct mlx5_ifc_pqdr_reg_bits { 6332struct mlx5_ifc_pqdr_reg_bits {
6333 u8 reserved_0[0x8]; 6333 u8 reserved_at_0[0x8];
6334 u8 local_port[0x8]; 6334 u8 local_port[0x8];
6335 u8 reserved_1[0x5]; 6335 u8 reserved_at_10[0x5];
6336 u8 prio[0x3]; 6336 u8 prio[0x3];
6337 u8 reserved_2[0x6]; 6337 u8 reserved_at_18[0x6];
6338 u8 mode[0x2]; 6338 u8 mode[0x2];
6339 6339
6340 u8 reserved_3[0x20]; 6340 u8 reserved_at_20[0x20];
6341 6341
6342 u8 reserved_4[0x10]; 6342 u8 reserved_at_40[0x10];
6343 u8 min_threshold[0x10]; 6343 u8 min_threshold[0x10];
6344 6344
6345 u8 reserved_5[0x10]; 6345 u8 reserved_at_60[0x10];
6346 u8 max_threshold[0x10]; 6346 u8 max_threshold[0x10];
6347 6347
6348 u8 reserved_6[0x10]; 6348 u8 reserved_at_80[0x10];
6349 u8 mark_probability_denominator[0x10]; 6349 u8 mark_probability_denominator[0x10];
6350 6350
6351 u8 reserved_7[0x60]; 6351 u8 reserved_at_a0[0x60];
6352}; 6352};
6353 6353
6354struct mlx5_ifc_ppsc_reg_bits { 6354struct mlx5_ifc_ppsc_reg_bits {
6355 u8 reserved_0[0x8]; 6355 u8 reserved_at_0[0x8];
6356 u8 local_port[0x8]; 6356 u8 local_port[0x8];
6357 u8 reserved_1[0x10]; 6357 u8 reserved_at_10[0x10];
6358 6358
6359 u8 reserved_2[0x60]; 6359 u8 reserved_at_20[0x60];
6360 6360
6361 u8 reserved_3[0x1c]; 6361 u8 reserved_at_80[0x1c];
6362 u8 wrps_admin[0x4]; 6362 u8 wrps_admin[0x4];
6363 6363
6364 u8 reserved_4[0x1c]; 6364 u8 reserved_at_a0[0x1c];
6365 u8 wrps_status[0x4]; 6365 u8 wrps_status[0x4];
6366 6366
6367 u8 reserved_5[0x8]; 6367 u8 reserved_at_c0[0x8];
6368 u8 up_threshold[0x8]; 6368 u8 up_threshold[0x8];
6369 u8 reserved_6[0x8]; 6369 u8 reserved_at_d0[0x8];
6370 u8 down_threshold[0x8]; 6370 u8 down_threshold[0x8];
6371 6371
6372 u8 reserved_7[0x20]; 6372 u8 reserved_at_e0[0x20];
6373 6373
6374 u8 reserved_8[0x1c]; 6374 u8 reserved_at_100[0x1c];
6375 u8 srps_admin[0x4]; 6375 u8 srps_admin[0x4];
6376 6376
6377 u8 reserved_9[0x1c]; 6377 u8 reserved_at_120[0x1c];
6378 u8 srps_status[0x4]; 6378 u8 srps_status[0x4];
6379 6379
6380 u8 reserved_10[0x40]; 6380 u8 reserved_at_140[0x40];
6381}; 6381};
6382 6382
6383struct mlx5_ifc_pplr_reg_bits { 6383struct mlx5_ifc_pplr_reg_bits {
6384 u8 reserved_0[0x8]; 6384 u8 reserved_at_0[0x8];
6385 u8 local_port[0x8]; 6385 u8 local_port[0x8];
6386 u8 reserved_1[0x10]; 6386 u8 reserved_at_10[0x10];
6387 6387
6388 u8 reserved_2[0x8]; 6388 u8 reserved_at_20[0x8];
6389 u8 lb_cap[0x8]; 6389 u8 lb_cap[0x8];
6390 u8 reserved_3[0x8]; 6390 u8 reserved_at_30[0x8];
6391 u8 lb_en[0x8]; 6391 u8 lb_en[0x8];
6392}; 6392};
6393 6393
6394struct mlx5_ifc_pplm_reg_bits { 6394struct mlx5_ifc_pplm_reg_bits {
6395 u8 reserved_0[0x8]; 6395 u8 reserved_at_0[0x8];
6396 u8 local_port[0x8]; 6396 u8 local_port[0x8];
6397 u8 reserved_1[0x10]; 6397 u8 reserved_at_10[0x10];
6398 6398
6399 u8 reserved_2[0x20]; 6399 u8 reserved_at_20[0x20];
6400 6400
6401 u8 port_profile_mode[0x8]; 6401 u8 port_profile_mode[0x8];
6402 u8 static_port_profile[0x8]; 6402 u8 static_port_profile[0x8];
6403 u8 active_port_profile[0x8]; 6403 u8 active_port_profile[0x8];
6404 u8 reserved_3[0x8]; 6404 u8 reserved_at_58[0x8];
6405 6405
6406 u8 retransmission_active[0x8]; 6406 u8 retransmission_active[0x8];
6407 u8 fec_mode_active[0x18]; 6407 u8 fec_mode_active[0x18];
6408 6408
6409 u8 reserved_4[0x20]; 6409 u8 reserved_at_80[0x20];
6410}; 6410};
6411 6411
6412struct mlx5_ifc_ppcnt_reg_bits { 6412struct mlx5_ifc_ppcnt_reg_bits {
6413 u8 swid[0x8]; 6413 u8 swid[0x8];
6414 u8 local_port[0x8]; 6414 u8 local_port[0x8];
6415 u8 pnat[0x2]; 6415 u8 pnat[0x2];
6416 u8 reserved_0[0x8]; 6416 u8 reserved_at_12[0x8];
6417 u8 grp[0x6]; 6417 u8 grp[0x6];
6418 6418
6419 u8 clr[0x1]; 6419 u8 clr[0x1];
6420 u8 reserved_1[0x1c]; 6420 u8 reserved_at_21[0x1c];
6421 u8 prio_tc[0x3]; 6421 u8 prio_tc[0x3];
6422 6422
6423 union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; 6423 union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
6424}; 6424};
6425 6425
6426struct mlx5_ifc_ppad_reg_bits { 6426struct mlx5_ifc_ppad_reg_bits {
6427 u8 reserved_0[0x3]; 6427 u8 reserved_at_0[0x3];
6428 u8 single_mac[0x1]; 6428 u8 single_mac[0x1];
6429 u8 reserved_1[0x4]; 6429 u8 reserved_at_4[0x4];
6430 u8 local_port[0x8]; 6430 u8 local_port[0x8];
6431 u8 mac_47_32[0x10]; 6431 u8 mac_47_32[0x10];
6432 6432
6433 u8 mac_31_0[0x20]; 6433 u8 mac_31_0[0x20];
6434 6434
6435 u8 reserved_2[0x40]; 6435 u8 reserved_at_40[0x40];
6436}; 6436};
6437 6437
6438struct mlx5_ifc_pmtu_reg_bits { 6438struct mlx5_ifc_pmtu_reg_bits {
6439 u8 reserved_0[0x8]; 6439 u8 reserved_at_0[0x8];
6440 u8 local_port[0x8]; 6440 u8 local_port[0x8];
6441 u8 reserved_1[0x10]; 6441 u8 reserved_at_10[0x10];
6442 6442
6443 u8 max_mtu[0x10]; 6443 u8 max_mtu[0x10];
6444 u8 reserved_2[0x10]; 6444 u8 reserved_at_30[0x10];
6445 6445
6446 u8 admin_mtu[0x10]; 6446 u8 admin_mtu[0x10];
6447 u8 reserved_3[0x10]; 6447 u8 reserved_at_50[0x10];
6448 6448
6449 u8 oper_mtu[0x10]; 6449 u8 oper_mtu[0x10];
6450 u8 reserved_4[0x10]; 6450 u8 reserved_at_70[0x10];
6451}; 6451};
6452 6452
6453struct mlx5_ifc_pmpr_reg_bits { 6453struct mlx5_ifc_pmpr_reg_bits {
6454 u8 reserved_0[0x8]; 6454 u8 reserved_at_0[0x8];
6455 u8 module[0x8]; 6455 u8 module[0x8];
6456 u8 reserved_1[0x10]; 6456 u8 reserved_at_10[0x10];
6457 6457
6458 u8 reserved_2[0x18]; 6458 u8 reserved_at_20[0x18];
6459 u8 attenuation_5g[0x8]; 6459 u8 attenuation_5g[0x8];
6460 6460
6461 u8 reserved_3[0x18]; 6461 u8 reserved_at_40[0x18];
6462 u8 attenuation_7g[0x8]; 6462 u8 attenuation_7g[0x8];
6463 6463
6464 u8 reserved_4[0x18]; 6464 u8 reserved_at_60[0x18];
6465 u8 attenuation_12g[0x8]; 6465 u8 attenuation_12g[0x8];
6466}; 6466};
6467 6467
6468struct mlx5_ifc_pmpe_reg_bits { 6468struct mlx5_ifc_pmpe_reg_bits {
6469 u8 reserved_0[0x8]; 6469 u8 reserved_at_0[0x8];
6470 u8 module[0x8]; 6470 u8 module[0x8];
6471 u8 reserved_1[0xc]; 6471 u8 reserved_at_10[0xc];
6472 u8 module_status[0x4]; 6472 u8 module_status[0x4];
6473 6473
6474 u8 reserved_2[0x60]; 6474 u8 reserved_at_20[0x60];
6475}; 6475};
6476 6476
6477struct mlx5_ifc_pmpc_reg_bits { 6477struct mlx5_ifc_pmpc_reg_bits {
@@ -6479,20 +6479,20 @@ struct mlx5_ifc_pmpc_reg_bits {
6479}; 6479};
6480 6480
6481struct mlx5_ifc_pmlpn_reg_bits { 6481struct mlx5_ifc_pmlpn_reg_bits {
6482 u8 reserved_0[0x4]; 6482 u8 reserved_at_0[0x4];
6483 u8 mlpn_status[0x4]; 6483 u8 mlpn_status[0x4];
6484 u8 local_port[0x8]; 6484 u8 local_port[0x8];
6485 u8 reserved_1[0x10]; 6485 u8 reserved_at_10[0x10];
6486 6486
6487 u8 e[0x1]; 6487 u8 e[0x1];
6488 u8 reserved_2[0x1f]; 6488 u8 reserved_at_21[0x1f];
6489}; 6489};
6490 6490
6491struct mlx5_ifc_pmlp_reg_bits { 6491struct mlx5_ifc_pmlp_reg_bits {
6492 u8 rxtx[0x1]; 6492 u8 rxtx[0x1];
6493 u8 reserved_0[0x7]; 6493 u8 reserved_at_1[0x7];
6494 u8 local_port[0x8]; 6494 u8 local_port[0x8];
6495 u8 reserved_1[0x8]; 6495 u8 reserved_at_10[0x8];
6496 u8 width[0x8]; 6496 u8 width[0x8];
6497 6497
6498 u8 lane0_module_mapping[0x20]; 6498 u8 lane0_module_mapping[0x20];
@@ -6503,36 +6503,36 @@ struct mlx5_ifc_pmlp_reg_bits {
6503 6503
6504 u8 lane3_module_mapping[0x20]; 6504 u8 lane3_module_mapping[0x20];
6505 6505
6506 u8 reserved_2[0x160]; 6506 u8 reserved_at_a0[0x160];
6507}; 6507};
6508 6508
6509struct mlx5_ifc_pmaos_reg_bits { 6509struct mlx5_ifc_pmaos_reg_bits {
6510 u8 reserved_0[0x8]; 6510 u8 reserved_at_0[0x8];
6511 u8 module[0x8]; 6511 u8 module[0x8];
6512 u8 reserved_1[0x4]; 6512 u8 reserved_at_10[0x4];
6513 u8 admin_status[0x4]; 6513 u8 admin_status[0x4];
6514 u8 reserved_2[0x4]; 6514 u8 reserved_at_18[0x4];
6515 u8 oper_status[0x4]; 6515 u8 oper_status[0x4];
6516 6516
6517 u8 ase[0x1]; 6517 u8 ase[0x1];
6518 u8 ee[0x1]; 6518 u8 ee[0x1];
6519 u8 reserved_3[0x1c]; 6519 u8 reserved_at_22[0x1c];
6520 u8 e[0x2]; 6520 u8 e[0x2];
6521 6521
6522 u8 reserved_4[0x40]; 6522 u8 reserved_at_40[0x40];
6523}; 6523};
6524 6524
6525struct mlx5_ifc_plpc_reg_bits { 6525struct mlx5_ifc_plpc_reg_bits {
6526 u8 reserved_0[0x4]; 6526 u8 reserved_at_0[0x4];
6527 u8 profile_id[0xc]; 6527 u8 profile_id[0xc];
6528 u8 reserved_1[0x4]; 6528 u8 reserved_at_10[0x4];
6529 u8 proto_mask[0x4]; 6529 u8 proto_mask[0x4];
6530 u8 reserved_2[0x8]; 6530 u8 reserved_at_18[0x8];
6531 6531
6532 u8 reserved_3[0x10]; 6532 u8 reserved_at_20[0x10];
6533 u8 lane_speed[0x10]; 6533 u8 lane_speed[0x10];
6534 6534
6535 u8 reserved_4[0x17]; 6535 u8 reserved_at_40[0x17];
6536 u8 lpbf[0x1]; 6536 u8 lpbf[0x1];
6537 u8 fec_mode_policy[0x8]; 6537 u8 fec_mode_policy[0x8];
6538 6538
@@ -6545,44 +6545,44 @@ struct mlx5_ifc_plpc_reg_bits {
6545 u8 retransmission_request_admin[0x8]; 6545 u8 retransmission_request_admin[0x8];
6546 u8 fec_mode_request_admin[0x18]; 6546 u8 fec_mode_request_admin[0x18];
6547 6547
6548 u8 reserved_5[0x80]; 6548 u8 reserved_at_c0[0x80];
6549}; 6549};
6550 6550
6551struct mlx5_ifc_plib_reg_bits { 6551struct mlx5_ifc_plib_reg_bits {
6552 u8 reserved_0[0x8]; 6552 u8 reserved_at_0[0x8];
6553 u8 local_port[0x8]; 6553 u8 local_port[0x8];
6554 u8 reserved_1[0x8]; 6554 u8 reserved_at_10[0x8];
6555 u8 ib_port[0x8]; 6555 u8 ib_port[0x8];
6556 6556
6557 u8 reserved_2[0x60]; 6557 u8 reserved_at_20[0x60];
6558}; 6558};
6559 6559
6560struct mlx5_ifc_plbf_reg_bits { 6560struct mlx5_ifc_plbf_reg_bits {
6561 u8 reserved_0[0x8]; 6561 u8 reserved_at_0[0x8];
6562 u8 local_port[0x8]; 6562 u8 local_port[0x8];
6563 u8 reserved_1[0xd]; 6563 u8 reserved_at_10[0xd];
6564 u8 lbf_mode[0x3]; 6564 u8 lbf_mode[0x3];
6565 6565
6566 u8 reserved_2[0x20]; 6566 u8 reserved_at_20[0x20];
6567}; 6567};
6568 6568
6569struct mlx5_ifc_pipg_reg_bits { 6569struct mlx5_ifc_pipg_reg_bits {
6570 u8 reserved_0[0x8]; 6570 u8 reserved_at_0[0x8];
6571 u8 local_port[0x8]; 6571 u8 local_port[0x8];
6572 u8 reserved_1[0x10]; 6572 u8 reserved_at_10[0x10];
6573 6573
6574 u8 dic[0x1]; 6574 u8 dic[0x1];
6575 u8 reserved_2[0x19]; 6575 u8 reserved_at_21[0x19];
6576 u8 ipg[0x4]; 6576 u8 ipg[0x4];
6577 u8 reserved_3[0x2]; 6577 u8 reserved_at_3e[0x2];
6578}; 6578};
6579 6579
6580struct mlx5_ifc_pifr_reg_bits { 6580struct mlx5_ifc_pifr_reg_bits {
6581 u8 reserved_0[0x8]; 6581 u8 reserved_at_0[0x8];
6582 u8 local_port[0x8]; 6582 u8 local_port[0x8];
6583 u8 reserved_1[0x10]; 6583 u8 reserved_at_10[0x10];
6584 6584
6585 u8 reserved_2[0xe0]; 6585 u8 reserved_at_20[0xe0];
6586 6586
6587 u8 port_filter[8][0x20]; 6587 u8 port_filter[8][0x20];
6588 6588
@@ -6590,36 +6590,36 @@ struct mlx5_ifc_pifr_reg_bits {
6590}; 6590};
6591 6591
6592struct mlx5_ifc_pfcc_reg_bits { 6592struct mlx5_ifc_pfcc_reg_bits {
6593 u8 reserved_0[0x8]; 6593 u8 reserved_at_0[0x8];
6594 u8 local_port[0x8]; 6594 u8 local_port[0x8];
6595 u8 reserved_1[0x10]; 6595 u8 reserved_at_10[0x10];
6596 6596
6597 u8 ppan[0x4]; 6597 u8 ppan[0x4];
6598 u8 reserved_2[0x4]; 6598 u8 reserved_at_24[0x4];
6599 u8 prio_mask_tx[0x8]; 6599 u8 prio_mask_tx[0x8];
6600 u8 reserved_3[0x8]; 6600 u8 reserved_at_30[0x8];
6601 u8 prio_mask_rx[0x8]; 6601 u8 prio_mask_rx[0x8];
6602 6602
6603 u8 pptx[0x1]; 6603 u8 pptx[0x1];
6604 u8 aptx[0x1]; 6604 u8 aptx[0x1];
6605 u8 reserved_4[0x6]; 6605 u8 reserved_at_42[0x6];
6606 u8 pfctx[0x8]; 6606 u8 pfctx[0x8];
6607 u8 reserved_5[0x10]; 6607 u8 reserved_at_50[0x10];
6608 6608
6609 u8 pprx[0x1]; 6609 u8 pprx[0x1];
6610 u8 aprx[0x1]; 6610 u8 aprx[0x1];
6611 u8 reserved_6[0x6]; 6611 u8 reserved_at_62[0x6];
6612 u8 pfcrx[0x8]; 6612 u8 pfcrx[0x8];
6613 u8 reserved_7[0x10]; 6613 u8 reserved_at_70[0x10];
6614 6614
6615 u8 reserved_8[0x80]; 6615 u8 reserved_at_80[0x80];
6616}; 6616};
6617 6617
6618struct mlx5_ifc_pelc_reg_bits { 6618struct mlx5_ifc_pelc_reg_bits {
6619 u8 op[0x4]; 6619 u8 op[0x4];
6620 u8 reserved_0[0x4]; 6620 u8 reserved_at_4[0x4];
6621 u8 local_port[0x8]; 6621 u8 local_port[0x8];
6622 u8 reserved_1[0x10]; 6622 u8 reserved_at_10[0x10];
6623 6623
6624 u8 op_admin[0x8]; 6624 u8 op_admin[0x8];
6625 u8 op_capability[0x8]; 6625 u8 op_capability[0x8];
@@ -6634,28 +6634,28 @@ struct mlx5_ifc_pelc_reg_bits {
6634 6634
6635 u8 active[0x40]; 6635 u8 active[0x40];
6636 6636
6637 u8 reserved_2[0x80]; 6637 u8 reserved_at_140[0x80];
6638}; 6638};
6639 6639
6640struct mlx5_ifc_peir_reg_bits { 6640struct mlx5_ifc_peir_reg_bits {
6641 u8 reserved_0[0x8]; 6641 u8 reserved_at_0[0x8];
6642 u8 local_port[0x8]; 6642 u8 local_port[0x8];
6643 u8 reserved_1[0x10]; 6643 u8 reserved_at_10[0x10];
6644 6644
6645 u8 reserved_2[0xc]; 6645 u8 reserved_at_20[0xc];
6646 u8 error_count[0x4]; 6646 u8 error_count[0x4];
6647 u8 reserved_3[0x10]; 6647 u8 reserved_at_30[0x10];
6648 6648
6649 u8 reserved_4[0xc]; 6649 u8 reserved_at_40[0xc];
6650 u8 lane[0x4]; 6650 u8 lane[0x4];
6651 u8 reserved_5[0x8]; 6651 u8 reserved_at_50[0x8];
6652 u8 error_type[0x8]; 6652 u8 error_type[0x8];
6653}; 6653};
6654 6654
6655struct mlx5_ifc_pcap_reg_bits { 6655struct mlx5_ifc_pcap_reg_bits {
6656 u8 reserved_0[0x8]; 6656 u8 reserved_at_0[0x8];
6657 u8 local_port[0x8]; 6657 u8 local_port[0x8];
6658 u8 reserved_1[0x10]; 6658 u8 reserved_at_10[0x10];
6659 6659
6660 u8 port_capability_mask[4][0x20]; 6660 u8 port_capability_mask[4][0x20];
6661}; 6661};
@@ -6663,46 +6663,46 @@ struct mlx5_ifc_pcap_reg_bits {
6663struct mlx5_ifc_paos_reg_bits { 6663struct mlx5_ifc_paos_reg_bits {
6664 u8 swid[0x8]; 6664 u8 swid[0x8];
6665 u8 local_port[0x8]; 6665 u8 local_port[0x8];
6666 u8 reserved_0[0x4]; 6666 u8 reserved_at_10[0x4];
6667 u8 admin_status[0x4]; 6667 u8 admin_status[0x4];
6668 u8 reserved_1[0x4]; 6668 u8 reserved_at_18[0x4];
6669 u8 oper_status[0x4]; 6669 u8 oper_status[0x4];
6670 6670
6671 u8 ase[0x1]; 6671 u8 ase[0x1];
6672 u8 ee[0x1]; 6672 u8 ee[0x1];
6673 u8 reserved_2[0x1c]; 6673 u8 reserved_at_22[0x1c];
6674 u8 e[0x2]; 6674 u8 e[0x2];
6675 6675
6676 u8 reserved_3[0x40]; 6676 u8 reserved_at_40[0x40];
6677}; 6677};
6678 6678
6679struct mlx5_ifc_pamp_reg_bits { 6679struct mlx5_ifc_pamp_reg_bits {
6680 u8 reserved_0[0x8]; 6680 u8 reserved_at_0[0x8];
6681 u8 opamp_group[0x8]; 6681 u8 opamp_group[0x8];
6682 u8 reserved_1[0xc]; 6682 u8 reserved_at_10[0xc];
6683 u8 opamp_group_type[0x4]; 6683 u8 opamp_group_type[0x4];
6684 6684
6685 u8 start_index[0x10]; 6685 u8 start_index[0x10];
6686 u8 reserved_2[0x4]; 6686 u8 reserved_at_30[0x4];
6687 u8 num_of_indices[0xc]; 6687 u8 num_of_indices[0xc];
6688 6688
6689 u8 index_data[18][0x10]; 6689 u8 index_data[18][0x10];
6690}; 6690};
6691 6691
6692struct mlx5_ifc_lane_2_module_mapping_bits { 6692struct mlx5_ifc_lane_2_module_mapping_bits {
6693 u8 reserved_0[0x6]; 6693 u8 reserved_at_0[0x6];
6694 u8 rx_lane[0x2]; 6694 u8 rx_lane[0x2];
6695 u8 reserved_1[0x6]; 6695 u8 reserved_at_8[0x6];
6696 u8 tx_lane[0x2]; 6696 u8 tx_lane[0x2];
6697 u8 reserved_2[0x8]; 6697 u8 reserved_at_10[0x8];
6698 u8 module[0x8]; 6698 u8 module[0x8];
6699}; 6699};
6700 6700
6701struct mlx5_ifc_bufferx_reg_bits { 6701struct mlx5_ifc_bufferx_reg_bits {
6702 u8 reserved_0[0x6]; 6702 u8 reserved_at_0[0x6];
6703 u8 lossy[0x1]; 6703 u8 lossy[0x1];
6704 u8 epsb[0x1]; 6704 u8 epsb[0x1];
6705 u8 reserved_1[0xc]; 6705 u8 reserved_at_8[0xc];
6706 u8 size[0xc]; 6706 u8 size[0xc];
6707 6707
6708 u8 xoff_threshold[0x10]; 6708 u8 xoff_threshold[0x10];
@@ -6714,21 +6714,21 @@ struct mlx5_ifc_set_node_in_bits {
6714}; 6714};
6715 6715
6716struct mlx5_ifc_register_power_settings_bits { 6716struct mlx5_ifc_register_power_settings_bits {
6717 u8 reserved_0[0x18]; 6717 u8 reserved_at_0[0x18];
6718 u8 power_settings_level[0x8]; 6718 u8 power_settings_level[0x8];
6719 6719
6720 u8 reserved_1[0x60]; 6720 u8 reserved_at_20[0x60];
6721}; 6721};
6722 6722
6723struct mlx5_ifc_register_host_endianness_bits { 6723struct mlx5_ifc_register_host_endianness_bits {
6724 u8 he[0x1]; 6724 u8 he[0x1];
6725 u8 reserved_0[0x1f]; 6725 u8 reserved_at_1[0x1f];
6726 6726
6727 u8 reserved_1[0x60]; 6727 u8 reserved_at_20[0x60];
6728}; 6728};
6729 6729
6730struct mlx5_ifc_umr_pointer_desc_argument_bits { 6730struct mlx5_ifc_umr_pointer_desc_argument_bits {
6731 u8 reserved_0[0x20]; 6731 u8 reserved_at_0[0x20];
6732 6732
6733 u8 mkey[0x20]; 6733 u8 mkey[0x20];
6734 6734
@@ -6741,7 +6741,7 @@ struct mlx5_ifc_ud_adrs_vector_bits {
6741 u8 dc_key[0x40]; 6741 u8 dc_key[0x40];
6742 6742
6743 u8 ext[0x1]; 6743 u8 ext[0x1];
6744 u8 reserved_0[0x7]; 6744 u8 reserved_at_41[0x7];
6745 u8 destination_qp_dct[0x18]; 6745 u8 destination_qp_dct[0x18];
6746 6746
6747 u8 static_rate[0x4]; 6747 u8 static_rate[0x4];
@@ -6750,7 +6750,7 @@ struct mlx5_ifc_ud_adrs_vector_bits {
6750 u8 mlid[0x7]; 6750 u8 mlid[0x7];
6751 u8 rlid_udp_sport[0x10]; 6751 u8 rlid_udp_sport[0x10];
6752 6752
6753 u8 reserved_1[0x20]; 6753 u8 reserved_at_80[0x20];
6754 6754
6755 u8 rmac_47_16[0x20]; 6755 u8 rmac_47_16[0x20];
6756 6756
@@ -6758,9 +6758,9 @@ struct mlx5_ifc_ud_adrs_vector_bits {
6758 u8 tclass[0x8]; 6758 u8 tclass[0x8];
6759 u8 hop_limit[0x8]; 6759 u8 hop_limit[0x8];
6760 6760
6761 u8 reserved_2[0x1]; 6761 u8 reserved_at_e0[0x1];
6762 u8 grh[0x1]; 6762 u8 grh[0x1];
6763 u8 reserved_3[0x2]; 6763 u8 reserved_at_e2[0x2];
6764 u8 src_addr_index[0x8]; 6764 u8 src_addr_index[0x8];
6765 u8 flow_label[0x14]; 6765 u8 flow_label[0x14];
6766 6766
@@ -6768,27 +6768,27 @@ struct mlx5_ifc_ud_adrs_vector_bits {
6768}; 6768};
6769 6769
6770struct mlx5_ifc_pages_req_event_bits { 6770struct mlx5_ifc_pages_req_event_bits {
6771 u8 reserved_0[0x10]; 6771 u8 reserved_at_0[0x10];
6772 u8 function_id[0x10]; 6772 u8 function_id[0x10];
6773 6773
6774 u8 num_pages[0x20]; 6774 u8 num_pages[0x20];
6775 6775
6776 u8 reserved_1[0xa0]; 6776 u8 reserved_at_40[0xa0];
6777}; 6777};
6778 6778
6779struct mlx5_ifc_eqe_bits { 6779struct mlx5_ifc_eqe_bits {
6780 u8 reserved_0[0x8]; 6780 u8 reserved_at_0[0x8];
6781 u8 event_type[0x8]; 6781 u8 event_type[0x8];
6782 u8 reserved_1[0x8]; 6782 u8 reserved_at_10[0x8];
6783 u8 event_sub_type[0x8]; 6783 u8 event_sub_type[0x8];
6784 6784
6785 u8 reserved_2[0xe0]; 6785 u8 reserved_at_20[0xe0];
6786 6786
6787 union mlx5_ifc_event_auto_bits event_data; 6787 union mlx5_ifc_event_auto_bits event_data;
6788 6788
6789 u8 reserved_3[0x10]; 6789 u8 reserved_at_1e0[0x10];
6790 u8 signature[0x8]; 6790 u8 signature[0x8];
6791 u8 reserved_4[0x7]; 6791 u8 reserved_at_1f8[0x7];
6792 u8 owner[0x1]; 6792 u8 owner[0x1];
6793}; 6793};
6794 6794
@@ -6798,14 +6798,14 @@ enum {
6798 6798
6799struct mlx5_ifc_cmd_queue_entry_bits { 6799struct mlx5_ifc_cmd_queue_entry_bits {
6800 u8 type[0x8]; 6800 u8 type[0x8];
6801 u8 reserved_0[0x18]; 6801 u8 reserved_at_8[0x18];
6802 6802
6803 u8 input_length[0x20]; 6803 u8 input_length[0x20];
6804 6804
6805 u8 input_mailbox_pointer_63_32[0x20]; 6805 u8 input_mailbox_pointer_63_32[0x20];
6806 6806
6807 u8 input_mailbox_pointer_31_9[0x17]; 6807 u8 input_mailbox_pointer_31_9[0x17];
6808 u8 reserved_1[0x9]; 6808 u8 reserved_at_77[0x9];
6809 6809
6810 u8 command_input_inline_data[16][0x8]; 6810 u8 command_input_inline_data[16][0x8];
6811 6811
@@ -6814,20 +6814,20 @@ struct mlx5_ifc_cmd_queue_entry_bits {
6814 u8 output_mailbox_pointer_63_32[0x20]; 6814 u8 output_mailbox_pointer_63_32[0x20];
6815 6815
6816 u8 output_mailbox_pointer_31_9[0x17]; 6816 u8 output_mailbox_pointer_31_9[0x17];
6817 u8 reserved_2[0x9]; 6817 u8 reserved_at_1b7[0x9];
6818 6818
6819 u8 output_length[0x20]; 6819 u8 output_length[0x20];
6820 6820
6821 u8 token[0x8]; 6821 u8 token[0x8];
6822 u8 signature[0x8]; 6822 u8 signature[0x8];
6823 u8 reserved_3[0x8]; 6823 u8 reserved_at_1f0[0x8];
6824 u8 status[0x7]; 6824 u8 status[0x7];
6825 u8 ownership[0x1]; 6825 u8 ownership[0x1];
6826}; 6826};
6827 6827
6828struct mlx5_ifc_cmd_out_bits { 6828struct mlx5_ifc_cmd_out_bits {
6829 u8 status[0x8]; 6829 u8 status[0x8];
6830 u8 reserved_0[0x18]; 6830 u8 reserved_at_8[0x18];
6831 6831
6832 u8 syndrome[0x20]; 6832 u8 syndrome[0x20];
6833 6833
@@ -6836,9 +6836,9 @@ struct mlx5_ifc_cmd_out_bits {
6836 6836
6837struct mlx5_ifc_cmd_in_bits { 6837struct mlx5_ifc_cmd_in_bits {
6838 u8 opcode[0x10]; 6838 u8 opcode[0x10];
6839 u8 reserved_0[0x10]; 6839 u8 reserved_at_10[0x10];
6840 6840
6841 u8 reserved_1[0x10]; 6841 u8 reserved_at_20[0x10];
6842 u8 op_mod[0x10]; 6842 u8 op_mod[0x10];
6843 6843
6844 u8 command[0][0x20]; 6844 u8 command[0][0x20];
@@ -6847,16 +6847,16 @@ struct mlx5_ifc_cmd_in_bits {
6847struct mlx5_ifc_cmd_if_box_bits { 6847struct mlx5_ifc_cmd_if_box_bits {
6848 u8 mailbox_data[512][0x8]; 6848 u8 mailbox_data[512][0x8];
6849 6849
6850 u8 reserved_0[0x180]; 6850 u8 reserved_at_1000[0x180];
6851 6851
6852 u8 next_pointer_63_32[0x20]; 6852 u8 next_pointer_63_32[0x20];
6853 6853
6854 u8 next_pointer_31_10[0x16]; 6854 u8 next_pointer_31_10[0x16];
6855 u8 reserved_1[0xa]; 6855 u8 reserved_at_11b6[0xa];
6856 6856
6857 u8 block_number[0x20]; 6857 u8 block_number[0x20];
6858 6858
6859 u8 reserved_2[0x8]; 6859 u8 reserved_at_11e0[0x8];
6860 u8 token[0x8]; 6860 u8 token[0x8];
6861 u8 ctrl_signature[0x8]; 6861 u8 ctrl_signature[0x8];
6862 u8 signature[0x8]; 6862 u8 signature[0x8];
@@ -6866,7 +6866,7 @@ struct mlx5_ifc_mtt_bits {
6866 u8 ptag_63_32[0x20]; 6866 u8 ptag_63_32[0x20];
6867 6867
6868 u8 ptag_31_8[0x18]; 6868 u8 ptag_31_8[0x18];
6869 u8 reserved_0[0x6]; 6869 u8 reserved_at_38[0x6];
6870 u8 wr_en[0x1]; 6870 u8 wr_en[0x1];
6871 u8 rd_en[0x1]; 6871 u8 rd_en[0x1];
6872}; 6872};
@@ -6904,38 +6904,38 @@ struct mlx5_ifc_initial_seg_bits {
6904 u8 cmd_interface_rev[0x10]; 6904 u8 cmd_interface_rev[0x10];
6905 u8 fw_rev_subminor[0x10]; 6905 u8 fw_rev_subminor[0x10];
6906 6906
6907 u8 reserved_0[0x40]; 6907 u8 reserved_at_40[0x40];
6908 6908
6909 u8 cmdq_phy_addr_63_32[0x20]; 6909 u8 cmdq_phy_addr_63_32[0x20];
6910 6910
6911 u8 cmdq_phy_addr_31_12[0x14]; 6911 u8 cmdq_phy_addr_31_12[0x14];
6912 u8 reserved_1[0x2]; 6912 u8 reserved_at_b4[0x2];
6913 u8 nic_interface[0x2]; 6913 u8 nic_interface[0x2];
6914 u8 log_cmdq_size[0x4]; 6914 u8 log_cmdq_size[0x4];
6915 u8 log_cmdq_stride[0x4]; 6915 u8 log_cmdq_stride[0x4];
6916 6916
6917 u8 command_doorbell_vector[0x20]; 6917 u8 command_doorbell_vector[0x20];
6918 6918
6919 u8 reserved_2[0xf00]; 6919 u8 reserved_at_e0[0xf00];
6920 6920
6921 u8 initializing[0x1]; 6921 u8 initializing[0x1];
6922 u8 reserved_3[0x4]; 6922 u8 reserved_at_fe1[0x4];
6923 u8 nic_interface_supported[0x3]; 6923 u8 nic_interface_supported[0x3];
6924 u8 reserved_4[0x18]; 6924 u8 reserved_at_fe8[0x18];
6925 6925
6926 struct mlx5_ifc_health_buffer_bits health_buffer; 6926 struct mlx5_ifc_health_buffer_bits health_buffer;
6927 6927
6928 u8 no_dram_nic_offset[0x20]; 6928 u8 no_dram_nic_offset[0x20];
6929 6929
6930 u8 reserved_5[0x6e40]; 6930 u8 reserved_at_1220[0x6e40];
6931 6931
6932 u8 reserved_6[0x1f]; 6932 u8 reserved_at_8060[0x1f];
6933 u8 clear_int[0x1]; 6933 u8 clear_int[0x1];
6934 6934
6935 u8 health_syndrome[0x8]; 6935 u8 health_syndrome[0x8];
6936 u8 health_counter[0x18]; 6936 u8 health_counter[0x18];
6937 6937
6938 u8 reserved_7[0x17fc0]; 6938 u8 reserved_at_80a0[0x17fc0];
6939}; 6939};
6940 6940
6941union mlx5_ifc_ports_control_registers_document_bits { 6941union mlx5_ifc_ports_control_registers_document_bits {
@@ -6980,44 +6980,44 @@ union mlx5_ifc_ports_control_registers_document_bits {
6980 struct mlx5_ifc_pvlc_reg_bits pvlc_reg; 6980 struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
6981 struct mlx5_ifc_slrg_reg_bits slrg_reg; 6981 struct mlx5_ifc_slrg_reg_bits slrg_reg;
6982 struct mlx5_ifc_sltp_reg_bits sltp_reg; 6982 struct mlx5_ifc_sltp_reg_bits sltp_reg;
6983 u8 reserved_0[0x60e0]; 6983 u8 reserved_at_0[0x60e0];
6984}; 6984};
6985 6985
6986union mlx5_ifc_debug_enhancements_document_bits { 6986union mlx5_ifc_debug_enhancements_document_bits {
6987 struct mlx5_ifc_health_buffer_bits health_buffer; 6987 struct mlx5_ifc_health_buffer_bits health_buffer;
6988 u8 reserved_0[0x200]; 6988 u8 reserved_at_0[0x200];
6989}; 6989};
6990 6990
6991union mlx5_ifc_uplink_pci_interface_document_bits { 6991union mlx5_ifc_uplink_pci_interface_document_bits {
6992 struct mlx5_ifc_initial_seg_bits initial_seg; 6992 struct mlx5_ifc_initial_seg_bits initial_seg;
6993 u8 reserved_0[0x20060]; 6993 u8 reserved_at_0[0x20060];
6994}; 6994};
6995 6995
6996struct mlx5_ifc_set_flow_table_root_out_bits { 6996struct mlx5_ifc_set_flow_table_root_out_bits {
6997 u8 status[0x8]; 6997 u8 status[0x8];
6998 u8 reserved_0[0x18]; 6998 u8 reserved_at_8[0x18];
6999 6999
7000 u8 syndrome[0x20]; 7000 u8 syndrome[0x20];
7001 7001
7002 u8 reserved_1[0x40]; 7002 u8 reserved_at_40[0x40];
7003}; 7003};
7004 7004
7005struct mlx5_ifc_set_flow_table_root_in_bits { 7005struct mlx5_ifc_set_flow_table_root_in_bits {
7006 u8 opcode[0x10]; 7006 u8 opcode[0x10];
7007 u8 reserved_0[0x10]; 7007 u8 reserved_at_10[0x10];
7008 7008
7009 u8 reserved_1[0x10]; 7009 u8 reserved_at_20[0x10];
7010 u8 op_mod[0x10]; 7010 u8 op_mod[0x10];
7011 7011
7012 u8 reserved_2[0x40]; 7012 u8 reserved_at_40[0x40];
7013 7013
7014 u8 table_type[0x8]; 7014 u8 table_type[0x8];
7015 u8 reserved_3[0x18]; 7015 u8 reserved_at_88[0x18];
7016 7016
7017 u8 reserved_4[0x8]; 7017 u8 reserved_at_a0[0x8];
7018 u8 table_id[0x18]; 7018 u8 table_id[0x18];
7019 7019
7020 u8 reserved_5[0x140]; 7020 u8 reserved_at_c0[0x140];
7021}; 7021};
7022 7022
7023enum { 7023enum {
@@ -7026,39 +7026,39 @@ enum {
7026 7026
7027struct mlx5_ifc_modify_flow_table_out_bits { 7027struct mlx5_ifc_modify_flow_table_out_bits {
7028 u8 status[0x8]; 7028 u8 status[0x8];
7029 u8 reserved_0[0x18]; 7029 u8 reserved_at_8[0x18];
7030 7030
7031 u8 syndrome[0x20]; 7031 u8 syndrome[0x20];
7032 7032
7033 u8 reserved_1[0x40]; 7033 u8 reserved_at_40[0x40];
7034}; 7034};
7035 7035
7036struct mlx5_ifc_modify_flow_table_in_bits { 7036struct mlx5_ifc_modify_flow_table_in_bits {
7037 u8 opcode[0x10]; 7037 u8 opcode[0x10];
7038 u8 reserved_0[0x10]; 7038 u8 reserved_at_10[0x10];
7039 7039
7040 u8 reserved_1[0x10]; 7040 u8 reserved_at_20[0x10];
7041 u8 op_mod[0x10]; 7041 u8 op_mod[0x10];
7042 7042
7043 u8 reserved_2[0x20]; 7043 u8 reserved_at_40[0x20];
7044 7044
7045 u8 reserved_3[0x10]; 7045 u8 reserved_at_60[0x10];
7046 u8 modify_field_select[0x10]; 7046 u8 modify_field_select[0x10];
7047 7047
7048 u8 table_type[0x8]; 7048 u8 table_type[0x8];
7049 u8 reserved_4[0x18]; 7049 u8 reserved_at_88[0x18];
7050 7050
7051 u8 reserved_5[0x8]; 7051 u8 reserved_at_a0[0x8];
7052 u8 table_id[0x18]; 7052 u8 table_id[0x18];
7053 7053
7054 u8 reserved_6[0x4]; 7054 u8 reserved_at_c0[0x4];
7055 u8 table_miss_mode[0x4]; 7055 u8 table_miss_mode[0x4];
7056 u8 reserved_7[0x18]; 7056 u8 reserved_at_c8[0x18];
7057 7057
7058 u8 reserved_8[0x8]; 7058 u8 reserved_at_e0[0x8];
7059 u8 table_miss_id[0x18]; 7059 u8 table_miss_id[0x18];
7060 7060
7061 u8 reserved_9[0x100]; 7061 u8 reserved_at_100[0x100];
7062}; 7062};
7063 7063
7064#endif /* MLX5_IFC_H */ 7064#endif /* MLX5_IFC_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 289c2314d766..5440b7b705eb 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3718,7 +3718,7 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 void *netdev_lower_get_next(struct net_device *dev,
 				struct list_head **iter);
 #define netdev_for_each_lower_dev(dev, ldev, iter) \
-	for (iter = &(dev)->adj_list.lower, \
+	for (iter = (dev)->adj_list.lower.next, \
 	     ldev = netdev_lower_get_next(dev, &(iter)); \
 	     ldev; \
 	     ldev = netdev_lower_get_next(dev, &(iter)))
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index 343c13ac4f71..35cb9264e0d5 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -44,6 +44,7 @@
 
 #define KNAV_DMA_NUM_EPIB_WORDS		4
 #define KNAV_DMA_NUM_PS_WORDS		16
+#define KNAV_DMA_NUM_SW_DATA_WORDS	4
 #define KNAV_DMA_FDQ_PER_CHAN		4
 
 /* Tx channel scheduling priority */
@@ -142,6 +143,7 @@ struct knav_dma_cfg {
  * @orig_buff: buff pointer since 'buff' can be overwritten
  * @epib: Extended packet info block
  * @psdata: Protocol specific
+ * @sw_data: Software private data not touched by h/w
  */
 struct knav_dma_desc {
 	__le32 desc_info;
@@ -154,7 +156,7 @@ struct knav_dma_desc {
 	__le32 orig_buff;
 	__le32 epib[KNAV_DMA_NUM_EPIB_WORDS];
 	__le32 psdata[KNAV_DMA_NUM_PS_WORDS];
-	__le32 pad[4];
+	u32 sw_data[KNAV_DMA_NUM_SW_DATA_WORDS];
 } ____cacheline_aligned;
 
 #if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA)
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 481fe1c9044c..49dcad4fe99e 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -270,8 +270,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 					      struct sock *newsk,
 					      const struct request_sock *req);
 
-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
-			      struct sock *child);
+struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
+				      struct request_sock *req,
+				      struct sock *child);
 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 				   unsigned long timeout);
 struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 7029527725dd..4079fc18ffe4 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -61,6 +61,7 @@ struct fib_nh_exception {
 	struct rtable __rcu		*fnhe_rth_input;
 	struct rtable __rcu		*fnhe_rth_output;
 	unsigned long			fnhe_stamp;
+	struct rcu_head			rcu;
 };
 
 struct fnhe_hash_bucket {
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index d5871ac493eb..f066781be3c8 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1625,7 +1625,7 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 
 		rt = atrtr_find(&at_hint);
 	}
-	err = ENETUNREACH;
+	err = -ENETUNREACH;
 	if (!rt)
 		goto out;
 
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index e6c8382c79ba..ccf70bed0d0c 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -527,11 +527,12 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
 	 * gets dereferenced.
 	 */
 	spin_lock_bh(&bat_priv->gw.list_lock);
-	hlist_del_init_rcu(&gw_node->list);
+	if (!hlist_unhashed(&gw_node->list)) {
+		hlist_del_init_rcu(&gw_node->list);
+		batadv_gw_node_free_ref(gw_node);
+	}
 	spin_unlock_bh(&bat_priv->gw.list_lock);
 
-	batadv_gw_node_free_ref(gw_node);
-
 	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 	if (gw_node == curr_gw)
 		batadv_gw_reselect(bat_priv);
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 01acccc4d218..57f7107169f5 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -76,6 +76,28 @@ out:
 }
 
 /**
+ * batadv_mutual_parents - check if two devices are each others parent
+ * @dev1: 1st net_device
+ * @dev2: 2nd net_device
+ *
+ * veth devices come in pairs and each is the parent of the other!
+ *
+ * Return: true if the devices are each others parent, otherwise false
+ */
+static bool batadv_mutual_parents(const struct net_device *dev1,
+				  const struct net_device *dev2)
+{
+	int dev1_parent_iflink = dev_get_iflink(dev1);
+	int dev2_parent_iflink = dev_get_iflink(dev2);
+
+	if (!dev1_parent_iflink || !dev2_parent_iflink)
+		return false;
+
+	return (dev1_parent_iflink == dev2->ifindex) &&
+	       (dev2_parent_iflink == dev1->ifindex);
+}
+
+/**
  * batadv_is_on_batman_iface - check if a device is a batman iface descendant
  * @net_dev: the device to check
  *
@@ -108,6 +130,9 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
 	if (WARN(!parent_dev, "Cannot find parent device"))
 		return false;
 
+	if (batadv_mutual_parents(net_dev, parent_dev))
+		return false;
+
 	ret = batadv_is_on_batman_iface(parent_dev);
 
 	return ret;
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index cdfc85fa2743..0e80fd1461ab 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -303,9 +303,11 @@ static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
 
 	if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
 		spin_lock_bh(&orig_node->vlan_list_lock);
-		hlist_del_init_rcu(&vlan->list);
+		if (!hlist_unhashed(&vlan->list)) {
+			hlist_del_init_rcu(&vlan->list);
+			batadv_orig_node_vlan_free_ref(vlan);
+		}
 		spin_unlock_bh(&orig_node->vlan_list_lock);
-		batadv_orig_node_vlan_free_ref(vlan);
 	}
 
 	batadv_orig_node_vlan_free_ref(vlan);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 47bcef754796..883c821a9e78 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -4112,8 +4112,10 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
 			break;
 		}
 
-		*req_complete = bt_cb(skb)->hci.req_complete;
-		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
+		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
+			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
+		else
+			*req_complete = bt_cb(skb)->hci.req_complete;
 		kfree_skb(skb);
 	}
 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 30e105f57f0d..74c278e00225 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -425,8 +425,8 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
 	mp = br_mdb_ip_get(mdb, group);
 	if (!mp) {
 		mp = br_multicast_new_group(br, port, group);
-		err = PTR_ERR(mp);
-		if (IS_ERR(mp))
+		err = PTR_ERR_OR_ZERO(mp);
+		if (err)
 			return err;
 	}
 
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 61d7617d9249..b82440e1fcb4 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -159,7 +159,7 @@ static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
 		tmppkt = NULL;
 
 		/* Verify that length is correct */
-		err = EPROTO;
+		err = -EPROTO;
 		if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
 			goto out;
 	}
diff --git a/net/core/dev.c b/net/core/dev.c
index 8cba3d852f25..0ef061b2badc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5379,12 +5379,12 @@ void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
 {
 	struct netdev_adjacent *lower;
 
-	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+	lower = list_entry(*iter, struct netdev_adjacent, list);
 
 	if (&lower->list == &dev->adj_list.lower)
 		return NULL;
 
-	*iter = &lower->list;
+	*iter = lower->list.next;
 
 	return lower->dev;
 }
@@ -7422,8 +7422,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
 	setup(dev);
 
-	if (!dev->tx_queue_len)
+	if (!dev->tx_queue_len) {
 		dev->priv_flags |= IFF_NO_QUEUE;
+		dev->tx_queue_len = 1;
+	}
 
 	dev->num_tx_queues = txqs;
 	dev->real_num_tx_queues = txqs;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index eab81bc80e5c..12e700332010 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -399,6 +399,13 @@ ip_proto_again:
 			goto out_bad;
 		proto = eth->h_proto;
 		nhoff += sizeof(*eth);
+
+		/* Cap headers that we access via pointers at the
+		 * end of the Ethernet header as our maximum alignment
+		 * at that point is only 2 bytes.
+		 */
+		if (NET_IP_ALIGN)
+			hlen = nhoff;
 	}
 
 	key_control->flags |= FLOW_DIS_ENCAPSULATION;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 5684e14932bd..902d606324a0 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -824,26 +824,26 @@ lookup:
824 824
825 if (sk->sk_state == DCCP_NEW_SYN_RECV) { 825 if (sk->sk_state == DCCP_NEW_SYN_RECV) {
826 struct request_sock *req = inet_reqsk(sk); 826 struct request_sock *req = inet_reqsk(sk);
827 struct sock *nsk = NULL; 827 struct sock *nsk;
828 828
829 sk = req->rsk_listener; 829 sk = req->rsk_listener;
830 if (likely(sk->sk_state == DCCP_LISTEN)) { 830 if (unlikely(sk->sk_state != DCCP_LISTEN)) {
831 nsk = dccp_check_req(sk, skb, req);
832 } else {
833 inet_csk_reqsk_queue_drop_and_put(sk, req); 831 inet_csk_reqsk_queue_drop_and_put(sk, req);
834 goto lookup; 832 goto lookup;
835 } 833 }
834 sock_hold(sk);
835 nsk = dccp_check_req(sk, skb, req);
836 if (!nsk) { 836 if (!nsk) {
837 reqsk_put(req); 837 reqsk_put(req);
838 goto discard_it; 838 goto discard_and_relse;
839 } 839 }
840 if (nsk == sk) { 840 if (nsk == sk) {
841 sock_hold(sk);
842 reqsk_put(req); 841 reqsk_put(req);
843 } else if (dccp_child_process(sk, nsk, skb)) { 842 } else if (dccp_child_process(sk, nsk, skb)) {
844 dccp_v4_ctl_send_reset(sk, skb); 843 dccp_v4_ctl_send_reset(sk, skb);
845 goto discard_it; 844 goto discard_and_relse;
846 } else { 845 } else {
846 sock_put(sk);
847 return 0; 847 return 0;
848 } 848 }
849 } 849 }
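This hunk, and the matching dccp/ipv6 and tcp hunks below, take a reference on the listener with sock_hold() before *_check_req() can consume the request (and the reference the request holds), then drop it on every exit path via discard_and_relse or sock_put. A toy userspace refcount sketch of why the early hold matters; the types and helpers are invented and are not kernel API.

#include <stdio.h>
#include <stdlib.h>

struct listener {
        int refcnt;
};

static void hold(struct listener *l) { l->refcnt++; }

static void put(struct listener *l)
{
        if (--l->refcnt == 0) {
                printf("listener freed\n");
                free(l);
        }
}

struct request {
        struct listener *lst;           /* the request owns one reference */
};

/* May consume the request, and with it the reference the request holds;
 * without the caller's own hold() the listener could vanish here.
 */
static void check_request(struct request *req)
{
        put(req->lst);
        free(req);
}

int main(void)
{
        struct listener *l = malloc(sizeof(*l));
        struct request *req = malloc(sizeof(*req));

        if (!l || !req)
                return 1;
        l->refcnt = 1;                  /* reference owned by the request */
        req->lst = l;

        hold(l);                        /* like sock_hold(sk) before the check */
        check_request(req);
        printf("listener still safe to use here\n");
        put(l);                         /* like sock_put(sk) / discard_and_relse */
        return 0;
}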
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 9c6d0508e63a..b8608b71a66d 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -691,26 +691,26 @@ lookup:
691 691
692 if (sk->sk_state == DCCP_NEW_SYN_RECV) { 692 if (sk->sk_state == DCCP_NEW_SYN_RECV) {
693 struct request_sock *req = inet_reqsk(sk); 693 struct request_sock *req = inet_reqsk(sk);
694 struct sock *nsk = NULL; 694 struct sock *nsk;
695 695
696 sk = req->rsk_listener; 696 sk = req->rsk_listener;
697 if (likely(sk->sk_state == DCCP_LISTEN)) { 697 if (unlikely(sk->sk_state != DCCP_LISTEN)) {
698 nsk = dccp_check_req(sk, skb, req);
699 } else {
700 inet_csk_reqsk_queue_drop_and_put(sk, req); 698 inet_csk_reqsk_queue_drop_and_put(sk, req);
701 goto lookup; 699 goto lookup;
702 } 700 }
701 sock_hold(sk);
702 nsk = dccp_check_req(sk, skb, req);
703 if (!nsk) { 703 if (!nsk) {
704 reqsk_put(req); 704 reqsk_put(req);
705 goto discard_it; 705 goto discard_and_relse;
706 } 706 }
707 if (nsk == sk) { 707 if (nsk == sk) {
708 sock_hold(sk);
709 reqsk_put(req); 708 reqsk_put(req);
710 } else if (dccp_child_process(sk, nsk, skb)) { 709 } else if (dccp_child_process(sk, nsk, skb)) {
711 dccp_v6_ctl_send_reset(sk, skb); 710 dccp_v6_ctl_send_reset(sk, skb);
712 goto discard_it; 711 goto discard_and_relse;
713 } else { 712 } else {
713 sock_put(sk);
714 return 0; 714 return 0;
715 } 715 }
716 } 716 }
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 40b9ca72aae3..ab24521beb4d 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1194,7 +1194,6 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
1194 if (ret) { 1194 if (ret) {
1195 netdev_err(master, "error %d registering interface %s\n", 1195 netdev_err(master, "error %d registering interface %s\n",
1196 ret, slave_dev->name); 1196 ret, slave_dev->name);
1197 phy_disconnect(p->phy);
1198 ds->ports[port] = NULL; 1197 ds->ports[port] = NULL;
1199 free_netdev(slave_dev); 1198 free_netdev(slave_dev);
1200 return ret; 1199 return ret;
@@ -1205,6 +1204,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
1205 ret = dsa_slave_phy_setup(p, slave_dev); 1204 ret = dsa_slave_phy_setup(p, slave_dev);
1206 if (ret) { 1205 if (ret) {
1207 netdev_err(master, "error %d setting up slave phy\n", ret); 1206 netdev_err(master, "error %d setting up slave phy\n", ret);
1207 unregister_netdev(slave_dev);
1208 free_netdev(slave_dev); 1208 free_netdev(slave_dev);
1209 return ret; 1209 return ret;
1210 } 1210 }
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index cebd9d31e65a..f6303b17546b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1847,7 +1847,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
1847 if (err < 0) 1847 if (err < 0)
1848 goto errout; 1848 goto errout;
1849 1849
1850 err = EINVAL; 1850 err = -EINVAL;
1851 if (!tb[NETCONFA_IFINDEX]) 1851 if (!tb[NETCONFA_IFINDEX])
1852 goto errout; 1852 goto errout;
1853 1853
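The EINVAL fix here, its twin in addrconf.c below, and the ENOMEM fixes in nft_counter.c all restore the convention that kernel functions report errors as negative errno values; a positive value slips past the usual `if (err < 0)` checks. A trivial userspace illustration with a made-up parse_config() helper:

#include <errno.h>
#include <stdio.h>

static int parse_config(int have_ifindex)
{
        if (!have_ifindex)
                return -EINVAL;         /* a bare EINVAL would look like success */
        return 0;
}

int main(void)
{
        int err = parse_config(0);

        if (err < 0)
                printf("error %d reported correctly\n", err);
        else
                printf("error silently treated as success\n");
        return 0;
}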
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 46b9c887bede..64148914803a 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -789,14 +789,16 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
789 reqsk_put(req); 789 reqsk_put(req);
790} 790}
791 791
792void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req, 792struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
793 struct sock *child) 793 struct request_sock *req,
794 struct sock *child)
794{ 795{
795 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; 796 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
796 797
797 spin_lock(&queue->rskq_lock); 798 spin_lock(&queue->rskq_lock);
798 if (unlikely(sk->sk_state != TCP_LISTEN)) { 799 if (unlikely(sk->sk_state != TCP_LISTEN)) {
799 inet_child_forget(sk, req, child); 800 inet_child_forget(sk, req, child);
801 child = NULL;
800 } else { 802 } else {
801 req->sk = child; 803 req->sk = child;
802 req->dl_next = NULL; 804 req->dl_next = NULL;
@@ -808,6 +810,7 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
808 sk_acceptq_added(sk); 810 sk_acceptq_added(sk);
809 } 811 }
810 spin_unlock(&queue->rskq_lock); 812 spin_unlock(&queue->rskq_lock);
813 return child;
811} 814}
812EXPORT_SYMBOL(inet_csk_reqsk_queue_add); 815EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
813 816
@@ -817,11 +820,8 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
817 if (own_req) { 820 if (own_req) {
818 inet_csk_reqsk_queue_drop(sk, req); 821 inet_csk_reqsk_queue_drop(sk, req);
819 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); 822 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
820 inet_csk_reqsk_queue_add(sk, req, child); 823 if (inet_csk_reqsk_queue_add(sk, req, child))
821 /* Warning: caller must not call reqsk_put(req); 824 return child;
822 * child stole last reference on it.
823 */
824 return child;
825 } 825 }
826 /* Too bad, another child took ownership of the request, undo. */ 826 /* Too bad, another child took ownership of the request, undo. */
827 bh_unlock_sock(child); 827 bh_unlock_sock(child);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 56fdf4e0dce4..41ba68de46d8 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1054,8 +1054,9 @@ static const struct net_device_ops gre_tap_netdev_ops = {
1054static void ipgre_tap_setup(struct net_device *dev) 1054static void ipgre_tap_setup(struct net_device *dev)
1055{ 1055{
1056 ether_setup(dev); 1056 ether_setup(dev);
1057 dev->netdev_ops = &gre_tap_netdev_ops; 1057 dev->netdev_ops = &gre_tap_netdev_ops;
1058 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1058 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1059 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1059 ip_tunnel_setup(dev, gre_tap_net_id); 1060 ip_tunnel_setup(dev, gre_tap_net_id);
1060} 1061}
1061 1062
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5f73a7c03e27..a50124260f5a 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -249,6 +249,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
249 switch (cmsg->cmsg_type) { 249 switch (cmsg->cmsg_type) {
250 case IP_RETOPTS: 250 case IP_RETOPTS:
251 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); 251 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
252
253 /* Our caller is responsible for freeing ipc->opt */
252 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), 254 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
253 err < 40 ? err : 40); 255 err < 40 ? err : 40);
254 if (err) 256 if (err)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index c117b21b937d..d3a27165f9cc 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -746,8 +746,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
746 746
747 if (msg->msg_controllen) { 747 if (msg->msg_controllen) {
748 err = ip_cmsg_send(sock_net(sk), msg, &ipc, false); 748 err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
749 if (err) 749 if (unlikely(err)) {
750 kfree(ipc.opt);
750 return err; 751 return err;
752 }
751 if (ipc.opt) 753 if (ipc.opt)
752 free = 1; 754 free = 1;
753 } 755 }
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bc35f1842512..7113bae4e6a0 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -547,8 +547,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
547 547
548 if (msg->msg_controllen) { 548 if (msg->msg_controllen) {
549 err = ip_cmsg_send(net, msg, &ipc, false); 549 err = ip_cmsg_send(net, msg, &ipc, false);
550 if (err) 550 if (unlikely(err)) {
551 kfree(ipc.opt);
551 goto out; 552 goto out;
553 }
552 if (ipc.opt) 554 if (ipc.opt)
553 free = 1; 555 free = 1;
554 } 556 }
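The ping, raw and (below) udp call sites now free ipc.opt when ip_cmsg_send() fails, because the helper can store a freshly allocated options blob in ipc->opt before a later control message makes it return an error. A small userspace sketch of that ownership rule; get_options() and its behaviour are invented for the example.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* May allocate into *opt and still fail afterwards; the caller owns *opt. */
static int get_options(char **opt, const char *src, size_t len)
{
        *opt = malloc(len + 1);         /* allocation happens first ... */
        if (!*opt)
                return -ENOMEM;
        memcpy(*opt, src, len + 1);
        if (len > 40)                   /* ... but validation can still fail */
                return -EINVAL;
        return 0;
}

int main(void)
{
        const char *src = "a deliberately over-long option string that fails validation";
        char *opt = NULL;
        int err = get_options(&opt, src, strlen(src));

        if (err) {
                free(opt);              /* mirrors the kfree(ipc.opt) added above */
                return 1;
        }
        free(opt);
        return 0;
}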
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 85f184e429c6..02c62299d717 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
129static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; 129static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
130static int ip_rt_min_advmss __read_mostly = 256; 130static int ip_rt_min_advmss __read_mostly = 256;
131 131
132static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
132/* 133/*
133 * Interface to generic destination cache. 134 * Interface to generic destination cache.
134 */ 135 */
@@ -755,7 +756,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
755 struct fib_nh *nh = &FIB_RES_NH(res); 756 struct fib_nh *nh = &FIB_RES_NH(res);
756 757
757 update_or_create_fnhe(nh, fl4->daddr, new_gw, 758 update_or_create_fnhe(nh, fl4->daddr, new_gw,
758 0, 0); 759 0, jiffies + ip_rt_gc_timeout);
759 } 760 }
760 if (kill_route) 761 if (kill_route)
761 rt->dst.obsolete = DST_OBSOLETE_KILL; 762 rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -1556,6 +1557,36 @@ static void ip_handle_martian_source(struct net_device *dev,
1556#endif 1557#endif
1557} 1558}
1558 1559
1560static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
1561{
1562 struct fnhe_hash_bucket *hash;
1563 struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1564 u32 hval = fnhe_hashfun(daddr);
1565
1566 spin_lock_bh(&fnhe_lock);
1567
1568 hash = rcu_dereference_protected(nh->nh_exceptions,
1569 lockdep_is_held(&fnhe_lock));
1570 hash += hval;
1571
1572 fnhe_p = &hash->chain;
1573 fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1574 while (fnhe) {
1575 if (fnhe->fnhe_daddr == daddr) {
1576 rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1577 fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1578 fnhe_flush_routes(fnhe);
1579 kfree_rcu(fnhe, rcu);
1580 break;
1581 }
1582 fnhe_p = &fnhe->fnhe_next;
1583 fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1584 lockdep_is_held(&fnhe_lock));
1585 }
1586
1587 spin_unlock_bh(&fnhe_lock);
1588}
1589
1559/* called in rcu_read_lock() section */ 1590/* called in rcu_read_lock() section */
1560static int __mkroute_input(struct sk_buff *skb, 1591static int __mkroute_input(struct sk_buff *skb,
1561 const struct fib_result *res, 1592 const struct fib_result *res,
@@ -1609,11 +1640,20 @@ static int __mkroute_input(struct sk_buff *skb,
1609 1640
1610 fnhe = find_exception(&FIB_RES_NH(*res), daddr); 1641 fnhe = find_exception(&FIB_RES_NH(*res), daddr);
1611 if (do_cache) { 1642 if (do_cache) {
1612 if (fnhe) 1643 if (fnhe) {
1613 rth = rcu_dereference(fnhe->fnhe_rth_input); 1644 rth = rcu_dereference(fnhe->fnhe_rth_input);
1614 else 1645 if (rth && rth->dst.expires &&
1615 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); 1646 time_after(jiffies, rth->dst.expires)) {
1647 ip_del_fnhe(&FIB_RES_NH(*res), daddr);
1648 fnhe = NULL;
1649 } else {
1650 goto rt_cache;
1651 }
1652 }
1653
1654 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1616 1655
1656rt_cache:
1617 if (rt_cache_valid(rth)) { 1657 if (rt_cache_valid(rth)) {
1618 skb_dst_set_noref(skb, &rth->dst); 1658 skb_dst_set_noref(skb, &rth->dst);
1619 goto out; 1659 goto out;
@@ -2014,19 +2054,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2014 struct fib_nh *nh = &FIB_RES_NH(*res); 2054 struct fib_nh *nh = &FIB_RES_NH(*res);
2015 2055
2016 fnhe = find_exception(nh, fl4->daddr); 2056 fnhe = find_exception(nh, fl4->daddr);
2017 if (fnhe) 2057 if (fnhe) {
2018 prth = &fnhe->fnhe_rth_output; 2058 prth = &fnhe->fnhe_rth_output;
2019 else { 2059 rth = rcu_dereference(*prth);
2020 if (unlikely(fl4->flowi4_flags & 2060 if (rth && rth->dst.expires &&
2021 FLOWI_FLAG_KNOWN_NH && 2061 time_after(jiffies, rth->dst.expires)) {
2022 !(nh->nh_gw && 2062 ip_del_fnhe(nh, fl4->daddr);
2023 nh->nh_scope == RT_SCOPE_LINK))) { 2063 fnhe = NULL;
2024 do_cache = false; 2064 } else {
2025 goto add; 2065 goto rt_cache;
2026 } 2066 }
2027 prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
2028 } 2067 }
2068
2069 if (unlikely(fl4->flowi4_flags &
2070 FLOWI_FLAG_KNOWN_NH &&
2071 !(nh->nh_gw &&
2072 nh->nh_scope == RT_SCOPE_LINK))) {
2073 do_cache = false;
2074 goto add;
2075 }
2076 prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
2029 rth = rcu_dereference(*prth); 2077 rth = rcu_dereference(*prth);
2078
2079rt_cache:
2030 if (rt_cache_valid(rth)) { 2080 if (rt_cache_valid(rth)) {
2031 dst_hold(&rth->dst); 2081 dst_hold(&rth->dst);
2032 return rth; 2082 return rth;
@@ -2569,7 +2619,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
2569} 2619}
2570 2620
2571#ifdef CONFIG_SYSCTL 2621#ifdef CONFIG_SYSCTL
2572static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
2573static int ip_rt_gc_interval __read_mostly = 60 * HZ; 2622static int ip_rt_gc_interval __read_mostly = 60 * HZ;
2574static int ip_rt_gc_min_interval __read_mostly = HZ / 2; 2623static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
2575static int ip_rt_gc_elasticity __read_mostly = 8; 2624static int ip_rt_gc_elasticity __read_mostly = 8;
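The route.c changes above make cached next-hop exceptions honour dst.expires: once the stamp is in the past, the entry is removed via the new ip_del_fnhe() instead of being handed out again, and ip_rt_gc_timeout moves out of the CONFIG_SYSCTL block so it is available to the rest of the file. A minimal userspace sketch of the expiry test; time_after() mirrors the kernel's wrap-safe jiffies comparison, while the struct and helper are invented.

#include <stdio.h>

#define time_after(a, b)        ((long)((b) - (a)) < 0)

struct cached_route {
        unsigned long expires;          /* 0 means "never expires" */
};

static int cache_entry_usable(const struct cached_route *rt, unsigned long now)
{
        if (rt->expires && time_after(now, rt->expires))
                return 0;               /* expired: delete the exception */
        return 1;
}

int main(void)
{
        struct cached_route rt = { .expires = 1000 };

        printf("at 900: %d, at 1100: %d\n",
               cache_entry_usable(&rt, 900), cache_entry_usable(&rt, 1100));
        return 0;
}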
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0c36ef4a3f86..483ffdf5aa4d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2950,7 +2950,7 @@ static void __tcp_alloc_md5sig_pool(void)
2950 struct crypto_hash *hash; 2950 struct crypto_hash *hash;
2951 2951
2952 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); 2952 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2953 if (IS_ERR_OR_NULL(hash)) 2953 if (IS_ERR(hash))
2954 return; 2954 return;
2955 per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash; 2955 per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
2956 } 2956 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1c2a73406261..3b2c8e90a475 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2896,7 +2896,10 @@ static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
2896{ 2896{
2897 const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ; 2897 const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ;
2898 struct rtt_meas *m = tcp_sk(sk)->rtt_min; 2898 struct rtt_meas *m = tcp_sk(sk)->rtt_min;
2899 struct rtt_meas rttm = { .rtt = (rtt_us ? : 1), .ts = now }; 2899 struct rtt_meas rttm = {
2900 .rtt = likely(rtt_us) ? rtt_us : jiffies_to_usecs(1),
2901 .ts = now,
2902 };
2900 u32 elapsed; 2903 u32 elapsed;
2901 2904
2902 /* Check if the new measurement updates the 1st, 2nd, or 3rd choices */ 2905 /* Check if the new measurement updates the 1st, 2nd, or 3rd choices */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7f6ff037adaf..487ac67059e2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1597,28 +1597,30 @@ process:
1597 1597
1598 if (sk->sk_state == TCP_NEW_SYN_RECV) { 1598 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1599 struct request_sock *req = inet_reqsk(sk); 1599 struct request_sock *req = inet_reqsk(sk);
1600 struct sock *nsk = NULL; 1600 struct sock *nsk;
1601 1601
1602 sk = req->rsk_listener; 1602 sk = req->rsk_listener;
1603 if (tcp_v4_inbound_md5_hash(sk, skb)) 1603 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1604 goto discard_and_relse; 1604 reqsk_put(req);
1605 if (likely(sk->sk_state == TCP_LISTEN)) { 1605 goto discard_it;
1606 nsk = tcp_check_req(sk, skb, req, false); 1606 }
1607 } else { 1607 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1608 inet_csk_reqsk_queue_drop_and_put(sk, req); 1608 inet_csk_reqsk_queue_drop_and_put(sk, req);
1609 goto lookup; 1609 goto lookup;
1610 } 1610 }
1611 sock_hold(sk);
1612 nsk = tcp_check_req(sk, skb, req, false);
1611 if (!nsk) { 1613 if (!nsk) {
1612 reqsk_put(req); 1614 reqsk_put(req);
1613 goto discard_it; 1615 goto discard_and_relse;
1614 } 1616 }
1615 if (nsk == sk) { 1617 if (nsk == sk) {
1616 sock_hold(sk);
1617 reqsk_put(req); 1618 reqsk_put(req);
1618 } else if (tcp_child_process(sk, nsk, skb)) { 1619 } else if (tcp_child_process(sk, nsk, skb)) {
1619 tcp_v4_send_reset(nsk, skb); 1620 tcp_v4_send_reset(nsk, skb);
1620 goto discard_it; 1621 goto discard_and_relse;
1621 } else { 1622 } else {
1623 sock_put(sk);
1622 return 0; 1624 return 0;
1623 } 1625 }
1624 } 1626 }
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index be0b21852b13..95d2f198017e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1048,8 +1048,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1048 if (msg->msg_controllen) { 1048 if (msg->msg_controllen) {
1049 err = ip_cmsg_send(sock_net(sk), msg, &ipc, 1049 err = ip_cmsg_send(sock_net(sk), msg, &ipc,
1050 sk->sk_family == AF_INET6); 1050 sk->sk_family == AF_INET6);
1051 if (err) 1051 if (unlikely(err)) {
1052 kfree(ipc.opt);
1052 return err; 1053 return err;
1054 }
1053 if (ipc.opt) 1055 if (ipc.opt)
1054 free = 1; 1056 free = 1;
1055 connected = 0; 1057 connected = 0;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 9efd9ffdc34c..bdd7eac4307a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -583,7 +583,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
583 if (err < 0) 583 if (err < 0)
584 goto errout; 584 goto errout;
585 585
586 err = EINVAL; 586 err = -EINVAL;
587 if (!tb[NETCONFA_IFINDEX]) 587 if (!tb[NETCONFA_IFINDEX])
588 goto errout; 588 goto errout;
589 589
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index f37f18b6b40c..a69aad1e29d1 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1512,6 +1512,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
1512 dev->destructor = ip6gre_dev_free; 1512 dev->destructor = ip6gre_dev_free;
1513 1513
1514 dev->features |= NETIF_F_NETNS_LOCAL; 1514 dev->features |= NETIF_F_NETNS_LOCAL;
1515 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1515} 1516}
1516 1517
1517static int ip6gre_newlink(struct net *src_net, struct net_device *dev, 1518static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
index 31ba7ca19757..051b6a6bfff6 100644
--- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
@@ -21,6 +21,10 @@
21#include <net/ipv6.h> 21#include <net/ipv6.h>
22#include <net/netfilter/ipv6/nf_nat_masquerade.h> 22#include <net/netfilter/ipv6/nf_nat_masquerade.h>
23 23
24#define MAX_WORK_COUNT 16
25
26static atomic_t v6_worker_count;
27
24unsigned int 28unsigned int
25nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, 29nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
26 const struct net_device *out) 30 const struct net_device *out)
@@ -78,14 +82,78 @@ static struct notifier_block masq_dev_notifier = {
78 .notifier_call = masq_device_event, 82 .notifier_call = masq_device_event,
79}; 83};
80 84
85struct masq_dev_work {
86 struct work_struct work;
87 struct net *net;
88 int ifindex;
89};
90
91static void iterate_cleanup_work(struct work_struct *work)
92{
93 struct masq_dev_work *w;
94 long index;
95
96 w = container_of(work, struct masq_dev_work, work);
97
98 index = w->ifindex;
99 nf_ct_iterate_cleanup(w->net, device_cmp, (void *)index, 0, 0);
100
101 put_net(w->net);
102 kfree(w);
103 atomic_dec(&v6_worker_count);
104 module_put(THIS_MODULE);
105}
106
107/* ipv6 inet notifier is an atomic notifier, i.e. we cannot
108 * schedule.
109 *
110 * Unfortunately, nf_ct_iterate_cleanup can run for a long
111 * time if there are lots of conntracks and the system
112 * handles high softirq load, so it frequently calls cond_resched
113 * while iterating the conntrack table.
114 *
115 * So we defer nf_ct_iterate_cleanup walk to the system workqueue.
116 *
117 * As we can have 'a lot' of inet_events (depending on amount
118 * of ipv6 addresses being deleted), we also need to add an upper
119 * limit to the number of queued work items.
120 */
81static int masq_inet_event(struct notifier_block *this, 121static int masq_inet_event(struct notifier_block *this,
82 unsigned long event, void *ptr) 122 unsigned long event, void *ptr)
83{ 123{
84 struct inet6_ifaddr *ifa = ptr; 124 struct inet6_ifaddr *ifa = ptr;
85 struct netdev_notifier_info info; 125 const struct net_device *dev;
126 struct masq_dev_work *w;
127 struct net *net;
128
129 if (event != NETDEV_DOWN ||
130 atomic_read(&v6_worker_count) >= MAX_WORK_COUNT)
131 return NOTIFY_DONE;
132
133 dev = ifa->idev->dev;
134 net = maybe_get_net(dev_net(dev));
135 if (!net)
136 return NOTIFY_DONE;
86 137
87 netdev_notifier_info_init(&info, ifa->idev->dev); 138 if (!try_module_get(THIS_MODULE))
88 return masq_device_event(this, event, &info); 139 goto err_module;
140
141 w = kmalloc(sizeof(*w), GFP_ATOMIC);
142 if (w) {
143 atomic_inc(&v6_worker_count);
144
145 INIT_WORK(&w->work, iterate_cleanup_work);
146 w->ifindex = dev->ifindex;
147 w->net = net;
148 schedule_work(&w->work);
149
150 return NOTIFY_DONE;
151 }
152
153 module_put(THIS_MODULE);
154 err_module:
155 put_net(net);
156 return NOTIFY_DONE;
89} 157}
90 158
91static struct notifier_block masq_inet_notifier = { 159static struct notifier_block masq_inet_notifier = {
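As the comment in the hunk explains, the inet6 notifier runs in atomic context, so the conntrack walk is pushed to a work item and the backlog is capped via v6_worker_count. A single-threaded userspace stand-in for that bounded-deferral pattern; all names are invented, and a real workqueue would run the queued items on another thread.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_WORK_COUNT 16

static atomic_int worker_count;

struct cleanup_work {
        int ifindex;
        struct cleanup_work *next;
};

static struct cleanup_work *queue;

/* Runs later, in a context that may sleep (the workqueue in the kernel). */
static void run_cleanup(struct cleanup_work *w)
{
        printf("cleanup for ifindex %d\n", w->ifindex);
        free(w);
        atomic_fetch_sub(&worker_count, 1);
}

/* Called from the notifier, which must not sleep: only queue, never walk. */
static int defer_cleanup(int ifindex)
{
        struct cleanup_work *w;

        if (atomic_load(&worker_count) >= MAX_WORK_COUNT)
                return 0;               /* cap the backlog, drop the event */

        w = malloc(sizeof(*w));
        if (!w)
                return -1;

        atomic_fetch_add(&worker_count, 1);
        w->ifindex = ifindex;
        w->next = queue;
        queue = w;
        return 0;
}

int main(void)
{
        defer_cleanup(2);
        defer_cleanup(3);

        while (queue) {                 /* stand-in for the worker thread */
                struct cleanup_work *w = queue;

                queue = w->next;
                run_cleanup(w);
        }
        return 0;
}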
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1a5a70fb8551..5c8c84273028 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1387,7 +1387,7 @@ process:
1387 1387
1388 if (sk->sk_state == TCP_NEW_SYN_RECV) { 1388 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1389 struct request_sock *req = inet_reqsk(sk); 1389 struct request_sock *req = inet_reqsk(sk);
1390 struct sock *nsk = NULL; 1390 struct sock *nsk;
1391 1391
1392 sk = req->rsk_listener; 1392 sk = req->rsk_listener;
1393 tcp_v6_fill_cb(skb, hdr, th); 1393 tcp_v6_fill_cb(skb, hdr, th);
@@ -1395,24 +1395,24 @@ process:
1395 reqsk_put(req); 1395 reqsk_put(req);
1396 goto discard_it; 1396 goto discard_it;
1397 } 1397 }
1398 if (likely(sk->sk_state == TCP_LISTEN)) { 1398 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1399 nsk = tcp_check_req(sk, skb, req, false);
1400 } else {
1401 inet_csk_reqsk_queue_drop_and_put(sk, req); 1399 inet_csk_reqsk_queue_drop_and_put(sk, req);
1402 goto lookup; 1400 goto lookup;
1403 } 1401 }
1402 sock_hold(sk);
1403 nsk = tcp_check_req(sk, skb, req, false);
1404 if (!nsk) { 1404 if (!nsk) {
1405 reqsk_put(req); 1405 reqsk_put(req);
1406 goto discard_it; 1406 goto discard_and_relse;
1407 } 1407 }
1408 if (nsk == sk) { 1408 if (nsk == sk) {
1409 sock_hold(sk);
1410 reqsk_put(req); 1409 reqsk_put(req);
1411 tcp_v6_restore_cb(skb); 1410 tcp_v6_restore_cb(skb);
1412 } else if (tcp_child_process(sk, nsk, skb)) { 1411 } else if (tcp_child_process(sk, nsk, skb)) {
1413 tcp_v6_send_reset(nsk, skb); 1412 tcp_v6_send_reset(nsk, skb);
1414 goto discard_it; 1413 goto discard_and_relse;
1415 } else { 1414 } else {
1415 sock_put(sk);
1416 return 0; 1416 return 0;
1417 } 1417 }
1418 } 1418 }
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index f93c5be612a7..2caaa84ce92d 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -124,8 +124,13 @@ static int l2tp_tunnel_notify(struct genl_family *family,
124 ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq, 124 ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
125 NLM_F_ACK, tunnel, cmd); 125 NLM_F_ACK, tunnel, cmd);
126 126
127 if (ret >= 0) 127 if (ret >= 0) {
128 return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC); 128 ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
129 /* We don't care if no one is listening */
130 if (ret == -ESRCH)
131 ret = 0;
132 return ret;
133 }
129 134
130 nlmsg_free(msg); 135 nlmsg_free(msg);
131 136
@@ -147,8 +152,13 @@ static int l2tp_session_notify(struct genl_family *family,
147 ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq, 152 ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
148 NLM_F_ACK, session, cmd); 153 NLM_F_ACK, session, cmd);
149 154
150 if (ret >= 0) 155 if (ret >= 0) {
151 return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC); 156 ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
157 /* We don't care if no one is listening */
158 if (ret == -ESRCH)
159 ret = 0;
160 return ret;
161 }
152 162
153 nlmsg_free(msg); 163 nlmsg_free(msg);
154 164
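Both notification helpers above now swallow -ESRCH from genlmsg_multicast_allns(), since that return only means nobody joined the multicast group. A tiny sketch of the rule, with a made-up notify_send() standing in for the multicast call:

#include <errno.h>

static int notify_send(void)
{
        return -ESRCH;                  /* pretend nobody has subscribed */
}

static int notify(void)
{
        int ret = notify_send();

        if (ret == -ESRCH)              /* we don't care if no one listens */
                ret = 0;
        return ret;
}

int main(void)
{
        return notify();
}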
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 8c067e6663a1..95e757c377f9 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -891,7 +891,7 @@ config NETFILTER_XT_TARGET_TEE
891 depends on IPV6 || IPV6=n 891 depends on IPV6 || IPV6=n
892 depends on !NF_CONNTRACK || NF_CONNTRACK 892 depends on !NF_CONNTRACK || NF_CONNTRACK
893 select NF_DUP_IPV4 893 select NF_DUP_IPV4
894 select NF_DUP_IPV6 if IP6_NF_IPTABLES != n 894 select NF_DUP_IPV6 if IPV6
895 ---help--- 895 ---help---
896 This option adds a "TEE" target with which a packet can be cloned and 896 This option adds a "TEE" target with which a packet can be cloned and
897 this clone be rerouted to another nexthop. 897 this clone be rerouted to another nexthop.
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 58882de06bd7..f60b4fdeeb8c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1412,6 +1412,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1412 } 1412 }
1413 spin_unlock(lockp); 1413 spin_unlock(lockp);
1414 local_bh_enable(); 1414 local_bh_enable();
1415 cond_resched();
1415 } 1416 }
1416 1417
1417 for_each_possible_cpu(cpu) { 1418 for_each_possible_cpu(cpu) {
@@ -1424,6 +1425,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1424 set_bit(IPS_DYING_BIT, &ct->status); 1425 set_bit(IPS_DYING_BIT, &ct->status);
1425 } 1426 }
1426 spin_unlock_bh(&pcpu->lock); 1427 spin_unlock_bh(&pcpu->lock);
1428 cond_resched();
1427 } 1429 }
1428 return NULL; 1430 return NULL;
1429found: 1431found:
@@ -1440,6 +1442,8 @@ void nf_ct_iterate_cleanup(struct net *net,
1440 struct nf_conn *ct; 1442 struct nf_conn *ct;
1441 unsigned int bucket = 0; 1443 unsigned int bucket = 0;
1442 1444
1445 might_sleep();
1446
1443 while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { 1447 while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
1444 /* Time to push up daises... */ 1448 /* Time to push up daises... */
1445 if (del_timer(&ct->timeout)) 1449 if (del_timer(&ct->timeout))
@@ -1448,6 +1452,7 @@ void nf_ct_iterate_cleanup(struct net *net,
1448 /* ... else the timer will get him soon. */ 1452 /* ... else the timer will get him soon. */
1449 1453
1450 nf_ct_put(ct); 1454 nf_ct_put(ct);
1455 cond_resched();
1451 } 1456 }
1452} 1457}
1453EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); 1458EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index a7ba23353dab..857ae89633af 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -311,14 +311,14 @@ replay:
311#endif 311#endif
312 { 312 {
313 nfnl_unlock(subsys_id); 313 nfnl_unlock(subsys_id);
314 netlink_ack(skb, nlh, -EOPNOTSUPP); 314 netlink_ack(oskb, nlh, -EOPNOTSUPP);
315 return kfree_skb(skb); 315 return kfree_skb(skb);
316 } 316 }
317 } 317 }
318 318
319 if (!ss->commit || !ss->abort) { 319 if (!ss->commit || !ss->abort) {
320 nfnl_unlock(subsys_id); 320 nfnl_unlock(subsys_id);
321 netlink_ack(skb, nlh, -EOPNOTSUPP); 321 netlink_ack(oskb, nlh, -EOPNOTSUPP);
322 return kfree_skb(skb); 322 return kfree_skb(skb);
323 } 323 }
324 324
@@ -328,10 +328,12 @@ replay:
328 nlh = nlmsg_hdr(skb); 328 nlh = nlmsg_hdr(skb);
329 err = 0; 329 err = 0;
330 330
331 if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) || 331 if (nlh->nlmsg_len < NLMSG_HDRLEN ||
332 skb->len < nlh->nlmsg_len) { 332 skb->len < nlh->nlmsg_len ||
333 err = -EINVAL; 333 nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
334 goto ack; 334 nfnl_err_reset(&err_list);
335 status |= NFNL_BATCH_FAILURE;
336 goto done;
335 } 337 }
336 338
337 /* Only requests are handled by the kernel */ 339 /* Only requests are handled by the kernel */
@@ -406,7 +408,7 @@ ack:
406 * pointing to the batch header. 408 * pointing to the batch header.
407 */ 409 */
408 nfnl_err_reset(&err_list); 410 nfnl_err_reset(&err_list);
409 netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM); 411 netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM);
410 status |= NFNL_BATCH_FAILURE; 412 status |= NFNL_BATCH_FAILURE;
411 goto done; 413 goto done;
412 } 414 }
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 94837d236ab0..2671b9deb103 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -312,7 +312,7 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
312 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) 312 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
313 untimeout(h, timeout); 313 untimeout(h, timeout);
314 } 314 }
315 nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); 315 spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
316 } 316 }
317 local_bh_enable(); 317 local_bh_enable();
318} 318}
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index c7808fc19719..c9743f78f219 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -100,7 +100,7 @@ static int nft_counter_init(const struct nft_ctx *ctx,
100 100
101 cpu_stats = netdev_alloc_pcpu_stats(struct nft_counter_percpu); 101 cpu_stats = netdev_alloc_pcpu_stats(struct nft_counter_percpu);
102 if (cpu_stats == NULL) 102 if (cpu_stats == NULL)
103 return ENOMEM; 103 return -ENOMEM;
104 104
105 preempt_disable(); 105 preempt_disable();
106 this_cpu = this_cpu_ptr(cpu_stats); 106 this_cpu = this_cpu_ptr(cpu_stats);
@@ -138,7 +138,7 @@ static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src)
138 cpu_stats = __netdev_alloc_pcpu_stats(struct nft_counter_percpu, 138 cpu_stats = __netdev_alloc_pcpu_stats(struct nft_counter_percpu,
139 GFP_ATOMIC); 139 GFP_ATOMIC);
140 if (cpu_stats == NULL) 140 if (cpu_stats == NULL)
141 return ENOMEM; 141 return -ENOMEM;
142 142
143 preempt_disable(); 143 preempt_disable();
144 this_cpu = this_cpu_ptr(cpu_stats); 144 this_cpu = this_cpu_ptr(cpu_stats);
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 3eff7b67cdf2..6e57a3966dc5 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -38,7 +38,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
38 return XT_CONTINUE; 38 return XT_CONTINUE;
39} 39}
40 40
41#if IS_ENABLED(CONFIG_NF_DUP_IPV6) 41#if IS_ENABLED(CONFIG_IPV6)
42static unsigned int 42static unsigned int
43tee_tg6(struct sk_buff *skb, const struct xt_action_param *par) 43tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
44{ 44{
@@ -131,7 +131,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
131 .destroy = tee_tg_destroy, 131 .destroy = tee_tg_destroy,
132 .me = THIS_MODULE, 132 .me = THIS_MODULE,
133 }, 133 },
134#if IS_ENABLED(CONFIG_NF_DUP_IPV6) 134#if IS_ENABLED(CONFIG_IPV6)
135 { 135 {
136 .name = "TEE", 136 .name = "TEE",
137 .revision = 1, 137 .revision = 1,
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index de9cb19efb6a..5eb7694348b5 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -90,7 +90,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
90 int err; 90 int err;
91 struct vxlan_config conf = { 91 struct vxlan_config conf = {
92 .no_share = true, 92 .no_share = true,
93 .flags = VXLAN_F_COLLECT_METADATA, 93 .flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX,
94 /* Don't restrict the packets that can be sent by MTU */ 94 /* Don't restrict the packets that can be sent by MTU */
95 .mtu = IP_MAX_MTU, 95 .mtu = IP_MAX_MTU,
96 }; 96 };
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b5c2cf2aa6d4..af1acf009866 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1852,6 +1852,7 @@ reset:
1852 } 1852 }
1853 1853
1854 tp = old_tp; 1854 tp = old_tp;
1855 protocol = tc_skb_protocol(skb);
1855 goto reclassify; 1856 goto reclassify;
1856#endif 1857#endif
1857} 1858}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index ab0d538a74ed..1099e99a53c4 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -60,6 +60,8 @@
60#include <net/inet_common.h> 60#include <net/inet_common.h>
61#include <net/inet_ecn.h> 61#include <net/inet_ecn.h>
62 62
63#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
64
63/* Global data structures. */ 65/* Global data structures. */
64struct sctp_globals sctp_globals __read_mostly; 66struct sctp_globals sctp_globals __read_mostly;
65 67
@@ -1355,6 +1357,8 @@ static __init int sctp_init(void)
1355 unsigned long limit; 1357 unsigned long limit;
1356 int max_share; 1358 int max_share;
1357 int order; 1359 int order;
1360 int num_entries;
1361 int max_entry_order;
1358 1362
1359 sock_skb_cb_check_size(sizeof(struct sctp_ulpevent)); 1363 sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
1360 1364
@@ -1407,14 +1411,24 @@ static __init int sctp_init(void)
1407 1411
1408 /* Size and allocate the association hash table. 1412 /* Size and allocate the association hash table.
1409 * The methodology is similar to that of the tcp hash tables. 1413 * The methodology is similar to that of the tcp hash tables.
1414 * Though not identical. Start by getting a goal size
1410 */ 1415 */
1411 if (totalram_pages >= (128 * 1024)) 1416 if (totalram_pages >= (128 * 1024))
1412 goal = totalram_pages >> (22 - PAGE_SHIFT); 1417 goal = totalram_pages >> (22 - PAGE_SHIFT);
1413 else 1418 else
1414 goal = totalram_pages >> (24 - PAGE_SHIFT); 1419 goal = totalram_pages >> (24 - PAGE_SHIFT);
1415 1420
1416 for (order = 0; (1UL << order) < goal; order++) 1421 /* Then compute the page order for said goal */
1417 ; 1422 order = get_order(goal);
1423
1424 /* Now compute the required page order for the maximum sized table we
1425 * want to create
1426 */
1427 max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
1428 sizeof(struct sctp_bind_hashbucket));
1429
1430 /* Limit the page order by that maximum hash table size */
1431 order = min(order, max_entry_order);
1418 1432
1419 /* Allocate and initialize the endpoint hash table. */ 1433 /* Allocate and initialize the endpoint hash table. */
1420 sctp_ep_hashsize = 64; 1434 sctp_ep_hashsize = 64;
@@ -1430,20 +1444,35 @@ static __init int sctp_init(void)
1430 INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain); 1444 INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
1431 } 1445 }
1432 1446
1433 /* Allocate and initialize the SCTP port hash table. */ 1447 /* Allocate and initialize the SCTP port hash table.
1448 * Note that order is initalized to start at the max sized
1449 * table we want to support. If we can't get that many pages
1450 * reduce the order and try again
1451 */
1434 do { 1452 do {
1435 sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
1436 sizeof(struct sctp_bind_hashbucket);
1437 if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
1438 continue;
1439 sctp_port_hashtable = (struct sctp_bind_hashbucket *) 1453 sctp_port_hashtable = (struct sctp_bind_hashbucket *)
1440 __get_free_pages(GFP_KERNEL | __GFP_NOWARN, order); 1454 __get_free_pages(GFP_KERNEL | __GFP_NOWARN, order);
1441 } while (!sctp_port_hashtable && --order > 0); 1455 } while (!sctp_port_hashtable && --order > 0);
1456
1442 if (!sctp_port_hashtable) { 1457 if (!sctp_port_hashtable) {
1443 pr_err("Failed bind hash alloc\n"); 1458 pr_err("Failed bind hash alloc\n");
1444 status = -ENOMEM; 1459 status = -ENOMEM;
1445 goto err_bhash_alloc; 1460 goto err_bhash_alloc;
1446 } 1461 }
1462
1463 /* Now compute the number of entries that will fit in the
1464 * port hash space we allocated
1465 */
1466 num_entries = (1UL << order) * PAGE_SIZE /
1467 sizeof(struct sctp_bind_hashbucket);
1468
1469 /* And finish by rounding it down to the nearest power of two
1470 * this wastes some memory of course, but its needed because
1471 * the hash function operates based on the assumption that
1472 * that the number of entries is a power of two
1473 */
1474 sctp_port_hashsize = rounddown_pow_of_two(num_entries);
1475
1447 for (i = 0; i < sctp_port_hashsize; i++) { 1476 for (i = 0; i < sctp_port_hashsize; i++) {
1448 spin_lock_init(&sctp_port_hashtable[i].lock); 1477 spin_lock_init(&sctp_port_hashtable[i].lock);
1449 INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain); 1478 INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
@@ -1452,7 +1481,8 @@ static __init int sctp_init(void)
1452 if (sctp_transport_hashtable_init()) 1481 if (sctp_transport_hashtable_init())
1453 goto err_thash_alloc; 1482 goto err_thash_alloc;
1454 1483
1455 pr_info("Hash tables configured (bind %d)\n", sctp_port_hashsize); 1484 pr_info("Hash tables configured (bind %d/%d)\n", sctp_port_hashsize,
1485 num_entries);
1456 1486
1457 sctp_sysctl_register(); 1487 sctp_sysctl_register();
1458 1488
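The sctp_init() rework above starts from a goal size, clamps the allocation order at MAX_SCTP_PORT_HASH_ENTRIES, retries smaller orders if the pages cannot be allocated, and finally rounds the entries that actually fit down to a power of two so the hash can be masked. A hypothetical userspace sketch of that sizing arithmetic; get_order() and rounddown_pow_of_two() are re-implemented here only for the demo, and the bucket struct is a stand-in for sctp_bind_hashbucket.

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define MAX_ENTRIES     (64 * 1024)

struct bind_bucket {
        void *chain;
        unsigned long lock;
};

/* Smallest order such that PAGE_SIZE << order covers size bytes. */
static int get_order(unsigned long size)
{
        int order = 0;

        while ((PAGE_SIZE << order) < size)
                order++;
        return order;
}

static unsigned long rounddown_pow_of_two(unsigned long n)
{
        unsigned long p = 1;

        while (p * 2 <= n)
                p *= 2;
        return p;
}

int main(void)
{
        unsigned long goal = 100000UL * sizeof(struct bind_bucket);
        int order = get_order(goal);
        int max_order = get_order(MAX_ENTRIES * sizeof(struct bind_bucket));
        unsigned long entries, hashsize;

        if (order > max_order)          /* the clamp added by the patch */
                order = max_order;

        entries = (PAGE_SIZE << order) / sizeof(struct bind_bucket);
        hashsize = rounddown_pow_of_two(entries);
        printf("order %d, %lu entries fit, hash size %lu\n",
               order, entries, hashsize);
        return 0;
}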
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 0c2944fb9ae0..347cdc99ed09 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1973,8 +1973,10 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
1973 1973
1974 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 1974 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1975 NLM_F_MULTI, TIPC_NL_LINK_GET); 1975 NLM_F_MULTI, TIPC_NL_LINK_GET);
1976 if (!hdr) 1976 if (!hdr) {
1977 tipc_bcast_unlock(net);
1977 return -EMSGSIZE; 1978 return -EMSGSIZE;
1979 }
1978 1980
1979 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK); 1981 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1980 if (!attrs) 1982 if (!attrs)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index fa97d9649a28..9d7a16fc5ca4 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -346,12 +346,6 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
346 skb_queue_head_init(&n->bc_entry.inputq2); 346 skb_queue_head_init(&n->bc_entry.inputq2);
347 for (i = 0; i < MAX_BEARERS; i++) 347 for (i = 0; i < MAX_BEARERS; i++)
348 spin_lock_init(&n->links[i].lock); 348 spin_lock_init(&n->links[i].lock);
349 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
350 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
351 if (n->addr < temp_node->addr)
352 break;
353 }
354 list_add_tail_rcu(&n->list, &temp_node->list);
355 n->state = SELF_DOWN_PEER_LEAVING; 349 n->state = SELF_DOWN_PEER_LEAVING;
356 n->signature = INVALID_NODE_SIG; 350 n->signature = INVALID_NODE_SIG;
357 n->active_links[0] = INVALID_BEARER_ID; 351 n->active_links[0] = INVALID_BEARER_ID;
@@ -372,6 +366,12 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
372 tipc_node_get(n); 366 tipc_node_get(n);
373 setup_timer(&n->timer, tipc_node_timeout, (unsigned long)n); 367 setup_timer(&n->timer, tipc_node_timeout, (unsigned long)n);
374 n->keepalive_intv = U32_MAX; 368 n->keepalive_intv = U32_MAX;
369 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
370 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
371 if (n->addr < temp_node->addr)
372 break;
373 }
374 list_add_tail_rcu(&n->list, &temp_node->list);
375exit: 375exit:
376 spin_unlock_bh(&tn->node_list_lock); 376 spin_unlock_bh(&tn->node_list_lock);
377 return n; 377 return n;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 29be035f9c65..f75f847e688d 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1781,7 +1781,12 @@ restart_locked:
1781 goto out_unlock; 1781 goto out_unlock;
1782 } 1782 }
1783 1783
1784 if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { 1784 /* other == sk && unix_peer(other) != sk if
1785 * - unix_peer(sk) == NULL, destination address bound to sk
1786 * - unix_peer(sk) == sk by time of get but disconnected before lock
1787 */
1788 if (other != sk &&
1789 unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
1785 if (timeo) { 1790 if (timeo) {
1786 timeo = unix_wait_for_peer(other, timeo); 1791 timeo = unix_wait_for_peer(other, timeo);
1787 1792
@@ -2277,13 +2282,15 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2277 size_t size = state->size; 2282 size_t size = state->size;
2278 unsigned int last_len; 2283 unsigned int last_len;
2279 2284
2280 err = -EINVAL; 2285 if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2281 if (sk->sk_state != TCP_ESTABLISHED) 2286 err = -EINVAL;
2282 goto out; 2287 goto out;
2288 }
2283 2289
2284 err = -EOPNOTSUPP; 2290 if (unlikely(flags & MSG_OOB)) {
2285 if (flags & MSG_OOB) 2291 err = -EOPNOTSUPP;
2286 goto out; 2292 goto out;
2293 }
2287 2294
2288 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); 2295 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2289 timeo = sock_rcvtimeo(sk, noblock); 2296 timeo = sock_rcvtimeo(sk, noblock);
@@ -2305,6 +2312,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2305 bool drop_skb; 2312 bool drop_skb;
2306 struct sk_buff *skb, *last; 2313 struct sk_buff *skb, *last;
2307 2314
2315redo:
2308 unix_state_lock(sk); 2316 unix_state_lock(sk);
2309 if (sock_flag(sk, SOCK_DEAD)) { 2317 if (sock_flag(sk, SOCK_DEAD)) {
2310 err = -ECONNRESET; 2318 err = -ECONNRESET;
@@ -2329,9 +2337,11 @@ again:
2329 goto unlock; 2337 goto unlock;
2330 2338
2331 unix_state_unlock(sk); 2339 unix_state_unlock(sk);
2332 err = -EAGAIN; 2340 if (!timeo) {
2333 if (!timeo) 2341 err = -EAGAIN;
2334 break; 2342 break;
2343 }
2344
2335 mutex_unlock(&u->readlock); 2345 mutex_unlock(&u->readlock);
2336 2346
2337 timeo = unix_stream_data_wait(sk, timeo, last, 2347 timeo = unix_stream_data_wait(sk, timeo, last,
@@ -2344,7 +2354,7 @@ again:
2344 } 2354 }
2345 2355
2346 mutex_lock(&u->readlock); 2356 mutex_lock(&u->readlock);
2347 continue; 2357 goto redo;
2348unlock: 2358unlock:
2349 unix_state_unlock(sk); 2359 unix_state_unlock(sk);
2350 break; 2360 break;
diff --git a/net/unix/diag.c b/net/unix/diag.c
index c512f64d5287..4d9679701a6d 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -220,7 +220,7 @@ done:
220 return skb->len; 220 return skb->len;
221} 221}
222 222
223static struct sock *unix_lookup_by_ino(int ino) 223static struct sock *unix_lookup_by_ino(unsigned int ino)
224{ 224{
225 int i; 225 int i;
226 struct sock *sk; 226 struct sock *sk;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 7fd1220fbfa0..bbe65dcb9738 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1557,8 +1557,6 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1557 if (err < 0) 1557 if (err < 0)
1558 goto out; 1558 goto out;
1559 1559
1560 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1561
1562 while (total_written < len) { 1560 while (total_written < len) {
1563 ssize_t written; 1561 ssize_t written;
1564 1562
@@ -1578,7 +1576,9 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1578 goto out_wait; 1576 goto out_wait;
1579 1577
1580 release_sock(sk); 1578 release_sock(sk);
1579 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1581 timeout = schedule_timeout(timeout); 1580 timeout = schedule_timeout(timeout);
1581 finish_wait(sk_sleep(sk), &wait);
1582 lock_sock(sk); 1582 lock_sock(sk);
1583 if (signal_pending(current)) { 1583 if (signal_pending(current)) {
1584 err = sock_intr_errno(timeout); 1584 err = sock_intr_errno(timeout);
@@ -1588,8 +1588,6 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1588 goto out_wait; 1588 goto out_wait;
1589 } 1589 }
1590 1590
1591 prepare_to_wait(sk_sleep(sk), &wait,
1592 TASK_INTERRUPTIBLE);
1593 } 1591 }
1594 1592
1595 /* These checks occur both as part of and after the loop 1593 /* These checks occur both as part of and after the loop
@@ -1635,7 +1633,6 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1635out_wait: 1633out_wait:
1636 if (total_written > 0) 1634 if (total_written > 0)
1637 err = total_written; 1635 err = total_written;
1638 finish_wait(sk_sleep(sk), &wait);
1639out: 1636out:
1640 release_sock(sk); 1637 release_sock(sk);
1641 return err; 1638 return err;
@@ -1716,7 +1713,6 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1716 if (err < 0) 1713 if (err < 0)
1717 goto out; 1714 goto out;
1718 1715
1719 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1720 1716
1721 while (1) { 1717 while (1) {
1722 s64 ready = vsock_stream_has_data(vsk); 1718 s64 ready = vsock_stream_has_data(vsk);
@@ -1727,7 +1723,7 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1727 */ 1723 */
1728 1724
1729 err = -ENOMEM; 1725 err = -ENOMEM;
1730 goto out_wait; 1726 goto out;
1731 } else if (ready > 0) { 1727 } else if (ready > 0) {
1732 ssize_t read; 1728 ssize_t read;
1733 1729
@@ -1750,7 +1746,7 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1750 vsk, target, read, 1746 vsk, target, read,
1751 !(flags & MSG_PEEK), &recv_data); 1747 !(flags & MSG_PEEK), &recv_data);
1752 if (err < 0) 1748 if (err < 0)
1753 goto out_wait; 1749 goto out;
1754 1750
1755 if (read >= target || flags & MSG_PEEK) 1751 if (read >= target || flags & MSG_PEEK)
1756 break; 1752 break;
@@ -1773,7 +1769,9 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1773 break; 1769 break;
1774 1770
1775 release_sock(sk); 1771 release_sock(sk);
1772 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1776 timeout = schedule_timeout(timeout); 1773 timeout = schedule_timeout(timeout);
1774 finish_wait(sk_sleep(sk), &wait);
1777 lock_sock(sk); 1775 lock_sock(sk);
1778 1776
1779 if (signal_pending(current)) { 1777 if (signal_pending(current)) {
@@ -1783,9 +1781,6 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1783 err = -EAGAIN; 1781 err = -EAGAIN;
1784 break; 1782 break;
1785 } 1783 }
1786
1787 prepare_to_wait(sk_sleep(sk), &wait,
1788 TASK_INTERRUPTIBLE);
1789 } 1784 }
1790 } 1785 }
1791 1786
@@ -1816,8 +1811,6 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1816 err = copied; 1811 err = copied;
1817 } 1812 }
1818 1813
1819out_wait:
1820 finish_wait(sk_sleep(sk), &wait);
1821out: 1814out:
1822 release_sock(sk); 1815 release_sock(sk);
1823 return err; 1816 return err;