diff options
138 files changed, 6012 insertions, 3279 deletions
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index a62c889aafca..e124847443f8 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -10,7 +10,7 @@ Required properties:
10 | - dsa,ethernet : Should be a phandle to a valid Ethernet device node | 10 | - dsa,ethernet : Should be a phandle to a valid Ethernet device node |
11 | - dsa,mii-bus : Should be a phandle to a valid MDIO bus device node | 11 | - dsa,mii-bus : Should be a phandle to a valid MDIO bus device node |
12 | 12 | ||
13 | Optionnal properties: | 13 | Optional properties: |
14 | - interrupts : property with a value describing the switch | 14 | - interrupts : property with a value describing the switch |
15 | interrupt number (not supported by the driver) | 15 | interrupt number (not supported by the driver) |
16 | 16 | ||
@@ -23,6 +23,13 @@ Each of these switch child nodes should have the following required properties:
23 | - #address-cells : Must be 1 | 23 | - #address-cells : Must be 1 |
24 | - #size-cells : Must be 0 | 24 | - #size-cells : Must be 0 |
25 | 25 | ||
26 | A switch child node has the following optional property: | ||
27 | |||
28 | - eeprom-length : Set to the length of an EEPROM connected to the | ||
29 | switch. Must be set if the switch can not detect | ||
30 | the presence and/or size of a connected EEPROM, | ||
31 | otherwise optional. | ||
32 | |||
26 | A switch may have multiple "port" children nodes | 33 | A switch may have multiple "port" children nodes |
27 | 34 | ||
28 | Each port children node must have the following mandatory properties: | 35 | Each port children node must have the following mandatory properties: |
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index eeb5b2e97bed..83bf4986baea 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -2230,11 +2230,8 @@ balance-rr: This mode is the only mode that will permit a single
2230 | 2230 | ||
2231 | It is possible to adjust TCP/IP's congestion limits by | 2231 | It is possible to adjust TCP/IP's congestion limits by |
2232 | altering the net.ipv4.tcp_reordering sysctl parameter. The | 2232 | altering the net.ipv4.tcp_reordering sysctl parameter. The |
2233 | usual default value is 3, and the maximum useful value is 127. | 2233 | usual default value is 3. But keep in mind TCP stack is able |
2234 | For a four interface balance-rr bond, expect that a single | 2234 | to automatically increase this when it detects reorders. |
2235 | TCP/IP stream will utilize no more than approximately 2.3 | ||
2236 | interface's worth of throughput, even after adjusting | ||
2237 | tcp_reordering. | ||
2238 | 2235 | ||
2239 | Note that the fraction of packets that will be delivered out of | 2236 | Note that the fraction of packets that will be delivered out of |
2240 | order is highly variable, and is unlikely to be zero. The level | 2237 | order is highly variable, and is unlikely to be zero. The level |
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 0307e2875f21..368e3251c553 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -376,9 +376,17 @@ tcp_orphan_retries - INTEGER
376 | may consume significant resources. Cf. tcp_max_orphans. | 376 | may consume significant resources. Cf. tcp_max_orphans. |
377 | 377 | ||
378 | tcp_reordering - INTEGER | 378 | tcp_reordering - INTEGER |
379 | Maximal reordering of packets in a TCP stream. | 379 | Initial reordering level of packets in a TCP stream. |
380 | TCP stack can then dynamically adjust flow reordering level | ||
381 | between this initial value and tcp_max_reordering | ||
380 | Default: 3 | 382 | Default: 3 |
381 | 383 | ||
384 | tcp_max_reordering - INTEGER | ||
385 | Maximal reordering level of packets in a TCP stream. | ||
386 | 300 is a fairly conservative value, but you might increase it | ||
387 | if paths are using per packet load balancing (like bonding rr mode) | ||
388 | Default: 300 | ||
389 | |||
382 | tcp_retrans_collapse - BOOLEAN | 390 | tcp_retrans_collapse - BOOLEAN |
383 | Bug-to-bug compatibility with some broken printers. | 391 | Bug-to-bug compatibility with some broken printers. |
384 | On retransmit try to send bigger packets to work around bugs in | 392 | On retransmit try to send bigger packets to work around bugs in |
@@ -1452,6 +1460,19 @@ suppress_frag_ndisc - INTEGER
1452 | 1 - (default) discard fragmented neighbor discovery packets | 1460 | 1 - (default) discard fragmented neighbor discovery packets |
1453 | 0 - allow fragmented neighbor discovery packets | 1461 | 0 - allow fragmented neighbor discovery packets |
1454 | 1462 | ||
1463 | optimistic_dad - BOOLEAN | ||
1464 | Whether to perform Optimistic Duplicate Address Detection (RFC 4429). | ||
1465 | 0: disabled (default) | ||
1466 | 1: enabled | ||
1467 | |||
1468 | use_optimistic - BOOLEAN | ||
1469 | If enabled, do not classify optimistic addresses as deprecated during | ||
1470 | source address selection. Preferred addresses will still be chosen | ||
1471 | before optimistic addresses, subject to other ranking in the source | ||
1472 | address selection algorithm. | ||
1473 | 0: disabled (default) | ||
1474 | 1: enabled | ||
1475 | |||
1455 | icmp/*: | 1476 | icmp/*: |
1456 | ratelimit - INTEGER | 1477 | ratelimit - INTEGER |
1457 | Limit the maximal rates for sending ICMPv6 packets. | 1478 | Limit the maximal rates for sending ICMPv6 packets. |
diff --git a/MAINTAINERS b/MAINTAINERS
index 1cfabdd1d23f..3a41fb0db2bd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5860,6 +5860,11 @@ M: Russell King <rmk+kernel@arm.linux.org.uk>
5860 | S: Maintained | 5860 | S: Maintained |
5861 | F: drivers/gpu/drm/armada/ | 5861 | F: drivers/gpu/drm/armada/ |
5862 | 5862 | ||
5863 | MARVELL 88E6352 DSA support | ||
5864 | M: Guenter Roeck <linux@roeck-us.net> | ||
5865 | S: Maintained | ||
5866 | F: drivers/net/dsa/mv88e6352.c | ||
5867 | |||
5863 | MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2) | 5868 | MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2) |
5864 | M: Mirko Lindner <mlindner@marvell.com> | 5869 | M: Mirko Lindner <mlindner@marvell.com> |
5865 | M: Stephen Hemminger <stephen@networkplumber.org> | 5870 | M: Stephen Hemminger <stephen@networkplumber.org> |
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index d81b2475e67e..22762a1f9f72 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -158,6 +158,7 @@ struct pxa168_eth_platform_data gplugd_eth_platform_data = {
158 | .port_number = 0, | 158 | .port_number = 0, |
159 | .phy_addr = 0, | 159 | .phy_addr = 0, |
160 | .speed = 0, /* Autonagotiation */ | 160 | .speed = 0, /* Autonagotiation */ |
161 | .intf = PHY_INTERFACE_MODE_RMII, | ||
161 | .init = gplugd_eth_init, | 162 | .init = gplugd_eth_init, |
162 | }; | 163 | }; |
163 | 164 | ||
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index d2eadab787c5..baa58e79256a 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1326,7 +1326,7 @@ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
1326 | } | 1326 | } |
1327 | 1327 | ||
1328 | /* no suitable interface, frame not sent */ | 1328 | /* no suitable interface, frame not sent */ |
1329 | dev_kfree_skb_any(skb); | 1329 | bond_tx_drop(bond->dev, skb); |
1330 | out: | 1330 | out: |
1331 | return NETDEV_TX_OK; | 1331 | return NETDEV_TX_OK; |
1332 | } | 1332 | } |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c9ac06cfe6b7..c7520082fb0d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3522,7 +3522,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
3522 | } | 3522 | } |
3523 | } | 3523 | } |
3524 | /* no slave that can tx has been found */ | 3524 | /* no slave that can tx has been found */ |
3525 | dev_kfree_skb_any(skb); | 3525 | bond_tx_drop(bond->dev, skb); |
3526 | } | 3526 | } |
3527 | 3527 | ||
3528 | /** | 3528 | /** |
@@ -3584,7 +3584,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
3584 | slave_id = bond_rr_gen_slave_id(bond); | 3584 | slave_id = bond_rr_gen_slave_id(bond); |
3585 | bond_xmit_slave_id(bond, skb, slave_id % slave_cnt); | 3585 | bond_xmit_slave_id(bond, skb, slave_id % slave_cnt); |
3586 | } else { | 3586 | } else { |
3587 | dev_kfree_skb_any(skb); | 3587 | bond_tx_drop(bond_dev, skb); |
3588 | } | 3588 | } |
3589 | } | 3589 | } |
3590 | 3590 | ||
@@ -3603,7 +3603,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
3603 | if (slave) | 3603 | if (slave) |
3604 | bond_dev_queue_xmit(bond, skb, slave->dev); | 3604 | bond_dev_queue_xmit(bond, skb, slave->dev); |
3605 | else | 3605 | else |
3606 | dev_kfree_skb_any(skb); | 3606 | bond_tx_drop(bond_dev, skb); |
3607 | 3607 | ||
3608 | return NETDEV_TX_OK; | 3608 | return NETDEV_TX_OK; |
3609 | } | 3609 | } |
@@ -3747,8 +3747,7 @@ int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
3747 | slave = slaves->arr[bond_xmit_hash(bond, skb) % count]; | 3747 | slave = slaves->arr[bond_xmit_hash(bond, skb) % count]; |
3748 | bond_dev_queue_xmit(bond, skb, slave->dev); | 3748 | bond_dev_queue_xmit(bond, skb, slave->dev); |
3749 | } else { | 3749 | } else { |
3750 | dev_kfree_skb_any(skb); | 3750 | bond_tx_drop(dev, skb); |
3751 | atomic_long_inc(&dev->tx_dropped); | ||
3752 | } | 3751 | } |
3753 | 3752 | ||
3754 | return NETDEV_TX_OK; | 3753 | return NETDEV_TX_OK; |
@@ -3778,7 +3777,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
3778 | if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) | 3777 | if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) |
3779 | bond_dev_queue_xmit(bond, skb, slave->dev); | 3778 | bond_dev_queue_xmit(bond, skb, slave->dev); |
3780 | else | 3779 | else |
3781 | dev_kfree_skb_any(skb); | 3780 | bond_tx_drop(bond_dev, skb); |
3782 | 3781 | ||
3783 | return NETDEV_TX_OK; | 3782 | return NETDEV_TX_OK; |
3784 | } | 3783 | } |
@@ -3858,7 +3857,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
3858 | /* Should never happen, mode already checked */ | 3857 | /* Should never happen, mode already checked */ |
3859 | netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond)); | 3858 | netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond)); |
3860 | WARN_ON_ONCE(1); | 3859 | WARN_ON_ONCE(1); |
3861 | dev_kfree_skb_any(skb); | 3860 | bond_tx_drop(dev, skb); |
3862 | return NETDEV_TX_OK; | 3861 | return NETDEV_TX_OK; |
3863 | } | 3862 | } |
3864 | } | 3863 | } |
@@ -3878,7 +3877,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
3878 | if (bond_has_slaves(bond)) | 3877 | if (bond_has_slaves(bond)) |
3879 | ret = __bond_start_xmit(skb, dev); | 3878 | ret = __bond_start_xmit(skb, dev); |
3880 | else | 3879 | else |
3881 | dev_kfree_skb_any(skb); | 3880 | bond_tx_drop(dev, skb); |
3882 | rcu_read_unlock(); | 3881 | rcu_read_unlock(); |
3883 | 3882 | ||
3884 | return ret; | 3883 | return ret; |
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 10920f0686e2..bfb0b51c081a 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -645,4 +645,10 @@ extern struct bond_parm_tbl ad_select_tbl[];
645 | /* exported from bond_netlink.c */ | 645 | /* exported from bond_netlink.c */ |
646 | extern struct rtnl_link_ops bond_link_ops; | 646 | extern struct rtnl_link_ops bond_link_ops; |
647 | 647 | ||
648 | static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb) | ||
649 | { | ||
650 | atomic_long_inc(&dev->tx_dropped); | ||
651 | dev_kfree_skb_any(skb); | ||
652 | } | ||
653 | |||
648 | #endif /* _LINUX_BONDING_H */ | 654 | #endif /* _LINUX_BONDING_H */ |
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 9234d808cbb3..2d1a55e980da 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -45,6 +45,15 @@ config NET_DSA_MV88E6171
45 | This enables support for the Marvell 88E6171 ethernet switch | 45 | This enables support for the Marvell 88E6171 ethernet switch |
46 | chip. | 46 | chip. |
47 | 47 | ||
48 | config NET_DSA_MV88E6352 | ||
49 | tristate "Marvell 88E6176/88E6352 ethernet switch chip support" | ||
50 | select NET_DSA | ||
51 | select NET_DSA_MV88E6XXX | ||
52 | select NET_DSA_TAG_EDSA | ||
53 | ---help--- | ||
54 | This enables support for the Marvell 88E6176 and 88E6352 ethernet | ||
55 | switch chips. | ||
56 | |||
48 | config NET_DSA_BCM_SF2 | 57 | config NET_DSA_BCM_SF2 |
49 | tristate "Broadcom Starfighter 2 Ethernet switch support" | 58 | tristate "Broadcom Starfighter 2 Ethernet switch support" |
50 | depends on HAS_IOMEM | 59 | depends on HAS_IOMEM |
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 23a90de9830e..e2d51c4b9382 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -7,6 +7,9 @@ endif
7 | ifdef CONFIG_NET_DSA_MV88E6131 | 7 | ifdef CONFIG_NET_DSA_MV88E6131 |
8 | mv88e6xxx_drv-y += mv88e6131.o | 8 | mv88e6xxx_drv-y += mv88e6131.o |
9 | endif | 9 | endif |
10 | ifdef CONFIG_NET_DSA_MV88E6352 | ||
11 | mv88e6xxx_drv-y += mv88e6352.o | ||
12 | endif | ||
10 | ifdef CONFIG_NET_DSA_MV88E6171 | 13 | ifdef CONFIG_NET_DSA_MV88E6171 |
11 | mv88e6xxx_drv-y += mv88e6171.o | 14 | mv88e6xxx_drv-y += mv88e6171.o |
12 | endif | 15 | endif |
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 05b0ca3bf71d..c29aebe1e62b 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -69,8 +69,11 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
69 | 69 | ||
70 | ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03); | 70 | ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03); |
71 | if (ret >= 0) { | 71 | if (ret >= 0) { |
72 | ret &= 0xfff0; | ||
73 | if (ret == 0x0600) | 72 | if (ret == 0x0600) |
73 | return "Marvell 88E6060 (A0)"; | ||
74 | if (ret == 0x0601 || ret == 0x0602) | ||
75 | return "Marvell 88E6060 (B0)"; | ||
76 | if ((ret & 0xfff0) == 0x0600) | ||
74 | return "Marvell 88E6060"; | 77 | return "Marvell 88E6060"; |
75 | } | 78 | } |
76 | 79 | ||
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index a332c53ff955..9a3f9e0b0532 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -291,6 +291,54 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
291 | return 0; | 291 | return 0; |
292 | } | 292 | } |
293 | 293 | ||
294 | #ifdef CONFIG_NET_DSA_HWMON | ||
295 | |||
296 | static int mv88e6123_61_65_get_temp(struct dsa_switch *ds, int *temp) | ||
297 | { | ||
298 | struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); | ||
299 | int ret; | ||
300 | int val; | ||
301 | |||
302 | *temp = 0; | ||
303 | |||
304 | mutex_lock(&ps->phy_mutex); | ||
305 | |||
306 | ret = mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6); | ||
307 | if (ret < 0) | ||
308 | goto error; | ||
309 | |||
310 | /* Enable temperature sensor */ | ||
311 | ret = mv88e6xxx_phy_read(ds, 0x0, 0x1a); | ||
312 | if (ret < 0) | ||
313 | goto error; | ||
314 | |||
315 | ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5)); | ||
316 | if (ret < 0) | ||
317 | goto error; | ||
318 | |||
319 | /* Wait for temperature to stabilize */ | ||
320 | usleep_range(10000, 12000); | ||
321 | |||
322 | val = mv88e6xxx_phy_read(ds, 0x0, 0x1a); | ||
323 | if (val < 0) { | ||
324 | ret = val; | ||
325 | goto error; | ||
326 | } | ||
327 | |||
328 | /* Disable temperature sensor */ | ||
329 | ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5)); | ||
330 | if (ret < 0) | ||
331 | goto error; | ||
332 | |||
333 | *temp = ((val & 0x1f) - 5) * 5; | ||
334 | |||
335 | error: | ||
336 | mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0); | ||
337 | mutex_unlock(&ps->phy_mutex); | ||
338 | return ret; | ||
339 | } | ||
340 | #endif /* CONFIG_NET_DSA_HWMON */ | ||
341 | |||
294 | static int mv88e6123_61_65_setup(struct dsa_switch *ds) | 342 | static int mv88e6123_61_65_setup(struct dsa_switch *ds) |
295 | { | 343 | { |
296 | struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); | 344 | struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); |
@@ -299,6 +347,7 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
299 | 347 | ||
300 | mutex_init(&ps->smi_mutex); | 348 | mutex_init(&ps->smi_mutex); |
301 | mutex_init(&ps->stats_mutex); | 349 | mutex_init(&ps->stats_mutex); |
350 | mutex_init(&ps->phy_mutex); | ||
302 | 351 | ||
303 | ret = mv88e6123_61_65_switch_reset(ds); | 352 | ret = mv88e6123_61_65_switch_reset(ds); |
304 | if (ret < 0) | 353 | if (ret < 0) |
@@ -329,16 +378,28 @@ static int mv88e6123_61_65_port_to_phy_addr(int port)
329 | static int | 378 | static int |
330 | mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum) | 379 | mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum) |
331 | { | 380 | { |
381 | struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); | ||
332 | int addr = mv88e6123_61_65_port_to_phy_addr(port); | 382 | int addr = mv88e6123_61_65_port_to_phy_addr(port); |
333 | return mv88e6xxx_phy_read(ds, addr, regnum); | 383 | int ret; |
384 | |||
385 | mutex_lock(&ps->phy_mutex); | ||
386 | ret = mv88e6xxx_phy_read(ds, addr, regnum); | ||
387 | mutex_unlock(&ps->phy_mutex); | ||
388 | return ret; | ||
334 | } | 389 | } |
335 | 390 | ||
336 | static int | 391 | static int |
337 | mv88e6123_61_65_phy_write(struct dsa_switch *ds, | 392 | mv88e6123_61_65_phy_write(struct dsa_switch *ds, |
338 | int port, int regnum, u16 val) | 393 | int port, int regnum, u16 val) |
339 | { | 394 | { |
395 | struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); | ||
340 | int addr = mv88e6123_61_65_port_to_phy_addr(port); | 396 | int addr = mv88e6123_61_65_port_to_phy_addr(port); |
341 | return mv88e6xxx_phy_write(ds, addr, regnum, val); | 397 | int ret; |
398 | |||
399 | mutex_lock(&ps->phy_mutex); | ||
400 | ret = mv88e6xxx_phy_write(ds, addr, regnum, val); | ||
401 | mutex_unlock(&ps->phy_mutex); | ||
402 | return ret; | ||
342 | } | 403 | } |
343 | 404 | ||
344 | static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = { | 405 | static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = { |
@@ -372,6 +433,9 @@ static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = {
372 | { "hist_256_511bytes", 4, 0x0b, }, | 433 | { "hist_256_511bytes", 4, 0x0b, }, |
373 | { "hist_512_1023bytes", 4, 0x0c, }, | 434 | { "hist_512_1023bytes", 4, 0x0c, }, |
374 | { "hist_1024_max_bytes", 4, 0x0d, }, | 435 | { "hist_1024_max_bytes", 4, 0x0d, }, |
436 | { "sw_in_discards", 4, 0x110, }, | ||
437 | { "sw_in_filtered", 2, 0x112, }, | ||
438 | { "sw_out_filtered", 2, 0x113, }, | ||
375 | }; | 439 | }; |
376 | 440 | ||
377 | static void | 441 | static void |
@@ -406,6 +470,11 @@ struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
406 | .get_strings = mv88e6123_61_65_get_strings, | 470 | .get_strings = mv88e6123_61_65_get_strings, |
407 | .get_ethtool_stats = mv88e6123_61_65_get_ethtool_stats, | 471 | .get_ethtool_stats = mv88e6123_61_65_get_ethtool_stats, |
408 | .get_sset_count = mv88e6123_61_65_get_sset_count, | 472 | .get_sset_count = mv88e6123_61_65_get_sset_count, |
473 | #ifdef CONFIG_NET_DSA_HWMON | ||
474 | .get_temp = mv88e6123_61_65_get_temp, | ||
475 | #endif | ||
476 | .get_regs_len = mv88e6xxx_get_regs_len, | ||
477 | .get_regs = mv88e6xxx_get_regs, | ||
409 | }; | 478 | }; |
410 | 479 | ||
411 | MODULE_ALIAS("platform:mv88e6123"); | 480 | MODULE_ALIAS("platform:mv88e6123"); |
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 244c735014fa..1230f52aa70e 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -21,6 +21,7 @@
21 | #define ID_6085 0x04a0 | 21 | #define ID_6085 0x04a0 |
22 | #define ID_6095 0x0950 | 22 | #define ID_6095 0x0950 |
23 | #define ID_6131 0x1060 | 23 | #define ID_6131 0x1060 |
24 | #define ID_6131_B2 0x1066 | ||
24 | 25 | ||
25 | static char *mv88e6131_probe(struct device *host_dev, int sw_addr) | 26 | static char *mv88e6131_probe(struct device *host_dev, int sw_addr) |
26 | { | 27 | { |
@@ -32,12 +33,15 @@ static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
32 | 33 | ||
33 | ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); | 34 | ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); |
34 | if (ret >= 0) { | 35 | if (ret >= 0) { |
35 | ret &= 0xfff0; | 36 | int ret_masked = ret & 0xfff0; |
36 | if (ret == ID_6085) | 37 | |
38 | if (ret_masked == ID_6085) | ||
37 | return "Marvell 88E6085"; | 39 | return "Marvell 88E6085"; |
38 | if (ret == ID_6095) | 40 | if (ret_masked == ID_6095) |
39 | return "Marvell 88E6095/88E6095F"; | 41 | return "Marvell 88E6095/88E6095F"; |
40 | if (ret == ID_6131) | 42 | if (ret == ID_6131_B2) |
43 | return "Marvell 88E6131 (B2)"; | ||
44 | if (ret_masked == ID_6131) | ||
41 | return "Marvell 88E6131"; | 45 | return "Marvell 88E6131"; |
42 | } | 46 | } |
43 | 47 | ||
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
new file mode 100644
index 000000000000..258d9ef5ef25
--- /dev/null
+++ b/drivers/net/dsa/mv88e6352.c
@@ -0,0 +1,788 @@
1 | /* | ||
2 | * net/dsa/mv88e6352.c - Marvell 88e6352 switch chip support | ||
3 | * | ||
4 | * Copyright (c) 2014 Guenter Roeck | ||
5 | * | ||
6 | * Derived from mv88e6123_61_65.c | ||
7 | * Copyright (c) 2008-2009 Marvell Semiconductor | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | */ | ||
14 | |||
15 | #include <linux/delay.h> | ||
16 | #include <linux/jiffies.h> | ||
17 | #include <linux/list.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/netdevice.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/phy.h> | ||
22 | #include <net/dsa.h> | ||
23 | #include "mv88e6xxx.h" | ||
24 | |||
25 | static int mv88e6352_wait(struct dsa_switch *ds, int reg, u16 mask) | ||
26 | { | ||
27 | unsigned long timeout = jiffies + HZ / 10; | ||
28 | |||
29 | while (time_before(jiffies, timeout)) { | ||
30 | int ret; | ||
31 | |||
32 | ret = REG_READ(REG_GLOBAL2, reg); | ||
33 | if (ret < 0) | ||
34 | return ret; | ||
35 | |||
36 | if (!(ret & mask)) | ||
37 | return 0; | ||
38 | |||
39 | usleep_range(1000, 2000); | ||
40 | } | ||
41 | return -ETIMEDOUT; | ||
42 | } | ||
43 | |||
44 | static inline int mv88e6352_phy_wait(struct dsa_switch *ds) | ||
45 | { | ||
46 | return mv88e6352_wait(ds, 0x18, 0x8000); | ||
47 | } | ||
48 | |||
49 | static inline int mv88e6352_eeprom_load_wait(struct dsa_switch *ds) | ||
50 | { | ||
51 | return mv88e6352_wait(ds, 0x14, 0x0800); | ||
52 | } | ||
53 | |||
54 | static inline int mv88e6352_eeprom_busy_wait(struct dsa_switch *ds) | ||
55 | { | ||
56 | return mv88e6352_wait(ds, 0x14, 0x8000); | ||
57 | } | ||
58 | |||
59 | static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum) | ||
60 | { | ||
61 | int ret; | ||
62 | |||
63 | REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum); | ||
64 | |||
65 | ret = mv88e6352_phy_wait(ds); | ||
66 | if (ret < 0) | ||
67 | return ret; | ||
68 | |||
69 | return REG_READ(REG_GLOBAL2, 0x19); | ||
70 | } | ||
71 | |||
72 | static int __mv88e6352_phy_write(struct dsa_switch *ds, int addr, int regnum, | ||
73 | u16 val) | ||
74 | { | ||
75 | REG_WRITE(REG_GLOBAL2, 0x19, val); | ||
76 | REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum); | ||
77 | |||
78 | return mv88e6352_phy_wait(ds); | ||
79 | } | ||
80 | |||
81 | static char *mv88e6352_probe(struct device *host_dev, int sw_addr) | ||
82 | { | ||
83 | struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); | ||
84 | int ret; | ||
85 | |||
86 | if (bus == NULL) | ||
87 | return NULL; | ||
88 | |||
89 | ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); | ||
90 | if (ret >= 0) { | ||
91 | if ((ret & 0xfff0) == 0x1760) | ||
92 | return "Marvell 88E6176"; | ||
93 | if (ret == 0x3521) | ||
94 | return "Marvell 88E6352 (A0)"; | ||
95 | if (ret == 0x3522) | ||
96 | return "Marvell 88E6352 (A1)"; | ||
97 | if ((ret & 0xfff0) == 0x3520) | ||
98 | return "Marvell 88E6352"; | ||
99 | } | ||
100 | |||
101 | return NULL; | ||
102 | } | ||
103 | |||
104 | static int mv88e6352_switch_reset(struct dsa_switch *ds) | ||
105 | { | ||
106 | unsigned long timeout; | ||
107 | int ret; | ||
108 | int i; | ||
109 | |||
110 | /* Set all ports to the disabled state. */ | ||
111 | for (i = 0; i < 7; i++) { | ||
112 | ret = REG_READ(REG_PORT(i), 0x04); | ||
113 | REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); | ||
114 | } | ||
115 | |||
116 | /* Wait for transmit queues to drain. */ | ||
117 | usleep_range(2000, 4000); | ||
118 | |||
119 | /* Reset the switch. Keep PPU active (bit 14, undocumented). | ||
120 | * The PPU needs to be active to support indirect phy register | ||
121 | * accesses through global registers 0x18 and 0x19. | ||
122 | */ | ||
123 | REG_WRITE(REG_GLOBAL, 0x04, 0xc000); | ||
124 | |||
125 | /* Wait up to one second for reset to complete. */ | ||
126 | timeout = jiffies + 1 * HZ; | ||
127 | while (time_before(jiffies, timeout)) { | ||
128 | ret = REG_READ(REG_GLOBAL, 0x00); | ||
129 | if ((ret & 0x8800) == 0x8800) | ||
130 | break; | ||
131 | usleep_range(1000, 2000); | ||
132 | } | ||
133 | if (time_after(jiffies, timeout)) | ||
134 | return -ETIMEDOUT; | ||
135 | |||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static int mv88e6352_setup_global(struct dsa_switch *ds) | ||
140 | { | ||
141 | int ret; | ||
142 | int i; | ||
143 | |||
144 | /* Discard packets with excessive collisions, | ||
145 | * mask all interrupt sources, enable PPU (bit 14, undocumented). | ||
146 | */ | ||
147 | REG_WRITE(REG_GLOBAL, 0x04, 0x6000); | ||
148 | |||
149 | /* Set the default address aging time to 5 minutes, and | ||
150 | * enable address learn messages to be sent to all message | ||
151 | * ports. | ||
152 | */ | ||
153 | REG_WRITE(REG_GLOBAL, 0x0a, 0x0148); | ||
154 | |||
155 | /* Configure the priority mapping registers. */ | ||
156 | ret = mv88e6xxx_config_prio(ds); | ||
157 | if (ret < 0) | ||
158 | return ret; | ||
159 | |||
160 | /* Configure the upstream port, and configure the upstream | ||
161 | * port as the port to which ingress and egress monitor frames | ||
162 | * are to be sent. | ||
163 | */ | ||
164 | REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110)); | ||
165 | |||
166 | /* Disable remote management for now, and set the switch's | ||
167 | * DSA device number. | ||
168 | */ | ||
169 | REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f); | ||
170 | |||
171 | /* Send all frames with destination addresses matching | ||
172 | * 01:80:c2:00:00:2x to the CPU port. | ||
173 | */ | ||
174 | REG_WRITE(REG_GLOBAL2, 0x02, 0xffff); | ||
175 | |||
176 | /* Send all frames with destination addresses matching | ||
177 | * 01:80:c2:00:00:0x to the CPU port. | ||
178 | */ | ||
179 | REG_WRITE(REG_GLOBAL2, 0x03, 0xffff); | ||
180 | |||
181 | /* Disable the loopback filter, disable flow control | ||
182 | * messages, disable flood broadcast override, disable | ||
183 | * removing of provider tags, disable ATU age violation | ||
184 | * interrupts, disable tag flow control, force flow | ||
185 | * control priority to the highest, and send all special | ||
186 | * multicast frames to the CPU at the highest priority. | ||
187 | */ | ||
188 | REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff); | ||
189 | |||
190 | /* Program the DSA routing table. */ | ||
191 | for (i = 0; i < 32; i++) { | ||
192 | int nexthop = 0x1f; | ||
193 | |||
194 | if (i != ds->index && i < ds->dst->pd->nr_chips) | ||
195 | nexthop = ds->pd->rtable[i] & 0x1f; | ||
196 | |||
197 | REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop); | ||
198 | } | ||
199 | |||
200 | /* Clear all trunk masks. */ | ||
201 | for (i = 0; i < 8; i++) | ||
202 | REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7f); | ||
203 | |||
204 | /* Clear all trunk mappings. */ | ||
205 | for (i = 0; i < 16; i++) | ||
206 | REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11)); | ||
207 | |||
208 | /* Disable ingress rate limiting by resetting all ingress | ||
209 | * rate limit registers to their initial state. | ||
210 | */ | ||
211 | for (i = 0; i < 7; i++) | ||
212 | REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8)); | ||
213 | |||
214 | /* Initialise cross-chip port VLAN table to reset defaults. */ | ||
215 | REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000); | ||
216 | |||
217 | /* Clear the priority override table. */ | ||
218 | for (i = 0; i < 16; i++) | ||
219 | REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8)); | ||
220 | |||
221 | /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */ | ||
222 | |||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | static int mv88e6352_setup_port(struct dsa_switch *ds, int p) | ||
227 | { | ||
228 | int addr = REG_PORT(p); | ||
229 | u16 val; | ||
230 | |||
231 | /* MAC Forcing register: don't force link, speed, duplex | ||
232 | * or flow control state to any particular values on physical | ||
233 | * ports, but force the CPU port and all DSA ports to 1000 Mb/s | ||
234 | * full duplex. | ||
235 | */ | ||
236 | if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) | ||
237 | REG_WRITE(addr, 0x01, 0x003e); | ||
238 | else | ||
239 | REG_WRITE(addr, 0x01, 0x0003); | ||
240 | |||
241 | /* Do not limit the period of time that this port can be | ||
242 | * paused for by the remote end or the period of time that | ||
243 | * this port can pause the remote end. | ||
244 | */ | ||
245 | REG_WRITE(addr, 0x02, 0x0000); | ||
246 | |||
247 | /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock, | ||
248 | * disable Header mode, enable IGMP/MLD snooping, disable VLAN | ||
249 | * tunneling, determine priority by looking at 802.1p and IP | ||
250 | * priority fields (IP prio has precedence), and set STP state | ||
251 | * to Forwarding. | ||
252 | * | ||
253 | * If this is the CPU link, use DSA or EDSA tagging depending | ||
254 | * on which tagging mode was configured. | ||
255 | * | ||
256 | * If this is a link to another switch, use DSA tagging mode. | ||
257 | * | ||
258 | * If this is the upstream port for this switch, enable | ||
259 | * forwarding of unknown unicasts and multicasts. | ||
260 | */ | ||
261 | val = 0x0433; | ||
262 | if (dsa_is_cpu_port(ds, p)) { | ||
263 | if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) | ||
264 | val |= 0x3300; | ||
265 | else | ||
266 | val |= 0x0100; | ||
267 | } | ||
268 | if (ds->dsa_port_mask & (1 << p)) | ||
269 | val |= 0x0100; | ||
270 | if (p == dsa_upstream_port(ds)) | ||
271 | val |= 0x000c; | ||
272 | REG_WRITE(addr, 0x04, val); | ||
273 | |||
274 | /* Port Control 1: disable trunking. Also, if this is the | ||
275 | * CPU port, enable learn messages to be sent to this port. | ||
276 | */ | ||
277 | REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000); | ||
278 | |||
279 | /* Port based VLAN map: give each port its own address | ||
280 | * database, allow the CPU port to talk to each of the 'real' | ||
281 | * ports, and allow each of the 'real' ports to only talk to | ||
282 | * the upstream port. | ||
283 | */ | ||
284 | val = (p & 0xf) << 12; | ||
285 | if (dsa_is_cpu_port(ds, p)) | ||
286 | val |= ds->phys_port_mask; | ||
287 | else | ||
288 | val |= 1 << dsa_upstream_port(ds); | ||
289 | REG_WRITE(addr, 0x06, val); | ||
290 | |||
291 | /* Default VLAN ID and priority: don't set a default VLAN | ||
292 | * ID, and set the default packet priority to zero. | ||
293 | */ | ||
294 | REG_WRITE(addr, 0x07, 0x0000); | ||
295 | |||
296 | /* Port Control 2: don't force a good FCS, set the maximum | ||
297 | * frame size to 10240 bytes, don't let the switch add or | ||
298 | * strip 802.1q tags, don't discard tagged or untagged frames | ||
299 | * on this port, do a destination address lookup on all | ||
300 | * received packets as usual, disable ARP mirroring and don't | ||
301 | * send a copy of all transmitted/received frames on this port | ||
302 | * to the CPU. | ||
303 | */ | ||
304 | REG_WRITE(addr, 0x08, 0x2080); | ||
305 | |||
306 | /* Egress rate control: disable egress rate control. */ | ||
307 | REG_WRITE(addr, 0x09, 0x0001); | ||
308 | |||
309 | /* Egress rate control 2: disable egress rate control. */ | ||
310 | REG_WRITE(addr, 0x0a, 0x0000); | ||
311 | |||
312 | /* Port Association Vector: when learning source addresses | ||
313 | * of packets, add the address to the address database using | ||
314 | * a port bitmap that has only the bit for this port set and | ||
315 | * the other bits clear. | ||
316 | */ | ||
317 | REG_WRITE(addr, 0x0b, 1 << p); | ||
318 | |||
319 | /* Port ATU control: disable limiting the number of address | ||
320 | * database entries that this port is allowed to use. | ||
321 | */ | ||
322 | REG_WRITE(addr, 0x0c, 0x0000); | ||
323 | |||
324 | /* Priority Override: disable DA, SA and VTU priority override. */ | ||
325 | REG_WRITE(addr, 0x0d, 0x0000); | ||
326 | |||
327 | /* Port Ethertype: use the Ethertype DSA Ethertype value. */ | ||
328 | REG_WRITE(addr, 0x0f, ETH_P_EDSA); | ||
329 | |||
330 | /* Tag Remap: use an identity 802.1p prio -> switch prio | ||
331 | * mapping. | ||
332 | */ | ||
333 | REG_WRITE(addr, 0x18, 0x3210); | ||
334 | |||
335 | /* Tag Remap 2: use an identity 802.1p prio -> switch prio | ||
336 | * mapping. | ||
337 | */ | ||
338 | REG_WRITE(addr, 0x19, 0x7654); | ||
339 | |||
340 | return 0; | ||
341 | } | ||
342 | |||
343 | #ifdef CONFIG_NET_DSA_HWMON | ||
344 | |||
345 | static int mv88e6352_phy_page_read(struct dsa_switch *ds, | ||
346 | int port, int page, int reg) | ||
347 | { | ||
348 | struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); | ||
349 | int ret; | ||
350 | |||
351 | mutex_lock(&ps->phy_mutex); | ||
352 | ret = __mv88e6352_phy_write(ds, port, 0x16, page); | ||
353 | if (ret < 0) | ||
354 | goto error; | ||
355 | ret = __mv88e6352_phy_read(ds, port, reg); | ||
356 | error: | ||
357 | __mv88e6352_phy_write(ds, port, 0x16, 0x0); | ||
358 | mutex_unlock(&ps->phy_mutex); | ||
359 | return ret; | ||
360 | } | ||
361 | |||
362 | static int mv88e6352_phy_page_write(struct dsa_switch *ds, | ||
363 | int port, int page, int reg, int val) | ||
364 | { | ||
365 | struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); | ||
366 | int ret; | ||
367 | |||
368 | mutex_lock(&ps->phy_mutex); | ||
369 | ret = __mv88e6352_phy_write(ds, port, 0x16, page); | ||
370 | if (ret < 0) | ||
371 | goto error; | ||
372 | |||
373 | ret = __mv88e6352_phy_write(ds, port, reg, val); | ||
374 | error: | ||
375 | __mv88e6352_phy_write(ds, port, 0x16, 0x0); | ||
376 | mutex_unlock(&ps->phy_mutex); | ||
377 | return ret; | ||
378 | } | ||
379 | |||
/* Report the chip temperature in degrees Celsius.
 *
 * The sensor is read from PHY 0, page 6, register 27; the low byte
 * carries the raw reading with a fixed offset of 25 degrees.
 *
 * Returns 0 on success with *temp filled in, or a negative errno
 * (*temp is zeroed on entry so it is defined even on failure).
 */
static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
{
	int val;

	*temp = 0;

	val = mv88e6352_phy_page_read(ds, 0, 6, 27);
	if (val < 0)
		return val;

	*temp = (val & 0xff) - 25;
	return 0;
}
394 | |||
/* Report the configured over-temperature limit in degrees Celsius.
 *
 * The limit lives in bits 12:8 of PHY 0, page 6, register 26, in
 * 5-degree steps offset by 25 degrees.
 *
 * Returns 0 on success with *temp filled in, or a negative errno
 * (*temp is zeroed on entry so it is defined even on failure).
 */
static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp)
{
	int val;

	*temp = 0;

	val = mv88e6352_phy_page_read(ds, 0, 6, 26);
	if (val < 0)
		return val;

	*temp = (((val >> 8) & 0x1f) * 5) - 25;
	return 0;
}
409 | |||
/* Program the over-temperature limit, in degrees Celsius.
 *
 * Read-modify-write of PHY 0, page 6, register 26: the limit occupies
 * bits 12:8 (mask 0x1f00), so everything outside that field is
 * preserved.  Returns 0 on success or a negative errno.
 */
static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp)
{
	int reg;
	int steps;

	reg = mv88e6352_phy_page_read(ds, 0, 6, 26);
	if (reg < 0)
		return reg;

	/* Degrees -> 5-degree steps, offset by 25C (= 5 steps),
	 * clamped to the 5-bit field.
	 */
	steps = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);

	return mv88e6352_phy_page_write(ds, 0, 6, 26,
					(reg & 0xe0ff) | (steps << 8));
}
421 | |||
422 | static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm) | ||
423 | { | ||
424 | int ret; | ||
425 | |||
426 | *alarm = false; | ||
427 | |||
428 | ret = mv88e6352_phy_page_read(ds, 0, 6, 26); | ||
429 | if (ret < 0) | ||
430 | return ret; | ||
431 | |||
432 | *alarm = !!(ret & 0x40); | ||
433 | |||
434 | return 0; | ||
435 | } | ||
436 | #endif /* CONFIG_NET_DSA_HWMON */ | ||
437 | |||
/* dsa_switch_driver .setup hook: bring the 88E6352 to a known state.
 *
 * Initializes the driver's serialization locks, caches the product ID
 * from port 0 register 0x03, resets the switch, then programs the
 * global registers and each of the seven ports (0-6).
 *
 * NOTE(review): REG_READ is a macro (defined in mv88e6xxx.h, not
 * visible here) — presumably it returns from this function on read
 * error, so the ps->id line has hidden error control flow; confirm
 * against the macro definition.
 *
 * Returns 0 on success or a negative errno.
 */
static int mv88e6352_setup(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	mutex_init(&ps->smi_mutex);
	mutex_init(&ps->stats_mutex);
	mutex_init(&ps->phy_mutex);
	mutex_init(&ps->eeprom_mutex);

	/* Product ID is in the upper 12 bits; revision bits masked off. */
	ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;

	ret = mv88e6352_switch_reset(ds);
	if (ret < 0)
		return ret;

	/* @@@ initialise vtu and atu */

	ret = mv88e6352_setup_global(ds);
	if (ret < 0)
		return ret;

	/* Configure all 7 ports; abort on the first failure. */
	for (i = 0; i < 7; i++) {
		ret = mv88e6352_setup_port(ds, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}
469 | |||
/* Map a switch port number to its internal PHY address.
 *
 * Only ports 0-4 have internal PHYs on this chip, and for those the
 * mapping is the identity.  Returns -EINVAL for any other port.
 */
static int mv88e6352_port_to_phy_addr(int port)
{
	if (port < 0 || port > 4)
		return -EINVAL;

	return port;
}
476 | |||
477 | static int | ||
478 | mv88e6352_phy_read(struct dsa_switch *ds, int port, int regnum) | ||
479 | { | ||
480 | struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); | ||
481 | int addr = mv88e6352_port_to_phy_addr(port); | ||
482 | int ret; | ||
483 | |||
484 | if (addr < 0) | ||
485 | return addr; | ||
486 | |||
487 | mutex_lock(&ps->phy_mutex); | ||
488 | ret = __mv88e6352_phy_read(ds, addr, regnum); | ||
489 | mutex_unlock(&ps->phy_mutex); | ||
490 | |||
491 | return ret; | ||
492 | } | ||
493 | |||
494 | static int | ||
495 | mv88e6352_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) | ||
496 | { | ||
497 | struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); | ||
498 | int addr = mv88e6352_port_to_phy_addr(port); | ||
499 | int ret; | ||
500 | |||
501 | if (addr < 0) | ||
502 | return addr; | ||
503 | |||
504 | mutex_lock(&ps->phy_mutex); | ||
505 | ret = __mv88e6352_phy_write(ds, addr, regnum, val); | ||
506 | mutex_unlock(&ps->phy_mutex); | ||
507 | |||
508 | return ret; | ||
509 | } | ||
510 | |||
/* Counters exported through ethtool -S.
 *
 * Each entry is { name, sizeof_stat, reg }, consumed by the shared
 * mv88e6xxx_get_ethtool_stats() helper:
 *  - reg <  0x100: a statistics-unit counter; sizeof_stat 8 means a
 *    64-bit value read from registers reg and reg + 1.
 *  - reg >= 0x100: a per-port switch register at offset (reg - 0x100);
 *    sizeof_stat 4 means the value spans two consecutive 16-bit
 *    registers.
 */
static struct mv88e6xxx_hw_stat mv88e6352_hw_stats[] = {
	{ "in_good_octets", 8, 0x00, },
	{ "in_bad_octets", 4, 0x02, },
	{ "in_unicast", 4, 0x04, },
	{ "in_broadcasts", 4, 0x06, },
	{ "in_multicasts", 4, 0x07, },
	{ "in_pause", 4, 0x16, },
	{ "in_undersize", 4, 0x18, },
	{ "in_fragments", 4, 0x19, },
	{ "in_oversize", 4, 0x1a, },
	{ "in_jabber", 4, 0x1b, },
	{ "in_rx_error", 4, 0x1c, },
	{ "in_fcs_error", 4, 0x1d, },
	{ "out_octets", 8, 0x0e, },
	{ "out_unicast", 4, 0x10, },
	{ "out_broadcasts", 4, 0x13, },
	{ "out_multicasts", 4, 0x12, },
	{ "out_pause", 4, 0x15, },
	{ "excessive", 4, 0x11, },
	{ "collisions", 4, 0x1e, },
	{ "deferred", 4, 0x05, },
	{ "single", 4, 0x14, },
	{ "multiple", 4, 0x17, },
	{ "out_fcs_error", 4, 0x03, },
	{ "late", 4, 0x1f, },
	{ "hist_64bytes", 4, 0x08, },
	{ "hist_65_127bytes", 4, 0x09, },
	{ "hist_128_255bytes", 4, 0x0a, },
	{ "hist_256_511bytes", 4, 0x0b, },
	{ "hist_512_1023bytes", 4, 0x0c, },
	{ "hist_1024_max_bytes", 4, 0x0d, },
	/* Per-port registers (0x100 + port register offset). */
	{ "sw_in_discards", 4, 0x110, },
	{ "sw_in_filtered", 2, 0x112, },
	{ "sw_out_filtered", 2, 0x113, },
};
546 | |||
/* Read one 16-bit word from the EEPROM attached to the switch.
 *
 * A command word of 0xc000 | addr is written to the Global2 EEPROM
 * command register (0x14); once the operation completes (polled by
 * mv88e6352_eeprom_busy_wait()) the data is available in the Global2
 * EEPROM data register (0x15).  0xc000 selects the read operation;
 * the write path in mv88e6352_write_eeprom_word() uses 0xb000 —
 * exact bit encoding per the 88E6352 datasheet (not visible here).
 *
 * The word address is 8 bits wide, hence the 0xff mask.  Serialized
 * by ps->eeprom_mutex.
 *
 * Returns the word read (>= 0) or a negative errno.
 */
static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->eeprom_mutex);

	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
				  0xc000 | (addr & 0xff));
	if (ret < 0)
		goto error;

	ret = mv88e6352_eeprom_busy_wait(ds);
	if (ret < 0)
		goto error;

	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x15);
error:
	mutex_unlock(&ps->eeprom_mutex);
	return ret;
}
568 | |||
569 | static int mv88e6352_get_eeprom(struct dsa_switch *ds, | ||
570 | struct ethtool_eeprom *eeprom, u8 *data) | ||
571 | { | ||
572 | int offset; | ||
573 | int len; | ||
574 | int ret; | ||
575 | |||
576 | offset = eeprom->offset; | ||
577 | len = eeprom->len; | ||
578 | eeprom->len = 0; | ||
579 | |||
580 | eeprom->magic = 0xc3ec4951; | ||
581 | |||
582 | ret = mv88e6352_eeprom_load_wait(ds); | ||
583 | if (ret < 0) | ||
584 | return ret; | ||
585 | |||
586 | if (offset & 1) { | ||
587 | int word; | ||
588 | |||
589 | word = mv88e6352_read_eeprom_word(ds, offset >> 1); | ||
590 | if (word < 0) | ||
591 | return word; | ||
592 | |||
593 | *data++ = (word >> 8) & 0xff; | ||
594 | |||
595 | offset++; | ||
596 | len--; | ||
597 | eeprom->len++; | ||
598 | } | ||
599 | |||
600 | while (len >= 2) { | ||
601 | int word; | ||
602 | |||
603 | word = mv88e6352_read_eeprom_word(ds, offset >> 1); | ||
604 | if (word < 0) | ||
605 | return word; | ||
606 | |||
607 | *data++ = word & 0xff; | ||
608 | *data++ = (word >> 8) & 0xff; | ||
609 | |||
610 | offset += 2; | ||
611 | len -= 2; | ||
612 | eeprom->len += 2; | ||
613 | } | ||
614 | |||
615 | if (len) { | ||
616 | int word; | ||
617 | |||
618 | word = mv88e6352_read_eeprom_word(ds, offset >> 1); | ||
619 | if (word < 0) | ||
620 | return word; | ||
621 | |||
622 | *data++ = word & 0xff; | ||
623 | |||
624 | offset++; | ||
625 | len--; | ||
626 | eeprom->len++; | ||
627 | } | ||
628 | |||
629 | return 0; | ||
630 | } | ||
631 | |||
632 | static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds) | ||
633 | { | ||
634 | int ret; | ||
635 | |||
636 | ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x14); | ||
637 | if (ret < 0) | ||
638 | return ret; | ||
639 | |||
640 | if (!(ret & 0x0400)) | ||
641 | return -EROFS; | ||
642 | |||
643 | return 0; | ||
644 | } | ||
645 | |||
/* Write one 16-bit word to the EEPROM attached to the switch.
 *
 * The data is loaded into the Global2 EEPROM data register (0x15)
 * first, then a command word of 0xb000 | addr is written to the
 * Global2 EEPROM command register (0x14) to start the write; 0xb000
 * selects the write operation (the read path uses 0xc000) — exact bit
 * encoding per the 88E6352 datasheet (not visible here).  Completion
 * is polled via mv88e6352_eeprom_busy_wait().
 *
 * The word address is 8 bits wide, hence the 0xff mask.  Serialized
 * by ps->eeprom_mutex.
 *
 * Returns a non-negative value on success or a negative errno.
 */
static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr,
				       u16 data)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->eeprom_mutex);

	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x15, data);
	if (ret < 0)
		goto error;

	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
				  0xb000 | (addr & 0xff));
	if (ret < 0)
		goto error;

	ret = mv88e6352_eeprom_busy_wait(ds);
error:
	mutex_unlock(&ps->eeprom_mutex);
	return ret;
}
668 | |||
669 | static int mv88e6352_set_eeprom(struct dsa_switch *ds, | ||
670 | struct ethtool_eeprom *eeprom, u8 *data) | ||
671 | { | ||
672 | int offset; | ||
673 | int ret; | ||
674 | int len; | ||
675 | |||
676 | if (eeprom->magic != 0xc3ec4951) | ||
677 | return -EINVAL; | ||
678 | |||
679 | ret = mv88e6352_eeprom_is_readonly(ds); | ||
680 | if (ret) | ||
681 | return ret; | ||
682 | |||
683 | offset = eeprom->offset; | ||
684 | len = eeprom->len; | ||
685 | eeprom->len = 0; | ||
686 | |||
687 | ret = mv88e6352_eeprom_load_wait(ds); | ||
688 | if (ret < 0) | ||
689 | return ret; | ||
690 | |||
691 | if (offset & 1) { | ||
692 | int word; | ||
693 | |||
694 | word = mv88e6352_read_eeprom_word(ds, offset >> 1); | ||
695 | if (word < 0) | ||
696 | return word; | ||
697 | |||
698 | word = (*data++ << 8) | (word & 0xff); | ||
699 | |||
700 | ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word); | ||
701 | if (ret < 0) | ||
702 | return ret; | ||
703 | |||
704 | offset++; | ||
705 | len--; | ||
706 | eeprom->len++; | ||
707 | } | ||
708 | |||
709 | while (len >= 2) { | ||
710 | int word; | ||
711 | |||
712 | word = *data++; | ||
713 | word |= *data++ << 8; | ||
714 | |||
715 | ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word); | ||
716 | if (ret < 0) | ||
717 | return ret; | ||
718 | |||
719 | offset += 2; | ||
720 | len -= 2; | ||
721 | eeprom->len += 2; | ||
722 | } | ||
723 | |||
724 | if (len) { | ||
725 | int word; | ||
726 | |||
727 | word = mv88e6352_read_eeprom_word(ds, offset >> 1); | ||
728 | if (word < 0) | ||
729 | return word; | ||
730 | |||
731 | word = (word & 0xff00) | *data++; | ||
732 | |||
733 | ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word); | ||
734 | if (ret < 0) | ||
735 | return ret; | ||
736 | |||
737 | offset++; | ||
738 | len--; | ||
739 | eeprom->len++; | ||
740 | } | ||
741 | |||
742 | return 0; | ||
743 | } | ||
744 | |||
/* ethtool .get_strings hook: thin wrapper delegating to the shared
 * mv88e6xxx helper with this chip's statistics table.
 */
static void
mv88e6352_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
{
	mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6352_hw_stats),
			      mv88e6352_hw_stats, port, data);
}
751 | |||
/* ethtool .get_ethtool_stats hook: thin wrapper delegating to the
 * shared mv88e6xxx helper with this chip's statistics table.
 */
static void
mv88e6352_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
	mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6352_hw_stats),
				    mv88e6352_hw_stats, port, data);
}
758 | |||
/* ethtool .get_sset_count hook: number of statistics entries. */
static int mv88e6352_get_sset_count(struct dsa_switch *ds)
{
	return ARRAY_SIZE(mv88e6352_hw_stats);
}
763 | |||
/* DSA driver binding for the Marvell 88E6352 family.
 *
 * Registered from mv88e6xxx_init() when CONFIG_NET_DSA_MV88E6352 is
 * enabled.  Uses EDSA tagging towards the CPU port; statistics,
 * register dumps and address setup are shared with the other
 * mv88e6xxx drivers, while PHY, temperature and EEPROM access are
 * chip-specific.
 */
struct dsa_switch_driver mv88e6352_switch_driver = {
	.tag_protocol = DSA_TAG_PROTO_EDSA,
	.priv_size = sizeof(struct mv88e6xxx_priv_state),
	.probe = mv88e6352_probe,
	.setup = mv88e6352_setup,
	.set_addr = mv88e6xxx_set_addr_indirect,
	.phy_read = mv88e6352_phy_read,
	.phy_write = mv88e6352_phy_write,
	.poll_link = mv88e6xxx_poll_link,
	.get_strings = mv88e6352_get_strings,
	.get_ethtool_stats = mv88e6352_get_ethtool_stats,
	.get_sset_count = mv88e6352_get_sset_count,
#ifdef CONFIG_NET_DSA_HWMON
	/* Temperature sensor hooks are only built with HWMON support. */
	.get_temp = mv88e6352_get_temp,
	.get_temp_limit = mv88e6352_get_temp_limit,
	.set_temp_limit = mv88e6352_set_temp_limit,
	.get_temp_alarm = mv88e6352_get_temp_alarm,
#endif
	.get_eeprom = mv88e6352_get_eeprom,
	.set_eeprom = mv88e6352_set_eeprom,
	.get_regs_len = mv88e6xxx_get_regs_len,
	.get_regs = mv88e6xxx_get_regs,
};
787 | |||
788 | MODULE_ALIAS("platform:mv88e6352"); | ||
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index a6c90cf5634d..da558d887dad 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c | |||
@@ -485,20 +485,60 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, | |||
485 | for (i = 0; i < nr_stats; i++) { | 485 | for (i = 0; i < nr_stats; i++) { |
486 | struct mv88e6xxx_hw_stat *s = stats + i; | 486 | struct mv88e6xxx_hw_stat *s = stats + i; |
487 | u32 low; | 487 | u32 low; |
488 | u32 high; | 488 | u32 high = 0; |
489 | 489 | ||
490 | if (s->reg >= 0x100) { | ||
491 | int ret; | ||
492 | |||
493 | ret = mv88e6xxx_reg_read(ds, REG_PORT(port), | ||
494 | s->reg - 0x100); | ||
495 | if (ret < 0) | ||
496 | goto error; | ||
497 | low = ret; | ||
498 | if (s->sizeof_stat == 4) { | ||
499 | ret = mv88e6xxx_reg_read(ds, REG_PORT(port), | ||
500 | s->reg - 0x100 + 1); | ||
501 | if (ret < 0) | ||
502 | goto error; | ||
503 | high = ret; | ||
504 | } | ||
505 | data[i] = (((u64)high) << 16) | low; | ||
506 | continue; | ||
507 | } | ||
490 | mv88e6xxx_stats_read(ds, s->reg, &low); | 508 | mv88e6xxx_stats_read(ds, s->reg, &low); |
491 | if (s->sizeof_stat == 8) | 509 | if (s->sizeof_stat == 8) |
492 | mv88e6xxx_stats_read(ds, s->reg + 1, &high); | 510 | mv88e6xxx_stats_read(ds, s->reg + 1, &high); |
493 | else | ||
494 | high = 0; | ||
495 | 511 | ||
496 | data[i] = (((u64)high) << 32) | low; | 512 | data[i] = (((u64)high) << 32) | low; |
497 | } | 513 | } |
498 | 514 | error: | |
499 | mutex_unlock(&ps->stats_mutex); | 515 | mutex_unlock(&ps->stats_mutex); |
500 | } | 516 | } |
501 | 517 | ||
518 | int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) | ||
519 | { | ||
520 | return 32 * sizeof(u16); | ||
521 | } | ||
522 | |||
523 | void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, | ||
524 | struct ethtool_regs *regs, void *_p) | ||
525 | { | ||
526 | u16 *p = _p; | ||
527 | int i; | ||
528 | |||
529 | regs->version = 0; | ||
530 | |||
531 | memset(p, 0xff, 32 * sizeof(u16)); | ||
532 | |||
533 | for (i = 0; i < 32; i++) { | ||
534 | int ret; | ||
535 | |||
536 | ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i); | ||
537 | if (ret >= 0) | ||
538 | p[i] = ret; | ||
539 | } | ||
540 | } | ||
541 | |||
502 | static int __init mv88e6xxx_init(void) | 542 | static int __init mv88e6xxx_init(void) |
503 | { | 543 | { |
504 | #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) | 544 | #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) |
@@ -507,6 +547,9 @@ static int __init mv88e6xxx_init(void) | |||
507 | #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) | 547 | #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) |
508 | register_switch_driver(&mv88e6123_61_65_switch_driver); | 548 | register_switch_driver(&mv88e6123_61_65_switch_driver); |
509 | #endif | 549 | #endif |
550 | #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352) | ||
551 | register_switch_driver(&mv88e6352_switch_driver); | ||
552 | #endif | ||
510 | #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171) | 553 | #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171) |
511 | register_switch_driver(&mv88e6171_switch_driver); | 554 | register_switch_driver(&mv88e6171_switch_driver); |
512 | #endif | 555 | #endif |
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 5e5145ad9525..a0780b08bb4c 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h | |||
@@ -37,6 +37,17 @@ struct mv88e6xxx_priv_state { | |||
37 | */ | 37 | */ |
38 | struct mutex stats_mutex; | 38 | struct mutex stats_mutex; |
39 | 39 | ||
40 | /* This mutex serializes phy access for chips with | ||
41 | * indirect phy addressing. It is unused for chips | ||
42 | * with direct phy access. | ||
43 | */ | ||
44 | struct mutex phy_mutex; | ||
45 | |||
46 | /* This mutex serializes eeprom access for chips with | ||
47 | * eeprom support. | ||
48 | */ | ||
49 | struct mutex eeprom_mutex; | ||
50 | |||
40 | int id; /* switch product id */ | 51 | int id; /* switch product id */ |
41 | }; | 52 | }; |
42 | 53 | ||
@@ -67,9 +78,13 @@ void mv88e6xxx_get_strings(struct dsa_switch *ds, | |||
67 | void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, | 78 | void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, |
68 | int nr_stats, struct mv88e6xxx_hw_stat *stats, | 79 | int nr_stats, struct mv88e6xxx_hw_stat *stats, |
69 | int port, uint64_t *data); | 80 | int port, uint64_t *data); |
81 | int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port); | ||
82 | void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, | ||
83 | struct ethtool_regs *regs, void *_p); | ||
70 | 84 | ||
71 | extern struct dsa_switch_driver mv88e6131_switch_driver; | 85 | extern struct dsa_switch_driver mv88e6131_switch_driver; |
72 | extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; | 86 | extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; |
87 | extern struct dsa_switch_driver mv88e6352_switch_driver; | ||
73 | extern struct dsa_switch_driver mv88e6171_switch_driver; | 88 | extern struct dsa_switch_driver mv88e6171_switch_driver; |
74 | 89 | ||
75 | #define REG_READ(addr, reg) \ | 90 | #define REG_READ(addr, reg) \ |
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 48775b88bac7..dede43f4ce09 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c | |||
@@ -1285,7 +1285,7 @@ typhoon_request_firmware(struct typhoon *tp) | |||
1285 | return err; | 1285 | return err; |
1286 | } | 1286 | } |
1287 | 1287 | ||
1288 | image_data = (u8 *) typhoon_fw->data; | 1288 | image_data = typhoon_fw->data; |
1289 | remaining = typhoon_fw->size; | 1289 | remaining = typhoon_fw->size; |
1290 | if (remaining < sizeof(struct typhoon_file_header)) | 1290 | if (remaining < sizeof(struct typhoon_file_header)) |
1291 | goto invalid_fw; | 1291 | goto invalid_fw; |
@@ -1343,7 +1343,7 @@ typhoon_download_firmware(struct typhoon *tp) | |||
1343 | int i; | 1343 | int i; |
1344 | int err; | 1344 | int err; |
1345 | 1345 | ||
1346 | image_data = (u8 *) typhoon_fw->data; | 1346 | image_data = typhoon_fw->data; |
1347 | fHdr = (struct typhoon_file_header *) image_data; | 1347 | fHdr = (struct typhoon_file_header *) image_data; |
1348 | 1348 | ||
1349 | /* Cannot just map the firmware image using pci_map_single() as | 1349 | /* Cannot just map the firmware image using pci_map_single() as |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 3c208cc6f6bb..f22659438436 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
@@ -761,10 +761,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) | |||
761 | ndev = pdata->ndev; | 761 | ndev = pdata->ndev; |
762 | 762 | ||
763 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr"); | 763 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr"); |
764 | if (!res) { | ||
765 | dev_err(dev, "Resource enet_csr not defined\n"); | ||
766 | return -ENODEV; | ||
767 | } | ||
768 | pdata->base_addr = devm_ioremap_resource(dev, res); | 764 | pdata->base_addr = devm_ioremap_resource(dev, res); |
769 | if (IS_ERR(pdata->base_addr)) { | 765 | if (IS_ERR(pdata->base_addr)) { |
770 | dev_err(dev, "Unable to retrieve ENET Port CSR region\n"); | 766 | dev_err(dev, "Unable to retrieve ENET Port CSR region\n"); |
@@ -772,10 +768,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) | |||
772 | } | 768 | } |
773 | 769 | ||
774 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr"); | 770 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr"); |
775 | if (!res) { | ||
776 | dev_err(dev, "Resource ring_csr not defined\n"); | ||
777 | return -ENODEV; | ||
778 | } | ||
779 | pdata->ring_csr_addr = devm_ioremap_resource(dev, res); | 771 | pdata->ring_csr_addr = devm_ioremap_resource(dev, res); |
780 | if (IS_ERR(pdata->ring_csr_addr)) { | 772 | if (IS_ERR(pdata->ring_csr_addr)) { |
781 | dev_err(dev, "Unable to retrieve ENET Ring CSR region\n"); | 773 | dev_err(dev, "Unable to retrieve ENET Ring CSR region\n"); |
@@ -783,10 +775,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) | |||
783 | } | 775 | } |
784 | 776 | ||
785 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd"); | 777 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd"); |
786 | if (!res) { | ||
787 | dev_err(dev, "Resource ring_cmd not defined\n"); | ||
788 | return -ENODEV; | ||
789 | } | ||
790 | pdata->ring_cmd_addr = devm_ioremap_resource(dev, res); | 778 | pdata->ring_cmd_addr = devm_ioremap_resource(dev, res); |
791 | if (IS_ERR(pdata->ring_cmd_addr)) { | 779 | if (IS_ERR(pdata->ring_cmd_addr)) { |
792 | dev_err(dev, "Unable to retrieve ENET Ring command region\n"); | 780 | dev_err(dev, "Unable to retrieve ENET Ring command region\n"); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 40beef5bca88..e9af4af5edba 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -1139,7 +1139,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) | |||
1139 | prefetch(fp->txdata_ptr[cos]->tx_cons_sb); | 1139 | prefetch(fp->txdata_ptr[cos]->tx_cons_sb); |
1140 | 1140 | ||
1141 | prefetch(&fp->sb_running_index[SM_RX_ID]); | 1141 | prefetch(&fp->sb_running_index[SM_RX_ID]); |
1142 | napi_schedule(&bnx2x_fp(bp, fp->index, napi)); | 1142 | napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); |
1143 | 1143 | ||
1144 | return IRQ_HANDLED; | 1144 | return IRQ_HANDLED; |
1145 | } | 1145 | } |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 74fbf9ea7bd8..c4bd025c74c9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -1931,7 +1931,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1931 | for_each_cos_in_tx_queue(fp, cos) | 1931 | for_each_cos_in_tx_queue(fp, cos) |
1932 | prefetch(fp->txdata_ptr[cos]->tx_cons_sb); | 1932 | prefetch(fp->txdata_ptr[cos]->tx_cons_sb); |
1933 | prefetch(&fp->sb_running_index[SM_RX_ID]); | 1933 | prefetch(&fp->sb_running_index[SM_RX_ID]); |
1934 | napi_schedule(&bnx2x_fp(bp, fp->index, napi)); | 1934 | napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); |
1935 | status &= ~mask; | 1935 | status &= ~mask; |
1936 | } | 1936 | } |
1937 | } | 1937 | } |
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index e285f384b096..07719676c305 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c | |||
@@ -216,14 +216,10 @@ struct net_device * __init mac89x0_probe(int unit) | |||
216 | ioaddr = (unsigned long) | 216 | ioaddr = (unsigned long) |
217 | nubus_slot_addr(slot) | (((slot&0xf) << 20) + DEFAULTIOBASE); | 217 | nubus_slot_addr(slot) | (((slot&0xf) << 20) + DEFAULTIOBASE); |
218 | { | 218 | { |
219 | unsigned long flags; | ||
220 | int card_present; | 219 | int card_present; |
221 | 220 | ||
222 | local_irq_save(flags); | 221 | card_present = (hwreg_present((void *)ioaddr + 4) && |
223 | card_present = (hwreg_present((void*) ioaddr+4) && | 222 | hwreg_present((void *)ioaddr + DATA_PORT)); |
224 | hwreg_present((void*) ioaddr + DATA_PORT)); | ||
225 | local_irq_restore(flags); | ||
226 | |||
227 | if (!card_present) | 223 | if (!card_present) |
228 | goto out; | 224 | goto out; |
229 | } | 225 | } |
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 4fdf0aa16978..86dccb26fecc 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -173,10 +173,12 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, | |||
173 | static int gfar_init_bds(struct net_device *ndev) | 173 | static int gfar_init_bds(struct net_device *ndev) |
174 | { | 174 | { |
175 | struct gfar_private *priv = netdev_priv(ndev); | 175 | struct gfar_private *priv = netdev_priv(ndev); |
176 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
176 | struct gfar_priv_tx_q *tx_queue = NULL; | 177 | struct gfar_priv_tx_q *tx_queue = NULL; |
177 | struct gfar_priv_rx_q *rx_queue = NULL; | 178 | struct gfar_priv_rx_q *rx_queue = NULL; |
178 | struct txbd8 *txbdp; | 179 | struct txbd8 *txbdp; |
179 | struct rxbd8 *rxbdp; | 180 | struct rxbd8 *rxbdp; |
181 | u32 *rfbptr; | ||
180 | int i, j; | 182 | int i, j; |
181 | 183 | ||
182 | for (i = 0; i < priv->num_tx_queues; i++) { | 184 | for (i = 0; i < priv->num_tx_queues; i++) { |
@@ -201,6 +203,7 @@ static int gfar_init_bds(struct net_device *ndev) | |||
201 | txbdp->status |= TXBD_WRAP; | 203 | txbdp->status |= TXBD_WRAP; |
202 | } | 204 | } |
203 | 205 | ||
206 | rfbptr = ®s->rfbptr0; | ||
204 | for (i = 0; i < priv->num_rx_queues; i++) { | 207 | for (i = 0; i < priv->num_rx_queues; i++) { |
205 | rx_queue = priv->rx_queue[i]; | 208 | rx_queue = priv->rx_queue[i]; |
206 | rx_queue->cur_rx = rx_queue->rx_bd_base; | 209 | rx_queue->cur_rx = rx_queue->rx_bd_base; |
@@ -227,6 +230,8 @@ static int gfar_init_bds(struct net_device *ndev) | |||
227 | rxbdp++; | 230 | rxbdp++; |
228 | } | 231 | } |
229 | 232 | ||
233 | rx_queue->rfbptr = rfbptr; | ||
234 | rfbptr += 2; | ||
230 | } | 235 | } |
231 | 236 | ||
232 | return 0; | 237 | return 0; |
@@ -336,6 +341,20 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv) | |||
336 | } | 341 | } |
337 | } | 342 | } |
338 | 343 | ||
344 | static void gfar_init_rqprm(struct gfar_private *priv) | ||
345 | { | ||
346 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
347 | u32 __iomem *baddr; | ||
348 | int i; | ||
349 | |||
350 | baddr = ®s->rqprm0; | ||
351 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
352 | gfar_write(baddr, priv->rx_queue[i]->rx_ring_size | | ||
353 | (DEFAULT_RX_LFC_THR << FBTHR_SHIFT)); | ||
354 | baddr++; | ||
355 | } | ||
356 | } | ||
357 | |||
339 | static void gfar_rx_buff_size_config(struct gfar_private *priv) | 358 | static void gfar_rx_buff_size_config(struct gfar_private *priv) |
340 | { | 359 | { |
341 | int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN; | 360 | int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN; |
@@ -396,6 +415,13 @@ static void gfar_mac_rx_config(struct gfar_private *priv) | |||
396 | if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) | 415 | if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) |
397 | rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; | 416 | rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; |
398 | 417 | ||
418 | /* Clear the LFC bit */ | ||
419 | gfar_write(®s->rctrl, rctrl); | ||
420 | /* Init flow control threshold values */ | ||
421 | gfar_init_rqprm(priv); | ||
422 | gfar_write(®s->ptv, DEFAULT_LFC_PTVVAL); | ||
423 | rctrl |= RCTRL_LFC; | ||
424 | |||
399 | /* Init rctrl based on our settings */ | 425 | /* Init rctrl based on our settings */ |
400 | gfar_write(®s->rctrl, rctrl); | 426 | gfar_write(®s->rctrl, rctrl); |
401 | } | 427 | } |
@@ -1687,6 +1713,9 @@ static int init_phy(struct net_device *dev) | |||
1687 | priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); | 1713 | priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); |
1688 | priv->phydev->advertising = priv->phydev->supported; | 1714 | priv->phydev->advertising = priv->phydev->supported; |
1689 | 1715 | ||
1716 | /* Add support for flow control, but don't advertise it by default */ | ||
1717 | priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); | ||
1718 | |||
1690 | return 0; | 1719 | return 0; |
1691 | } | 1720 | } |
1692 | 1721 | ||
@@ -2856,6 +2885,10 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) | |||
2856 | /* Setup the new bdp */ | 2885 | /* Setup the new bdp */ |
2857 | gfar_new_rxbdp(rx_queue, bdp, newskb); | 2886 | gfar_new_rxbdp(rx_queue, bdp, newskb); |
2858 | 2887 | ||
2888 | /* Update Last Free RxBD pointer for LFC */ | ||
2889 | if (unlikely(rx_queue->rfbptr && priv->tx_actual_en)) | ||
2890 | gfar_write(rx_queue->rfbptr, (u32)bdp); | ||
2891 | |||
2859 | /* Update to the next pointer */ | 2892 | /* Update to the next pointer */ |
2860 | bdp = next_bd(bdp, base, rx_queue->rx_ring_size); | 2893 | bdp = next_bd(bdp, base, rx_queue->rx_ring_size); |
2861 | 2894 | ||
@@ -3370,7 +3403,11 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) | |||
3370 | if (phydev->asym_pause) | 3403 | if (phydev->asym_pause) |
3371 | rmt_adv |= LPA_PAUSE_ASYM; | 3404 | rmt_adv |= LPA_PAUSE_ASYM; |
3372 | 3405 | ||
3373 | lcl_adv = mii_advertise_flowctrl(phydev->advertising); | 3406 | lcl_adv = 0; |
3407 | if (phydev->advertising & ADVERTISED_Pause) | ||
3408 | lcl_adv |= ADVERTISE_PAUSE_CAP; | ||
3409 | if (phydev->advertising & ADVERTISED_Asym_Pause) | ||
3410 | lcl_adv |= ADVERTISE_PAUSE_ASYM; | ||
3374 | 3411 | ||
3375 | flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); | 3412 | flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); |
3376 | if (flowctrl & FLOW_CTRL_TX) | 3413 | if (flowctrl & FLOW_CTRL_TX) |
@@ -3386,6 +3423,9 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) | |||
3386 | { | 3423 | { |
3387 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | 3424 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
3388 | struct phy_device *phydev = priv->phydev; | 3425 | struct phy_device *phydev = priv->phydev; |
3426 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
3427 | int i; | ||
3428 | struct rxbd8 *bdp; | ||
3389 | 3429 | ||
3390 | if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) | 3430 | if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) |
3391 | return; | 3431 | return; |
@@ -3394,6 +3434,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) | |||
3394 | u32 tempval1 = gfar_read(®s->maccfg1); | 3434 | u32 tempval1 = gfar_read(®s->maccfg1); |
3395 | u32 tempval = gfar_read(®s->maccfg2); | 3435 | u32 tempval = gfar_read(®s->maccfg2); |
3396 | u32 ecntrl = gfar_read(®s->ecntrl); | 3436 | u32 ecntrl = gfar_read(®s->ecntrl); |
3437 | u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW); | ||
3397 | 3438 | ||
3398 | if (phydev->duplex != priv->oldduplex) { | 3439 | if (phydev->duplex != priv->oldduplex) { |
3399 | if (!(phydev->duplex)) | 3440 | if (!(phydev->duplex)) |
@@ -3438,6 +3479,26 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) | |||
3438 | tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); | 3479 | tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); |
3439 | tempval1 |= gfar_get_flowctrl_cfg(priv); | 3480 | tempval1 |= gfar_get_flowctrl_cfg(priv); |
3440 | 3481 | ||
3482 | /* Turn last free buffer recording on */ | ||
3483 | if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { | ||
3484 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
3485 | rx_queue = priv->rx_queue[i]; | ||
3486 | bdp = rx_queue->cur_rx; | ||
3487 | /* skip to previous bd */ | ||
3488 | bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1, | ||
3489 | rx_queue->rx_bd_base, | ||
3490 | rx_queue->rx_ring_size); | ||
3491 | |||
3492 | if (rx_queue->rfbptr) | ||
3493 | gfar_write(rx_queue->rfbptr, (u32)bdp); | ||
3494 | } | ||
3495 | |||
3496 | priv->tx_actual_en = 1; | ||
3497 | } | ||
3498 | |||
3499 | if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval)) | ||
3500 | priv->tx_actual_en = 0; | ||
3501 | |||
3441 | gfar_write(®s->maccfg1, tempval1); | 3502 | gfar_write(®s->maccfg1, tempval1); |
3442 | gfar_write(®s->maccfg2, tempval); | 3503 | gfar_write(®s->maccfg2, tempval); |
3443 | gfar_write(®s->ecntrl, ecntrl); | 3504 | gfar_write(®s->ecntrl, ecntrl); |
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 2805cfbf1765..b581b8823a2a 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h | |||
@@ -99,6 +99,10 @@ extern const char gfar_driver_version[]; | |||
99 | #define GFAR_MAX_FIFO_STARVE 511 | 99 | #define GFAR_MAX_FIFO_STARVE 511 |
100 | #define GFAR_MAX_FIFO_STARVE_OFF 511 | 100 | #define GFAR_MAX_FIFO_STARVE_OFF 511 |
101 | 101 | ||
102 | #define FBTHR_SHIFT 24 | ||
103 | #define DEFAULT_RX_LFC_THR 16 | ||
104 | #define DEFAULT_LFC_PTVVAL 4 | ||
105 | |||
102 | #define DEFAULT_RX_BUFFER_SIZE 1536 | 106 | #define DEFAULT_RX_BUFFER_SIZE 1536 |
103 | #define TX_RING_MOD_MASK(size) (size-1) | 107 | #define TX_RING_MOD_MASK(size) (size-1) |
104 | #define RX_RING_MOD_MASK(size) (size-1) | 108 | #define RX_RING_MOD_MASK(size) (size-1) |
@@ -145,9 +149,7 @@ extern const char gfar_driver_version[]; | |||
145 | | SUPPORTED_Autoneg \ | 149 | | SUPPORTED_Autoneg \ |
146 | | SUPPORTED_MII) | 150 | | SUPPORTED_MII) |
147 | 151 | ||
148 | #define GFAR_SUPPORTED_GBIT (SUPPORTED_1000baseT_Full \ | 152 | #define GFAR_SUPPORTED_GBIT SUPPORTED_1000baseT_Full |
149 | | SUPPORTED_Pause \ | ||
150 | | SUPPORTED_Asym_Pause) | ||
151 | 153 | ||
152 | /* TBI register addresses */ | 154 | /* TBI register addresses */ |
153 | #define MII_TBICON 0x11 | 155 | #define MII_TBICON 0x11 |
@@ -275,6 +277,7 @@ extern const char gfar_driver_version[]; | |||
275 | 277 | ||
276 | #define RCTRL_TS_ENABLE 0x01000000 | 278 | #define RCTRL_TS_ENABLE 0x01000000 |
277 | #define RCTRL_PAL_MASK 0x001f0000 | 279 | #define RCTRL_PAL_MASK 0x001f0000 |
280 | #define RCTRL_LFC 0x00004000 | ||
278 | #define RCTRL_VLEX 0x00002000 | 281 | #define RCTRL_VLEX 0x00002000 |
279 | #define RCTRL_FILREN 0x00001000 | 282 | #define RCTRL_FILREN 0x00001000 |
280 | #define RCTRL_GHTX 0x00000400 | 283 | #define RCTRL_GHTX 0x00000400 |
@@ -851,7 +854,32 @@ struct gfar { | |||
851 | u8 res23c[248]; | 854 | u8 res23c[248]; |
852 | u32 attr; /* 0x.bf8 - Attributes Register */ | 855 | u32 attr; /* 0x.bf8 - Attributes Register */ |
853 | u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */ | 856 | u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */ |
854 | u8 res24[688]; | 857 | u32 rqprm0; /* 0x.c00 - Receive queue parameters register 0 */ |
858 | u32 rqprm1; /* 0x.c04 - Receive queue parameters register 1 */ | ||
859 | u32 rqprm2; /* 0x.c08 - Receive queue parameters register 2 */ | ||
860 | u32 rqprm3; /* 0x.c0c - Receive queue parameters register 3 */ | ||
861 | u32 rqprm4; /* 0x.c10 - Receive queue parameters register 4 */ | ||
862 | u32 rqprm5; /* 0x.c14 - Receive queue parameters register 5 */ | ||
863 | u32 rqprm6; /* 0x.c18 - Receive queue parameters register 6 */ | ||
864 | u32 rqprm7; /* 0x.c1c - Receive queue parameters register 7 */ | ||
865 | u8 res24[36]; | ||
866 | u32 rfbptr0; /* 0x.c44 - Last free RxBD pointer for ring 0 */ | ||
867 | u8 res24a[4]; | ||
868 | u32 rfbptr1; /* 0x.c4c - Last free RxBD pointer for ring 1 */ | ||
869 | u8 res24b[4]; | ||
870 | u32 rfbptr2; /* 0x.c54 - Last free RxBD pointer for ring 2 */ | ||
871 | u8 res24c[4]; | ||
872 | u32 rfbptr3; /* 0x.c5c - Last free RxBD pointer for ring 3 */ | ||
873 | u8 res24d[4]; | ||
874 | u32 rfbptr4; /* 0x.c64 - Last free RxBD pointer for ring 4 */ | ||
875 | u8 res24e[4]; | ||
876 | u32 rfbptr5; /* 0x.c6c - Last free RxBD pointer for ring 5 */ | ||
877 | u8 res24f[4]; | ||
878 | u32 rfbptr6; /* 0x.c74 - Last free RxBD pointer for ring 6 */ | ||
879 | u8 res24g[4]; | ||
880 | u32 rfbptr7; /* 0x.c7c - Last free RxBD pointer for ring 7 */ | ||
881 | u8 res24h[4]; | ||
882 | u8 res24x[556]; | ||
855 | u32 isrg0; /* 0x.eb0 - Interrupt steering group 0 register */ | 883 | u32 isrg0; /* 0x.eb0 - Interrupt steering group 0 register */ |
856 | u32 isrg1; /* 0x.eb4 - Interrupt steering group 1 register */ | 884 | u32 isrg1; /* 0x.eb4 - Interrupt steering group 1 register */ |
857 | u32 isrg2; /* 0x.eb8 - Interrupt steering group 2 register */ | 885 | u32 isrg2; /* 0x.eb8 - Interrupt steering group 2 register */ |
@@ -1011,6 +1039,7 @@ struct gfar_priv_rx_q { | |||
1011 | /* RX Coalescing values */ | 1039 | /* RX Coalescing values */ |
1012 | unsigned char rxcoalescing; | 1040 | unsigned char rxcoalescing; |
1013 | unsigned long rxic; | 1041 | unsigned long rxic; |
1042 | u32 *rfbptr; | ||
1014 | }; | 1043 | }; |
1015 | 1044 | ||
1016 | enum gfar_irqinfo_id { | 1045 | enum gfar_irqinfo_id { |
@@ -1101,6 +1130,7 @@ struct gfar_private { | |||
1101 | unsigned int num_tx_queues; | 1130 | unsigned int num_tx_queues; |
1102 | unsigned int num_rx_queues; | 1131 | unsigned int num_rx_queues; |
1103 | unsigned int num_grps; | 1132 | unsigned int num_grps; |
1133 | int tx_actual_en; | ||
1104 | 1134 | ||
1105 | /* Network Statistics */ | 1135 | /* Network Statistics */ |
1106 | struct gfar_extra_stats extra_stats; | 1136 | struct gfar_extra_stats extra_stats; |
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 76d70708f864..3e1a9c1a67a9 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c | |||
@@ -579,8 +579,13 @@ static int gfar_spauseparam(struct net_device *dev, | |||
579 | u32 tempval; | 579 | u32 tempval; |
580 | tempval = gfar_read(®s->maccfg1); | 580 | tempval = gfar_read(®s->maccfg1); |
581 | tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); | 581 | tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); |
582 | if (priv->tx_pause_en) | 582 | |
583 | priv->tx_actual_en = 0; | ||
584 | if (priv->tx_pause_en) { | ||
585 | priv->tx_actual_en = 1; | ||
583 | tempval |= MACCFG1_TX_FLOW; | 586 | tempval |= MACCFG1_TX_FLOW; |
587 | } | ||
588 | |||
584 | if (priv->rx_pause_en) | 589 | if (priv->rx_pause_en) |
585 | tempval |= MACCFG1_RX_FLOW; | 590 | tempval |= MACCFG1_RX_FLOW; |
586 | gfar_write(®s->maccfg1, tempval); | 591 | gfar_write(®s->maccfg1, tempval); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 15f289f2917f..a65bc4398971 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | |||
@@ -33,8 +33,8 @@ | |||
33 | * This file needs to comply with the Linux Kernel coding style. | 33 | * This file needs to comply with the Linux Kernel coding style. |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #define I40E_FW_API_VERSION_MAJOR 0x0001 | 36 | #define I40E_FW_API_VERSION_MAJOR 0x0001 |
37 | #define I40E_FW_API_VERSION_MINOR 0x0002 | 37 | #define I40E_FW_API_VERSION_MINOR 0x0002 |
38 | 38 | ||
39 | struct i40e_aq_desc { | 39 | struct i40e_aq_desc { |
40 | __le16 flags; | 40 | __le16 flags; |
@@ -66,216 +66,216 @@ struct i40e_aq_desc { | |||
66 | */ | 66 | */ |
67 | 67 | ||
68 | /* command flags and offsets*/ | 68 | /* command flags and offsets*/ |
69 | #define I40E_AQ_FLAG_DD_SHIFT 0 | 69 | #define I40E_AQ_FLAG_DD_SHIFT 0 |
70 | #define I40E_AQ_FLAG_CMP_SHIFT 1 | 70 | #define I40E_AQ_FLAG_CMP_SHIFT 1 |
71 | #define I40E_AQ_FLAG_ERR_SHIFT 2 | 71 | #define I40E_AQ_FLAG_ERR_SHIFT 2 |
72 | #define I40E_AQ_FLAG_VFE_SHIFT 3 | 72 | #define I40E_AQ_FLAG_VFE_SHIFT 3 |
73 | #define I40E_AQ_FLAG_LB_SHIFT 9 | 73 | #define I40E_AQ_FLAG_LB_SHIFT 9 |
74 | #define I40E_AQ_FLAG_RD_SHIFT 10 | 74 | #define I40E_AQ_FLAG_RD_SHIFT 10 |
75 | #define I40E_AQ_FLAG_VFC_SHIFT 11 | 75 | #define I40E_AQ_FLAG_VFC_SHIFT 11 |
76 | #define I40E_AQ_FLAG_BUF_SHIFT 12 | 76 | #define I40E_AQ_FLAG_BUF_SHIFT 12 |
77 | #define I40E_AQ_FLAG_SI_SHIFT 13 | 77 | #define I40E_AQ_FLAG_SI_SHIFT 13 |
78 | #define I40E_AQ_FLAG_EI_SHIFT 14 | 78 | #define I40E_AQ_FLAG_EI_SHIFT 14 |
79 | #define I40E_AQ_FLAG_FE_SHIFT 15 | 79 | #define I40E_AQ_FLAG_FE_SHIFT 15 |
80 | 80 | ||
81 | #define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ | 81 | #define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ |
82 | #define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ | 82 | #define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ |
83 | #define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ | 83 | #define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ |
84 | #define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ | 84 | #define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ |
85 | #define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ | 85 | #define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ |
86 | #define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ | 86 | #define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ |
87 | #define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ | 87 | #define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ |
88 | #define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ | 88 | #define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ |
89 | #define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ | 89 | #define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ |
90 | #define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ | 90 | #define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ |
91 | #define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ | 91 | #define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ |
92 | 92 | ||
93 | /* error codes */ | 93 | /* error codes */ |
94 | enum i40e_admin_queue_err { | 94 | enum i40e_admin_queue_err { |
95 | I40E_AQ_RC_OK = 0, /* success */ | 95 | I40E_AQ_RC_OK = 0, /* success */ |
96 | I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ | 96 | I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ |
97 | I40E_AQ_RC_ENOENT = 2, /* No such element */ | 97 | I40E_AQ_RC_ENOENT = 2, /* No such element */ |
98 | I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ | 98 | I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ |
99 | I40E_AQ_RC_EINTR = 4, /* operation interrupted */ | 99 | I40E_AQ_RC_EINTR = 4, /* operation interrupted */ |
100 | I40E_AQ_RC_EIO = 5, /* I/O error */ | 100 | I40E_AQ_RC_EIO = 5, /* I/O error */ |
101 | I40E_AQ_RC_ENXIO = 6, /* No such resource */ | 101 | I40E_AQ_RC_ENXIO = 6, /* No such resource */ |
102 | I40E_AQ_RC_E2BIG = 7, /* Arg too long */ | 102 | I40E_AQ_RC_E2BIG = 7, /* Arg too long */ |
103 | I40E_AQ_RC_EAGAIN = 8, /* Try again */ | 103 | I40E_AQ_RC_EAGAIN = 8, /* Try again */ |
104 | I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ | 104 | I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ |
105 | I40E_AQ_RC_EACCES = 10, /* Permission denied */ | 105 | I40E_AQ_RC_EACCES = 10, /* Permission denied */ |
106 | I40E_AQ_RC_EFAULT = 11, /* Bad address */ | 106 | I40E_AQ_RC_EFAULT = 11, /* Bad address */ |
107 | I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ | 107 | I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ |
108 | I40E_AQ_RC_EEXIST = 13, /* object already exists */ | 108 | I40E_AQ_RC_EEXIST = 13, /* object already exists */ |
109 | I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ | 109 | I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ |
110 | I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ | 110 | I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ |
111 | I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ | 111 | I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ |
112 | I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ | 112 | I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ |
113 | I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ | 113 | I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ |
114 | I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */ | 114 | I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ |
115 | I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ | 115 | I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ |
116 | I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ | 116 | I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ |
117 | I40E_AQ_RC_EFBIG = 22, /* File too large */ | 117 | I40E_AQ_RC_EFBIG = 22, /* File too large */ |
118 | }; | 118 | }; |
119 | 119 | ||
120 | /* Admin Queue command opcodes */ | 120 | /* Admin Queue command opcodes */ |
121 | enum i40e_admin_queue_opc { | 121 | enum i40e_admin_queue_opc { |
122 | /* aq commands */ | 122 | /* aq commands */ |
123 | i40e_aqc_opc_get_version = 0x0001, | 123 | i40e_aqc_opc_get_version = 0x0001, |
124 | i40e_aqc_opc_driver_version = 0x0002, | 124 | i40e_aqc_opc_driver_version = 0x0002, |
125 | i40e_aqc_opc_queue_shutdown = 0x0003, | 125 | i40e_aqc_opc_queue_shutdown = 0x0003, |
126 | i40e_aqc_opc_set_pf_context = 0x0004, | 126 | i40e_aqc_opc_set_pf_context = 0x0004, |
127 | 127 | ||
128 | /* resource ownership */ | 128 | /* resource ownership */ |
129 | i40e_aqc_opc_request_resource = 0x0008, | 129 | i40e_aqc_opc_request_resource = 0x0008, |
130 | i40e_aqc_opc_release_resource = 0x0009, | 130 | i40e_aqc_opc_release_resource = 0x0009, |
131 | 131 | ||
132 | i40e_aqc_opc_list_func_capabilities = 0x000A, | 132 | i40e_aqc_opc_list_func_capabilities = 0x000A, |
133 | i40e_aqc_opc_list_dev_capabilities = 0x000B, | 133 | i40e_aqc_opc_list_dev_capabilities = 0x000B, |
134 | 134 | ||
135 | i40e_aqc_opc_set_cppm_configuration = 0x0103, | 135 | i40e_aqc_opc_set_cppm_configuration = 0x0103, |
136 | i40e_aqc_opc_set_arp_proxy_entry = 0x0104, | 136 | i40e_aqc_opc_set_arp_proxy_entry = 0x0104, |
137 | i40e_aqc_opc_set_ns_proxy_entry = 0x0105, | 137 | i40e_aqc_opc_set_ns_proxy_entry = 0x0105, |
138 | 138 | ||
139 | /* LAA */ | 139 | /* LAA */ |
140 | i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ | 140 | i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ |
141 | i40e_aqc_opc_mac_address_read = 0x0107, | 141 | i40e_aqc_opc_mac_address_read = 0x0107, |
142 | i40e_aqc_opc_mac_address_write = 0x0108, | 142 | i40e_aqc_opc_mac_address_write = 0x0108, |
143 | 143 | ||
144 | /* PXE */ | 144 | /* PXE */ |
145 | i40e_aqc_opc_clear_pxe_mode = 0x0110, | 145 | i40e_aqc_opc_clear_pxe_mode = 0x0110, |
146 | 146 | ||
147 | /* internal switch commands */ | 147 | /* internal switch commands */ |
148 | i40e_aqc_opc_get_switch_config = 0x0200, | 148 | i40e_aqc_opc_get_switch_config = 0x0200, |
149 | i40e_aqc_opc_add_statistics = 0x0201, | 149 | i40e_aqc_opc_add_statistics = 0x0201, |
150 | i40e_aqc_opc_remove_statistics = 0x0202, | 150 | i40e_aqc_opc_remove_statistics = 0x0202, |
151 | i40e_aqc_opc_set_port_parameters = 0x0203, | 151 | i40e_aqc_opc_set_port_parameters = 0x0203, |
152 | i40e_aqc_opc_get_switch_resource_alloc = 0x0204, | 152 | i40e_aqc_opc_get_switch_resource_alloc = 0x0204, |
153 | 153 | ||
154 | i40e_aqc_opc_add_vsi = 0x0210, | 154 | i40e_aqc_opc_add_vsi = 0x0210, |
155 | i40e_aqc_opc_update_vsi_parameters = 0x0211, | 155 | i40e_aqc_opc_update_vsi_parameters = 0x0211, |
156 | i40e_aqc_opc_get_vsi_parameters = 0x0212, | 156 | i40e_aqc_opc_get_vsi_parameters = 0x0212, |
157 | 157 | ||
158 | i40e_aqc_opc_add_pv = 0x0220, | 158 | i40e_aqc_opc_add_pv = 0x0220, |
159 | i40e_aqc_opc_update_pv_parameters = 0x0221, | 159 | i40e_aqc_opc_update_pv_parameters = 0x0221, |
160 | i40e_aqc_opc_get_pv_parameters = 0x0222, | 160 | i40e_aqc_opc_get_pv_parameters = 0x0222, |
161 | 161 | ||
162 | i40e_aqc_opc_add_veb = 0x0230, | 162 | i40e_aqc_opc_add_veb = 0x0230, |
163 | i40e_aqc_opc_update_veb_parameters = 0x0231, | 163 | i40e_aqc_opc_update_veb_parameters = 0x0231, |
164 | i40e_aqc_opc_get_veb_parameters = 0x0232, | 164 | i40e_aqc_opc_get_veb_parameters = 0x0232, |
165 | 165 | ||
166 | i40e_aqc_opc_delete_element = 0x0243, | 166 | i40e_aqc_opc_delete_element = 0x0243, |
167 | 167 | ||
168 | i40e_aqc_opc_add_macvlan = 0x0250, | 168 | i40e_aqc_opc_add_macvlan = 0x0250, |
169 | i40e_aqc_opc_remove_macvlan = 0x0251, | 169 | i40e_aqc_opc_remove_macvlan = 0x0251, |
170 | i40e_aqc_opc_add_vlan = 0x0252, | 170 | i40e_aqc_opc_add_vlan = 0x0252, |
171 | i40e_aqc_opc_remove_vlan = 0x0253, | 171 | i40e_aqc_opc_remove_vlan = 0x0253, |
172 | i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, | 172 | i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, |
173 | i40e_aqc_opc_add_tag = 0x0255, | 173 | i40e_aqc_opc_add_tag = 0x0255, |
174 | i40e_aqc_opc_remove_tag = 0x0256, | 174 | i40e_aqc_opc_remove_tag = 0x0256, |
175 | i40e_aqc_opc_add_multicast_etag = 0x0257, | 175 | i40e_aqc_opc_add_multicast_etag = 0x0257, |
176 | i40e_aqc_opc_remove_multicast_etag = 0x0258, | 176 | i40e_aqc_opc_remove_multicast_etag = 0x0258, |
177 | i40e_aqc_opc_update_tag = 0x0259, | 177 | i40e_aqc_opc_update_tag = 0x0259, |
178 | i40e_aqc_opc_add_control_packet_filter = 0x025A, | 178 | i40e_aqc_opc_add_control_packet_filter = 0x025A, |
179 | i40e_aqc_opc_remove_control_packet_filter = 0x025B, | 179 | i40e_aqc_opc_remove_control_packet_filter = 0x025B, |
180 | i40e_aqc_opc_add_cloud_filters = 0x025C, | 180 | i40e_aqc_opc_add_cloud_filters = 0x025C, |
181 | i40e_aqc_opc_remove_cloud_filters = 0x025D, | 181 | i40e_aqc_opc_remove_cloud_filters = 0x025D, |
182 | 182 | ||
183 | i40e_aqc_opc_add_mirror_rule = 0x0260, | 183 | i40e_aqc_opc_add_mirror_rule = 0x0260, |
184 | i40e_aqc_opc_delete_mirror_rule = 0x0261, | 184 | i40e_aqc_opc_delete_mirror_rule = 0x0261, |
185 | 185 | ||
186 | /* DCB commands */ | 186 | /* DCB commands */ |
187 | i40e_aqc_opc_dcb_ignore_pfc = 0x0301, | 187 | i40e_aqc_opc_dcb_ignore_pfc = 0x0301, |
188 | i40e_aqc_opc_dcb_updated = 0x0302, | 188 | i40e_aqc_opc_dcb_updated = 0x0302, |
189 | 189 | ||
190 | /* TX scheduler */ | 190 | /* TX scheduler */ |
191 | i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, | 191 | i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, |
192 | i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, | 192 | i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, |
193 | i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, | 193 | i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, |
194 | i40e_aqc_opc_query_vsi_bw_config = 0x0408, | 194 | i40e_aqc_opc_query_vsi_bw_config = 0x0408, |
195 | i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, | 195 | i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, |
196 | i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, | 196 | i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, |
197 | 197 | ||
198 | i40e_aqc_opc_enable_switching_comp_ets = 0x0413, | 198 | i40e_aqc_opc_enable_switching_comp_ets = 0x0413, |
199 | i40e_aqc_opc_modify_switching_comp_ets = 0x0414, | 199 | i40e_aqc_opc_modify_switching_comp_ets = 0x0414, |
200 | i40e_aqc_opc_disable_switching_comp_ets = 0x0415, | 200 | i40e_aqc_opc_disable_switching_comp_ets = 0x0415, |
201 | i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, | 201 | i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, |
202 | i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, | 202 | i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, |
203 | i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, | 203 | i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, |
204 | i40e_aqc_opc_query_port_ets_config = 0x0419, | 204 | i40e_aqc_opc_query_port_ets_config = 0x0419, |
205 | i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, | 205 | i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, |
206 | i40e_aqc_opc_suspend_port_tx = 0x041B, | 206 | i40e_aqc_opc_suspend_port_tx = 0x041B, |
207 | i40e_aqc_opc_resume_port_tx = 0x041C, | 207 | i40e_aqc_opc_resume_port_tx = 0x041C, |
208 | i40e_aqc_opc_configure_partition_bw = 0x041D, | 208 | i40e_aqc_opc_configure_partition_bw = 0x041D, |
209 | 209 | ||
210 | /* hmc */ | 210 | /* hmc */ |
211 | i40e_aqc_opc_query_hmc_resource_profile = 0x0500, | 211 | i40e_aqc_opc_query_hmc_resource_profile = 0x0500, |
212 | i40e_aqc_opc_set_hmc_resource_profile = 0x0501, | 212 | i40e_aqc_opc_set_hmc_resource_profile = 0x0501, |
213 | 213 | ||
214 | /* phy commands*/ | 214 | /* phy commands*/ |
215 | i40e_aqc_opc_get_phy_abilities = 0x0600, | 215 | i40e_aqc_opc_get_phy_abilities = 0x0600, |
216 | i40e_aqc_opc_set_phy_config = 0x0601, | 216 | i40e_aqc_opc_set_phy_config = 0x0601, |
217 | i40e_aqc_opc_set_mac_config = 0x0603, | 217 | i40e_aqc_opc_set_mac_config = 0x0603, |
218 | i40e_aqc_opc_set_link_restart_an = 0x0605, | 218 | i40e_aqc_opc_set_link_restart_an = 0x0605, |
219 | i40e_aqc_opc_get_link_status = 0x0607, | 219 | i40e_aqc_opc_get_link_status = 0x0607, |
220 | i40e_aqc_opc_set_phy_int_mask = 0x0613, | 220 | i40e_aqc_opc_set_phy_int_mask = 0x0613, |
221 | i40e_aqc_opc_get_local_advt_reg = 0x0614, | 221 | i40e_aqc_opc_get_local_advt_reg = 0x0614, |
222 | i40e_aqc_opc_set_local_advt_reg = 0x0615, | 222 | i40e_aqc_opc_set_local_advt_reg = 0x0615, |
223 | i40e_aqc_opc_get_partner_advt = 0x0616, | 223 | i40e_aqc_opc_get_partner_advt = 0x0616, |
224 | i40e_aqc_opc_set_lb_modes = 0x0618, | 224 | i40e_aqc_opc_set_lb_modes = 0x0618, |
225 | i40e_aqc_opc_get_phy_wol_caps = 0x0621, | 225 | i40e_aqc_opc_get_phy_wol_caps = 0x0621, |
226 | i40e_aqc_opc_set_phy_debug = 0x0622, | 226 | i40e_aqc_opc_set_phy_debug = 0x0622, |
227 | i40e_aqc_opc_upload_ext_phy_fm = 0x0625, | 227 | i40e_aqc_opc_upload_ext_phy_fm = 0x0625, |
228 | 228 | ||
229 | /* NVM commands */ | 229 | /* NVM commands */ |
230 | i40e_aqc_opc_nvm_read = 0x0701, | 230 | i40e_aqc_opc_nvm_read = 0x0701, |
231 | i40e_aqc_opc_nvm_erase = 0x0702, | 231 | i40e_aqc_opc_nvm_erase = 0x0702, |
232 | i40e_aqc_opc_nvm_update = 0x0703, | 232 | i40e_aqc_opc_nvm_update = 0x0703, |
233 | i40e_aqc_opc_nvm_config_read = 0x0704, | 233 | i40e_aqc_opc_nvm_config_read = 0x0704, |
234 | i40e_aqc_opc_nvm_config_write = 0x0705, | 234 | i40e_aqc_opc_nvm_config_write = 0x0705, |
235 | 235 | ||
236 | /* virtualization commands */ | 236 | /* virtualization commands */ |
237 | i40e_aqc_opc_send_msg_to_pf = 0x0801, | 237 | i40e_aqc_opc_send_msg_to_pf = 0x0801, |
238 | i40e_aqc_opc_send_msg_to_vf = 0x0802, | 238 | i40e_aqc_opc_send_msg_to_vf = 0x0802, |
239 | i40e_aqc_opc_send_msg_to_peer = 0x0803, | 239 | i40e_aqc_opc_send_msg_to_peer = 0x0803, |
240 | 240 | ||
241 | /* alternate structure */ | 241 | /* alternate structure */ |
242 | i40e_aqc_opc_alternate_write = 0x0900, | 242 | i40e_aqc_opc_alternate_write = 0x0900, |
243 | i40e_aqc_opc_alternate_write_indirect = 0x0901, | 243 | i40e_aqc_opc_alternate_write_indirect = 0x0901, |
244 | i40e_aqc_opc_alternate_read = 0x0902, | 244 | i40e_aqc_opc_alternate_read = 0x0902, |
245 | i40e_aqc_opc_alternate_read_indirect = 0x0903, | 245 | i40e_aqc_opc_alternate_read_indirect = 0x0903, |
246 | i40e_aqc_opc_alternate_write_done = 0x0904, | 246 | i40e_aqc_opc_alternate_write_done = 0x0904, |
247 | i40e_aqc_opc_alternate_set_mode = 0x0905, | 247 | i40e_aqc_opc_alternate_set_mode = 0x0905, |
248 | i40e_aqc_opc_alternate_clear_port = 0x0906, | 248 | i40e_aqc_opc_alternate_clear_port = 0x0906, |
249 | 249 | ||
250 | /* LLDP commands */ | 250 | /* LLDP commands */ |
251 | i40e_aqc_opc_lldp_get_mib = 0x0A00, | 251 | i40e_aqc_opc_lldp_get_mib = 0x0A00, |
252 | i40e_aqc_opc_lldp_update_mib = 0x0A01, | 252 | i40e_aqc_opc_lldp_update_mib = 0x0A01, |
253 | i40e_aqc_opc_lldp_add_tlv = 0x0A02, | 253 | i40e_aqc_opc_lldp_add_tlv = 0x0A02, |
254 | i40e_aqc_opc_lldp_update_tlv = 0x0A03, | 254 | i40e_aqc_opc_lldp_update_tlv = 0x0A03, |
255 | i40e_aqc_opc_lldp_delete_tlv = 0x0A04, | 255 | i40e_aqc_opc_lldp_delete_tlv = 0x0A04, |
256 | i40e_aqc_opc_lldp_stop = 0x0A05, | 256 | i40e_aqc_opc_lldp_stop = 0x0A05, |
257 | i40e_aqc_opc_lldp_start = 0x0A06, | 257 | i40e_aqc_opc_lldp_start = 0x0A06, |
258 | 258 | ||
259 | /* Tunnel commands */ | 259 | /* Tunnel commands */ |
260 | i40e_aqc_opc_add_udp_tunnel = 0x0B00, | 260 | i40e_aqc_opc_add_udp_tunnel = 0x0B00, |
261 | i40e_aqc_opc_del_udp_tunnel = 0x0B01, | 261 | i40e_aqc_opc_del_udp_tunnel = 0x0B01, |
262 | i40e_aqc_opc_tunnel_key_structure = 0x0B10, | 262 | i40e_aqc_opc_tunnel_key_structure = 0x0B10, |
263 | 263 | ||
264 | /* Async Events */ | 264 | /* Async Events */ |
265 | i40e_aqc_opc_event_lan_overflow = 0x1001, | 265 | i40e_aqc_opc_event_lan_overflow = 0x1001, |
266 | 266 | ||
267 | /* OEM commands */ | 267 | /* OEM commands */ |
268 | i40e_aqc_opc_oem_parameter_change = 0xFE00, | 268 | i40e_aqc_opc_oem_parameter_change = 0xFE00, |
269 | i40e_aqc_opc_oem_device_status_change = 0xFE01, | 269 | i40e_aqc_opc_oem_device_status_change = 0xFE01, |
270 | 270 | ||
271 | /* debug commands */ | 271 | /* debug commands */ |
272 | i40e_aqc_opc_debug_get_deviceid = 0xFF00, | 272 | i40e_aqc_opc_debug_get_deviceid = 0xFF00, |
273 | i40e_aqc_opc_debug_set_mode = 0xFF01, | 273 | i40e_aqc_opc_debug_set_mode = 0xFF01, |
274 | i40e_aqc_opc_debug_read_reg = 0xFF03, | 274 | i40e_aqc_opc_debug_read_reg = 0xFF03, |
275 | i40e_aqc_opc_debug_write_reg = 0xFF04, | 275 | i40e_aqc_opc_debug_write_reg = 0xFF04, |
276 | i40e_aqc_opc_debug_modify_reg = 0xFF07, | 276 | i40e_aqc_opc_debug_modify_reg = 0xFF07, |
277 | i40e_aqc_opc_debug_dump_internals = 0xFF08, | 277 | i40e_aqc_opc_debug_dump_internals = 0xFF08, |
278 | i40e_aqc_opc_debug_modify_internals = 0xFF09, | 278 | i40e_aqc_opc_debug_modify_internals = 0xFF09, |
279 | }; | 279 | }; |
280 | 280 | ||
281 | /* command structures and indirect data structures */ | 281 | /* command structures and indirect data structures */ |
@@ -302,7 +302,7 @@ enum i40e_admin_queue_opc { | |||
302 | /* This macro is used extensively to ensure that command structures are 16 | 302 | /* This macro is used extensively to ensure that command structures are 16 |
303 | * bytes in length as they have to map to the raw array of that size. | 303 | * bytes in length as they have to map to the raw array of that size. |
304 | */ | 304 | */ |
305 | #define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) | 305 | #define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) |
306 | 306 | ||
307 | /* internal (0x00XX) commands */ | 307 | /* internal (0x00XX) commands */ |
308 | 308 | ||
@@ -320,22 +320,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); | |||
320 | 320 | ||
321 | /* Send driver version (indirect 0x0002) */ | 321 | /* Send driver version (indirect 0x0002) */ |
322 | struct i40e_aqc_driver_version { | 322 | struct i40e_aqc_driver_version { |
323 | u8 driver_major_ver; | 323 | u8 driver_major_ver; |
324 | u8 driver_minor_ver; | 324 | u8 driver_minor_ver; |
325 | u8 driver_build_ver; | 325 | u8 driver_build_ver; |
326 | u8 driver_subbuild_ver; | 326 | u8 driver_subbuild_ver; |
327 | u8 reserved[4]; | 327 | u8 reserved[4]; |
328 | __le32 address_high; | 328 | __le32 address_high; |
329 | __le32 address_low; | 329 | __le32 address_low; |
330 | }; | 330 | }; |
331 | 331 | ||
332 | I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); | 332 | I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); |
333 | 333 | ||
334 | /* Queue Shutdown (direct 0x0003) */ | 334 | /* Queue Shutdown (direct 0x0003) */ |
335 | struct i40e_aqc_queue_shutdown { | 335 | struct i40e_aqc_queue_shutdown { |
336 | __le32 driver_unloading; | 336 | __le32 driver_unloading; |
337 | #define I40E_AQ_DRIVER_UNLOADING 0x1 | 337 | #define I40E_AQ_DRIVER_UNLOADING 0x1 |
338 | u8 reserved[12]; | 338 | u8 reserved[12]; |
339 | }; | 339 | }; |
340 | 340 | ||
341 | I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); | 341 | I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); |
@@ -351,19 +351,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); | |||
351 | /* Request resource ownership (direct 0x0008) | 351 | /* Request resource ownership (direct 0x0008) |
352 | * Release resource ownership (direct 0x0009) | 352 | * Release resource ownership (direct 0x0009) |
353 | */ | 353 | */ |
354 | #define I40E_AQ_RESOURCE_NVM 1 | 354 | #define I40E_AQ_RESOURCE_NVM 1 |
355 | #define I40E_AQ_RESOURCE_SDP 2 | 355 | #define I40E_AQ_RESOURCE_SDP 2 |
356 | #define I40E_AQ_RESOURCE_ACCESS_READ 1 | 356 | #define I40E_AQ_RESOURCE_ACCESS_READ 1 |
357 | #define I40E_AQ_RESOURCE_ACCESS_WRITE 2 | 357 | #define I40E_AQ_RESOURCE_ACCESS_WRITE 2 |
358 | #define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 | 358 | #define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 |
359 | #define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 | 359 | #define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 |
360 | 360 | ||
361 | struct i40e_aqc_request_resource { | 361 | struct i40e_aqc_request_resource { |
362 | __le16 resource_id; | 362 | __le16 resource_id; |
363 | __le16 access_type; | 363 | __le16 access_type; |
364 | __le32 timeout; | 364 | __le32 timeout; |
365 | __le32 resource_number; | 365 | __le32 resource_number; |
366 | u8 reserved[4]; | 366 | u8 reserved[4]; |
367 | }; | 367 | }; |
368 | 368 | ||
369 | I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); | 369 | I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); |
@@ -373,7 +373,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); | |||
373 | */ | 373 | */ |
374 | struct i40e_aqc_list_capabilites { | 374 | struct i40e_aqc_list_capabilites { |
375 | u8 command_flags; | 375 | u8 command_flags; |
376 | #define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 | 376 | #define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 |
377 | u8 pf_index; | 377 | u8 pf_index; |
378 | u8 reserved[2]; | 378 | u8 reserved[2]; |
379 | __le32 count; | 379 | __le32 count; |
@@ -384,123 +384,123 @@ struct i40e_aqc_list_capabilites { | |||
384 | I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); | 384 | I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); |
385 | 385 | ||
386 | struct i40e_aqc_list_capabilities_element_resp { | 386 | struct i40e_aqc_list_capabilities_element_resp { |
387 | __le16 id; | 387 | __le16 id; |
388 | u8 major_rev; | 388 | u8 major_rev; |
389 | u8 minor_rev; | 389 | u8 minor_rev; |
390 | __le32 number; | 390 | __le32 number; |
391 | __le32 logical_id; | 391 | __le32 logical_id; |
392 | __le32 phys_id; | 392 | __le32 phys_id; |
393 | u8 reserved[16]; | 393 | u8 reserved[16]; |
394 | }; | 394 | }; |
395 | 395 | ||
396 | /* list of caps */ | 396 | /* list of caps */ |
397 | 397 | ||
398 | #define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 | 398 | #define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 |
399 | #define I40E_AQ_CAP_ID_MNG_MODE 0x0002 | 399 | #define I40E_AQ_CAP_ID_MNG_MODE 0x0002 |
400 | #define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 | 400 | #define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 |
401 | #define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 | 401 | #define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 |
402 | #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 | 402 | #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 |
403 | #define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 | 403 | #define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 |
404 | #define I40E_AQ_CAP_ID_SRIOV 0x0012 | 404 | #define I40E_AQ_CAP_ID_SRIOV 0x0012 |
405 | #define I40E_AQ_CAP_ID_VF 0x0013 | 405 | #define I40E_AQ_CAP_ID_VF 0x0013 |
406 | #define I40E_AQ_CAP_ID_VMDQ 0x0014 | 406 | #define I40E_AQ_CAP_ID_VMDQ 0x0014 |
407 | #define I40E_AQ_CAP_ID_8021QBG 0x0015 | 407 | #define I40E_AQ_CAP_ID_8021QBG 0x0015 |
408 | #define I40E_AQ_CAP_ID_8021QBR 0x0016 | 408 | #define I40E_AQ_CAP_ID_8021QBR 0x0016 |
409 | #define I40E_AQ_CAP_ID_VSI 0x0017 | 409 | #define I40E_AQ_CAP_ID_VSI 0x0017 |
410 | #define I40E_AQ_CAP_ID_DCB 0x0018 | 410 | #define I40E_AQ_CAP_ID_DCB 0x0018 |
411 | #define I40E_AQ_CAP_ID_FCOE 0x0021 | 411 | #define I40E_AQ_CAP_ID_FCOE 0x0021 |
412 | #define I40E_AQ_CAP_ID_RSS 0x0040 | 412 | #define I40E_AQ_CAP_ID_RSS 0x0040 |
413 | #define I40E_AQ_CAP_ID_RXQ 0x0041 | 413 | #define I40E_AQ_CAP_ID_RXQ 0x0041 |
414 | #define I40E_AQ_CAP_ID_TXQ 0x0042 | 414 | #define I40E_AQ_CAP_ID_TXQ 0x0042 |
415 | #define I40E_AQ_CAP_ID_MSIX 0x0043 | 415 | #define I40E_AQ_CAP_ID_MSIX 0x0043 |
416 | #define I40E_AQ_CAP_ID_VF_MSIX 0x0044 | 416 | #define I40E_AQ_CAP_ID_VF_MSIX 0x0044 |
417 | #define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 | 417 | #define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 |
418 | #define I40E_AQ_CAP_ID_1588 0x0046 | 418 | #define I40E_AQ_CAP_ID_1588 0x0046 |
419 | #define I40E_AQ_CAP_ID_IWARP 0x0051 | 419 | #define I40E_AQ_CAP_ID_IWARP 0x0051 |
420 | #define I40E_AQ_CAP_ID_LED 0x0061 | 420 | #define I40E_AQ_CAP_ID_LED 0x0061 |
421 | #define I40E_AQ_CAP_ID_SDP 0x0062 | 421 | #define I40E_AQ_CAP_ID_SDP 0x0062 |
422 | #define I40E_AQ_CAP_ID_MDIO 0x0063 | 422 | #define I40E_AQ_CAP_ID_MDIO 0x0063 |
423 | #define I40E_AQ_CAP_ID_FLEX10 0x00F1 | 423 | #define I40E_AQ_CAP_ID_FLEX10 0x00F1 |
424 | #define I40E_AQ_CAP_ID_CEM 0x00F2 | 424 | #define I40E_AQ_CAP_ID_CEM 0x00F2 |
425 | 425 | ||
426 | /* Set CPPM Configuration (direct 0x0103) */ | 426 | /* Set CPPM Configuration (direct 0x0103) */ |
427 | struct i40e_aqc_cppm_configuration { | 427 | struct i40e_aqc_cppm_configuration { |
428 | __le16 command_flags; | 428 | __le16 command_flags; |
429 | #define I40E_AQ_CPPM_EN_LTRC 0x0800 | 429 | #define I40E_AQ_CPPM_EN_LTRC 0x0800 |
430 | #define I40E_AQ_CPPM_EN_DMCTH 0x1000 | 430 | #define I40E_AQ_CPPM_EN_DMCTH 0x1000 |
431 | #define I40E_AQ_CPPM_EN_DMCTLX 0x2000 | 431 | #define I40E_AQ_CPPM_EN_DMCTLX 0x2000 |
432 | #define I40E_AQ_CPPM_EN_HPTC 0x4000 | 432 | #define I40E_AQ_CPPM_EN_HPTC 0x4000 |
433 | #define I40E_AQ_CPPM_EN_DMARC 0x8000 | 433 | #define I40E_AQ_CPPM_EN_DMARC 0x8000 |
434 | __le16 ttlx; | 434 | __le16 ttlx; |
435 | __le32 dmacr; | 435 | __le32 dmacr; |
436 | __le16 dmcth; | 436 | __le16 dmcth; |
437 | u8 hptc; | 437 | u8 hptc; |
438 | u8 reserved; | 438 | u8 reserved; |
439 | __le32 pfltrc; | 439 | __le32 pfltrc; |
440 | }; | 440 | }; |
441 | 441 | ||
442 | I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); | 442 | I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); |
443 | 443 | ||
444 | /* Set ARP Proxy command / response (indirect 0x0104) */ | 444 | /* Set ARP Proxy command / response (indirect 0x0104) */ |
445 | struct i40e_aqc_arp_proxy_data { | 445 | struct i40e_aqc_arp_proxy_data { |
446 | __le16 command_flags; | 446 | __le16 command_flags; |
447 | #define I40E_AQ_ARP_INIT_IPV4 0x0008 | 447 | #define I40E_AQ_ARP_INIT_IPV4 0x0008 |
448 | #define I40E_AQ_ARP_UNSUP_CTL 0x0010 | 448 | #define I40E_AQ_ARP_UNSUP_CTL 0x0010 |
449 | #define I40E_AQ_ARP_ENA 0x0020 | 449 | #define I40E_AQ_ARP_ENA 0x0020 |
450 | #define I40E_AQ_ARP_ADD_IPV4 0x0040 | 450 | #define I40E_AQ_ARP_ADD_IPV4 0x0040 |
451 | #define I40E_AQ_ARP_DEL_IPV4 0x0080 | 451 | #define I40E_AQ_ARP_DEL_IPV4 0x0080 |
452 | __le16 table_id; | 452 | __le16 table_id; |
453 | __le32 pfpm_proxyfc; | 453 | __le32 pfpm_proxyfc; |
454 | __le32 ip_addr; | 454 | __le32 ip_addr; |
455 | u8 mac_addr[6]; | 455 | u8 mac_addr[6]; |
456 | }; | 456 | }; |
457 | 457 | ||
458 | /* Set NS Proxy Table Entry Command (indirect 0x0105) */ | 458 | /* Set NS Proxy Table Entry Command (indirect 0x0105) */ |
459 | struct i40e_aqc_ns_proxy_data { | 459 | struct i40e_aqc_ns_proxy_data { |
460 | __le16 table_idx_mac_addr_0; | 460 | __le16 table_idx_mac_addr_0; |
461 | __le16 table_idx_mac_addr_1; | 461 | __le16 table_idx_mac_addr_1; |
462 | __le16 table_idx_ipv6_0; | 462 | __le16 table_idx_ipv6_0; |
463 | __le16 table_idx_ipv6_1; | 463 | __le16 table_idx_ipv6_1; |
464 | __le16 control; | 464 | __le16 control; |
465 | #define I40E_AQ_NS_PROXY_ADD_0 0x0100 | 465 | #define I40E_AQ_NS_PROXY_ADD_0 0x0100 |
466 | #define I40E_AQ_NS_PROXY_DEL_0 0x0200 | 466 | #define I40E_AQ_NS_PROXY_DEL_0 0x0200 |
467 | #define I40E_AQ_NS_PROXY_ADD_1 0x0400 | 467 | #define I40E_AQ_NS_PROXY_ADD_1 0x0400 |
468 | #define I40E_AQ_NS_PROXY_DEL_1 0x0800 | 468 | #define I40E_AQ_NS_PROXY_DEL_1 0x0800 |
469 | #define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 | 469 | #define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 |
470 | #define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 | 470 | #define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 |
471 | #define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 | 471 | #define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 |
472 | #define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 | 472 | #define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 |
473 | #define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 | 473 | #define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 |
474 | #define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 | 474 | #define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 |
475 | #define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 | 475 | #define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 |
476 | u8 mac_addr_0[6]; | 476 | u8 mac_addr_0[6]; |
477 | u8 mac_addr_1[6]; | 477 | u8 mac_addr_1[6]; |
478 | u8 local_mac_addr[6]; | 478 | u8 local_mac_addr[6]; |
479 | u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ | 479 | u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ |
480 | u8 ipv6_addr_1[16]; | 480 | u8 ipv6_addr_1[16]; |
481 | }; | 481 | }; |
482 | 482 | ||
483 | /* Manage LAA Command (0x0106) - obsolete */ | 483 | /* Manage LAA Command (0x0106) - obsolete */ |
484 | struct i40e_aqc_mng_laa { | 484 | struct i40e_aqc_mng_laa { |
485 | __le16 command_flags; | 485 | __le16 command_flags; |
486 | #define I40E_AQ_LAA_FLAG_WR 0x8000 | 486 | #define I40E_AQ_LAA_FLAG_WR 0x8000 |
487 | u8 reserved[2]; | 487 | u8 reserved[2]; |
488 | __le32 sal; | 488 | __le32 sal; |
489 | __le16 sah; | 489 | __le16 sah; |
490 | u8 reserved2[6]; | 490 | u8 reserved2[6]; |
491 | }; | 491 | }; |
492 | 492 | ||
493 | /* Manage MAC Address Read Command (indirect 0x0107) */ | 493 | /* Manage MAC Address Read Command (indirect 0x0107) */ |
494 | struct i40e_aqc_mac_address_read { | 494 | struct i40e_aqc_mac_address_read { |
495 | __le16 command_flags; | 495 | __le16 command_flags; |
496 | #define I40E_AQC_LAN_ADDR_VALID 0x10 | 496 | #define I40E_AQC_LAN_ADDR_VALID 0x10 |
497 | #define I40E_AQC_SAN_ADDR_VALID 0x20 | 497 | #define I40E_AQC_SAN_ADDR_VALID 0x20 |
498 | #define I40E_AQC_PORT_ADDR_VALID 0x40 | 498 | #define I40E_AQC_PORT_ADDR_VALID 0x40 |
499 | #define I40E_AQC_WOL_ADDR_VALID 0x80 | 499 | #define I40E_AQC_WOL_ADDR_VALID 0x80 |
500 | #define I40E_AQC_ADDR_VALID_MASK 0xf0 | 500 | #define I40E_AQC_ADDR_VALID_MASK 0xf0 |
501 | u8 reserved[6]; | 501 | u8 reserved[6]; |
502 | __le32 addr_high; | 502 | __le32 addr_high; |
503 | __le32 addr_low; | 503 | __le32 addr_low; |
504 | }; | 504 | }; |
505 | 505 | ||
506 | I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); | 506 | I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); |
@@ -516,14 +516,14 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); | |||
516 | 516 | ||
517 | /* Manage MAC Address Write Command (0x0108) */ | 517 | /* Manage MAC Address Write Command (0x0108) */ |
518 | struct i40e_aqc_mac_address_write { | 518 | struct i40e_aqc_mac_address_write { |
519 | __le16 command_flags; | 519 | __le16 command_flags; |
520 | #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 | 520 | #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 |
521 | #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 | 521 | #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 |
522 | #define I40E_AQC_WRITE_TYPE_PORT 0x8000 | 522 | #define I40E_AQC_WRITE_TYPE_PORT 0x8000 |
523 | #define I40E_AQC_WRITE_TYPE_MASK 0xc000 | 523 | #define I40E_AQC_WRITE_TYPE_MASK 0xc000 |
524 | __le16 mac_sah; | 524 | __le16 mac_sah; |
525 | __le32 mac_sal; | 525 | __le32 mac_sal; |
526 | u8 reserved[8]; | 526 | u8 reserved[8]; |
527 | }; | 527 | }; |
528 | 528 | ||
529 | I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); | 529 | I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); |
@@ -544,10 +544,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); | |||
544 | * command | 544 | * command |
545 | */ | 545 | */ |
546 | struct i40e_aqc_switch_seid { | 546 | struct i40e_aqc_switch_seid { |
547 | __le16 seid; | 547 | __le16 seid; |
548 | u8 reserved[6]; | 548 | u8 reserved[6]; |
549 | __le32 addr_high; | 549 | __le32 addr_high; |
550 | __le32 addr_low; | 550 | __le32 addr_low; |
551 | }; | 551 | }; |
552 | 552 | ||
553 | I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); | 553 | I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); |
@@ -556,34 +556,34 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); | |||
556 | * uses i40e_aqc_switch_seid for the descriptor | 556 | * uses i40e_aqc_switch_seid for the descriptor |
557 | */ | 557 | */ |
558 | struct i40e_aqc_get_switch_config_header_resp { | 558 | struct i40e_aqc_get_switch_config_header_resp { |
559 | __le16 num_reported; | 559 | __le16 num_reported; |
560 | __le16 num_total; | 560 | __le16 num_total; |
561 | u8 reserved[12]; | 561 | u8 reserved[12]; |
562 | }; | 562 | }; |
563 | 563 | ||
564 | struct i40e_aqc_switch_config_element_resp { | 564 | struct i40e_aqc_switch_config_element_resp { |
565 | u8 element_type; | 565 | u8 element_type; |
566 | #define I40E_AQ_SW_ELEM_TYPE_MAC 1 | 566 | #define I40E_AQ_SW_ELEM_TYPE_MAC 1 |
567 | #define I40E_AQ_SW_ELEM_TYPE_PF 2 | 567 | #define I40E_AQ_SW_ELEM_TYPE_PF 2 |
568 | #define I40E_AQ_SW_ELEM_TYPE_VF 3 | 568 | #define I40E_AQ_SW_ELEM_TYPE_VF 3 |
569 | #define I40E_AQ_SW_ELEM_TYPE_EMP 4 | 569 | #define I40E_AQ_SW_ELEM_TYPE_EMP 4 |
570 | #define I40E_AQ_SW_ELEM_TYPE_BMC 5 | 570 | #define I40E_AQ_SW_ELEM_TYPE_BMC 5 |
571 | #define I40E_AQ_SW_ELEM_TYPE_PV 16 | 571 | #define I40E_AQ_SW_ELEM_TYPE_PV 16 |
572 | #define I40E_AQ_SW_ELEM_TYPE_VEB 17 | 572 | #define I40E_AQ_SW_ELEM_TYPE_VEB 17 |
573 | #define I40E_AQ_SW_ELEM_TYPE_PA 18 | 573 | #define I40E_AQ_SW_ELEM_TYPE_PA 18 |
574 | #define I40E_AQ_SW_ELEM_TYPE_VSI 19 | 574 | #define I40E_AQ_SW_ELEM_TYPE_VSI 19 |
575 | u8 revision; | 575 | u8 revision; |
576 | #define I40E_AQ_SW_ELEM_REV_1 1 | 576 | #define I40E_AQ_SW_ELEM_REV_1 1 |
577 | __le16 seid; | 577 | __le16 seid; |
578 | __le16 uplink_seid; | 578 | __le16 uplink_seid; |
579 | __le16 downlink_seid; | 579 | __le16 downlink_seid; |
580 | u8 reserved[3]; | 580 | u8 reserved[3]; |
581 | u8 connection_type; | 581 | u8 connection_type; |
582 | #define I40E_AQ_CONN_TYPE_REGULAR 0x1 | 582 | #define I40E_AQ_CONN_TYPE_REGULAR 0x1 |
583 | #define I40E_AQ_CONN_TYPE_DEFAULT 0x2 | 583 | #define I40E_AQ_CONN_TYPE_DEFAULT 0x2 |
584 | #define I40E_AQ_CONN_TYPE_CASCADED 0x3 | 584 | #define I40E_AQ_CONN_TYPE_CASCADED 0x3 |
585 | __le16 scheduler_id; | 585 | __le16 scheduler_id; |
586 | __le16 element_info; | 586 | __le16 element_info; |
587 | }; | 587 | }; |
588 | 588 | ||
589 | /* Get Switch Configuration (indirect 0x0200) | 589 | /* Get Switch Configuration (indirect 0x0200) |
@@ -591,73 +591,73 @@ struct i40e_aqc_switch_config_element_resp { | |||
591 | * the first in the array is the header, remainder are elements | 591 | * the first in the array is the header, remainder are elements |
592 | */ | 592 | */ |
593 | struct i40e_aqc_get_switch_config_resp { | 593 | struct i40e_aqc_get_switch_config_resp { |
594 | struct i40e_aqc_get_switch_config_header_resp header; | 594 | struct i40e_aqc_get_switch_config_header_resp header; |
595 | struct i40e_aqc_switch_config_element_resp element[1]; | 595 | struct i40e_aqc_switch_config_element_resp element[1]; |
596 | }; | 596 | }; |
597 | 597 | ||
598 | /* Add Statistics (direct 0x0201) | 598 | /* Add Statistics (direct 0x0201) |
599 | * Remove Statistics (direct 0x0202) | 599 | * Remove Statistics (direct 0x0202) |
600 | */ | 600 | */ |
601 | struct i40e_aqc_add_remove_statistics { | 601 | struct i40e_aqc_add_remove_statistics { |
602 | __le16 seid; | 602 | __le16 seid; |
603 | __le16 vlan; | 603 | __le16 vlan; |
604 | __le16 stat_index; | 604 | __le16 stat_index; |
605 | u8 reserved[10]; | 605 | u8 reserved[10]; |
606 | }; | 606 | }; |
607 | 607 | ||
608 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); | 608 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); |
609 | 609 | ||
610 | /* Set Port Parameters command (direct 0x0203) */ | 610 | /* Set Port Parameters command (direct 0x0203) */ |
611 | struct i40e_aqc_set_port_parameters { | 611 | struct i40e_aqc_set_port_parameters { |
612 | __le16 command_flags; | 612 | __le16 command_flags; |
613 | #define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 | 613 | #define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 |
614 | #define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ | 614 | #define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ |
615 | #define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 | 615 | #define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 |
616 | __le16 bad_frame_vsi; | 616 | __le16 bad_frame_vsi; |
617 | __le16 default_seid; /* reserved for command */ | 617 | __le16 default_seid; /* reserved for command */ |
618 | u8 reserved[10]; | 618 | u8 reserved[10]; |
619 | }; | 619 | }; |
620 | 620 | ||
621 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); | 621 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); |
622 | 622 | ||
623 | /* Get Switch Resource Allocation (indirect 0x0204) */ | 623 | /* Get Switch Resource Allocation (indirect 0x0204) */ |
624 | struct i40e_aqc_get_switch_resource_alloc { | 624 | struct i40e_aqc_get_switch_resource_alloc { |
625 | u8 num_entries; /* reserved for command */ | 625 | u8 num_entries; /* reserved for command */ |
626 | u8 reserved[7]; | 626 | u8 reserved[7]; |
627 | __le32 addr_high; | 627 | __le32 addr_high; |
628 | __le32 addr_low; | 628 | __le32 addr_low; |
629 | }; | 629 | }; |
630 | 630 | ||
631 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); | 631 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); |
632 | 632 | ||
633 | /* expect an array of these structs in the response buffer */ | 633 | /* expect an array of these structs in the response buffer */ |
634 | struct i40e_aqc_switch_resource_alloc_element_resp { | 634 | struct i40e_aqc_switch_resource_alloc_element_resp { |
635 | u8 resource_type; | 635 | u8 resource_type; |
636 | #define I40E_AQ_RESOURCE_TYPE_VEB 0x0 | 636 | #define I40E_AQ_RESOURCE_TYPE_VEB 0x0 |
637 | #define I40E_AQ_RESOURCE_TYPE_VSI 0x1 | 637 | #define I40E_AQ_RESOURCE_TYPE_VSI 0x1 |
638 | #define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 | 638 | #define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 |
639 | #define I40E_AQ_RESOURCE_TYPE_STAG 0x3 | 639 | #define I40E_AQ_RESOURCE_TYPE_STAG 0x3 |
640 | #define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 | 640 | #define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 |
641 | #define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 | 641 | #define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 |
642 | #define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 | 642 | #define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 |
643 | #define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 | 643 | #define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 |
644 | #define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 | 644 | #define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 |
645 | #define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 | 645 | #define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 |
646 | #define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA | 646 | #define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA |
647 | #define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB | 647 | #define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB |
648 | #define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC | 648 | #define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC |
649 | #define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD | 649 | #define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD |
650 | #define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF | 650 | #define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF |
651 | #define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 | 651 | #define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 |
652 | #define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 | 652 | #define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 |
653 | #define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 | 653 | #define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 |
654 | #define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 | 654 | #define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 |
655 | u8 reserved1; | 655 | u8 reserved1; |
656 | __le16 guaranteed; | 656 | __le16 guaranteed; |
657 | __le16 total; | 657 | __le16 total; |
658 | __le16 used; | 658 | __le16 used; |
659 | __le16 total_unalloced; | 659 | __le16 total_unalloced; |
660 | u8 reserved2[6]; | 660 | u8 reserved2[6]; |
661 | }; | 661 | }; |
662 | 662 | ||
663 | /* Add VSI (indirect 0x0210) | 663 | /* Add VSI (indirect 0x0210) |
@@ -671,24 +671,24 @@ struct i40e_aqc_switch_resource_alloc_element_resp { | |||
671 | * uses the same completion and data structure as Add VSI | 671 | * uses the same completion and data structure as Add VSI |
672 | */ | 672 | */ |
673 | struct i40e_aqc_add_get_update_vsi { | 673 | struct i40e_aqc_add_get_update_vsi { |
674 | __le16 uplink_seid; | 674 | __le16 uplink_seid; |
675 | u8 connection_type; | 675 | u8 connection_type; |
676 | #define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 | 676 | #define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 |
677 | #define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 | 677 | #define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 |
678 | #define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 | 678 | #define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 |
679 | u8 reserved1; | 679 | u8 reserved1; |
680 | u8 vf_id; | 680 | u8 vf_id; |
681 | u8 reserved2; | 681 | u8 reserved2; |
682 | __le16 vsi_flags; | 682 | __le16 vsi_flags; |
683 | #define I40E_AQ_VSI_TYPE_SHIFT 0x0 | 683 | #define I40E_AQ_VSI_TYPE_SHIFT 0x0 |
684 | #define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) | 684 | #define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) |
685 | #define I40E_AQ_VSI_TYPE_VF 0x0 | 685 | #define I40E_AQ_VSI_TYPE_VF 0x0 |
686 | #define I40E_AQ_VSI_TYPE_VMDQ2 0x1 | 686 | #define I40E_AQ_VSI_TYPE_VMDQ2 0x1 |
687 | #define I40E_AQ_VSI_TYPE_PF 0x2 | 687 | #define I40E_AQ_VSI_TYPE_PF 0x2 |
688 | #define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 | 688 | #define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 |
689 | #define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 | 689 | #define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 |
690 | __le32 addr_high; | 690 | __le32 addr_high; |
691 | __le32 addr_low; | 691 | __le32 addr_low; |
692 | }; | 692 | }; |
693 | 693 | ||
694 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); | 694 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); |
@@ -706,121 +706,121 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); | |||
706 | 706 | ||
707 | struct i40e_aqc_vsi_properties_data { | 707 | struct i40e_aqc_vsi_properties_data { |
708 | /* first 96 byte are written by SW */ | 708 | /* first 96 byte are written by SW */ |
709 | __le16 valid_sections; | 709 | __le16 valid_sections; |
710 | #define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 | 710 | #define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 |
711 | #define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 | 711 | #define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 |
712 | #define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 | 712 | #define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 |
713 | #define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 | 713 | #define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 |
714 | #define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 | 714 | #define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 |
715 | #define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 | 715 | #define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 |
716 | #define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 | 716 | #define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 |
717 | #define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 | 717 | #define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 |
718 | #define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 | 718 | #define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 |
719 | #define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 | 719 | #define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 |
720 | /* switch section */ | 720 | /* switch section */ |
721 | __le16 switch_id; /* 12bit id combined with flags below */ | 721 | __le16 switch_id; /* 12bit id combined with flags below */ |
722 | #define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 | 722 | #define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 |
723 | #define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) | 723 | #define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) |
724 | #define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 | 724 | #define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 |
725 | #define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 | 725 | #define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 |
726 | #define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 | 726 | #define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 |
727 | u8 sw_reserved[2]; | 727 | u8 sw_reserved[2]; |
728 | /* security section */ | 728 | /* security section */ |
729 | u8 sec_flags; | 729 | u8 sec_flags; |
730 | #define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 | 730 | #define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 |
731 | #define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 | 731 | #define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 |
732 | #define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 | 732 | #define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 |
733 | u8 sec_reserved; | 733 | u8 sec_reserved; |
734 | /* VLAN section */ | 734 | /* VLAN section */ |
735 | __le16 pvid; /* VLANS include priority bits */ | 735 | __le16 pvid; /* VLANS include priority bits */ |
736 | __le16 fcoe_pvid; | 736 | __le16 fcoe_pvid; |
737 | u8 port_vlan_flags; | 737 | u8 port_vlan_flags; |
738 | #define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 | 738 | #define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 |
739 | #define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ | 739 | #define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ |
740 | I40E_AQ_VSI_PVLAN_MODE_SHIFT) | 740 | I40E_AQ_VSI_PVLAN_MODE_SHIFT) |
741 | #define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 | 741 | #define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 |
742 | #define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 | 742 | #define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 |
743 | #define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 | 743 | #define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 |
744 | #define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 | 744 | #define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 |
745 | #define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 | 745 | #define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 |
746 | #define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ | 746 | #define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ |
747 | I40E_AQ_VSI_PVLAN_EMOD_SHIFT) | 747 | I40E_AQ_VSI_PVLAN_EMOD_SHIFT) |
748 | #define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 | 748 | #define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 |
749 | #define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 | 749 | #define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 |
750 | #define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 | 750 | #define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 |
751 | #define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 | 751 | #define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 |
752 | u8 pvlan_reserved[3]; | 752 | u8 pvlan_reserved[3]; |
753 | /* ingress egress up sections */ | 753 | /* ingress egress up sections */ |
754 | __le32 ingress_table; /* bitmap, 3 bits per up */ | 754 | __le32 ingress_table; /* bitmap, 3 bits per up */ |
755 | #define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 | 755 | #define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 |
756 | #define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ | 756 | #define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ |
757 | I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) | 757 | I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) |
758 | #define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 | 758 | #define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 |
759 | #define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ | 759 | #define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ |
760 | I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) | 760 | I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) |
761 | #define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 | 761 | #define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 |
762 | #define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ | 762 | #define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ |
763 | I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) | 763 | I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) |
764 | #define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 | 764 | #define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 |
765 | #define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ | 765 | #define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ |
766 | I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) | 766 | I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) |
767 | #define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 | 767 | #define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 |
768 | #define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ | 768 | #define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ |
769 | I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) | 769 | I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) |
770 | #define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 | 770 | #define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 |
771 | #define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ | 771 | #define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ |
772 | I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) | 772 | I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) |
773 | #define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 | 773 | #define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 |
774 | #define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ | 774 | #define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ |
775 | I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) | 775 | I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) |
776 | #define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 | 776 | #define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 |
777 | #define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ | 777 | #define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ |
778 | I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) | 778 | I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) |
779 | __le32 egress_table; /* same defines as for ingress table */ | 779 | __le32 egress_table; /* same defines as for ingress table */ |
780 | /* cascaded PV section */ | 780 | /* cascaded PV section */ |
781 | __le16 cas_pv_tag; | 781 | __le16 cas_pv_tag; |
782 | u8 cas_pv_flags; | 782 | u8 cas_pv_flags; |
783 | #define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 | 783 | #define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 |
784 | #define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ | 784 | #define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ |
785 | I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) | 785 | I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) |
786 | #define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 | 786 | #define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 |
787 | #define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 | 787 | #define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 |
788 | #define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 | 788 | #define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 |
789 | #define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 | 789 | #define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 |
790 | #define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 | 790 | #define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 |
791 | #define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 | 791 | #define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 |
792 | u8 cas_pv_reserved; | 792 | u8 cas_pv_reserved; |
793 | /* queue mapping section */ | 793 | /* queue mapping section */ |
794 | __le16 mapping_flags; | 794 | __le16 mapping_flags; |
795 | #define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 | 795 | #define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 |
796 | #define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 | 796 | #define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 |
797 | __le16 queue_mapping[16]; | 797 | __le16 queue_mapping[16]; |
798 | #define I40E_AQ_VSI_QUEUE_SHIFT 0x0 | 798 | #define I40E_AQ_VSI_QUEUE_SHIFT 0x0 |
799 | #define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) | 799 | #define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) |
800 | __le16 tc_mapping[8]; | 800 | __le16 tc_mapping[8]; |
801 | #define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 | 801 | #define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 |
802 | #define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ | 802 | #define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ |
803 | I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | 803 | I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
804 | #define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 | 804 | #define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 |
805 | #define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ | 805 | #define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ |
806 | I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) | 806 | I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) |
807 | /* queueing option section */ | 807 | /* queueing option section */ |
808 | u8 queueing_opt_flags; | 808 | u8 queueing_opt_flags; |
809 | #define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 | 809 | #define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 |
810 | #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 | 810 | #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 |
811 | u8 queueing_opt_reserved[3]; | 811 | u8 queueing_opt_reserved[3]; |
812 | /* scheduler section */ | 812 | /* scheduler section */ |
813 | u8 up_enable_bits; | 813 | u8 up_enable_bits; |
814 | u8 sched_reserved; | 814 | u8 sched_reserved; |
815 | /* outer up section */ | 815 | /* outer up section */ |
816 | __le32 outer_up_table; /* same structure and defines as ingress table */ | 816 | __le32 outer_up_table; /* same structure and defines as ingress tbl */ |
817 | u8 cmd_reserved[8]; | 817 | u8 cmd_reserved[8]; |
818 | /* last 32 bytes are written by FW */ | 818 | /* last 32 bytes are written by FW */ |
819 | __le16 qs_handle[8]; | 819 | __le16 qs_handle[8]; |
820 | #define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF | 820 | #define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF |
821 | __le16 stat_counter_idx; | 821 | __le16 stat_counter_idx; |
822 | __le16 sched_id; | 822 | __le16 sched_id; |
823 | u8 resp_reserved[12]; | 823 | u8 resp_reserved[12]; |
824 | }; | 824 | }; |
825 | 825 | ||
826 | I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); | 826 | I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); |
@@ -830,26 +830,26 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); | |||
830 | * (IS_CTRL_PORT only works on add PV) | 830 | * (IS_CTRL_PORT only works on add PV) |
831 | */ | 831 | */ |
832 | struct i40e_aqc_add_update_pv { | 832 | struct i40e_aqc_add_update_pv { |
833 | __le16 command_flags; | 833 | __le16 command_flags; |
834 | #define I40E_AQC_PV_FLAG_PV_TYPE 0x1 | 834 | #define I40E_AQC_PV_FLAG_PV_TYPE 0x1 |
835 | #define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 | 835 | #define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 |
836 | #define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 | 836 | #define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 |
837 | #define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 | 837 | #define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 |
838 | __le16 uplink_seid; | 838 | __le16 uplink_seid; |
839 | __le16 connected_seid; | 839 | __le16 connected_seid; |
840 | u8 reserved[10]; | 840 | u8 reserved[10]; |
841 | }; | 841 | }; |
842 | 842 | ||
843 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); | 843 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); |
844 | 844 | ||
845 | struct i40e_aqc_add_update_pv_completion { | 845 | struct i40e_aqc_add_update_pv_completion { |
846 | /* reserved for update; for add also encodes error if rc == ENOSPC */ | 846 | /* reserved for update; for add also encodes error if rc == ENOSPC */ |
847 | __le16 pv_seid; | 847 | __le16 pv_seid; |
848 | #define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 | 848 | #define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 |
849 | #define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 | 849 | #define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 |
850 | #define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 | 850 | #define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 |
851 | #define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 | 851 | #define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 |
852 | u8 reserved[14]; | 852 | u8 reserved[14]; |
853 | }; | 853 | }; |
854 | 854 | ||
855 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); | 855 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); |
@@ -859,48 +859,48 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); | |||
859 | */ | 859 | */ |
860 | 860 | ||
861 | struct i40e_aqc_get_pv_params_completion { | 861 | struct i40e_aqc_get_pv_params_completion { |
862 | __le16 seid; | 862 | __le16 seid; |
863 | __le16 default_stag; | 863 | __le16 default_stag; |
864 | __le16 pv_flags; /* same flags as add_pv */ | 864 | __le16 pv_flags; /* same flags as add_pv */ |
865 | #define I40E_AQC_GET_PV_PV_TYPE 0x1 | 865 | #define I40E_AQC_GET_PV_PV_TYPE 0x1 |
866 | #define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 | 866 | #define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 |
867 | #define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 | 867 | #define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 |
868 | u8 reserved[8]; | 868 | u8 reserved[8]; |
869 | __le16 default_port_seid; | 869 | __le16 default_port_seid; |
870 | }; | 870 | }; |
871 | 871 | ||
872 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); | 872 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); |
873 | 873 | ||
874 | /* Add VEB (direct 0x0230) */ | 874 | /* Add VEB (direct 0x0230) */ |
875 | struct i40e_aqc_add_veb { | 875 | struct i40e_aqc_add_veb { |
876 | __le16 uplink_seid; | 876 | __le16 uplink_seid; |
877 | __le16 downlink_seid; | 877 | __le16 downlink_seid; |
878 | __le16 veb_flags; | 878 | __le16 veb_flags; |
879 | #define I40E_AQC_ADD_VEB_FLOATING 0x1 | 879 | #define I40E_AQC_ADD_VEB_FLOATING 0x1 |
880 | #define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 | 880 | #define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 |
881 | #define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ | 881 | #define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ |
882 | I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) | 882 | I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) |
883 | #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 | 883 | #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 |
884 | #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 | 884 | #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 |
885 | #define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 | 885 | #define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 |
886 | u8 enable_tcs; | 886 | u8 enable_tcs; |
887 | u8 reserved[9]; | 887 | u8 reserved[9]; |
888 | }; | 888 | }; |
889 | 889 | ||
890 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); | 890 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); |
891 | 891 | ||
892 | struct i40e_aqc_add_veb_completion { | 892 | struct i40e_aqc_add_veb_completion { |
893 | u8 reserved[6]; | 893 | u8 reserved[6]; |
894 | __le16 switch_seid; | 894 | __le16 switch_seid; |
895 | /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ | 895 | /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ |
896 | __le16 veb_seid; | 896 | __le16 veb_seid; |
897 | #define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 | 897 | #define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 |
898 | #define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 | 898 | #define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 |
899 | #define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 | 899 | #define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 |
900 | #define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 | 900 | #define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 |
901 | __le16 statistic_index; | 901 | __le16 statistic_index; |
902 | __le16 vebs_used; | 902 | __le16 vebs_used; |
903 | __le16 vebs_free; | 903 | __le16 vebs_free; |
904 | }; | 904 | }; |
905 | 905 | ||
906 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); | 906 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); |
@@ -909,13 +909,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); | |||
909 | * uses i40e_aqc_switch_seid for the descriptor | 909 | * uses i40e_aqc_switch_seid for the descriptor |
910 | */ | 910 | */ |
911 | struct i40e_aqc_get_veb_parameters_completion { | 911 | struct i40e_aqc_get_veb_parameters_completion { |
912 | __le16 seid; | 912 | __le16 seid; |
913 | __le16 switch_id; | 913 | __le16 switch_id; |
914 | __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ | 914 | __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ |
915 | __le16 statistic_index; | 915 | __le16 statistic_index; |
916 | __le16 vebs_used; | 916 | __le16 vebs_used; |
917 | __le16 vebs_free; | 917 | __le16 vebs_free; |
918 | u8 reserved[4]; | 918 | u8 reserved[4]; |
919 | }; | 919 | }; |
920 | 920 | ||
921 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); | 921 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); |
@@ -928,37 +928,37 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); | |||
928 | 928 | ||
929 | /* used for the command for most vlan commands */ | 929 | /* used for the command for most vlan commands */ |
930 | struct i40e_aqc_macvlan { | 930 | struct i40e_aqc_macvlan { |
931 | __le16 num_addresses; | 931 | __le16 num_addresses; |
932 | __le16 seid[3]; | 932 | __le16 seid[3]; |
933 | #define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 | 933 | #define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 |
934 | #define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ | 934 | #define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ |
935 | I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) | 935 | I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) |
936 | #define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 | 936 | #define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 |
937 | __le32 addr_high; | 937 | __le32 addr_high; |
938 | __le32 addr_low; | 938 | __le32 addr_low; |
939 | }; | 939 | }; |
940 | 940 | ||
941 | I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); | 941 | I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); |
942 | 942 | ||
943 | /* indirect data for command and response */ | 943 | /* indirect data for command and response */ |
944 | struct i40e_aqc_add_macvlan_element_data { | 944 | struct i40e_aqc_add_macvlan_element_data { |
945 | u8 mac_addr[6]; | 945 | u8 mac_addr[6]; |
946 | __le16 vlan_tag; | 946 | __le16 vlan_tag; |
947 | __le16 flags; | 947 | __le16 flags; |
948 | #define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 | 948 | #define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 |
949 | #define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 | 949 | #define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 |
950 | #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 | 950 | #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 |
951 | #define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 | 951 | #define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 |
952 | __le16 queue_number; | 952 | __le16 queue_number; |
953 | #define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 | 953 | #define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 |
954 | #define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ | 954 | #define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ |
955 | I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) | 955 | I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) |
956 | /* response section */ | 956 | /* response section */ |
957 | u8 match_method; | 957 | u8 match_method; |
958 | #define I40E_AQC_MM_PERFECT_MATCH 0x01 | 958 | #define I40E_AQC_MM_PERFECT_MATCH 0x01 |
959 | #define I40E_AQC_MM_HASH_MATCH 0x02 | 959 | #define I40E_AQC_MM_HASH_MATCH 0x02 |
960 | #define I40E_AQC_MM_ERR_NO_RES 0xFF | 960 | #define I40E_AQC_MM_ERR_NO_RES 0xFF |
961 | u8 reserved1[3]; | 961 | u8 reserved1[3]; |
962 | }; | 962 | }; |
963 | 963 | ||
964 | struct i40e_aqc_add_remove_macvlan_completion { | 964 | struct i40e_aqc_add_remove_macvlan_completion { |
@@ -978,19 +978,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); | |||
978 | */ | 978 | */ |
979 | 979 | ||
980 | struct i40e_aqc_remove_macvlan_element_data { | 980 | struct i40e_aqc_remove_macvlan_element_data { |
981 | u8 mac_addr[6]; | 981 | u8 mac_addr[6]; |
982 | __le16 vlan_tag; | 982 | __le16 vlan_tag; |
983 | u8 flags; | 983 | u8 flags; |
984 | #define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 | 984 | #define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 |
985 | #define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 | 985 | #define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 |
986 | #define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 | 986 | #define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 |
987 | #define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 | 987 | #define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 |
988 | u8 reserved[3]; | 988 | u8 reserved[3]; |
989 | /* reply section */ | 989 | /* reply section */ |
990 | u8 error_code; | 990 | u8 error_code; |
991 | #define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 | 991 | #define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 |
992 | #define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF | 992 | #define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF |
993 | u8 reply_reserved[3]; | 993 | u8 reply_reserved[3]; |
994 | }; | 994 | }; |
995 | 995 | ||
996 | /* Add VLAN (indirect 0x0252) | 996 | /* Add VLAN (indirect 0x0252) |
@@ -998,59 +998,58 @@ struct i40e_aqc_remove_macvlan_element_data { | |||
998 | * use the generic i40e_aqc_macvlan for the command | 998 | * use the generic i40e_aqc_macvlan for the command |
999 | */ | 999 | */ |
1000 | struct i40e_aqc_add_remove_vlan_element_data { | 1000 | struct i40e_aqc_add_remove_vlan_element_data { |
1001 | __le16 vlan_tag; | 1001 | __le16 vlan_tag; |
1002 | u8 vlan_flags; | 1002 | u8 vlan_flags; |
1003 | /* flags for add VLAN */ | 1003 | /* flags for add VLAN */ |
1004 | #define I40E_AQC_ADD_VLAN_LOCAL 0x1 | 1004 | #define I40E_AQC_ADD_VLAN_LOCAL 0x1 |
1005 | #define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 | 1005 | #define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 |
1006 | #define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \ | 1006 | #define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) |
1007 | I40E_AQC_ADD_PVLAN_TYPE_SHIFT) | 1007 | #define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 |
1008 | #define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 | 1008 | #define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 |
1009 | #define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 | 1009 | #define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 |
1010 | #define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 | 1010 | #define I40E_AQC_VLAN_PTYPE_SHIFT 3 |
1011 | #define I40E_AQC_VLAN_PTYPE_SHIFT 3 | 1011 | #define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) |
1012 | #define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) | 1012 | #define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 |
1013 | #define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 | 1013 | #define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 |
1014 | #define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 | 1014 | #define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 |
1015 | #define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 | 1015 | #define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 |
1016 | #define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 | ||
1017 | /* flags for remove VLAN */ | 1016 | /* flags for remove VLAN */ |
1018 | #define I40E_AQC_REMOVE_VLAN_ALL 0x1 | 1017 | #define I40E_AQC_REMOVE_VLAN_ALL 0x1 |
1019 | u8 reserved; | 1018 | u8 reserved; |
1020 | u8 result; | 1019 | u8 result; |
1021 | /* flags for add VLAN */ | 1020 | /* flags for add VLAN */ |
1022 | #define I40E_AQC_ADD_VLAN_SUCCESS 0x0 | 1021 | #define I40E_AQC_ADD_VLAN_SUCCESS 0x0 |
1023 | #define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE | 1022 | #define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE |
1024 | #define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF | 1023 | #define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF |
1025 | /* flags for remove VLAN */ | 1024 | /* flags for remove VLAN */ |
1026 | #define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 | 1025 | #define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 |
1027 | #define I40E_AQC_REMOVE_VLAN_FAIL 0xFF | 1026 | #define I40E_AQC_REMOVE_VLAN_FAIL 0xFF |
1028 | u8 reserved1[3]; | 1027 | u8 reserved1[3]; |
1029 | }; | 1028 | }; |
1030 | 1029 | ||
1031 | struct i40e_aqc_add_remove_vlan_completion { | 1030 | struct i40e_aqc_add_remove_vlan_completion { |
1032 | u8 reserved[4]; | 1031 | u8 reserved[4]; |
1033 | __le16 vlans_used; | 1032 | __le16 vlans_used; |
1034 | __le16 vlans_free; | 1033 | __le16 vlans_free; |
1035 | __le32 addr_high; | 1034 | __le32 addr_high; |
1036 | __le32 addr_low; | 1035 | __le32 addr_low; |
1037 | }; | 1036 | }; |
1038 | 1037 | ||
1039 | /* Set VSI Promiscuous Modes (direct 0x0254) */ | 1038 | /* Set VSI Promiscuous Modes (direct 0x0254) */ |
1040 | struct i40e_aqc_set_vsi_promiscuous_modes { | 1039 | struct i40e_aqc_set_vsi_promiscuous_modes { |
1041 | __le16 promiscuous_flags; | 1040 | __le16 promiscuous_flags; |
1042 | __le16 valid_flags; | 1041 | __le16 valid_flags; |
1043 | /* flags used for both fields above */ | 1042 | /* flags used for both fields above */ |
1044 | #define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 | 1043 | #define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 |
1045 | #define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 | 1044 | #define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 |
1046 | #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 | 1045 | #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 |
1047 | #define I40E_AQC_SET_VSI_DEFAULT 0x08 | 1046 | #define I40E_AQC_SET_VSI_DEFAULT 0x08 |
1048 | #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 | 1047 | #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 |
1049 | __le16 seid; | 1048 | __le16 seid; |
1050 | #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF | 1049 | #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF |
1051 | __le16 vlan_tag; | 1050 | __le16 vlan_tag; |
1052 | #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 | 1051 | #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 |
1053 | u8 reserved[8]; | 1052 | u8 reserved[8]; |
1054 | }; | 1053 | }; |
1055 | 1054 | ||
1056 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); | 1055 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); |
@@ -1059,23 +1058,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); | |||
1059 | * Uses generic i40e_aqc_add_remove_tag_completion for completion | 1058 | * Uses generic i40e_aqc_add_remove_tag_completion for completion |
1060 | */ | 1059 | */ |
1061 | struct i40e_aqc_add_tag { | 1060 | struct i40e_aqc_add_tag { |
1062 | __le16 flags; | 1061 | __le16 flags; |
1063 | #define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 | 1062 | #define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 |
1064 | __le16 seid; | 1063 | __le16 seid; |
1065 | #define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 | 1064 | #define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 |
1066 | #define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ | 1065 | #define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ |
1067 | I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) | 1066 | I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) |
1068 | __le16 tag; | 1067 | __le16 tag; |
1069 | __le16 queue_number; | 1068 | __le16 queue_number; |
1070 | u8 reserved[8]; | 1069 | u8 reserved[8]; |
1071 | }; | 1070 | }; |
1072 | 1071 | ||
1073 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); | 1072 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); |
1074 | 1073 | ||
1075 | struct i40e_aqc_add_remove_tag_completion { | 1074 | struct i40e_aqc_add_remove_tag_completion { |
1076 | u8 reserved[12]; | 1075 | u8 reserved[12]; |
1077 | __le16 tags_used; | 1076 | __le16 tags_used; |
1078 | __le16 tags_free; | 1077 | __le16 tags_free; |
1079 | }; | 1078 | }; |
1080 | 1079 | ||
1081 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); | 1080 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); |
@@ -1084,12 +1083,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); | |||
1084 | * Uses generic i40e_aqc_add_remove_tag_completion for completion | 1083 | * Uses generic i40e_aqc_add_remove_tag_completion for completion |
1085 | */ | 1084 | */ |
1086 | struct i40e_aqc_remove_tag { | 1085 | struct i40e_aqc_remove_tag { |
1087 | __le16 seid; | 1086 | __le16 seid; |
1088 | #define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 | 1087 | #define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 |
1089 | #define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ | 1088 | #define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ |
1090 | I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) | 1089 | I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) |
1091 | __le16 tag; | 1090 | __le16 tag; |
1092 | u8 reserved[12]; | 1091 | u8 reserved[12]; |
1093 | }; | 1092 | }; |
1094 | 1093 | ||
1095 | /* Add multicast E-Tag (direct 0x0257) | 1094 | /* Add multicast E-Tag (direct 0x0257) |
@@ -1097,22 +1096,22 @@ struct i40e_aqc_remove_tag { | |||
1097 | * and no external data | 1096 | * and no external data |
1098 | */ | 1097 | */ |
1099 | struct i40e_aqc_add_remove_mcast_etag { | 1098 | struct i40e_aqc_add_remove_mcast_etag { |
1100 | __le16 pv_seid; | 1099 | __le16 pv_seid; |
1101 | __le16 etag; | 1100 | __le16 etag; |
1102 | u8 num_unicast_etags; | 1101 | u8 num_unicast_etags; |
1103 | u8 reserved[3]; | 1102 | u8 reserved[3]; |
1104 | __le32 addr_high; /* address of array of 2-byte s-tags */ | 1103 | __le32 addr_high; /* address of array of 2-byte s-tags */ |
1105 | __le32 addr_low; | 1104 | __le32 addr_low; |
1106 | }; | 1105 | }; |
1107 | 1106 | ||
1108 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); | 1107 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); |
1109 | 1108 | ||
1110 | struct i40e_aqc_add_remove_mcast_etag_completion { | 1109 | struct i40e_aqc_add_remove_mcast_etag_completion { |
1111 | u8 reserved[4]; | 1110 | u8 reserved[4]; |
1112 | __le16 mcast_etags_used; | 1111 | __le16 mcast_etags_used; |
1113 | __le16 mcast_etags_free; | 1112 | __le16 mcast_etags_free; |
1114 | __le32 addr_high; | 1113 | __le32 addr_high; |
1115 | __le32 addr_low; | 1114 | __le32 addr_low; |
1116 | 1115 | ||
1117 | }; | 1116 | }; |
1118 | 1117 | ||
@@ -1120,21 +1119,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); | |||
1120 | 1119 | ||
1121 | /* Update S/E-Tag (direct 0x0259) */ | 1120 | /* Update S/E-Tag (direct 0x0259) */ |
1122 | struct i40e_aqc_update_tag { | 1121 | struct i40e_aqc_update_tag { |
1123 | __le16 seid; | 1122 | __le16 seid; |
1124 | #define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 | 1123 | #define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 |
1125 | #define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ | 1124 | #define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ |
1126 | I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) | 1125 | I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) |
1127 | __le16 old_tag; | 1126 | __le16 old_tag; |
1128 | __le16 new_tag; | 1127 | __le16 new_tag; |
1129 | u8 reserved[10]; | 1128 | u8 reserved[10]; |
1130 | }; | 1129 | }; |
1131 | 1130 | ||
1132 | I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); | 1131 | I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); |
1133 | 1132 | ||
1134 | struct i40e_aqc_update_tag_completion { | 1133 | struct i40e_aqc_update_tag_completion { |
1135 | u8 reserved[12]; | 1134 | u8 reserved[12]; |
1136 | __le16 tags_used; | 1135 | __le16 tags_used; |
1137 | __le16 tags_free; | 1136 | __le16 tags_free; |
1138 | }; | 1137 | }; |
1139 | 1138 | ||
1140 | I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); | 1139 | I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); |
@@ -1145,30 +1144,30 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); | |||
1145 | * and the generic direct completion structure | 1144 | * and the generic direct completion structure |
1146 | */ | 1145 | */ |
1147 | struct i40e_aqc_add_remove_control_packet_filter { | 1146 | struct i40e_aqc_add_remove_control_packet_filter { |
1148 | u8 mac[6]; | 1147 | u8 mac[6]; |
1149 | __le16 etype; | 1148 | __le16 etype; |
1150 | __le16 flags; | 1149 | __le16 flags; |
1151 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 | 1150 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 |
1152 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 | 1151 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 |
1153 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 | 1152 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 |
1154 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 | 1153 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 |
1155 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 | 1154 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 |
1156 | __le16 seid; | 1155 | __le16 seid; |
1157 | #define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 | 1156 | #define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 |
1158 | #define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ | 1157 | #define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ |
1159 | I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) | 1158 | I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) |
1160 | __le16 queue; | 1159 | __le16 queue; |
1161 | u8 reserved[2]; | 1160 | u8 reserved[2]; |
1162 | }; | 1161 | }; |
1163 | 1162 | ||
1164 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); | 1163 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); |
1165 | 1164 | ||
1166 | struct i40e_aqc_add_remove_control_packet_filter_completion { | 1165 | struct i40e_aqc_add_remove_control_packet_filter_completion { |
1167 | __le16 mac_etype_used; | 1166 | __le16 mac_etype_used; |
1168 | __le16 etype_used; | 1167 | __le16 etype_used; |
1169 | __le16 mac_etype_free; | 1168 | __le16 mac_etype_free; |
1170 | __le16 etype_free; | 1169 | __le16 etype_free; |
1171 | u8 reserved[8]; | 1170 | u8 reserved[8]; |
1172 | }; | 1171 | }; |
1173 | 1172 | ||
1174 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); | 1173 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); |
@@ -1179,23 +1178,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); | |||
1179 | * and the generic indirect completion structure | 1178 | * and the generic indirect completion structure |
1180 | */ | 1179 | */ |
1181 | struct i40e_aqc_add_remove_cloud_filters { | 1180 | struct i40e_aqc_add_remove_cloud_filters { |
1182 | u8 num_filters; | 1181 | u8 num_filters; |
1183 | u8 reserved; | 1182 | u8 reserved; |
1184 | __le16 seid; | 1183 | __le16 seid; |
1185 | #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 | 1184 | #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 |
1186 | #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ | 1185 | #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ |
1187 | I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) | 1186 | I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) |
1188 | u8 reserved2[4]; | 1187 | u8 reserved2[4]; |
1189 | __le32 addr_high; | 1188 | __le32 addr_high; |
1190 | __le32 addr_low; | 1189 | __le32 addr_low; |
1191 | }; | 1190 | }; |
1192 | 1191 | ||
1193 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); | 1192 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); |
1194 | 1193 | ||
1195 | struct i40e_aqc_add_remove_cloud_filters_element_data { | 1194 | struct i40e_aqc_add_remove_cloud_filters_element_data { |
1196 | u8 outer_mac[6]; | 1195 | u8 outer_mac[6]; |
1197 | u8 inner_mac[6]; | 1196 | u8 inner_mac[6]; |
1198 | __le16 inner_vlan; | 1197 | __le16 inner_vlan; |
1199 | union { | 1198 | union { |
1200 | struct { | 1199 | struct { |
1201 | u8 reserved[12]; | 1200 | u8 reserved[12]; |
@@ -1205,49 +1204,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { | |||
1205 | u8 data[16]; | 1204 | u8 data[16]; |
1206 | } v6; | 1205 | } v6; |
1207 | } ipaddr; | 1206 | } ipaddr; |
1208 | __le16 flags; | 1207 | __le16 flags; |
1209 | #define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 | 1208 | #define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 |
1210 | #define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ | 1209 | #define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ |
1211 | I40E_AQC_ADD_CLOUD_FILTER_SHIFT) | 1210 | I40E_AQC_ADD_CLOUD_FILTER_SHIFT) |
1212 | /* 0x0000 reserved */ | 1211 | /* 0x0000 reserved */ |
1213 | #define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 | 1212 | #define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 |
1214 | /* 0x0002 reserved */ | 1213 | /* 0x0002 reserved */ |
1215 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 | 1214 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 |
1216 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 | 1215 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 |
1217 | /* 0x0005 reserved */ | 1216 | /* 0x0005 reserved */ |
1218 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 | 1217 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 |
1219 | /* 0x0007 reserved */ | 1218 | /* 0x0007 reserved */ |
1220 | /* 0x0008 reserved */ | 1219 | /* 0x0008 reserved */ |
1221 | #define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 | 1220 | #define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 |
1222 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A | 1221 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A |
1223 | #define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B | 1222 | #define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B |
1224 | #define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C | 1223 | #define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C |
1225 | 1224 | ||
1226 | #define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 | 1225 | #define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 |
1227 | #define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 | 1226 | #define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 |
1228 | #define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 | 1227 | #define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 |
1229 | #define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 | 1228 | #define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 |
1230 | #define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 | 1229 | #define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 |
1231 | 1230 | ||
1232 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 | 1231 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 |
1233 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 | 1232 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 |
1234 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 | 1233 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 |
1235 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 | 1234 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 |
1236 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 | 1235 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 |
1237 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 | 1236 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 |
1238 | 1237 | ||
1239 | __le32 tenant_id; | 1238 | __le32 tenant_id; |
1240 | u8 reserved[4]; | 1239 | u8 reserved[4]; |
1241 | __le16 queue_number; | 1240 | __le16 queue_number; |
1242 | #define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 | 1241 | #define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 |
1243 | #define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ | 1242 | #define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ |
1244 | I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) | 1243 | I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) |
1245 | u8 reserved2[14]; | 1244 | u8 reserved2[14]; |
1246 | /* response section */ | 1245 | /* response section */ |
1247 | u8 allocation_result; | 1246 | u8 allocation_result; |
1248 | #define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 | 1247 | #define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 |
1249 | #define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF | 1248 | #define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF |
1250 | u8 response_reserved[7]; | 1249 | u8 response_reserved[7]; |
1251 | }; | 1250 | }; |
1252 | 1251 | ||
1253 | struct i40e_aqc_remove_cloud_filters_completion { | 1252 | struct i40e_aqc_remove_cloud_filters_completion { |
@@ -1269,14 +1268,14 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); | |||
1269 | struct i40e_aqc_add_delete_mirror_rule { | 1268 | struct i40e_aqc_add_delete_mirror_rule { |
1270 | __le16 seid; | 1269 | __le16 seid; |
1271 | __le16 rule_type; | 1270 | __le16 rule_type; |
1272 | #define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 | 1271 | #define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 |
1273 | #define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ | 1272 | #define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ |
1274 | I40E_AQC_MIRROR_RULE_TYPE_SHIFT) | 1273 | I40E_AQC_MIRROR_RULE_TYPE_SHIFT) |
1275 | #define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 | 1274 | #define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 |
1276 | #define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 | 1275 | #define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 |
1277 | #define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 | 1276 | #define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 |
1278 | #define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 | 1277 | #define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 |
1279 | #define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 | 1278 | #define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 |
1280 | __le16 num_entries; | 1279 | __le16 num_entries; |
1281 | __le16 destination; /* VSI for add, rule id for delete */ | 1280 | __le16 destination; /* VSI for add, rule id for delete */ |
1282 | __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ | 1281 | __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ |
@@ -1286,12 +1285,12 @@ struct i40e_aqc_add_delete_mirror_rule { | |||
1286 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); | 1285 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); |
1287 | 1286 | ||
1288 | struct i40e_aqc_add_delete_mirror_rule_completion { | 1287 | struct i40e_aqc_add_delete_mirror_rule_completion { |
1289 | u8 reserved[2]; | 1288 | u8 reserved[2]; |
1290 | __le16 rule_id; /* only used on add */ | 1289 | __le16 rule_id; /* only used on add */ |
1291 | __le16 mirror_rules_used; | 1290 | __le16 mirror_rules_used; |
1292 | __le16 mirror_rules_free; | 1291 | __le16 mirror_rules_free; |
1293 | __le32 addr_high; | 1292 | __le32 addr_high; |
1294 | __le32 addr_low; | 1293 | __le32 addr_low; |
1295 | }; | 1294 | }; |
1296 | 1295 | ||
1297 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); | 1296 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); |
@@ -1302,11 +1301,11 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); | |||
1302 | * the command and response use the same descriptor structure | 1301 | * the command and response use the same descriptor structure |
1303 | */ | 1302 | */ |
1304 | struct i40e_aqc_pfc_ignore { | 1303 | struct i40e_aqc_pfc_ignore { |
1305 | u8 tc_bitmap; | 1304 | u8 tc_bitmap; |
1306 | u8 command_flags; /* unused on response */ | 1305 | u8 command_flags; /* unused on response */ |
1307 | #define I40E_AQC_PFC_IGNORE_SET 0x80 | 1306 | #define I40E_AQC_PFC_IGNORE_SET 0x80 |
1308 | #define I40E_AQC_PFC_IGNORE_CLEAR 0x0 | 1307 | #define I40E_AQC_PFC_IGNORE_CLEAR 0x0 |
1309 | u8 reserved[14]; | 1308 | u8 reserved[14]; |
1310 | }; | 1309 | }; |
1311 | 1310 | ||
1312 | I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); | 1311 | I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); |
@@ -1321,10 +1320,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); | |||
1321 | * this generic struct to pass the SEID in param0 | 1320 | * this generic struct to pass the SEID in param0 |
1322 | */ | 1321 | */ |
1323 | struct i40e_aqc_tx_sched_ind { | 1322 | struct i40e_aqc_tx_sched_ind { |
1324 | __le16 vsi_seid; | 1323 | __le16 vsi_seid; |
1325 | u8 reserved[6]; | 1324 | u8 reserved[6]; |
1326 | __le32 addr_high; | 1325 | __le32 addr_high; |
1327 | __le32 addr_low; | 1326 | __le32 addr_low; |
1328 | }; | 1327 | }; |
1329 | 1328 | ||
1330 | I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); | 1329 | I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); |
@@ -1336,12 +1335,12 @@ struct i40e_aqc_qs_handles_resp { | |||
1336 | 1335 | ||
1337 | /* Configure VSI BW limits (direct 0x0400) */ | 1336 | /* Configure VSI BW limits (direct 0x0400) */ |
1338 | struct i40e_aqc_configure_vsi_bw_limit { | 1337 | struct i40e_aqc_configure_vsi_bw_limit { |
1339 | __le16 vsi_seid; | 1338 | __le16 vsi_seid; |
1340 | u8 reserved[2]; | 1339 | u8 reserved[2]; |
1341 | __le16 credit; | 1340 | __le16 credit; |
1342 | u8 reserved1[2]; | 1341 | u8 reserved1[2]; |
1343 | u8 max_credit; /* 0-3, limit = 2^max */ | 1342 | u8 max_credit; /* 0-3, limit = 2^max */ |
1344 | u8 reserved2[7]; | 1343 | u8 reserved2[7]; |
1345 | }; | 1344 | }; |
1346 | 1345 | ||
1347 | I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); | 1346 | I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); |
@@ -1350,58 +1349,58 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); | |||
1350 | * responds with i40e_aqc_qs_handles_resp | 1349 | * responds with i40e_aqc_qs_handles_resp |
1351 | */ | 1350 | */ |
1352 | struct i40e_aqc_configure_vsi_ets_sla_bw_data { | 1351 | struct i40e_aqc_configure_vsi_ets_sla_bw_data { |
1353 | u8 tc_valid_bits; | 1352 | u8 tc_valid_bits; |
1354 | u8 reserved[15]; | 1353 | u8 reserved[15]; |
1355 | __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ | 1354 | __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ |
1356 | 1355 | ||
1357 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ | 1356 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ |
1358 | __le16 tc_bw_max[2]; | 1357 | __le16 tc_bw_max[2]; |
1359 | u8 reserved1[28]; | 1358 | u8 reserved1[28]; |
1360 | }; | 1359 | }; |
1361 | 1360 | ||
1362 | /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) | 1361 | /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) |
1363 | * responds with i40e_aqc_qs_handles_resp | 1362 | * responds with i40e_aqc_qs_handles_resp |
1364 | */ | 1363 | */ |
1365 | struct i40e_aqc_configure_vsi_tc_bw_data { | 1364 | struct i40e_aqc_configure_vsi_tc_bw_data { |
1366 | u8 tc_valid_bits; | 1365 | u8 tc_valid_bits; |
1367 | u8 reserved[3]; | 1366 | u8 reserved[3]; |
1368 | u8 tc_bw_credits[8]; | 1367 | u8 tc_bw_credits[8]; |
1369 | u8 reserved1[4]; | 1368 | u8 reserved1[4]; |
1370 | __le16 qs_handles[8]; | 1369 | __le16 qs_handles[8]; |
1371 | }; | 1370 | }; |
1372 | 1371 | ||
1373 | /* Query vsi bw configuration (indirect 0x0408) */ | 1372 | /* Query vsi bw configuration (indirect 0x0408) */ |
1374 | struct i40e_aqc_query_vsi_bw_config_resp { | 1373 | struct i40e_aqc_query_vsi_bw_config_resp { |
1375 | u8 tc_valid_bits; | 1374 | u8 tc_valid_bits; |
1376 | u8 tc_suspended_bits; | 1375 | u8 tc_suspended_bits; |
1377 | u8 reserved[14]; | 1376 | u8 reserved[14]; |
1378 | __le16 qs_handles[8]; | 1377 | __le16 qs_handles[8]; |
1379 | u8 reserved1[4]; | 1378 | u8 reserved1[4]; |
1380 | __le16 port_bw_limit; | 1379 | __le16 port_bw_limit; |
1381 | u8 reserved2[2]; | 1380 | u8 reserved2[2]; |
1382 | u8 max_bw; /* 0-3, limit = 2^max */ | 1381 | u8 max_bw; /* 0-3, limit = 2^max */ |
1383 | u8 reserved3[23]; | 1382 | u8 reserved3[23]; |
1384 | }; | 1383 | }; |
1385 | 1384 | ||
1386 | /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ | 1385 | /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ |
1387 | struct i40e_aqc_query_vsi_ets_sla_config_resp { | 1386 | struct i40e_aqc_query_vsi_ets_sla_config_resp { |
1388 | u8 tc_valid_bits; | 1387 | u8 tc_valid_bits; |
1389 | u8 reserved[3]; | 1388 | u8 reserved[3]; |
1390 | u8 share_credits[8]; | 1389 | u8 share_credits[8]; |
1391 | __le16 credits[8]; | 1390 | __le16 credits[8]; |
1392 | 1391 | ||
1393 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ | 1392 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ |
1394 | __le16 tc_bw_max[2]; | 1393 | __le16 tc_bw_max[2]; |
1395 | }; | 1394 | }; |
1396 | 1395 | ||
1397 | /* Configure Switching Component Bandwidth Limit (direct 0x0410) */ | 1396 | /* Configure Switching Component Bandwidth Limit (direct 0x0410) */ |
1398 | struct i40e_aqc_configure_switching_comp_bw_limit { | 1397 | struct i40e_aqc_configure_switching_comp_bw_limit { |
1399 | __le16 seid; | 1398 | __le16 seid; |
1400 | u8 reserved[2]; | 1399 | u8 reserved[2]; |
1401 | __le16 credit; | 1400 | __le16 credit; |
1402 | u8 reserved1[2]; | 1401 | u8 reserved1[2]; |
1403 | u8 max_bw; /* 0-3, limit = 2^max */ | 1402 | u8 max_bw; /* 0-3, limit = 2^max */ |
1404 | u8 reserved2[7]; | 1403 | u8 reserved2[7]; |
1405 | }; | 1404 | }; |
1406 | 1405 | ||
1407 | I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); | 1406 | I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); |
@@ -1411,75 +1410,75 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); | |||
1411 | * Disable Physical Port ETS (indirect 0x0415) | 1410 | * Disable Physical Port ETS (indirect 0x0415) |
1412 | */ | 1411 | */ |
1413 | struct i40e_aqc_configure_switching_comp_ets_data { | 1412 | struct i40e_aqc_configure_switching_comp_ets_data { |
1414 | u8 reserved[4]; | 1413 | u8 reserved[4]; |
1415 | u8 tc_valid_bits; | 1414 | u8 tc_valid_bits; |
1416 | u8 seepage; | 1415 | u8 seepage; |
1417 | #define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 | 1416 | #define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 |
1418 | u8 tc_strict_priority_flags; | 1417 | u8 tc_strict_priority_flags; |
1419 | u8 reserved1[17]; | 1418 | u8 reserved1[17]; |
1420 | u8 tc_bw_share_credits[8]; | 1419 | u8 tc_bw_share_credits[8]; |
1421 | u8 reserved2[96]; | 1420 | u8 reserved2[96]; |
1422 | }; | 1421 | }; |
1423 | 1422 | ||
1424 | /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ | 1423 | /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ |
1425 | struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { | 1424 | struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { |
1426 | u8 tc_valid_bits; | 1425 | u8 tc_valid_bits; |
1427 | u8 reserved[15]; | 1426 | u8 reserved[15]; |
1428 | __le16 tc_bw_credit[8]; | 1427 | __le16 tc_bw_credit[8]; |
1429 | 1428 | ||
1430 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ | 1429 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ |
1431 | __le16 tc_bw_max[2]; | 1430 | __le16 tc_bw_max[2]; |
1432 | u8 reserved1[28]; | 1431 | u8 reserved1[28]; |
1433 | }; | 1432 | }; |
1434 | 1433 | ||
1435 | /* Configure Switching Component Bandwidth Allocation per Tc | 1434 | /* Configure Switching Component Bandwidth Allocation per Tc |
1436 | * (indirect 0x0417) | 1435 | * (indirect 0x0417) |
1437 | */ | 1436 | */ |
1438 | struct i40e_aqc_configure_switching_comp_bw_config_data { | 1437 | struct i40e_aqc_configure_switching_comp_bw_config_data { |
1439 | u8 tc_valid_bits; | 1438 | u8 tc_valid_bits; |
1440 | u8 reserved[2]; | 1439 | u8 reserved[2]; |
1441 | u8 absolute_credits; /* bool */ | 1440 | u8 absolute_credits; /* bool */ |
1442 | u8 tc_bw_share_credits[8]; | 1441 | u8 tc_bw_share_credits[8]; |
1443 | u8 reserved1[20]; | 1442 | u8 reserved1[20]; |
1444 | }; | 1443 | }; |
1445 | 1444 | ||
1446 | /* Query Switching Component Configuration (indirect 0x0418) */ | 1445 | /* Query Switching Component Configuration (indirect 0x0418) */ |
1447 | struct i40e_aqc_query_switching_comp_ets_config_resp { | 1446 | struct i40e_aqc_query_switching_comp_ets_config_resp { |
1448 | u8 tc_valid_bits; | 1447 | u8 tc_valid_bits; |
1449 | u8 reserved[35]; | 1448 | u8 reserved[35]; |
1450 | __le16 port_bw_limit; | 1449 | __le16 port_bw_limit; |
1451 | u8 reserved1[2]; | 1450 | u8 reserved1[2]; |
1452 | u8 tc_bw_max; /* 0-3, limit = 2^max */ | 1451 | u8 tc_bw_max; /* 0-3, limit = 2^max */ |
1453 | u8 reserved2[23]; | 1452 | u8 reserved2[23]; |
1454 | }; | 1453 | }; |
1455 | 1454 | ||
1456 | /* Query PhysicalPort ETS Configuration (indirect 0x0419) */ | 1455 | /* Query PhysicalPort ETS Configuration (indirect 0x0419) */ |
1457 | struct i40e_aqc_query_port_ets_config_resp { | 1456 | struct i40e_aqc_query_port_ets_config_resp { |
1458 | u8 reserved[4]; | 1457 | u8 reserved[4]; |
1459 | u8 tc_valid_bits; | 1458 | u8 tc_valid_bits; |
1460 | u8 reserved1; | 1459 | u8 reserved1; |
1461 | u8 tc_strict_priority_bits; | 1460 | u8 tc_strict_priority_bits; |
1462 | u8 reserved2; | 1461 | u8 reserved2; |
1463 | u8 tc_bw_share_credits[8]; | 1462 | u8 tc_bw_share_credits[8]; |
1464 | __le16 tc_bw_limits[8]; | 1463 | __le16 tc_bw_limits[8]; |
1465 | 1464 | ||
1466 | /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ | 1465 | /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ |
1467 | __le16 tc_bw_max[2]; | 1466 | __le16 tc_bw_max[2]; |
1468 | u8 reserved3[32]; | 1467 | u8 reserved3[32]; |
1469 | }; | 1468 | }; |
1470 | 1469 | ||
1471 | /* Query Switching Component Bandwidth Allocation per Traffic Type | 1470 | /* Query Switching Component Bandwidth Allocation per Traffic Type |
1472 | * (indirect 0x041A) | 1471 | * (indirect 0x041A) |
1473 | */ | 1472 | */ |
1474 | struct i40e_aqc_query_switching_comp_bw_config_resp { | 1473 | struct i40e_aqc_query_switching_comp_bw_config_resp { |
1475 | u8 tc_valid_bits; | 1474 | u8 tc_valid_bits; |
1476 | u8 reserved[2]; | 1475 | u8 reserved[2]; |
1477 | u8 absolute_credits_enable; /* bool */ | 1476 | u8 absolute_credits_enable; /* bool */ |
1478 | u8 tc_bw_share_credits[8]; | 1477 | u8 tc_bw_share_credits[8]; |
1479 | __le16 tc_bw_limits[8]; | 1478 | __le16 tc_bw_limits[8]; |
1480 | 1479 | ||
1481 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ | 1480 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ |
1482 | __le16 tc_bw_max[2]; | 1481 | __le16 tc_bw_max[2]; |
1483 | }; | 1482 | }; |
1484 | 1483 | ||
1485 | /* Suspend/resume port TX traffic | 1484 | /* Suspend/resume port TX traffic |
@@ -1490,37 +1489,37 @@ struct i40e_aqc_query_switching_comp_bw_config_resp { | |||
1490 | * (indirect 0x041D) | 1489 | * (indirect 0x041D) |
1491 | */ | 1490 | */ |
1492 | struct i40e_aqc_configure_partition_bw_data { | 1491 | struct i40e_aqc_configure_partition_bw_data { |
1493 | __le16 pf_valid_bits; | 1492 | __le16 pf_valid_bits; |
1494 | u8 min_bw[16]; /* guaranteed bandwidth */ | 1493 | u8 min_bw[16]; /* guaranteed bandwidth */ |
1495 | u8 max_bw[16]; /* bandwidth limit */ | 1494 | u8 max_bw[16]; /* bandwidth limit */ |
1496 | }; | 1495 | }; |
1497 | 1496 | ||
1498 | /* Get and set the active HMC resource profile and status. | 1497 | /* Get and set the active HMC resource profile and status. |
1499 | * (direct 0x0500) and (direct 0x0501) | 1498 | * (direct 0x0500) and (direct 0x0501) |
1500 | */ | 1499 | */ |
1501 | struct i40e_aq_get_set_hmc_resource_profile { | 1500 | struct i40e_aq_get_set_hmc_resource_profile { |
1502 | u8 pm_profile; | 1501 | u8 pm_profile; |
1503 | u8 pe_vf_enabled; | 1502 | u8 pe_vf_enabled; |
1504 | u8 reserved[14]; | 1503 | u8 reserved[14]; |
1505 | }; | 1504 | }; |
1506 | 1505 | ||
1507 | I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); | 1506 | I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); |
1508 | 1507 | ||
1509 | enum i40e_aq_hmc_profile { | 1508 | enum i40e_aq_hmc_profile { |
1510 | /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ | 1509 | /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ |
1511 | I40E_HMC_PROFILE_DEFAULT = 1, | 1510 | I40E_HMC_PROFILE_DEFAULT = 1, |
1512 | I40E_HMC_PROFILE_FAVOR_VF = 2, | 1511 | I40E_HMC_PROFILE_FAVOR_VF = 2, |
1513 | I40E_HMC_PROFILE_EQUAL = 3, | 1512 | I40E_HMC_PROFILE_EQUAL = 3, |
1514 | }; | 1513 | }; |
1515 | 1514 | ||
1516 | #define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF | 1515 | #define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF |
1517 | #define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F | 1516 | #define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F |
1518 | 1517 | ||
1519 | /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ | 1518 | /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ |
1520 | 1519 | ||
1521 | /* set in param0 for get phy abilities to report qualified modules */ | 1520 | /* set in param0 for get phy abilities to report qualified modules */ |
1522 | #define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 | 1521 | #define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 |
1523 | #define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 | 1522 | #define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 |
1524 | 1523 | ||
1525 | enum i40e_aq_phy_type { | 1524 | enum i40e_aq_phy_type { |
1526 | I40E_PHY_TYPE_SGMII = 0x0, | 1525 | I40E_PHY_TYPE_SGMII = 0x0, |
@@ -1578,147 +1577,147 @@ struct i40e_aqc_module_desc { | |||
1578 | }; | 1577 | }; |
1579 | 1578 | ||
1580 | struct i40e_aq_get_phy_abilities_resp { | 1579 | struct i40e_aq_get_phy_abilities_resp { |
1581 | __le32 phy_type; /* bitmap using the above enum for offsets */ | 1580 | __le32 phy_type; /* bitmap using the above enum for offsets */ |
1582 | u8 link_speed; /* bitmap using the above enum bit patterns */ | 1581 | u8 link_speed; /* bitmap using the above enum bit patterns */ |
1583 | u8 abilities; | 1582 | u8 abilities; |
1584 | #define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 | 1583 | #define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 |
1585 | #define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 | 1584 | #define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 |
1586 | #define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 | 1585 | #define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 |
1587 | #define I40E_AQ_PHY_LINK_ENABLED 0x08 | 1586 | #define I40E_AQ_PHY_LINK_ENABLED 0x08 |
1588 | #define I40E_AQ_PHY_AN_ENABLED 0x10 | 1587 | #define I40E_AQ_PHY_AN_ENABLED 0x10 |
1589 | #define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 | 1588 | #define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 |
1590 | __le16 eee_capability; | 1589 | __le16 eee_capability; |
1591 | #define I40E_AQ_EEE_100BASE_TX 0x0002 | 1590 | #define I40E_AQ_EEE_100BASE_TX 0x0002 |
1592 | #define I40E_AQ_EEE_1000BASE_T 0x0004 | 1591 | #define I40E_AQ_EEE_1000BASE_T 0x0004 |
1593 | #define I40E_AQ_EEE_10GBASE_T 0x0008 | 1592 | #define I40E_AQ_EEE_10GBASE_T 0x0008 |
1594 | #define I40E_AQ_EEE_1000BASE_KX 0x0010 | 1593 | #define I40E_AQ_EEE_1000BASE_KX 0x0010 |
1595 | #define I40E_AQ_EEE_10GBASE_KX4 0x0020 | 1594 | #define I40E_AQ_EEE_10GBASE_KX4 0x0020 |
1596 | #define I40E_AQ_EEE_10GBASE_KR 0x0040 | 1595 | #define I40E_AQ_EEE_10GBASE_KR 0x0040 |
1597 | __le32 eeer_val; | 1596 | __le32 eeer_val; |
1598 | u8 d3_lpan; | 1597 | u8 d3_lpan; |
1599 | #define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 | 1598 | #define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 |
1600 | u8 reserved[3]; | 1599 | u8 reserved[3]; |
1601 | u8 phy_id[4]; | 1600 | u8 phy_id[4]; |
1602 | u8 module_type[3]; | 1601 | u8 module_type[3]; |
1603 | u8 qualified_module_count; | 1602 | u8 qualified_module_count; |
1604 | #define I40E_AQ_PHY_MAX_QMS 16 | 1603 | #define I40E_AQ_PHY_MAX_QMS 16 |
1605 | struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; | 1604 | struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; |
1606 | }; | 1605 | }; |
1607 | 1606 | ||
1608 | /* Set PHY Config (direct 0x0601) */ | 1607 | /* Set PHY Config (direct 0x0601) */ |
1609 | struct i40e_aq_set_phy_config { /* same bits as above in all */ | 1608 | struct i40e_aq_set_phy_config { /* same bits as above in all */ |
1610 | __le32 phy_type; | 1609 | __le32 phy_type; |
1611 | u8 link_speed; | 1610 | u8 link_speed; |
1612 | u8 abilities; | 1611 | u8 abilities; |
1613 | /* bits 0-2 use the values from get_phy_abilities_resp */ | 1612 | /* bits 0-2 use the values from get_phy_abilities_resp */ |
1614 | #define I40E_AQ_PHY_ENABLE_LINK 0x08 | 1613 | #define I40E_AQ_PHY_ENABLE_LINK 0x08 |
1615 | #define I40E_AQ_PHY_ENABLE_AN 0x10 | 1614 | #define I40E_AQ_PHY_ENABLE_AN 0x10 |
1616 | #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 | 1615 | #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 |
1617 | __le16 eee_capability; | 1616 | __le16 eee_capability; |
1618 | __le32 eeer; | 1617 | __le32 eeer; |
1619 | u8 low_power_ctrl; | 1618 | u8 low_power_ctrl; |
1620 | u8 reserved[3]; | 1619 | u8 reserved[3]; |
1621 | }; | 1620 | }; |
1622 | 1621 | ||
1623 | I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); | 1622 | I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); |
1624 | 1623 | ||
1625 | /* Set MAC Config command data structure (direct 0x0603) */ | 1624 | /* Set MAC Config command data structure (direct 0x0603) */ |
1626 | struct i40e_aq_set_mac_config { | 1625 | struct i40e_aq_set_mac_config { |
1627 | __le16 max_frame_size; | 1626 | __le16 max_frame_size; |
1628 | u8 params; | 1627 | u8 params; |
1629 | #define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 | 1628 | #define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 |
1630 | #define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 | 1629 | #define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 |
1631 | #define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 | 1630 | #define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 |
1632 | #define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 | 1631 | #define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 |
1633 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF | 1632 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF |
1634 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 | 1633 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 |
1635 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 | 1634 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 |
1636 | #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 | 1635 | #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 |
1637 | #define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 | 1636 | #define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 |
1638 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 | 1637 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 |
1639 | #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 | 1638 | #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 |
1640 | #define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 | 1639 | #define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 |
1641 | #define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 | 1640 | #define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 |
1642 | #define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 | 1641 | #define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 |
1643 | u8 tx_timer_priority; /* bitmap */ | 1642 | u8 tx_timer_priority; /* bitmap */ |
1644 | __le16 tx_timer_value; | 1643 | __le16 tx_timer_value; |
1645 | __le16 fc_refresh_threshold; | 1644 | __le16 fc_refresh_threshold; |
1646 | u8 reserved[8]; | 1645 | u8 reserved[8]; |
1647 | }; | 1646 | }; |
1648 | 1647 | ||
1649 | I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); | 1648 | I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); |
1650 | 1649 | ||
1651 | /* Restart Auto-Negotiation (direct 0x605) */ | 1650 | /* Restart Auto-Negotiation (direct 0x605) */ |
1652 | struct i40e_aqc_set_link_restart_an { | 1651 | struct i40e_aqc_set_link_restart_an { |
1653 | u8 command; | 1652 | u8 command; |
1654 | #define I40E_AQ_PHY_RESTART_AN 0x02 | 1653 | #define I40E_AQ_PHY_RESTART_AN 0x02 |
1655 | #define I40E_AQ_PHY_LINK_ENABLE 0x04 | 1654 | #define I40E_AQ_PHY_LINK_ENABLE 0x04 |
1656 | u8 reserved[15]; | 1655 | u8 reserved[15]; |
1657 | }; | 1656 | }; |
1658 | 1657 | ||
1659 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); | 1658 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); |
1660 | 1659 | ||
1661 | /* Get Link Status cmd & response data structure (direct 0x0607) */ | 1660 | /* Get Link Status cmd & response data structure (direct 0x0607) */ |
1662 | struct i40e_aqc_get_link_status { | 1661 | struct i40e_aqc_get_link_status { |
1663 | __le16 command_flags; /* only field set on command */ | 1662 | __le16 command_flags; /* only field set on command */ |
1664 | #define I40E_AQ_LSE_MASK 0x3 | 1663 | #define I40E_AQ_LSE_MASK 0x3 |
1665 | #define I40E_AQ_LSE_NOP 0x0 | 1664 | #define I40E_AQ_LSE_NOP 0x0 |
1666 | #define I40E_AQ_LSE_DISABLE 0x2 | 1665 | #define I40E_AQ_LSE_DISABLE 0x2 |
1667 | #define I40E_AQ_LSE_ENABLE 0x3 | 1666 | #define I40E_AQ_LSE_ENABLE 0x3 |
1668 | /* only response uses this flag */ | 1667 | /* only response uses this flag */ |
1669 | #define I40E_AQ_LSE_IS_ENABLED 0x1 | 1668 | #define I40E_AQ_LSE_IS_ENABLED 0x1 |
1670 | u8 phy_type; /* i40e_aq_phy_type */ | 1669 | u8 phy_type; /* i40e_aq_phy_type */ |
1671 | u8 link_speed; /* i40e_aq_link_speed */ | 1670 | u8 link_speed; /* i40e_aq_link_speed */ |
1672 | u8 link_info; | 1671 | u8 link_info; |
1673 | #define I40E_AQ_LINK_UP 0x01 | 1672 | #define I40E_AQ_LINK_UP 0x01 |
1674 | #define I40E_AQ_LINK_FAULT 0x02 | 1673 | #define I40E_AQ_LINK_FAULT 0x02 |
1675 | #define I40E_AQ_LINK_FAULT_TX 0x04 | 1674 | #define I40E_AQ_LINK_FAULT_TX 0x04 |
1676 | #define I40E_AQ_LINK_FAULT_RX 0x08 | 1675 | #define I40E_AQ_LINK_FAULT_RX 0x08 |
1677 | #define I40E_AQ_LINK_FAULT_REMOTE 0x10 | 1676 | #define I40E_AQ_LINK_FAULT_REMOTE 0x10 |
1678 | #define I40E_AQ_MEDIA_AVAILABLE 0x40 | 1677 | #define I40E_AQ_MEDIA_AVAILABLE 0x40 |
1679 | #define I40E_AQ_SIGNAL_DETECT 0x80 | 1678 | #define I40E_AQ_SIGNAL_DETECT 0x80 |
1680 | u8 an_info; | 1679 | u8 an_info; |
1681 | #define I40E_AQ_AN_COMPLETED 0x01 | 1680 | #define I40E_AQ_AN_COMPLETED 0x01 |
1682 | #define I40E_AQ_LP_AN_ABILITY 0x02 | 1681 | #define I40E_AQ_LP_AN_ABILITY 0x02 |
1683 | #define I40E_AQ_PD_FAULT 0x04 | 1682 | #define I40E_AQ_PD_FAULT 0x04 |
1684 | #define I40E_AQ_FEC_EN 0x08 | 1683 | #define I40E_AQ_FEC_EN 0x08 |
1685 | #define I40E_AQ_PHY_LOW_POWER 0x10 | 1684 | #define I40E_AQ_PHY_LOW_POWER 0x10 |
1686 | #define I40E_AQ_LINK_PAUSE_TX 0x20 | 1685 | #define I40E_AQ_LINK_PAUSE_TX 0x20 |
1687 | #define I40E_AQ_LINK_PAUSE_RX 0x40 | 1686 | #define I40E_AQ_LINK_PAUSE_RX 0x40 |
1688 | #define I40E_AQ_QUALIFIED_MODULE 0x80 | 1687 | #define I40E_AQ_QUALIFIED_MODULE 0x80 |
1689 | u8 ext_info; | 1688 | u8 ext_info; |
1690 | #define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 | 1689 | #define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 |
1691 | #define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 | 1690 | #define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 |
1692 | #define I40E_AQ_LINK_TX_SHIFT 0x02 | 1691 | #define I40E_AQ_LINK_TX_SHIFT 0x02 |
1693 | #define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) | 1692 | #define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) |
1694 | #define I40E_AQ_LINK_TX_ACTIVE 0x00 | 1693 | #define I40E_AQ_LINK_TX_ACTIVE 0x00 |
1695 | #define I40E_AQ_LINK_TX_DRAINED 0x01 | 1694 | #define I40E_AQ_LINK_TX_DRAINED 0x01 |
1696 | #define I40E_AQ_LINK_TX_FLUSHED 0x03 | 1695 | #define I40E_AQ_LINK_TX_FLUSHED 0x03 |
1697 | #define I40E_AQ_LINK_FORCED_40G 0x10 | 1696 | #define I40E_AQ_LINK_FORCED_40G 0x10 |
1698 | u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ | 1697 | u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ |
1699 | __le16 max_frame_size; | 1698 | __le16 max_frame_size; |
1700 | u8 config; | 1699 | u8 config; |
1701 | #define I40E_AQ_CONFIG_CRC_ENA 0x04 | 1700 | #define I40E_AQ_CONFIG_CRC_ENA 0x04 |
1702 | #define I40E_AQ_CONFIG_PACING_MASK 0x78 | 1701 | #define I40E_AQ_CONFIG_PACING_MASK 0x78 |
1703 | u8 reserved[5]; | 1702 | u8 reserved[5]; |
1704 | }; | 1703 | }; |
1705 | 1704 | ||
1706 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); | 1705 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); |
1707 | 1706 | ||
1708 | /* Set event mask command (direct 0x613) */ | 1707 | /* Set event mask command (direct 0x613) */ |
1709 | struct i40e_aqc_set_phy_int_mask { | 1708 | struct i40e_aqc_set_phy_int_mask { |
1710 | u8 reserved[8]; | 1709 | u8 reserved[8]; |
1711 | __le16 event_mask; | 1710 | __le16 event_mask; |
1712 | #define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 | 1711 | #define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 |
1713 | #define I40E_AQ_EVENT_MEDIA_NA 0x0004 | 1712 | #define I40E_AQ_EVENT_MEDIA_NA 0x0004 |
1714 | #define I40E_AQ_EVENT_LINK_FAULT 0x0008 | 1713 | #define I40E_AQ_EVENT_LINK_FAULT 0x0008 |
1715 | #define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 | 1714 | #define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 |
1716 | #define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 | 1715 | #define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 |
1717 | #define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 | 1716 | #define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 |
1718 | #define I40E_AQ_EVENT_AN_COMPLETED 0x0080 | 1717 | #define I40E_AQ_EVENT_AN_COMPLETED 0x0080 |
1719 | #define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 | 1718 | #define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 |
1720 | #define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 | 1719 | #define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 |
1721 | u8 reserved1[6]; | 1720 | u8 reserved1[6]; |
1722 | }; | 1721 | }; |
1723 | 1722 | ||
1724 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); | 1723 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); |
@@ -1728,27 +1727,27 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); | |||
1728 | * Get Link Partner AN advt register (direct 0x0616) | 1727 | * Get Link Partner AN advt register (direct 0x0616) |
1729 | */ | 1728 | */ |
1730 | struct i40e_aqc_an_advt_reg { | 1729 | struct i40e_aqc_an_advt_reg { |
1731 | __le32 local_an_reg0; | 1730 | __le32 local_an_reg0; |
1732 | __le16 local_an_reg1; | 1731 | __le16 local_an_reg1; |
1733 | u8 reserved[10]; | 1732 | u8 reserved[10]; |
1734 | }; | 1733 | }; |
1735 | 1734 | ||
1736 | I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); | 1735 | I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); |
1737 | 1736 | ||
1738 | /* Set Loopback mode (0x0618) */ | 1737 | /* Set Loopback mode (0x0618) */ |
1739 | struct i40e_aqc_set_lb_mode { | 1738 | struct i40e_aqc_set_lb_mode { |
1740 | __le16 lb_mode; | 1739 | __le16 lb_mode; |
1741 | #define I40E_AQ_LB_PHY_LOCAL 0x01 | 1740 | #define I40E_AQ_LB_PHY_LOCAL 0x01 |
1742 | #define I40E_AQ_LB_PHY_REMOTE 0x02 | 1741 | #define I40E_AQ_LB_PHY_REMOTE 0x02 |
1743 | #define I40E_AQ_LB_MAC_LOCAL 0x04 | 1742 | #define I40E_AQ_LB_MAC_LOCAL 0x04 |
1744 | u8 reserved[14]; | 1743 | u8 reserved[14]; |
1745 | }; | 1744 | }; |
1746 | 1745 | ||
1747 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); | 1746 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); |
1748 | 1747 | ||
1749 | /* Set PHY Debug command (0x0622) */ | 1748 | /* Set PHY Debug command (0x0622) */ |
1750 | struct i40e_aqc_set_phy_debug { | 1749 | struct i40e_aqc_set_phy_debug { |
1751 | u8 command_flags; | 1750 | u8 command_flags; |
1752 | #define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 | 1751 | #define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 |
1753 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 | 1752 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 |
1754 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ | 1753 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ |
@@ -1757,15 +1756,15 @@ struct i40e_aqc_set_phy_debug { | |||
1757 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 | 1756 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 |
1758 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 | 1757 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 |
1759 | #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 | 1758 | #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 |
1760 | u8 reserved[15]; | 1759 | u8 reserved[15]; |
1761 | }; | 1760 | }; |
1762 | 1761 | ||
1763 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); | 1762 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); |
1764 | 1763 | ||
1765 | enum i40e_aq_phy_reg_type { | 1764 | enum i40e_aq_phy_reg_type { |
1766 | I40E_AQC_PHY_REG_INTERNAL = 0x1, | 1765 | I40E_AQC_PHY_REG_INTERNAL = 0x1, |
1767 | I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, | 1766 | I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, |
1768 | I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 | 1767 | I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 |
1769 | }; | 1768 | }; |
1770 | 1769 | ||
1771 | /* NVM Read command (indirect 0x0701) | 1770 | /* NVM Read command (indirect 0x0701) |
@@ -1773,40 +1772,40 @@ enum i40e_aq_phy_reg_type { | |||
1773 | * NVM Update commands (indirect 0x0703) | 1772 | * NVM Update commands (indirect 0x0703) |
1774 | */ | 1773 | */ |
1775 | struct i40e_aqc_nvm_update { | 1774 | struct i40e_aqc_nvm_update { |
1776 | u8 command_flags; | 1775 | u8 command_flags; |
1777 | #define I40E_AQ_NVM_LAST_CMD 0x01 | 1776 | #define I40E_AQ_NVM_LAST_CMD 0x01 |
1778 | #define I40E_AQ_NVM_FLASH_ONLY 0x80 | 1777 | #define I40E_AQ_NVM_FLASH_ONLY 0x80 |
1779 | u8 module_pointer; | 1778 | u8 module_pointer; |
1780 | __le16 length; | 1779 | __le16 length; |
1781 | __le32 offset; | 1780 | __le32 offset; |
1782 | __le32 addr_high; | 1781 | __le32 addr_high; |
1783 | __le32 addr_low; | 1782 | __le32 addr_low; |
1784 | }; | 1783 | }; |
1785 | 1784 | ||
1786 | I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); | 1785 | I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); |
1787 | 1786 | ||
1788 | /* NVM Config Read (indirect 0x0704) */ | 1787 | /* NVM Config Read (indirect 0x0704) */ |
1789 | struct i40e_aqc_nvm_config_read { | 1788 | struct i40e_aqc_nvm_config_read { |
1790 | __le16 cmd_flags; | 1789 | __le16 cmd_flags; |
1791 | #define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 | 1790 | #define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 |
1792 | #define ANVM_READ_SINGLE_FEATURE 0 | 1791 | #define ANVM_READ_SINGLE_FEATURE 0 |
1793 | #define ANVM_READ_MULTIPLE_FEATURES 1 | 1792 | #define ANVM_READ_MULTIPLE_FEATURES 1 |
1794 | __le16 element_count; | 1793 | __le16 element_count; |
1795 | __le16 element_id; /* Feature/field ID */ | 1794 | __le16 element_id; /* Feature/field ID */ |
1796 | u8 reserved[2]; | 1795 | u8 reserved[2]; |
1797 | __le32 address_high; | 1796 | __le32 address_high; |
1798 | __le32 address_low; | 1797 | __le32 address_low; |
1799 | }; | 1798 | }; |
1800 | 1799 | ||
1801 | I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); | 1800 | I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); |
1802 | 1801 | ||
1803 | /* NVM Config Write (indirect 0x0705) */ | 1802 | /* NVM Config Write (indirect 0x0705) */ |
1804 | struct i40e_aqc_nvm_config_write { | 1803 | struct i40e_aqc_nvm_config_write { |
1805 | __le16 cmd_flags; | 1804 | __le16 cmd_flags; |
1806 | __le16 element_count; | 1805 | __le16 element_count; |
1807 | u8 reserved[4]; | 1806 | u8 reserved[4]; |
1808 | __le32 address_high; | 1807 | __le32 address_high; |
1809 | __le32 address_low; | 1808 | __le32 address_low; |
1810 | }; | 1809 | }; |
1811 | 1810 | ||
1812 | I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); | 1811 | I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); |
@@ -1831,10 +1830,10 @@ struct i40e_aqc_nvm_config_data_immediate_field { | |||
1831 | * Send to Peer PF command (indirect 0x0803) | 1830 | * Send to Peer PF command (indirect 0x0803) |
1832 | */ | 1831 | */ |
1833 | struct i40e_aqc_pf_vf_message { | 1832 | struct i40e_aqc_pf_vf_message { |
1834 | __le32 id; | 1833 | __le32 id; |
1835 | u8 reserved[4]; | 1834 | u8 reserved[4]; |
1836 | __le32 addr_high; | 1835 | __le32 addr_high; |
1837 | __le32 addr_low; | 1836 | __le32 addr_low; |
1838 | }; | 1837 | }; |
1839 | 1838 | ||
1840 | I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); | 1839 | I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); |
@@ -1870,22 +1869,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); | |||
1870 | * uses i40e_aq_desc | 1869 | * uses i40e_aq_desc |
1871 | */ | 1870 | */ |
1872 | struct i40e_aqc_alternate_write_done { | 1871 | struct i40e_aqc_alternate_write_done { |
1873 | __le16 cmd_flags; | 1872 | __le16 cmd_flags; |
1874 | #define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 | 1873 | #define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 |
1875 | #define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 | 1874 | #define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 |
1876 | #define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 | 1875 | #define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 |
1877 | #define I40E_AQ_ALTERNATE_RESET_NEEDED 2 | 1876 | #define I40E_AQ_ALTERNATE_RESET_NEEDED 2 |
1878 | u8 reserved[14]; | 1877 | u8 reserved[14]; |
1879 | }; | 1878 | }; |
1880 | 1879 | ||
1881 | I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); | 1880 | I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); |
1882 | 1881 | ||
1883 | /* Set OEM mode (direct 0x0905) */ | 1882 | /* Set OEM mode (direct 0x0905) */ |
1884 | struct i40e_aqc_alternate_set_mode { | 1883 | struct i40e_aqc_alternate_set_mode { |
1885 | __le32 mode; | 1884 | __le32 mode; |
1886 | #define I40E_AQ_ALTERNATE_MODE_NONE 0 | 1885 | #define I40E_AQ_ALTERNATE_MODE_NONE 0 |
1887 | #define I40E_AQ_ALTERNATE_MODE_OEM 1 | 1886 | #define I40E_AQ_ALTERNATE_MODE_OEM 1 |
1888 | u8 reserved[12]; | 1887 | u8 reserved[12]; |
1889 | }; | 1888 | }; |
1890 | 1889 | ||
1891 | I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); | 1890 | I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); |
@@ -1896,33 +1895,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); | |||
1896 | 1895 | ||
1897 | /* Lan Queue Overflow Event (direct, 0x1001) */ | 1896 | /* Lan Queue Overflow Event (direct, 0x1001) */ |
1898 | struct i40e_aqc_lan_overflow { | 1897 | struct i40e_aqc_lan_overflow { |
1899 | __le32 prtdcb_rupto; | 1898 | __le32 prtdcb_rupto; |
1900 | __le32 otx_ctl; | 1899 | __le32 otx_ctl; |
1901 | u8 reserved[8]; | 1900 | u8 reserved[8]; |
1902 | }; | 1901 | }; |
1903 | 1902 | ||
1904 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); | 1903 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); |
1905 | 1904 | ||
1906 | /* Get LLDP MIB (indirect 0x0A00) */ | 1905 | /* Get LLDP MIB (indirect 0x0A00) */ |
1907 | struct i40e_aqc_lldp_get_mib { | 1906 | struct i40e_aqc_lldp_get_mib { |
1908 | u8 type; | 1907 | u8 type; |
1909 | u8 reserved1; | 1908 | u8 reserved1; |
1910 | #define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 | 1909 | #define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 |
1911 | #define I40E_AQ_LLDP_MIB_LOCAL 0x0 | 1910 | #define I40E_AQ_LLDP_MIB_LOCAL 0x0 |
1912 | #define I40E_AQ_LLDP_MIB_REMOTE 0x1 | 1911 | #define I40E_AQ_LLDP_MIB_REMOTE 0x1 |
1913 | #define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 | 1912 | #define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 |
1914 | #define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC | 1913 | #define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC |
1915 | #define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 | 1914 | #define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 |
1916 | #define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 | 1915 | #define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 |
1917 | #define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 | 1916 | #define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 |
1918 | #define I40E_AQ_LLDP_TX_SHIFT 0x4 | 1917 | #define I40E_AQ_LLDP_TX_SHIFT 0x4 |
1919 | #define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) | 1918 | #define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) |
1920 | /* TX pause flags use I40E_AQ_LINK_TX_* above */ | 1919 | /* TX pause flags use I40E_AQ_LINK_TX_* above */ |
1921 | __le16 local_len; | 1920 | __le16 local_len; |
1922 | __le16 remote_len; | 1921 | __le16 remote_len; |
1923 | u8 reserved2[2]; | 1922 | u8 reserved2[2]; |
1924 | __le32 addr_high; | 1923 | __le32 addr_high; |
1925 | __le32 addr_low; | 1924 | __le32 addr_low; |
1926 | }; | 1925 | }; |
1927 | 1926 | ||
1928 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); | 1927 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); |
@@ -1931,12 +1930,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); | |||
1931 | * also used for the event (with type in the command field) | 1930 | * also used for the event (with type in the command field) |
1932 | */ | 1931 | */ |
1933 | struct i40e_aqc_lldp_update_mib { | 1932 | struct i40e_aqc_lldp_update_mib { |
1934 | u8 command; | 1933 | u8 command; |
1935 | #define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 | 1934 | #define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 |
1936 | #define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 | 1935 | #define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 |
1937 | u8 reserved[7]; | 1936 | u8 reserved[7]; |
1938 | __le32 addr_high; | 1937 | __le32 addr_high; |
1939 | __le32 addr_low; | 1938 | __le32 addr_low; |
1940 | }; | 1939 | }; |
1941 | 1940 | ||
1942 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); | 1941 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); |
@@ -1945,35 +1944,35 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); | |||
1945 | * Delete LLDP TLV (indirect 0x0A04) | 1944 | * Delete LLDP TLV (indirect 0x0A04) |
1946 | */ | 1945 | */ |
1947 | struct i40e_aqc_lldp_add_tlv { | 1946 | struct i40e_aqc_lldp_add_tlv { |
1948 | u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ | 1947 | u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ |
1949 | u8 reserved1[1]; | 1948 | u8 reserved1[1]; |
1950 | __le16 len; | 1949 | __le16 len; |
1951 | u8 reserved2[4]; | 1950 | u8 reserved2[4]; |
1952 | __le32 addr_high; | 1951 | __le32 addr_high; |
1953 | __le32 addr_low; | 1952 | __le32 addr_low; |
1954 | }; | 1953 | }; |
1955 | 1954 | ||
1956 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); | 1955 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); |
1957 | 1956 | ||
1958 | /* Update LLDP TLV (indirect 0x0A03) */ | 1957 | /* Update LLDP TLV (indirect 0x0A03) */ |
1959 | struct i40e_aqc_lldp_update_tlv { | 1958 | struct i40e_aqc_lldp_update_tlv { |
1960 | u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ | 1959 | u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ |
1961 | u8 reserved; | 1960 | u8 reserved; |
1962 | __le16 old_len; | 1961 | __le16 old_len; |
1963 | __le16 new_offset; | 1962 | __le16 new_offset; |
1964 | __le16 new_len; | 1963 | __le16 new_len; |
1965 | __le32 addr_high; | 1964 | __le32 addr_high; |
1966 | __le32 addr_low; | 1965 | __le32 addr_low; |
1967 | }; | 1966 | }; |
1968 | 1967 | ||
1969 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); | 1968 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); |
1970 | 1969 | ||
1971 | /* Stop LLDP (direct 0x0A05) */ | 1970 | /* Stop LLDP (direct 0x0A05) */ |
1972 | struct i40e_aqc_lldp_stop { | 1971 | struct i40e_aqc_lldp_stop { |
1973 | u8 command; | 1972 | u8 command; |
1974 | #define I40E_AQ_LLDP_AGENT_STOP 0x0 | 1973 | #define I40E_AQ_LLDP_AGENT_STOP 0x0 |
1975 | #define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 | 1974 | #define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 |
1976 | u8 reserved[15]; | 1975 | u8 reserved[15]; |
1977 | }; | 1976 | }; |
1978 | 1977 | ||
1979 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); | 1978 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); |
@@ -1981,9 +1980,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); | |||
1981 | /* Start LLDP (direct 0x0A06) */ | 1980 | /* Start LLDP (direct 0x0A06) */ |
1982 | 1981 | ||
1983 | struct i40e_aqc_lldp_start { | 1982 | struct i40e_aqc_lldp_start { |
1984 | u8 command; | 1983 | u8 command; |
1985 | #define I40E_AQ_LLDP_AGENT_START 0x1 | 1984 | #define I40E_AQ_LLDP_AGENT_START 0x1 |
1986 | u8 reserved[15]; | 1985 | u8 reserved[15]; |
1987 | }; | 1986 | }; |
1988 | 1987 | ||
1989 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); | 1988 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); |
@@ -1994,44 +1993,44 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); | |||
1994 | 1993 | ||
1995 | /* Add Udp Tunnel command and completion (direct 0x0B00) */ | 1994 | /* Add Udp Tunnel command and completion (direct 0x0B00) */ |
1996 | struct i40e_aqc_add_udp_tunnel { | 1995 | struct i40e_aqc_add_udp_tunnel { |
1997 | __le16 udp_port; | 1996 | __le16 udp_port; |
1998 | u8 reserved0[3]; | 1997 | u8 reserved0[3]; |
1999 | u8 protocol_type; | 1998 | u8 protocol_type; |
2000 | #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 | 1999 | #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 |
2001 | #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 | 2000 | #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 |
2002 | #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 | 2001 | #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 |
2003 | u8 reserved1[10]; | 2002 | u8 reserved1[10]; |
2004 | }; | 2003 | }; |
2005 | 2004 | ||
2006 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); | 2005 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); |
2007 | 2006 | ||
2008 | struct i40e_aqc_add_udp_tunnel_completion { | 2007 | struct i40e_aqc_add_udp_tunnel_completion { |
2009 | __le16 udp_port; | 2008 | __le16 udp_port; |
2010 | u8 filter_entry_index; | 2009 | u8 filter_entry_index; |
2011 | u8 multiple_pfs; | 2010 | u8 multiple_pfs; |
2012 | #define I40E_AQC_SINGLE_PF 0x0 | 2011 | #define I40E_AQC_SINGLE_PF 0x0 |
2013 | #define I40E_AQC_MULTIPLE_PFS 0x1 | 2012 | #define I40E_AQC_MULTIPLE_PFS 0x1 |
2014 | u8 total_filters; | 2013 | u8 total_filters; |
2015 | u8 reserved[11]; | 2014 | u8 reserved[11]; |
2016 | }; | 2015 | }; |
2017 | 2016 | ||
2018 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); | 2017 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); |
2019 | 2018 | ||
2020 | /* remove UDP Tunnel command (0x0B01) */ | 2019 | /* remove UDP Tunnel command (0x0B01) */ |
2021 | struct i40e_aqc_remove_udp_tunnel { | 2020 | struct i40e_aqc_remove_udp_tunnel { |
2022 | u8 reserved[2]; | 2021 | u8 reserved[2]; |
2023 | u8 index; /* 0 to 15 */ | 2022 | u8 index; /* 0 to 15 */ |
2024 | u8 reserved2[13]; | 2023 | u8 reserved2[13]; |
2025 | }; | 2024 | }; |
2026 | 2025 | ||
2027 | I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); | 2026 | I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); |
2028 | 2027 | ||
2029 | struct i40e_aqc_del_udp_tunnel_completion { | 2028 | struct i40e_aqc_del_udp_tunnel_completion { |
2030 | __le16 udp_port; | 2029 | __le16 udp_port; |
2031 | u8 index; /* 0 to 15 */ | 2030 | u8 index; /* 0 to 15 */ |
2032 | u8 multiple_pfs; | 2031 | u8 multiple_pfs; |
2033 | u8 total_filters_used; | 2032 | u8 total_filters_used; |
2034 | u8 reserved1[11]; | 2033 | u8 reserved1[11]; |
2035 | }; | 2034 | }; |
2036 | 2035 | ||
2037 | I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); | 2036 | I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); |
@@ -2044,11 +2043,11 @@ struct i40e_aqc_tunnel_key_structure { | |||
2044 | u8 key1_len; /* 0 to 15 */ | 2043 | u8 key1_len; /* 0 to 15 */ |
2045 | u8 key2_len; /* 0 to 15 */ | 2044 | u8 key2_len; /* 0 to 15 */ |
2046 | u8 flags; | 2045 | u8 flags; |
2047 | #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 | 2046 | #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 |
2048 | /* response flags */ | 2047 | /* response flags */ |
2049 | #define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 | 2048 | #define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 |
2050 | #define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 | 2049 | #define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 |
2051 | #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 | 2050 | #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 |
2052 | u8 network_key_index; | 2051 | u8 network_key_index; |
2053 | #define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 | 2052 | #define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 |
2054 | #define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 | 2053 | #define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 |
@@ -2061,21 +2060,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); | |||
2061 | 2060 | ||
2062 | /* OEM mode commands (direct 0xFE0x) */ | 2061 | /* OEM mode commands (direct 0xFE0x) */ |
2063 | struct i40e_aqc_oem_param_change { | 2062 | struct i40e_aqc_oem_param_change { |
2064 | __le32 param_type; | 2063 | __le32 param_type; |
2065 | #define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 | 2064 | #define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 |
2066 | #define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 | 2065 | #define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 |
2067 | #define I40E_AQ_OEM_PARAM_MAC 2 | 2066 | #define I40E_AQ_OEM_PARAM_MAC 2 |
2068 | __le32 param_value1; | 2067 | __le32 param_value1; |
2069 | u8 param_value2[8]; | 2068 | u8 param_value2[8]; |
2070 | }; | 2069 | }; |
2071 | 2070 | ||
2072 | I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); | 2071 | I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); |
2073 | 2072 | ||
2074 | struct i40e_aqc_oem_state_change { | 2073 | struct i40e_aqc_oem_state_change { |
2075 | __le32 state; | 2074 | __le32 state; |
2076 | #define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 | 2075 | #define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 |
2077 | #define I40E_AQ_OEM_STATE_LINK_UP 0x1 | 2076 | #define I40E_AQ_OEM_STATE_LINK_UP 0x1 |
2078 | u8 reserved[12]; | 2077 | u8 reserved[12]; |
2079 | }; | 2078 | }; |
2080 | 2079 | ||
2081 | I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); | 2080 | I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); |
@@ -2087,18 +2086,18 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); | |||
2087 | /* set test more (0xFF01, internal) */ | 2086 | /* set test more (0xFF01, internal) */ |
2088 | 2087 | ||
2089 | struct i40e_acq_set_test_mode { | 2088 | struct i40e_acq_set_test_mode { |
2090 | u8 mode; | 2089 | u8 mode; |
2091 | #define I40E_AQ_TEST_PARTIAL 0 | 2090 | #define I40E_AQ_TEST_PARTIAL 0 |
2092 | #define I40E_AQ_TEST_FULL 1 | 2091 | #define I40E_AQ_TEST_FULL 1 |
2093 | #define I40E_AQ_TEST_NVM 2 | 2092 | #define I40E_AQ_TEST_NVM 2 |
2094 | u8 reserved[3]; | 2093 | u8 reserved[3]; |
2095 | u8 command; | 2094 | u8 command; |
2096 | #define I40E_AQ_TEST_OPEN 0 | 2095 | #define I40E_AQ_TEST_OPEN 0 |
2097 | #define I40E_AQ_TEST_CLOSE 1 | 2096 | #define I40E_AQ_TEST_CLOSE 1 |
2098 | #define I40E_AQ_TEST_INC 2 | 2097 | #define I40E_AQ_TEST_INC 2 |
2099 | u8 reserved2[3]; | 2098 | u8 reserved2[3]; |
2100 | __le32 address_high; | 2099 | __le32 address_high; |
2101 | __le32 address_low; | 2100 | __le32 address_low; |
2102 | }; | 2101 | }; |
2103 | 2102 | ||
2104 | I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); | 2103 | I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); |
@@ -2151,21 +2150,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); | |||
2151 | #define I40E_AQ_CLUSTER_ID_ALTRAM 11 | 2150 | #define I40E_AQ_CLUSTER_ID_ALTRAM 11 |
2152 | 2151 | ||
2153 | struct i40e_aqc_debug_dump_internals { | 2152 | struct i40e_aqc_debug_dump_internals { |
2154 | u8 cluster_id; | 2153 | u8 cluster_id; |
2155 | u8 table_id; | 2154 | u8 table_id; |
2156 | __le16 data_size; | 2155 | __le16 data_size; |
2157 | __le32 idx; | 2156 | __le32 idx; |
2158 | __le32 address_high; | 2157 | __le32 address_high; |
2159 | __le32 address_low; | 2158 | __le32 address_low; |
2160 | }; | 2159 | }; |
2161 | 2160 | ||
2162 | I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); | 2161 | I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); |
2163 | 2162 | ||
2164 | struct i40e_aqc_debug_modify_internals { | 2163 | struct i40e_aqc_debug_modify_internals { |
2165 | u8 cluster_id; | 2164 | u8 cluster_id; |
2166 | u8 cluster_specific_params[7]; | 2165 | u8 cluster_specific_params[7]; |
2167 | __le32 address_high; | 2166 | __le32 address_high; |
2168 | __le32 address_low; | 2167 | __le32 address_low; |
2169 | }; | 2168 | }; |
2170 | 2169 | ||
2171 | I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); | 2170 | I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 30056b25d94e..c49416cfe616 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c | |||
@@ -50,6 +50,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) | |||
50 | case I40E_DEV_ID_QSFP_A: | 50 | case I40E_DEV_ID_QSFP_A: |
51 | case I40E_DEV_ID_QSFP_B: | 51 | case I40E_DEV_ID_QSFP_B: |
52 | case I40E_DEV_ID_QSFP_C: | 52 | case I40E_DEV_ID_QSFP_C: |
53 | case I40E_DEV_ID_10G_BASE_T: | ||
53 | hw->mac.type = I40E_MAC_XL710; | 54 | hw->mac.type = I40E_MAC_XL710; |
54 | break; | 55 | break; |
55 | case I40E_DEV_ID_VF: | 56 | case I40E_DEV_ID_VF: |
@@ -1420,6 +1421,33 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse) | |||
1420 | } | 1421 | } |
1421 | 1422 | ||
1422 | /** | 1423 | /** |
1424 | * i40e_aq_set_phy_int_mask | ||
1425 | * @hw: pointer to the hw struct | ||
1426 | * @mask: interrupt mask to be set | ||
1427 | * @cmd_details: pointer to command details structure or NULL | ||
1428 | * | ||
1429 | * Set link interrupt mask. | ||
1430 | **/ | ||
1431 | i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, | ||
1432 | u16 mask, | ||
1433 | struct i40e_asq_cmd_details *cmd_details) | ||
1434 | { | ||
1435 | struct i40e_aq_desc desc; | ||
1436 | struct i40e_aqc_set_phy_int_mask *cmd = | ||
1437 | (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; | ||
1438 | i40e_status status; | ||
1439 | |||
1440 | i40e_fill_default_direct_cmd_desc(&desc, | ||
1441 | i40e_aqc_opc_set_phy_int_mask); | ||
1442 | |||
1443 | cmd->event_mask = cpu_to_le16(mask); | ||
1444 | |||
1445 | status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); | ||
1446 | |||
1447 | return status; | ||
1448 | } | ||
1449 | |||
1450 | /** | ||
1423 | * i40e_aq_add_vsi | 1451 | * i40e_aq_add_vsi |
1424 | * @hw: pointer to the hw struct | 1452 | * @hw: pointer to the hw struct |
1425 | * @vsi_ctx: pointer to a vsi context struct | 1453 | * @vsi_ctx: pointer to a vsi context struct |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 1dda467ae1ac..12adc08c54dc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |||
@@ -264,6 +264,14 @@ static int i40e_get_settings(struct net_device *netdev, | |||
264 | ecmd->supported = SUPPORTED_10000baseKR_Full; | 264 | ecmd->supported = SUPPORTED_10000baseKR_Full; |
265 | ecmd->advertising = ADVERTISED_10000baseKR_Full; | 265 | ecmd->advertising = ADVERTISED_10000baseKR_Full; |
266 | break; | 266 | break; |
267 | case I40E_DEV_ID_10G_BASE_T: | ||
268 | ecmd->supported = SUPPORTED_10000baseT_Full | | ||
269 | SUPPORTED_1000baseT_Full | | ||
270 | SUPPORTED_100baseT_Full; | ||
271 | ecmd->advertising = ADVERTISED_10000baseT_Full | | ||
272 | ADVERTISED_1000baseT_Full | | ||
273 | ADVERTISED_100baseT_Full; | ||
274 | break; | ||
267 | default: | 275 | default: |
268 | /* all the rest are 10G/1G */ | 276 | /* all the rest are 10G/1G */ |
269 | ecmd->supported = SUPPORTED_10000baseT_Full | | 277 | ecmd->supported = SUPPORTED_10000baseT_Full | |
@@ -322,9 +330,13 @@ static int i40e_get_settings(struct net_device *netdev, | |||
322 | case I40E_PHY_TYPE_10GBASE_CR1: | 330 | case I40E_PHY_TYPE_10GBASE_CR1: |
323 | case I40E_PHY_TYPE_10GBASE_T: | 331 | case I40E_PHY_TYPE_10GBASE_T: |
324 | ecmd->supported = SUPPORTED_Autoneg | | 332 | ecmd->supported = SUPPORTED_Autoneg | |
325 | SUPPORTED_10000baseT_Full; | 333 | SUPPORTED_10000baseT_Full | |
334 | SUPPORTED_1000baseT_Full | | ||
335 | SUPPORTED_100baseT_Full; | ||
326 | ecmd->advertising = ADVERTISED_Autoneg | | 336 | ecmd->advertising = ADVERTISED_Autoneg | |
327 | ADVERTISED_10000baseT_Full; | 337 | ADVERTISED_10000baseT_Full | |
338 | ADVERTISED_1000baseT_Full | | ||
339 | ADVERTISED_100baseT_Full; | ||
328 | break; | 340 | break; |
329 | case I40E_PHY_TYPE_XAUI: | 341 | case I40E_PHY_TYPE_XAUI: |
330 | case I40E_PHY_TYPE_XFI: | 342 | case I40E_PHY_TYPE_XFI: |
@@ -335,14 +347,22 @@ static int i40e_get_settings(struct net_device *netdev, | |||
335 | case I40E_PHY_TYPE_1000BASE_KX: | 347 | case I40E_PHY_TYPE_1000BASE_KX: |
336 | case I40E_PHY_TYPE_1000BASE_T: | 348 | case I40E_PHY_TYPE_1000BASE_T: |
337 | ecmd->supported = SUPPORTED_Autoneg | | 349 | ecmd->supported = SUPPORTED_Autoneg | |
338 | SUPPORTED_1000baseT_Full; | 350 | SUPPORTED_10000baseT_Full | |
351 | SUPPORTED_1000baseT_Full | | ||
352 | SUPPORTED_100baseT_Full; | ||
339 | ecmd->advertising = ADVERTISED_Autoneg | | 353 | ecmd->advertising = ADVERTISED_Autoneg | |
340 | ADVERTISED_1000baseT_Full; | 354 | ADVERTISED_10000baseT_Full | |
355 | ADVERTISED_1000baseT_Full | | ||
356 | ADVERTISED_100baseT_Full; | ||
341 | break; | 357 | break; |
342 | case I40E_PHY_TYPE_100BASE_TX: | 358 | case I40E_PHY_TYPE_100BASE_TX: |
343 | ecmd->supported = SUPPORTED_Autoneg | | 359 | ecmd->supported = SUPPORTED_Autoneg | |
360 | SUPPORTED_10000baseT_Full | | ||
361 | SUPPORTED_1000baseT_Full | | ||
344 | SUPPORTED_100baseT_Full; | 362 | SUPPORTED_100baseT_Full; |
345 | ecmd->advertising = ADVERTISED_Autoneg | | 363 | ecmd->advertising = ADVERTISED_Autoneg | |
364 | ADVERTISED_10000baseT_Full | | ||
365 | ADVERTISED_1000baseT_Full | | ||
346 | ADVERTISED_100baseT_Full; | 366 | ADVERTISED_100baseT_Full; |
347 | break; | 367 | break; |
348 | case I40E_PHY_TYPE_SGMII: | 368 | case I40E_PHY_TYPE_SGMII: |
@@ -426,6 +446,9 @@ no_valid_phy_type: | |||
426 | case I40E_LINK_SPEED_1GB: | 446 | case I40E_LINK_SPEED_1GB: |
427 | ethtool_cmd_speed_set(ecmd, SPEED_1000); | 447 | ethtool_cmd_speed_set(ecmd, SPEED_1000); |
428 | break; | 448 | break; |
449 | case I40E_LINK_SPEED_100MB: | ||
450 | ethtool_cmd_speed_set(ecmd, SPEED_100); | ||
451 | break; | ||
429 | default: | 452 | default: |
430 | break; | 453 | break; |
431 | } | 454 | } |
@@ -528,7 +551,7 @@ static int i40e_set_settings(struct net_device *netdev, | |||
528 | } | 551 | } |
529 | /* If autoneg is currently enabled */ | 552 | /* If autoneg is currently enabled */ |
530 | if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { | 553 | if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { |
531 | config.abilities = abilities.abilities | | 554 | config.abilities = abilities.abilities & |
532 | ~I40E_AQ_PHY_ENABLE_AN; | 555 | ~I40E_AQ_PHY_ENABLE_AN; |
533 | change = true; | 556 | change = true; |
534 | } | 557 | } |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index c3a7f4a4b775..834c9ffc6267 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] = | |||
39 | 39 | ||
40 | #define DRV_VERSION_MAJOR 1 | 40 | #define DRV_VERSION_MAJOR 1 |
41 | #define DRV_VERSION_MINOR 0 | 41 | #define DRV_VERSION_MINOR 0 |
42 | #define DRV_VERSION_BUILD 11 | 42 | #define DRV_VERSION_BUILD 21 |
43 | #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ | 43 | #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ |
44 | __stringify(DRV_VERSION_MINOR) "." \ | 44 | __stringify(DRV_VERSION_MINOR) "." \ |
45 | __stringify(DRV_VERSION_BUILD) DRV_KERN | 45 | __stringify(DRV_VERSION_BUILD) DRV_KERN |
@@ -74,6 +74,7 @@ static const struct pci_device_id i40e_pci_tbl[] = { | |||
74 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, | 74 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, |
75 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, | 75 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, |
76 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, | 76 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, |
77 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, | ||
77 | /* required last entry */ | 78 | /* required last entry */ |
78 | {0, } | 79 | {0, } |
79 | }; | 80 | }; |
@@ -812,7 +813,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) | |||
812 | struct i40e_eth_stats *oes; | 813 | struct i40e_eth_stats *oes; |
813 | struct i40e_eth_stats *es; /* device's eth stats */ | 814 | struct i40e_eth_stats *es; /* device's eth stats */ |
814 | u32 tx_restart, tx_busy; | 815 | u32 tx_restart, tx_busy; |
816 | struct i40e_ring *p; | ||
815 | u32 rx_page, rx_buf; | 817 | u32 rx_page, rx_buf; |
818 | u64 bytes, packets; | ||
819 | unsigned int start; | ||
816 | u64 rx_p, rx_b; | 820 | u64 rx_p, rx_b; |
817 | u64 tx_p, tx_b; | 821 | u64 tx_p, tx_b; |
818 | u16 q; | 822 | u16 q; |
@@ -836,10 +840,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) | |||
836 | rx_buf = 0; | 840 | rx_buf = 0; |
837 | rcu_read_lock(); | 841 | rcu_read_lock(); |
838 | for (q = 0; q < vsi->num_queue_pairs; q++) { | 842 | for (q = 0; q < vsi->num_queue_pairs; q++) { |
839 | struct i40e_ring *p; | ||
840 | u64 bytes, packets; | ||
841 | unsigned int start; | ||
842 | |||
843 | /* locate Tx ring */ | 843 | /* locate Tx ring */ |
844 | p = ACCESS_ONCE(vsi->tx_rings[q]); | 844 | p = ACCESS_ONCE(vsi->tx_rings[q]); |
845 | 845 | ||
@@ -3440,7 +3440,7 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) | |||
3440 | if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) | 3440 | if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) |
3441 | break; | 3441 | break; |
3442 | 3442 | ||
3443 | udelay(10); | 3443 | usleep_range(10, 20); |
3444 | } | 3444 | } |
3445 | if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) | 3445 | if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) |
3446 | return -ETIMEDOUT; | 3446 | return -ETIMEDOUT; |
@@ -3466,7 +3466,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) | |||
3466 | /* warn the TX unit of coming changes */ | 3466 | /* warn the TX unit of coming changes */ |
3467 | i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); | 3467 | i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); |
3468 | if (!enable) | 3468 | if (!enable) |
3469 | udelay(10); | 3469 | usleep_range(10, 20); |
3470 | 3470 | ||
3471 | for (j = 0; j < 50; j++) { | 3471 | for (j = 0; j < 50; j++) { |
3472 | tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); | 3472 | tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); |
@@ -3526,7 +3526,7 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) | |||
3526 | if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) | 3526 | if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) |
3527 | break; | 3527 | break; |
3528 | 3528 | ||
3529 | udelay(10); | 3529 | usleep_range(10, 20); |
3530 | } | 3530 | } |
3531 | if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) | 3531 | if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) |
3532 | return -ETIMEDOUT; | 3532 | return -ETIMEDOUT; |
@@ -4449,6 +4449,9 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) | |||
4449 | case I40E_LINK_SPEED_1GB: | 4449 | case I40E_LINK_SPEED_1GB: |
4450 | strlcpy(speed, "1000 Mbps", SPEED_SIZE); | 4450 | strlcpy(speed, "1000 Mbps", SPEED_SIZE); |
4451 | break; | 4451 | break; |
4452 | case I40E_LINK_SPEED_100MB: | ||
4453 | strncpy(speed, "100 Mbps", SPEED_SIZE); | ||
4454 | break; | ||
4452 | default: | 4455 | default: |
4453 | break; | 4456 | break; |
4454 | } | 4457 | } |
@@ -4479,12 +4482,8 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) | |||
4479 | static int i40e_up_complete(struct i40e_vsi *vsi) | 4482 | static int i40e_up_complete(struct i40e_vsi *vsi) |
4480 | { | 4483 | { |
4481 | struct i40e_pf *pf = vsi->back; | 4484 | struct i40e_pf *pf = vsi->back; |
4482 | u8 set_fc_aq_fail = 0; | ||
4483 | int err; | 4485 | int err; |
4484 | 4486 | ||
4485 | /* force flow control off */ | ||
4486 | i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); | ||
4487 | |||
4488 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) | 4487 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) |
4489 | i40e_vsi_configure_msix(vsi); | 4488 | i40e_vsi_configure_msix(vsi); |
4490 | else | 4489 | else |
@@ -5354,10 +5353,14 @@ static void i40e_link_event(struct i40e_pf *pf) | |||
5354 | { | 5353 | { |
5355 | bool new_link, old_link; | 5354 | bool new_link, old_link; |
5356 | 5355 | ||
5357 | new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP); | 5356 | /* set this to force the get_link_status call to refresh state */ |
5357 | pf->hw.phy.get_link_info = true; | ||
5358 | |||
5358 | old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); | 5359 | old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); |
5360 | new_link = i40e_get_link_status(&pf->hw); | ||
5359 | 5361 | ||
5360 | if (new_link == old_link) | 5362 | if (new_link == old_link && |
5363 | new_link == netif_carrier_ok(pf->vsi[pf->lan_vsi]->netdev)) | ||
5361 | return; | 5364 | return; |
5362 | if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) | 5365 | if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) |
5363 | i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link); | 5366 | i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link); |
@@ -5525,33 +5528,20 @@ static void i40e_handle_link_event(struct i40e_pf *pf, | |||
5525 | memcpy(&pf->hw.phy.link_info_old, hw_link_info, | 5528 | memcpy(&pf->hw.phy.link_info_old, hw_link_info, |
5526 | sizeof(pf->hw.phy.link_info_old)); | 5529 | sizeof(pf->hw.phy.link_info_old)); |
5527 | 5530 | ||
5531 | /* Do a new status request to re-enable LSE reporting | ||
5532 | * and load new status information into the hw struct | ||
5533 | * This completely ignores any state information | ||
5534 | * in the ARQ event info, instead choosing to always | ||
5535 | * issue the AQ update link status command. | ||
5536 | */ | ||
5537 | i40e_link_event(pf); | ||
5538 | |||
5528 | /* check for unqualified module, if link is down */ | 5539 | /* check for unqualified module, if link is down */ |
5529 | if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && | 5540 | if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && |
5530 | (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && | 5541 | (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && |
5531 | (!(status->link_info & I40E_AQ_LINK_UP))) | 5542 | (!(status->link_info & I40E_AQ_LINK_UP))) |
5532 | dev_err(&pf->pdev->dev, | 5543 | dev_err(&pf->pdev->dev, |
5533 | "The driver failed to link because an unqualified module was detected.\n"); | 5544 | "The driver failed to link because an unqualified module was detected.\n"); |
5534 | |||
5535 | /* update link status */ | ||
5536 | hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type; | ||
5537 | hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed; | ||
5538 | hw_link_info->link_info = status->link_info; | ||
5539 | hw_link_info->an_info = status->an_info; | ||
5540 | hw_link_info->ext_info = status->ext_info; | ||
5541 | hw_link_info->lse_enable = | ||
5542 | le16_to_cpu(status->command_flags) & | ||
5543 | I40E_AQ_LSE_ENABLE; | ||
5544 | |||
5545 | /* process the event */ | ||
5546 | i40e_link_event(pf); | ||
5547 | |||
5548 | /* Do a new status request to re-enable LSE reporting | ||
5549 | * and load new status information into the hw struct, | ||
5550 | * then see if the status changed while processing the | ||
5551 | * initial event. | ||
5552 | */ | ||
5553 | i40e_update_link_info(&pf->hw, true); | ||
5554 | i40e_link_event(pf); | ||
5555 | } | 5545 | } |
5556 | 5546 | ||
5557 | /** | 5547 | /** |
@@ -5967,6 +5957,7 @@ static void i40e_send_version(struct i40e_pf *pf) | |||
5967 | static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) | 5957 | static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) |
5968 | { | 5958 | { |
5969 | struct i40e_hw *hw = &pf->hw; | 5959 | struct i40e_hw *hw = &pf->hw; |
5960 | u8 set_fc_aq_fail = 0; | ||
5970 | i40e_status ret; | 5961 | i40e_status ret; |
5971 | u32 v; | 5962 | u32 v; |
5972 | 5963 | ||
@@ -6038,6 +6029,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) | |||
6038 | if (ret) | 6029 | if (ret) |
6039 | goto end_core_reset; | 6030 | goto end_core_reset; |
6040 | 6031 | ||
6032 | /* driver is only interested in link up/down and module qualification | ||
6033 | * reports from firmware | ||
6034 | */ | ||
6035 | ret = i40e_aq_set_phy_int_mask(&pf->hw, | ||
6036 | I40E_AQ_EVENT_LINK_UPDOWN | | ||
6037 | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); | ||
6038 | if (ret) | ||
6039 | dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret); | ||
6040 | |||
6041 | /* make sure our flow control settings are restored */ | ||
6042 | ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); | ||
6043 | if (ret) | ||
6044 | dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret); | ||
6045 | |||
6041 | /* Rebuild the VSIs and VEBs that existed before reset. | 6046 | /* Rebuild the VSIs and VEBs that existed before reset. |
6042 | * They are still in our local switch element arrays, so only | 6047 | * They are still in our local switch element arrays, so only |
6043 | * need to rebuild the switch model in the HW. | 6048 | * need to rebuild the switch model in the HW. |
@@ -6092,6 +6097,13 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) | |||
6092 | } | 6097 | } |
6093 | } | 6098 | } |
6094 | 6099 | ||
6100 | msleep(75); | ||
6101 | ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); | ||
6102 | if (ret) { | ||
6103 | dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", | ||
6104 | pf->hw.aq.asq_last_status); | ||
6105 | } | ||
6106 | |||
6095 | /* reinit the misc interrupt */ | 6107 | /* reinit the misc interrupt */ |
6096 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) | 6108 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) |
6097 | ret = i40e_setup_misc_vector(pf); | 6109 | ret = i40e_setup_misc_vector(pf); |
@@ -6305,6 +6317,8 @@ static void i40e_service_task(struct work_struct *work) | |||
6305 | #endif | 6317 | #endif |
6306 | i40e_clean_adminq_subtask(pf); | 6318 | i40e_clean_adminq_subtask(pf); |
6307 | 6319 | ||
6320 | i40e_link_event(pf); | ||
6321 | |||
6308 | i40e_service_event_complete(pf); | 6322 | i40e_service_event_complete(pf); |
6309 | 6323 | ||
6310 | /* If the tasks have taken longer than one timer cycle or there | 6324 | /* If the tasks have taken longer than one timer cycle or there |
@@ -8719,6 +8733,14 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) | |||
8719 | pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & | 8733 | pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & |
8720 | I40E_AQ_AN_COMPLETED) ? true : false); | 8734 | I40E_AQ_AN_COMPLETED) ? true : false); |
8721 | 8735 | ||
8736 | /* fill in link information and enable LSE reporting */ | ||
8737 | i40e_update_link_info(&pf->hw, true); | ||
8738 | i40e_link_event(pf); | ||
8739 | |||
8740 | /* Initialize user-specific link properties */ | ||
8741 | pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & | ||
8742 | I40E_AQ_AN_COMPLETED) ? true : false); | ||
8743 | |||
8722 | i40e_ptp_init(pf); | 8744 | i40e_ptp_init(pf); |
8723 | 8745 | ||
8724 | return ret; | 8746 | return ret; |
@@ -9158,6 +9180,22 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
9158 | } | 9180 | } |
9159 | } | 9181 | } |
9160 | 9182 | ||
9183 | /* driver is only interested in link up/down and module qualification | ||
9184 | * reports from firmware | ||
9185 | */ | ||
9186 | err = i40e_aq_set_phy_int_mask(&pf->hw, | ||
9187 | I40E_AQ_EVENT_LINK_UPDOWN | | ||
9188 | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); | ||
9189 | if (err) | ||
9190 | dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); | ||
9191 | |||
9192 | msleep(75); | ||
9193 | err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); | ||
9194 | if (err) { | ||
9195 | dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", | ||
9196 | pf->hw.aq.asq_last_status); | ||
9197 | } | ||
9198 | |||
9161 | /* The main driver is (mostly) up and happy. We need to set this state | 9199 | /* The main driver is (mostly) up and happy. We need to set this state |
9162 | * before setting up the misc vector or we get a race and the vector | 9200 | * before setting up the misc vector or we get a race and the vector |
9163 | * ends up disabled forever. | 9201 | * ends up disabled forever. |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 0988b5c1fe87..246c27869a63 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h | |||
@@ -84,6 +84,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, | |||
84 | struct i40e_asq_cmd_details *cmd_details); | 84 | struct i40e_asq_cmd_details *cmd_details); |
85 | enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, | 85 | enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, |
86 | bool atomic_reset); | 86 | bool atomic_reset); |
87 | i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, | ||
88 | struct i40e_asq_cmd_details *cmd_details); | ||
87 | i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, | 89 | i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, |
88 | struct i40e_asq_cmd_details *cmd_details); | 90 | struct i40e_asq_cmd_details *cmd_details); |
89 | i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, | 91 | i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index ce04d9093db6..3a237c3d0dcb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h | |||
@@ -43,6 +43,7 @@ | |||
43 | #define I40E_DEV_ID_QSFP_A 0x1583 | 43 | #define I40E_DEV_ID_QSFP_A 0x1583 |
44 | #define I40E_DEV_ID_QSFP_B 0x1584 | 44 | #define I40E_DEV_ID_QSFP_B 0x1584 |
45 | #define I40E_DEV_ID_QSFP_C 0x1585 | 45 | #define I40E_DEV_ID_QSFP_C 0x1585 |
46 | #define I40E_DEV_ID_10G_BASE_T 0x1586 | ||
46 | #define I40E_DEV_ID_VF 0x154C | 47 | #define I40E_DEV_ID_VF 0x154C |
47 | #define I40E_DEV_ID_VF_HV 0x1571 | 48 | #define I40E_DEV_ID_VF_HV 0x1571 |
48 | 49 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 4eeed267e4b7..fff3c276736b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | |||
@@ -674,7 +674,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) | |||
674 | * that the requested op was completed | 674 | * that the requested op was completed |
675 | * successfully | 675 | * successfully |
676 | */ | 676 | */ |
677 | udelay(10); | 677 | usleep_range(10, 20); |
678 | reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); | 678 | reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); |
679 | if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { | 679 | if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { |
680 | rsd = true; | 680 | rsd = true; |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index e656ea7a7920..ff1b16370da9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | |||
@@ -33,8 +33,8 @@ | |||
33 | * This file needs to comply with the Linux Kernel coding style. | 33 | * This file needs to comply with the Linux Kernel coding style. |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #define I40E_FW_API_VERSION_MAJOR 0x0001 | 36 | #define I40E_FW_API_VERSION_MAJOR 0x0001 |
37 | #define I40E_FW_API_VERSION_MINOR 0x0002 | 37 | #define I40E_FW_API_VERSION_MINOR 0x0002 |
38 | #define I40E_FW_API_VERSION_A0_MINOR 0x0000 | 38 | #define I40E_FW_API_VERSION_A0_MINOR 0x0000 |
39 | 39 | ||
40 | struct i40e_aq_desc { | 40 | struct i40e_aq_desc { |
@@ -67,216 +67,216 @@ struct i40e_aq_desc { | |||
67 | */ | 67 | */ |
68 | 68 | ||
69 | /* command flags and offsets*/ | 69 | /* command flags and offsets*/ |
70 | #define I40E_AQ_FLAG_DD_SHIFT 0 | 70 | #define I40E_AQ_FLAG_DD_SHIFT 0 |
71 | #define I40E_AQ_FLAG_CMP_SHIFT 1 | 71 | #define I40E_AQ_FLAG_CMP_SHIFT 1 |
72 | #define I40E_AQ_FLAG_ERR_SHIFT 2 | 72 | #define I40E_AQ_FLAG_ERR_SHIFT 2 |
73 | #define I40E_AQ_FLAG_VFE_SHIFT 3 | 73 | #define I40E_AQ_FLAG_VFE_SHIFT 3 |
74 | #define I40E_AQ_FLAG_LB_SHIFT 9 | 74 | #define I40E_AQ_FLAG_LB_SHIFT 9 |
75 | #define I40E_AQ_FLAG_RD_SHIFT 10 | 75 | #define I40E_AQ_FLAG_RD_SHIFT 10 |
76 | #define I40E_AQ_FLAG_VFC_SHIFT 11 | 76 | #define I40E_AQ_FLAG_VFC_SHIFT 11 |
77 | #define I40E_AQ_FLAG_BUF_SHIFT 12 | 77 | #define I40E_AQ_FLAG_BUF_SHIFT 12 |
78 | #define I40E_AQ_FLAG_SI_SHIFT 13 | 78 | #define I40E_AQ_FLAG_SI_SHIFT 13 |
79 | #define I40E_AQ_FLAG_EI_SHIFT 14 | 79 | #define I40E_AQ_FLAG_EI_SHIFT 14 |
80 | #define I40E_AQ_FLAG_FE_SHIFT 15 | 80 | #define I40E_AQ_FLAG_FE_SHIFT 15 |
81 | 81 | ||
82 | #define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ | 82 | #define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ |
83 | #define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ | 83 | #define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ |
84 | #define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ | 84 | #define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ |
85 | #define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ | 85 | #define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ |
86 | #define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ | 86 | #define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ |
87 | #define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ | 87 | #define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ |
88 | #define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ | 88 | #define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ |
89 | #define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ | 89 | #define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ |
90 | #define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ | 90 | #define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ |
91 | #define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ | 91 | #define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ |
92 | #define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ | 92 | #define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ |
93 | 93 | ||
94 | /* error codes */ | 94 | /* error codes */ |
95 | enum i40e_admin_queue_err { | 95 | enum i40e_admin_queue_err { |
96 | I40E_AQ_RC_OK = 0, /* success */ | 96 | I40E_AQ_RC_OK = 0, /* success */ |
97 | I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ | 97 | I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ |
98 | I40E_AQ_RC_ENOENT = 2, /* No such element */ | 98 | I40E_AQ_RC_ENOENT = 2, /* No such element */ |
99 | I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ | 99 | I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ |
100 | I40E_AQ_RC_EINTR = 4, /* operation interrupted */ | 100 | I40E_AQ_RC_EINTR = 4, /* operation interrupted */ |
101 | I40E_AQ_RC_EIO = 5, /* I/O error */ | 101 | I40E_AQ_RC_EIO = 5, /* I/O error */ |
102 | I40E_AQ_RC_ENXIO = 6, /* No such resource */ | 102 | I40E_AQ_RC_ENXIO = 6, /* No such resource */ |
103 | I40E_AQ_RC_E2BIG = 7, /* Arg too long */ | 103 | I40E_AQ_RC_E2BIG = 7, /* Arg too long */ |
104 | I40E_AQ_RC_EAGAIN = 8, /* Try again */ | 104 | I40E_AQ_RC_EAGAIN = 8, /* Try again */ |
105 | I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ | 105 | I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ |
106 | I40E_AQ_RC_EACCES = 10, /* Permission denied */ | 106 | I40E_AQ_RC_EACCES = 10, /* Permission denied */ |
107 | I40E_AQ_RC_EFAULT = 11, /* Bad address */ | 107 | I40E_AQ_RC_EFAULT = 11, /* Bad address */ |
108 | I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ | 108 | I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ |
109 | I40E_AQ_RC_EEXIST = 13, /* object already exists */ | 109 | I40E_AQ_RC_EEXIST = 13, /* object already exists */ |
110 | I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ | 110 | I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ |
111 | I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ | 111 | I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ |
112 | I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ | 112 | I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ |
113 | I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ | 113 | I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ |
114 | I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ | 114 | I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ |
115 | I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */ | 115 | I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ |
116 | I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ | 116 | I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ |
117 | I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ | 117 | I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ |
118 | I40E_AQ_RC_EFBIG = 22, /* File too large */ | 118 | I40E_AQ_RC_EFBIG = 22, /* File too large */ |
119 | }; | 119 | }; |
120 | 120 | ||
121 | /* Admin Queue command opcodes */ | 121 | /* Admin Queue command opcodes */ |
122 | enum i40e_admin_queue_opc { | 122 | enum i40e_admin_queue_opc { |
123 | /* aq commands */ | 123 | /* aq commands */ |
124 | i40e_aqc_opc_get_version = 0x0001, | 124 | i40e_aqc_opc_get_version = 0x0001, |
125 | i40e_aqc_opc_driver_version = 0x0002, | 125 | i40e_aqc_opc_driver_version = 0x0002, |
126 | i40e_aqc_opc_queue_shutdown = 0x0003, | 126 | i40e_aqc_opc_queue_shutdown = 0x0003, |
127 | i40e_aqc_opc_set_pf_context = 0x0004, | 127 | i40e_aqc_opc_set_pf_context = 0x0004, |
128 | 128 | ||
129 | /* resource ownership */ | 129 | /* resource ownership */ |
130 | i40e_aqc_opc_request_resource = 0x0008, | 130 | i40e_aqc_opc_request_resource = 0x0008, |
131 | i40e_aqc_opc_release_resource = 0x0009, | 131 | i40e_aqc_opc_release_resource = 0x0009, |
132 | 132 | ||
133 | i40e_aqc_opc_list_func_capabilities = 0x000A, | 133 | i40e_aqc_opc_list_func_capabilities = 0x000A, |
134 | i40e_aqc_opc_list_dev_capabilities = 0x000B, | 134 | i40e_aqc_opc_list_dev_capabilities = 0x000B, |
135 | 135 | ||
136 | i40e_aqc_opc_set_cppm_configuration = 0x0103, | 136 | i40e_aqc_opc_set_cppm_configuration = 0x0103, |
137 | i40e_aqc_opc_set_arp_proxy_entry = 0x0104, | 137 | i40e_aqc_opc_set_arp_proxy_entry = 0x0104, |
138 | i40e_aqc_opc_set_ns_proxy_entry = 0x0105, | 138 | i40e_aqc_opc_set_ns_proxy_entry = 0x0105, |
139 | 139 | ||
140 | /* LAA */ | 140 | /* LAA */ |
141 | i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ | 141 | i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ |
142 | i40e_aqc_opc_mac_address_read = 0x0107, | 142 | i40e_aqc_opc_mac_address_read = 0x0107, |
143 | i40e_aqc_opc_mac_address_write = 0x0108, | 143 | i40e_aqc_opc_mac_address_write = 0x0108, |
144 | 144 | ||
145 | /* PXE */ | 145 | /* PXE */ |
146 | i40e_aqc_opc_clear_pxe_mode = 0x0110, | 146 | i40e_aqc_opc_clear_pxe_mode = 0x0110, |
147 | 147 | ||
148 | /* internal switch commands */ | 148 | /* internal switch commands */ |
149 | i40e_aqc_opc_get_switch_config = 0x0200, | 149 | i40e_aqc_opc_get_switch_config = 0x0200, |
150 | i40e_aqc_opc_add_statistics = 0x0201, | 150 | i40e_aqc_opc_add_statistics = 0x0201, |
151 | i40e_aqc_opc_remove_statistics = 0x0202, | 151 | i40e_aqc_opc_remove_statistics = 0x0202, |
152 | i40e_aqc_opc_set_port_parameters = 0x0203, | 152 | i40e_aqc_opc_set_port_parameters = 0x0203, |
153 | i40e_aqc_opc_get_switch_resource_alloc = 0x0204, | 153 | i40e_aqc_opc_get_switch_resource_alloc = 0x0204, |
154 | 154 | ||
155 | i40e_aqc_opc_add_vsi = 0x0210, | 155 | i40e_aqc_opc_add_vsi = 0x0210, |
156 | i40e_aqc_opc_update_vsi_parameters = 0x0211, | 156 | i40e_aqc_opc_update_vsi_parameters = 0x0211, |
157 | i40e_aqc_opc_get_vsi_parameters = 0x0212, | 157 | i40e_aqc_opc_get_vsi_parameters = 0x0212, |
158 | 158 | ||
159 | i40e_aqc_opc_add_pv = 0x0220, | 159 | i40e_aqc_opc_add_pv = 0x0220, |
160 | i40e_aqc_opc_update_pv_parameters = 0x0221, | 160 | i40e_aqc_opc_update_pv_parameters = 0x0221, |
161 | i40e_aqc_opc_get_pv_parameters = 0x0222, | 161 | i40e_aqc_opc_get_pv_parameters = 0x0222, |
162 | 162 | ||
163 | i40e_aqc_opc_add_veb = 0x0230, | 163 | i40e_aqc_opc_add_veb = 0x0230, |
164 | i40e_aqc_opc_update_veb_parameters = 0x0231, | 164 | i40e_aqc_opc_update_veb_parameters = 0x0231, |
165 | i40e_aqc_opc_get_veb_parameters = 0x0232, | 165 | i40e_aqc_opc_get_veb_parameters = 0x0232, |
166 | 166 | ||
167 | i40e_aqc_opc_delete_element = 0x0243, | 167 | i40e_aqc_opc_delete_element = 0x0243, |
168 | 168 | ||
169 | i40e_aqc_opc_add_macvlan = 0x0250, | 169 | i40e_aqc_opc_add_macvlan = 0x0250, |
170 | i40e_aqc_opc_remove_macvlan = 0x0251, | 170 | i40e_aqc_opc_remove_macvlan = 0x0251, |
171 | i40e_aqc_opc_add_vlan = 0x0252, | 171 | i40e_aqc_opc_add_vlan = 0x0252, |
172 | i40e_aqc_opc_remove_vlan = 0x0253, | 172 | i40e_aqc_opc_remove_vlan = 0x0253, |
173 | i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, | 173 | i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, |
174 | i40e_aqc_opc_add_tag = 0x0255, | 174 | i40e_aqc_opc_add_tag = 0x0255, |
175 | i40e_aqc_opc_remove_tag = 0x0256, | 175 | i40e_aqc_opc_remove_tag = 0x0256, |
176 | i40e_aqc_opc_add_multicast_etag = 0x0257, | 176 | i40e_aqc_opc_add_multicast_etag = 0x0257, |
177 | i40e_aqc_opc_remove_multicast_etag = 0x0258, | 177 | i40e_aqc_opc_remove_multicast_etag = 0x0258, |
178 | i40e_aqc_opc_update_tag = 0x0259, | 178 | i40e_aqc_opc_update_tag = 0x0259, |
179 | i40e_aqc_opc_add_control_packet_filter = 0x025A, | 179 | i40e_aqc_opc_add_control_packet_filter = 0x025A, |
180 | i40e_aqc_opc_remove_control_packet_filter = 0x025B, | 180 | i40e_aqc_opc_remove_control_packet_filter = 0x025B, |
181 | i40e_aqc_opc_add_cloud_filters = 0x025C, | 181 | i40e_aqc_opc_add_cloud_filters = 0x025C, |
182 | i40e_aqc_opc_remove_cloud_filters = 0x025D, | 182 | i40e_aqc_opc_remove_cloud_filters = 0x025D, |
183 | 183 | ||
184 | i40e_aqc_opc_add_mirror_rule = 0x0260, | 184 | i40e_aqc_opc_add_mirror_rule = 0x0260, |
185 | i40e_aqc_opc_delete_mirror_rule = 0x0261, | 185 | i40e_aqc_opc_delete_mirror_rule = 0x0261, |
186 | 186 | ||
187 | /* DCB commands */ | 187 | /* DCB commands */ |
188 | i40e_aqc_opc_dcb_ignore_pfc = 0x0301, | 188 | i40e_aqc_opc_dcb_ignore_pfc = 0x0301, |
189 | i40e_aqc_opc_dcb_updated = 0x0302, | 189 | i40e_aqc_opc_dcb_updated = 0x0302, |
190 | 190 | ||
191 | /* TX scheduler */ | 191 | /* TX scheduler */ |
192 | i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, | 192 | i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, |
193 | i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, | 193 | i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, |
194 | i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, | 194 | i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, |
195 | i40e_aqc_opc_query_vsi_bw_config = 0x0408, | 195 | i40e_aqc_opc_query_vsi_bw_config = 0x0408, |
196 | i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, | 196 | i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, |
197 | i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, | 197 | i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, |
198 | 198 | ||
199 | i40e_aqc_opc_enable_switching_comp_ets = 0x0413, | 199 | i40e_aqc_opc_enable_switching_comp_ets = 0x0413, |
200 | i40e_aqc_opc_modify_switching_comp_ets = 0x0414, | 200 | i40e_aqc_opc_modify_switching_comp_ets = 0x0414, |
201 | i40e_aqc_opc_disable_switching_comp_ets = 0x0415, | 201 | i40e_aqc_opc_disable_switching_comp_ets = 0x0415, |
202 | i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, | 202 | i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, |
203 | i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, | 203 | i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, |
204 | i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, | 204 | i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, |
205 | i40e_aqc_opc_query_port_ets_config = 0x0419, | 205 | i40e_aqc_opc_query_port_ets_config = 0x0419, |
206 | i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, | 206 | i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, |
207 | i40e_aqc_opc_suspend_port_tx = 0x041B, | 207 | i40e_aqc_opc_suspend_port_tx = 0x041B, |
208 | i40e_aqc_opc_resume_port_tx = 0x041C, | 208 | i40e_aqc_opc_resume_port_tx = 0x041C, |
209 | i40e_aqc_opc_configure_partition_bw = 0x041D, | 209 | i40e_aqc_opc_configure_partition_bw = 0x041D, |
210 | 210 | ||
211 | /* hmc */ | 211 | /* hmc */ |
212 | i40e_aqc_opc_query_hmc_resource_profile = 0x0500, | 212 | i40e_aqc_opc_query_hmc_resource_profile = 0x0500, |
213 | i40e_aqc_opc_set_hmc_resource_profile = 0x0501, | 213 | i40e_aqc_opc_set_hmc_resource_profile = 0x0501, |
214 | 214 | ||
215 | /* phy commands*/ | 215 | /* phy commands*/ |
216 | i40e_aqc_opc_get_phy_abilities = 0x0600, | 216 | i40e_aqc_opc_get_phy_abilities = 0x0600, |
217 | i40e_aqc_opc_set_phy_config = 0x0601, | 217 | i40e_aqc_opc_set_phy_config = 0x0601, |
218 | i40e_aqc_opc_set_mac_config = 0x0603, | 218 | i40e_aqc_opc_set_mac_config = 0x0603, |
219 | i40e_aqc_opc_set_link_restart_an = 0x0605, | 219 | i40e_aqc_opc_set_link_restart_an = 0x0605, |
220 | i40e_aqc_opc_get_link_status = 0x0607, | 220 | i40e_aqc_opc_get_link_status = 0x0607, |
221 | i40e_aqc_opc_set_phy_int_mask = 0x0613, | 221 | i40e_aqc_opc_set_phy_int_mask = 0x0613, |
222 | i40e_aqc_opc_get_local_advt_reg = 0x0614, | 222 | i40e_aqc_opc_get_local_advt_reg = 0x0614, |
223 | i40e_aqc_opc_set_local_advt_reg = 0x0615, | 223 | i40e_aqc_opc_set_local_advt_reg = 0x0615, |
224 | i40e_aqc_opc_get_partner_advt = 0x0616, | 224 | i40e_aqc_opc_get_partner_advt = 0x0616, |
225 | i40e_aqc_opc_set_lb_modes = 0x0618, | 225 | i40e_aqc_opc_set_lb_modes = 0x0618, |
226 | i40e_aqc_opc_get_phy_wol_caps = 0x0621, | 226 | i40e_aqc_opc_get_phy_wol_caps = 0x0621, |
227 | i40e_aqc_opc_set_phy_debug = 0x0622, | 227 | i40e_aqc_opc_set_phy_debug = 0x0622, |
228 | i40e_aqc_opc_upload_ext_phy_fm = 0x0625, | 228 | i40e_aqc_opc_upload_ext_phy_fm = 0x0625, |
229 | 229 | ||
230 | /* NVM commands */ | 230 | /* NVM commands */ |
231 | i40e_aqc_opc_nvm_read = 0x0701, | 231 | i40e_aqc_opc_nvm_read = 0x0701, |
232 | i40e_aqc_opc_nvm_erase = 0x0702, | 232 | i40e_aqc_opc_nvm_erase = 0x0702, |
233 | i40e_aqc_opc_nvm_update = 0x0703, | 233 | i40e_aqc_opc_nvm_update = 0x0703, |
234 | i40e_aqc_opc_nvm_config_read = 0x0704, | 234 | i40e_aqc_opc_nvm_config_read = 0x0704, |
235 | i40e_aqc_opc_nvm_config_write = 0x0705, | 235 | i40e_aqc_opc_nvm_config_write = 0x0705, |
236 | 236 | ||
237 | /* virtualization commands */ | 237 | /* virtualization commands */ |
238 | i40e_aqc_opc_send_msg_to_pf = 0x0801, | 238 | i40e_aqc_opc_send_msg_to_pf = 0x0801, |
239 | i40e_aqc_opc_send_msg_to_vf = 0x0802, | 239 | i40e_aqc_opc_send_msg_to_vf = 0x0802, |
240 | i40e_aqc_opc_send_msg_to_peer = 0x0803, | 240 | i40e_aqc_opc_send_msg_to_peer = 0x0803, |
241 | 241 | ||
242 | /* alternate structure */ | 242 | /* alternate structure */ |
243 | i40e_aqc_opc_alternate_write = 0x0900, | 243 | i40e_aqc_opc_alternate_write = 0x0900, |
244 | i40e_aqc_opc_alternate_write_indirect = 0x0901, | 244 | i40e_aqc_opc_alternate_write_indirect = 0x0901, |
245 | i40e_aqc_opc_alternate_read = 0x0902, | 245 | i40e_aqc_opc_alternate_read = 0x0902, |
246 | i40e_aqc_opc_alternate_read_indirect = 0x0903, | 246 | i40e_aqc_opc_alternate_read_indirect = 0x0903, |
247 | i40e_aqc_opc_alternate_write_done = 0x0904, | 247 | i40e_aqc_opc_alternate_write_done = 0x0904, |
248 | i40e_aqc_opc_alternate_set_mode = 0x0905, | 248 | i40e_aqc_opc_alternate_set_mode = 0x0905, |
249 | i40e_aqc_opc_alternate_clear_port = 0x0906, | 249 | i40e_aqc_opc_alternate_clear_port = 0x0906, |
250 | 250 | ||
251 | /* LLDP commands */ | 251 | /* LLDP commands */ |
252 | i40e_aqc_opc_lldp_get_mib = 0x0A00, | 252 | i40e_aqc_opc_lldp_get_mib = 0x0A00, |
253 | i40e_aqc_opc_lldp_update_mib = 0x0A01, | 253 | i40e_aqc_opc_lldp_update_mib = 0x0A01, |
254 | i40e_aqc_opc_lldp_add_tlv = 0x0A02, | 254 | i40e_aqc_opc_lldp_add_tlv = 0x0A02, |
255 | i40e_aqc_opc_lldp_update_tlv = 0x0A03, | 255 | i40e_aqc_opc_lldp_update_tlv = 0x0A03, |
256 | i40e_aqc_opc_lldp_delete_tlv = 0x0A04, | 256 | i40e_aqc_opc_lldp_delete_tlv = 0x0A04, |
257 | i40e_aqc_opc_lldp_stop = 0x0A05, | 257 | i40e_aqc_opc_lldp_stop = 0x0A05, |
258 | i40e_aqc_opc_lldp_start = 0x0A06, | 258 | i40e_aqc_opc_lldp_start = 0x0A06, |
259 | 259 | ||
260 | /* Tunnel commands */ | 260 | /* Tunnel commands */ |
261 | i40e_aqc_opc_add_udp_tunnel = 0x0B00, | 261 | i40e_aqc_opc_add_udp_tunnel = 0x0B00, |
262 | i40e_aqc_opc_del_udp_tunnel = 0x0B01, | 262 | i40e_aqc_opc_del_udp_tunnel = 0x0B01, |
263 | i40e_aqc_opc_tunnel_key_structure = 0x0B10, | 263 | i40e_aqc_opc_tunnel_key_structure = 0x0B10, |
264 | 264 | ||
265 | /* Async Events */ | 265 | /* Async Events */ |
266 | i40e_aqc_opc_event_lan_overflow = 0x1001, | 266 | i40e_aqc_opc_event_lan_overflow = 0x1001, |
267 | 267 | ||
268 | /* OEM commands */ | 268 | /* OEM commands */ |
269 | i40e_aqc_opc_oem_parameter_change = 0xFE00, | 269 | i40e_aqc_opc_oem_parameter_change = 0xFE00, |
270 | i40e_aqc_opc_oem_device_status_change = 0xFE01, | 270 | i40e_aqc_opc_oem_device_status_change = 0xFE01, |
271 | 271 | ||
272 | /* debug commands */ | 272 | /* debug commands */ |
273 | i40e_aqc_opc_debug_get_deviceid = 0xFF00, | 273 | i40e_aqc_opc_debug_get_deviceid = 0xFF00, |
274 | i40e_aqc_opc_debug_set_mode = 0xFF01, | 274 | i40e_aqc_opc_debug_set_mode = 0xFF01, |
275 | i40e_aqc_opc_debug_read_reg = 0xFF03, | 275 | i40e_aqc_opc_debug_read_reg = 0xFF03, |
276 | i40e_aqc_opc_debug_write_reg = 0xFF04, | 276 | i40e_aqc_opc_debug_write_reg = 0xFF04, |
277 | i40e_aqc_opc_debug_modify_reg = 0xFF07, | 277 | i40e_aqc_opc_debug_modify_reg = 0xFF07, |
278 | i40e_aqc_opc_debug_dump_internals = 0xFF08, | 278 | i40e_aqc_opc_debug_dump_internals = 0xFF08, |
279 | i40e_aqc_opc_debug_modify_internals = 0xFF09, | 279 | i40e_aqc_opc_debug_modify_internals = 0xFF09, |
280 | }; | 280 | }; |
281 | 281 | ||
282 | /* command structures and indirect data structures */ | 282 | /* command structures and indirect data structures */ |
@@ -303,7 +303,7 @@ enum i40e_admin_queue_opc { | |||
303 | /* This macro is used extensively to ensure that command structures are 16 | 303 | /* This macro is used extensively to ensure that command structures are 16 |
304 | * bytes in length as they have to map to the raw array of that size. | 304 | * bytes in length as they have to map to the raw array of that size. |
305 | */ | 305 | */ |
306 | #define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) | 306 | #define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) |
307 | 307 | ||
308 | /* internal (0x00XX) commands */ | 308 | /* internal (0x00XX) commands */ |
309 | 309 | ||
@@ -321,22 +321,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); | |||
321 | 321 | ||
322 | /* Send driver version (indirect 0x0002) */ | 322 | /* Send driver version (indirect 0x0002) */ |
323 | struct i40e_aqc_driver_version { | 323 | struct i40e_aqc_driver_version { |
324 | u8 driver_major_ver; | 324 | u8 driver_major_ver; |
325 | u8 driver_minor_ver; | 325 | u8 driver_minor_ver; |
326 | u8 driver_build_ver; | 326 | u8 driver_build_ver; |
327 | u8 driver_subbuild_ver; | 327 | u8 driver_subbuild_ver; |
328 | u8 reserved[4]; | 328 | u8 reserved[4]; |
329 | __le32 address_high; | 329 | __le32 address_high; |
330 | __le32 address_low; | 330 | __le32 address_low; |
331 | }; | 331 | }; |
332 | 332 | ||
333 | I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); | 333 | I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); |
334 | 334 | ||
335 | /* Queue Shutdown (direct 0x0003) */ | 335 | /* Queue Shutdown (direct 0x0003) */ |
336 | struct i40e_aqc_queue_shutdown { | 336 | struct i40e_aqc_queue_shutdown { |
337 | __le32 driver_unloading; | 337 | __le32 driver_unloading; |
338 | #define I40E_AQ_DRIVER_UNLOADING 0x1 | 338 | #define I40E_AQ_DRIVER_UNLOADING 0x1 |
339 | u8 reserved[12]; | 339 | u8 reserved[12]; |
340 | }; | 340 | }; |
341 | 341 | ||
342 | I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); | 342 | I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); |
@@ -352,19 +352,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); | |||
352 | /* Request resource ownership (direct 0x0008) | 352 | /* Request resource ownership (direct 0x0008) |
353 | * Release resource ownership (direct 0x0009) | 353 | * Release resource ownership (direct 0x0009) |
354 | */ | 354 | */ |
355 | #define I40E_AQ_RESOURCE_NVM 1 | 355 | #define I40E_AQ_RESOURCE_NVM 1 |
356 | #define I40E_AQ_RESOURCE_SDP 2 | 356 | #define I40E_AQ_RESOURCE_SDP 2 |
357 | #define I40E_AQ_RESOURCE_ACCESS_READ 1 | 357 | #define I40E_AQ_RESOURCE_ACCESS_READ 1 |
358 | #define I40E_AQ_RESOURCE_ACCESS_WRITE 2 | 358 | #define I40E_AQ_RESOURCE_ACCESS_WRITE 2 |
359 | #define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 | 359 | #define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 |
360 | #define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 | 360 | #define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 |
361 | 361 | ||
362 | struct i40e_aqc_request_resource { | 362 | struct i40e_aqc_request_resource { |
363 | __le16 resource_id; | 363 | __le16 resource_id; |
364 | __le16 access_type; | 364 | __le16 access_type; |
365 | __le32 timeout; | 365 | __le32 timeout; |
366 | __le32 resource_number; | 366 | __le32 resource_number; |
367 | u8 reserved[4]; | 367 | u8 reserved[4]; |
368 | }; | 368 | }; |
369 | 369 | ||
370 | I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); | 370 | I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); |
@@ -374,7 +374,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); | |||
374 | */ | 374 | */ |
375 | struct i40e_aqc_list_capabilites { | 375 | struct i40e_aqc_list_capabilites { |
376 | u8 command_flags; | 376 | u8 command_flags; |
377 | #define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 | 377 | #define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 |
378 | u8 pf_index; | 378 | u8 pf_index; |
379 | u8 reserved[2]; | 379 | u8 reserved[2]; |
380 | __le32 count; | 380 | __le32 count; |
@@ -385,123 +385,123 @@ struct i40e_aqc_list_capabilites { | |||
385 | I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); | 385 | I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); |
386 | 386 | ||
387 | struct i40e_aqc_list_capabilities_element_resp { | 387 | struct i40e_aqc_list_capabilities_element_resp { |
388 | __le16 id; | 388 | __le16 id; |
389 | u8 major_rev; | 389 | u8 major_rev; |
390 | u8 minor_rev; | 390 | u8 minor_rev; |
391 | __le32 number; | 391 | __le32 number; |
392 | __le32 logical_id; | 392 | __le32 logical_id; |
393 | __le32 phys_id; | 393 | __le32 phys_id; |
394 | u8 reserved[16]; | 394 | u8 reserved[16]; |
395 | }; | 395 | }; |
396 | 396 | ||
397 | /* list of caps */ | 397 | /* list of caps */ |
398 | 398 | ||
399 | #define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 | 399 | #define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 |
400 | #define I40E_AQ_CAP_ID_MNG_MODE 0x0002 | 400 | #define I40E_AQ_CAP_ID_MNG_MODE 0x0002 |
401 | #define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 | 401 | #define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 |
402 | #define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 | 402 | #define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 |
403 | #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 | 403 | #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 |
404 | #define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 | 404 | #define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 |
405 | #define I40E_AQ_CAP_ID_SRIOV 0x0012 | 405 | #define I40E_AQ_CAP_ID_SRIOV 0x0012 |
406 | #define I40E_AQ_CAP_ID_VF 0x0013 | 406 | #define I40E_AQ_CAP_ID_VF 0x0013 |
407 | #define I40E_AQ_CAP_ID_VMDQ 0x0014 | 407 | #define I40E_AQ_CAP_ID_VMDQ 0x0014 |
408 | #define I40E_AQ_CAP_ID_8021QBG 0x0015 | 408 | #define I40E_AQ_CAP_ID_8021QBG 0x0015 |
409 | #define I40E_AQ_CAP_ID_8021QBR 0x0016 | 409 | #define I40E_AQ_CAP_ID_8021QBR 0x0016 |
410 | #define I40E_AQ_CAP_ID_VSI 0x0017 | 410 | #define I40E_AQ_CAP_ID_VSI 0x0017 |
411 | #define I40E_AQ_CAP_ID_DCB 0x0018 | 411 | #define I40E_AQ_CAP_ID_DCB 0x0018 |
412 | #define I40E_AQ_CAP_ID_FCOE 0x0021 | 412 | #define I40E_AQ_CAP_ID_FCOE 0x0021 |
413 | #define I40E_AQ_CAP_ID_RSS 0x0040 | 413 | #define I40E_AQ_CAP_ID_RSS 0x0040 |
414 | #define I40E_AQ_CAP_ID_RXQ 0x0041 | 414 | #define I40E_AQ_CAP_ID_RXQ 0x0041 |
415 | #define I40E_AQ_CAP_ID_TXQ 0x0042 | 415 | #define I40E_AQ_CAP_ID_TXQ 0x0042 |
416 | #define I40E_AQ_CAP_ID_MSIX 0x0043 | 416 | #define I40E_AQ_CAP_ID_MSIX 0x0043 |
417 | #define I40E_AQ_CAP_ID_VF_MSIX 0x0044 | 417 | #define I40E_AQ_CAP_ID_VF_MSIX 0x0044 |
418 | #define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 | 418 | #define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 |
419 | #define I40E_AQ_CAP_ID_1588 0x0046 | 419 | #define I40E_AQ_CAP_ID_1588 0x0046 |
420 | #define I40E_AQ_CAP_ID_IWARP 0x0051 | 420 | #define I40E_AQ_CAP_ID_IWARP 0x0051 |
421 | #define I40E_AQ_CAP_ID_LED 0x0061 | 421 | #define I40E_AQ_CAP_ID_LED 0x0061 |
422 | #define I40E_AQ_CAP_ID_SDP 0x0062 | 422 | #define I40E_AQ_CAP_ID_SDP 0x0062 |
423 | #define I40E_AQ_CAP_ID_MDIO 0x0063 | 423 | #define I40E_AQ_CAP_ID_MDIO 0x0063 |
424 | #define I40E_AQ_CAP_ID_FLEX10 0x00F1 | 424 | #define I40E_AQ_CAP_ID_FLEX10 0x00F1 |
425 | #define I40E_AQ_CAP_ID_CEM 0x00F2 | 425 | #define I40E_AQ_CAP_ID_CEM 0x00F2 |
426 | 426 | ||
427 | /* Set CPPM Configuration (direct 0x0103) */ | 427 | /* Set CPPM Configuration (direct 0x0103) */ |
428 | struct i40e_aqc_cppm_configuration { | 428 | struct i40e_aqc_cppm_configuration { |
429 | __le16 command_flags; | 429 | __le16 command_flags; |
430 | #define I40E_AQ_CPPM_EN_LTRC 0x0800 | 430 | #define I40E_AQ_CPPM_EN_LTRC 0x0800 |
431 | #define I40E_AQ_CPPM_EN_DMCTH 0x1000 | 431 | #define I40E_AQ_CPPM_EN_DMCTH 0x1000 |
432 | #define I40E_AQ_CPPM_EN_DMCTLX 0x2000 | 432 | #define I40E_AQ_CPPM_EN_DMCTLX 0x2000 |
433 | #define I40E_AQ_CPPM_EN_HPTC 0x4000 | 433 | #define I40E_AQ_CPPM_EN_HPTC 0x4000 |
434 | #define I40E_AQ_CPPM_EN_DMARC 0x8000 | 434 | #define I40E_AQ_CPPM_EN_DMARC 0x8000 |
435 | __le16 ttlx; | 435 | __le16 ttlx; |
436 | __le32 dmacr; | 436 | __le32 dmacr; |
437 | __le16 dmcth; | 437 | __le16 dmcth; |
438 | u8 hptc; | 438 | u8 hptc; |
439 | u8 reserved; | 439 | u8 reserved; |
440 | __le32 pfltrc; | 440 | __le32 pfltrc; |
441 | }; | 441 | }; |
442 | 442 | ||
443 | I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); | 443 | I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); |
444 | 444 | ||
445 | /* Set ARP Proxy command / response (indirect 0x0104) */ | 445 | /* Set ARP Proxy command / response (indirect 0x0104) */ |
446 | struct i40e_aqc_arp_proxy_data { | 446 | struct i40e_aqc_arp_proxy_data { |
447 | __le16 command_flags; | 447 | __le16 command_flags; |
448 | #define I40E_AQ_ARP_INIT_IPV4 0x0008 | 448 | #define I40E_AQ_ARP_INIT_IPV4 0x0008 |
449 | #define I40E_AQ_ARP_UNSUP_CTL 0x0010 | 449 | #define I40E_AQ_ARP_UNSUP_CTL 0x0010 |
450 | #define I40E_AQ_ARP_ENA 0x0020 | 450 | #define I40E_AQ_ARP_ENA 0x0020 |
451 | #define I40E_AQ_ARP_ADD_IPV4 0x0040 | 451 | #define I40E_AQ_ARP_ADD_IPV4 0x0040 |
452 | #define I40E_AQ_ARP_DEL_IPV4 0x0080 | 452 | #define I40E_AQ_ARP_DEL_IPV4 0x0080 |
453 | __le16 table_id; | 453 | __le16 table_id; |
454 | __le32 pfpm_proxyfc; | 454 | __le32 pfpm_proxyfc; |
455 | __le32 ip_addr; | 455 | __le32 ip_addr; |
456 | u8 mac_addr[6]; | 456 | u8 mac_addr[6]; |
457 | }; | 457 | }; |
458 | 458 | ||
459 | /* Set NS Proxy Table Entry Command (indirect 0x0105) */ | 459 | /* Set NS Proxy Table Entry Command (indirect 0x0105) */ |
460 | struct i40e_aqc_ns_proxy_data { | 460 | struct i40e_aqc_ns_proxy_data { |
461 | __le16 table_idx_mac_addr_0; | 461 | __le16 table_idx_mac_addr_0; |
462 | __le16 table_idx_mac_addr_1; | 462 | __le16 table_idx_mac_addr_1; |
463 | __le16 table_idx_ipv6_0; | 463 | __le16 table_idx_ipv6_0; |
464 | __le16 table_idx_ipv6_1; | 464 | __le16 table_idx_ipv6_1; |
465 | __le16 control; | 465 | __le16 control; |
466 | #define I40E_AQ_NS_PROXY_ADD_0 0x0100 | 466 | #define I40E_AQ_NS_PROXY_ADD_0 0x0100 |
467 | #define I40E_AQ_NS_PROXY_DEL_0 0x0200 | 467 | #define I40E_AQ_NS_PROXY_DEL_0 0x0200 |
468 | #define I40E_AQ_NS_PROXY_ADD_1 0x0400 | 468 | #define I40E_AQ_NS_PROXY_ADD_1 0x0400 |
469 | #define I40E_AQ_NS_PROXY_DEL_1 0x0800 | 469 | #define I40E_AQ_NS_PROXY_DEL_1 0x0800 |
470 | #define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 | 470 | #define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 |
471 | #define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 | 471 | #define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 |
472 | #define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 | 472 | #define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 |
473 | #define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 | 473 | #define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 |
474 | #define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 | 474 | #define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 |
475 | #define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 | 475 | #define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 |
476 | #define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 | 476 | #define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 |
477 | u8 mac_addr_0[6]; | 477 | u8 mac_addr_0[6]; |
478 | u8 mac_addr_1[6]; | 478 | u8 mac_addr_1[6]; |
479 | u8 local_mac_addr[6]; | 479 | u8 local_mac_addr[6]; |
480 | u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ | 480 | u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ |
481 | u8 ipv6_addr_1[16]; | 481 | u8 ipv6_addr_1[16]; |
482 | }; | 482 | }; |
483 | 483 | ||
484 | /* Manage LAA Command (0x0106) - obsolete */ | 484 | /* Manage LAA Command (0x0106) - obsolete */ |
485 | struct i40e_aqc_mng_laa { | 485 | struct i40e_aqc_mng_laa { |
486 | __le16 command_flags; | 486 | __le16 command_flags; |
487 | #define I40E_AQ_LAA_FLAG_WR 0x8000 | 487 | #define I40E_AQ_LAA_FLAG_WR 0x8000 |
488 | u8 reserved[2]; | 488 | u8 reserved[2]; |
489 | __le32 sal; | 489 | __le32 sal; |
490 | __le16 sah; | 490 | __le16 sah; |
491 | u8 reserved2[6]; | 491 | u8 reserved2[6]; |
492 | }; | 492 | }; |
493 | 493 | ||
494 | /* Manage MAC Address Read Command (indirect 0x0107) */ | 494 | /* Manage MAC Address Read Command (indirect 0x0107) */ |
495 | struct i40e_aqc_mac_address_read { | 495 | struct i40e_aqc_mac_address_read { |
496 | __le16 command_flags; | 496 | __le16 command_flags; |
497 | #define I40E_AQC_LAN_ADDR_VALID 0x10 | 497 | #define I40E_AQC_LAN_ADDR_VALID 0x10 |
498 | #define I40E_AQC_SAN_ADDR_VALID 0x20 | 498 | #define I40E_AQC_SAN_ADDR_VALID 0x20 |
499 | #define I40E_AQC_PORT_ADDR_VALID 0x40 | 499 | #define I40E_AQC_PORT_ADDR_VALID 0x40 |
500 | #define I40E_AQC_WOL_ADDR_VALID 0x80 | 500 | #define I40E_AQC_WOL_ADDR_VALID 0x80 |
501 | #define I40E_AQC_ADDR_VALID_MASK 0xf0 | 501 | #define I40E_AQC_ADDR_VALID_MASK 0xf0 |
502 | u8 reserved[6]; | 502 | u8 reserved[6]; |
503 | __le32 addr_high; | 503 | __le32 addr_high; |
504 | __le32 addr_low; | 504 | __le32 addr_low; |
505 | }; | 505 | }; |
506 | 506 | ||
507 | I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); | 507 | I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); |
@@ -517,14 +517,14 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); | |||
517 | 517 | ||
518 | /* Manage MAC Address Write Command (0x0108) */ | 518 | /* Manage MAC Address Write Command (0x0108) */ |
519 | struct i40e_aqc_mac_address_write { | 519 | struct i40e_aqc_mac_address_write { |
520 | __le16 command_flags; | 520 | __le16 command_flags; |
521 | #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 | 521 | #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 |
522 | #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 | 522 | #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 |
523 | #define I40E_AQC_WRITE_TYPE_PORT 0x8000 | 523 | #define I40E_AQC_WRITE_TYPE_PORT 0x8000 |
524 | #define I40E_AQC_WRITE_TYPE_MASK 0xc000 | 524 | #define I40E_AQC_WRITE_TYPE_MASK 0xc000 |
525 | __le16 mac_sah; | 525 | __le16 mac_sah; |
526 | __le32 mac_sal; | 526 | __le32 mac_sal; |
527 | u8 reserved[8]; | 527 | u8 reserved[8]; |
528 | }; | 528 | }; |
529 | 529 | ||
530 | I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); | 530 | I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); |
@@ -545,10 +545,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); | |||
545 | * command | 545 | * command |
546 | */ | 546 | */ |
547 | struct i40e_aqc_switch_seid { | 547 | struct i40e_aqc_switch_seid { |
548 | __le16 seid; | 548 | __le16 seid; |
549 | u8 reserved[6]; | 549 | u8 reserved[6]; |
550 | __le32 addr_high; | 550 | __le32 addr_high; |
551 | __le32 addr_low; | 551 | __le32 addr_low; |
552 | }; | 552 | }; |
553 | 553 | ||
554 | I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); | 554 | I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); |
@@ -557,34 +557,34 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); | |||
557 | * uses i40e_aqc_switch_seid for the descriptor | 557 | * uses i40e_aqc_switch_seid for the descriptor |
558 | */ | 558 | */ |
559 | struct i40e_aqc_get_switch_config_header_resp { | 559 | struct i40e_aqc_get_switch_config_header_resp { |
560 | __le16 num_reported; | 560 | __le16 num_reported; |
561 | __le16 num_total; | 561 | __le16 num_total; |
562 | u8 reserved[12]; | 562 | u8 reserved[12]; |
563 | }; | 563 | }; |
564 | 564 | ||
565 | struct i40e_aqc_switch_config_element_resp { | 565 | struct i40e_aqc_switch_config_element_resp { |
566 | u8 element_type; | 566 | u8 element_type; |
567 | #define I40E_AQ_SW_ELEM_TYPE_MAC 1 | 567 | #define I40E_AQ_SW_ELEM_TYPE_MAC 1 |
568 | #define I40E_AQ_SW_ELEM_TYPE_PF 2 | 568 | #define I40E_AQ_SW_ELEM_TYPE_PF 2 |
569 | #define I40E_AQ_SW_ELEM_TYPE_VF 3 | 569 | #define I40E_AQ_SW_ELEM_TYPE_VF 3 |
570 | #define I40E_AQ_SW_ELEM_TYPE_EMP 4 | 570 | #define I40E_AQ_SW_ELEM_TYPE_EMP 4 |
571 | #define I40E_AQ_SW_ELEM_TYPE_BMC 5 | 571 | #define I40E_AQ_SW_ELEM_TYPE_BMC 5 |
572 | #define I40E_AQ_SW_ELEM_TYPE_PV 16 | 572 | #define I40E_AQ_SW_ELEM_TYPE_PV 16 |
573 | #define I40E_AQ_SW_ELEM_TYPE_VEB 17 | 573 | #define I40E_AQ_SW_ELEM_TYPE_VEB 17 |
574 | #define I40E_AQ_SW_ELEM_TYPE_PA 18 | 574 | #define I40E_AQ_SW_ELEM_TYPE_PA 18 |
575 | #define I40E_AQ_SW_ELEM_TYPE_VSI 19 | 575 | #define I40E_AQ_SW_ELEM_TYPE_VSI 19 |
576 | u8 revision; | 576 | u8 revision; |
577 | #define I40E_AQ_SW_ELEM_REV_1 1 | 577 | #define I40E_AQ_SW_ELEM_REV_1 1 |
578 | __le16 seid; | 578 | __le16 seid; |
579 | __le16 uplink_seid; | 579 | __le16 uplink_seid; |
580 | __le16 downlink_seid; | 580 | __le16 downlink_seid; |
581 | u8 reserved[3]; | 581 | u8 reserved[3]; |
582 | u8 connection_type; | 582 | u8 connection_type; |
583 | #define I40E_AQ_CONN_TYPE_REGULAR 0x1 | 583 | #define I40E_AQ_CONN_TYPE_REGULAR 0x1 |
584 | #define I40E_AQ_CONN_TYPE_DEFAULT 0x2 | 584 | #define I40E_AQ_CONN_TYPE_DEFAULT 0x2 |
585 | #define I40E_AQ_CONN_TYPE_CASCADED 0x3 | 585 | #define I40E_AQ_CONN_TYPE_CASCADED 0x3 |
586 | __le16 scheduler_id; | 586 | __le16 scheduler_id; |
587 | __le16 element_info; | 587 | __le16 element_info; |
588 | }; | 588 | }; |
589 | 589 | ||
590 | /* Get Switch Configuration (indirect 0x0200) | 590 | /* Get Switch Configuration (indirect 0x0200) |
@@ -592,73 +592,73 @@ struct i40e_aqc_switch_config_element_resp { | |||
592 | * the first in the array is the header, remainder are elements | 592 | * the first in the array is the header, remainder are elements |
593 | */ | 593 | */ |
594 | struct i40e_aqc_get_switch_config_resp { | 594 | struct i40e_aqc_get_switch_config_resp { |
595 | struct i40e_aqc_get_switch_config_header_resp header; | 595 | struct i40e_aqc_get_switch_config_header_resp header; |
596 | struct i40e_aqc_switch_config_element_resp element[1]; | 596 | struct i40e_aqc_switch_config_element_resp element[1]; |
597 | }; | 597 | }; |
598 | 598 | ||
599 | /* Add Statistics (direct 0x0201) | 599 | /* Add Statistics (direct 0x0201) |
600 | * Remove Statistics (direct 0x0202) | 600 | * Remove Statistics (direct 0x0202) |
601 | */ | 601 | */ |
602 | struct i40e_aqc_add_remove_statistics { | 602 | struct i40e_aqc_add_remove_statistics { |
603 | __le16 seid; | 603 | __le16 seid; |
604 | __le16 vlan; | 604 | __le16 vlan; |
605 | __le16 stat_index; | 605 | __le16 stat_index; |
606 | u8 reserved[10]; | 606 | u8 reserved[10]; |
607 | }; | 607 | }; |
608 | 608 | ||
609 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); | 609 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); |
610 | 610 | ||
611 | /* Set Port Parameters command (direct 0x0203) */ | 611 | /* Set Port Parameters command (direct 0x0203) */ |
612 | struct i40e_aqc_set_port_parameters { | 612 | struct i40e_aqc_set_port_parameters { |
613 | __le16 command_flags; | 613 | __le16 command_flags; |
614 | #define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 | 614 | #define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 |
615 | #define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ | 615 | #define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ |
616 | #define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 | 616 | #define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 |
617 | __le16 bad_frame_vsi; | 617 | __le16 bad_frame_vsi; |
618 | __le16 default_seid; /* reserved for command */ | 618 | __le16 default_seid; /* reserved for command */ |
619 | u8 reserved[10]; | 619 | u8 reserved[10]; |
620 | }; | 620 | }; |
621 | 621 | ||
622 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); | 622 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); |
623 | 623 | ||
624 | /* Get Switch Resource Allocation (indirect 0x0204) */ | 624 | /* Get Switch Resource Allocation (indirect 0x0204) */ |
625 | struct i40e_aqc_get_switch_resource_alloc { | 625 | struct i40e_aqc_get_switch_resource_alloc { |
626 | u8 num_entries; /* reserved for command */ | 626 | u8 num_entries; /* reserved for command */ |
627 | u8 reserved[7]; | 627 | u8 reserved[7]; |
628 | __le32 addr_high; | 628 | __le32 addr_high; |
629 | __le32 addr_low; | 629 | __le32 addr_low; |
630 | }; | 630 | }; |
631 | 631 | ||
632 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); | 632 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); |
633 | 633 | ||
634 | /* expect an array of these structs in the response buffer */ | 634 | /* expect an array of these structs in the response buffer */ |
635 | struct i40e_aqc_switch_resource_alloc_element_resp { | 635 | struct i40e_aqc_switch_resource_alloc_element_resp { |
636 | u8 resource_type; | 636 | u8 resource_type; |
637 | #define I40E_AQ_RESOURCE_TYPE_VEB 0x0 | 637 | #define I40E_AQ_RESOURCE_TYPE_VEB 0x0 |
638 | #define I40E_AQ_RESOURCE_TYPE_VSI 0x1 | 638 | #define I40E_AQ_RESOURCE_TYPE_VSI 0x1 |
639 | #define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 | 639 | #define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 |
640 | #define I40E_AQ_RESOURCE_TYPE_STAG 0x3 | 640 | #define I40E_AQ_RESOURCE_TYPE_STAG 0x3 |
641 | #define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 | 641 | #define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 |
642 | #define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 | 642 | #define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 |
643 | #define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 | 643 | #define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 |
644 | #define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 | 644 | #define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 |
645 | #define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 | 645 | #define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 |
646 | #define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 | 646 | #define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 |
647 | #define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA | 647 | #define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA |
648 | #define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB | 648 | #define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB |
649 | #define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC | 649 | #define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC |
650 | #define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD | 650 | #define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD |
651 | #define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF | 651 | #define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF |
652 | #define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 | 652 | #define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 |
653 | #define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 | 653 | #define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 |
654 | #define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 | 654 | #define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 |
655 | #define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 | 655 | #define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 |
656 | u8 reserved1; | 656 | u8 reserved1; |
657 | __le16 guaranteed; | 657 | __le16 guaranteed; |
658 | __le16 total; | 658 | __le16 total; |
659 | __le16 used; | 659 | __le16 used; |
660 | __le16 total_unalloced; | 660 | __le16 total_unalloced; |
661 | u8 reserved2[6]; | 661 | u8 reserved2[6]; |
662 | }; | 662 | }; |
663 | 663 | ||
664 | /* Add VSI (indirect 0x0210) | 664 | /* Add VSI (indirect 0x0210) |
@@ -672,24 +672,24 @@ struct i40e_aqc_switch_resource_alloc_element_resp { | |||
672 | * uses the same completion and data structure as Add VSI | 672 | * uses the same completion and data structure as Add VSI |
673 | */ | 673 | */ |
674 | struct i40e_aqc_add_get_update_vsi { | 674 | struct i40e_aqc_add_get_update_vsi { |
675 | __le16 uplink_seid; | 675 | __le16 uplink_seid; |
676 | u8 connection_type; | 676 | u8 connection_type; |
677 | #define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 | 677 | #define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 |
678 | #define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 | 678 | #define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 |
679 | #define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 | 679 | #define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 |
680 | u8 reserved1; | 680 | u8 reserved1; |
681 | u8 vf_id; | 681 | u8 vf_id; |
682 | u8 reserved2; | 682 | u8 reserved2; |
683 | __le16 vsi_flags; | 683 | __le16 vsi_flags; |
684 | #define I40E_AQ_VSI_TYPE_SHIFT 0x0 | 684 | #define I40E_AQ_VSI_TYPE_SHIFT 0x0 |
685 | #define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) | 685 | #define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) |
686 | #define I40E_AQ_VSI_TYPE_VF 0x0 | 686 | #define I40E_AQ_VSI_TYPE_VF 0x0 |
687 | #define I40E_AQ_VSI_TYPE_VMDQ2 0x1 | 687 | #define I40E_AQ_VSI_TYPE_VMDQ2 0x1 |
688 | #define I40E_AQ_VSI_TYPE_PF 0x2 | 688 | #define I40E_AQ_VSI_TYPE_PF 0x2 |
689 | #define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 | 689 | #define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 |
690 | #define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 | 690 | #define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 |
691 | __le32 addr_high; | 691 | __le32 addr_high; |
692 | __le32 addr_low; | 692 | __le32 addr_low; |
693 | }; | 693 | }; |
694 | 694 | ||
695 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); | 695 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); |
@@ -707,121 +707,121 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); | |||
707 | 707 | ||
708 | struct i40e_aqc_vsi_properties_data { | 708 | struct i40e_aqc_vsi_properties_data { |
709 | /* first 96 byte are written by SW */ | 709 | /* first 96 byte are written by SW */ |
710 | __le16 valid_sections; | 710 | __le16 valid_sections; |
711 | #define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 | 711 | #define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 |
712 | #define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 | 712 | #define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 |
713 | #define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 | 713 | #define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 |
714 | #define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 | 714 | #define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 |
715 | #define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 | 715 | #define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 |
716 | #define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 | 716 | #define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 |
717 | #define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 | 717 | #define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 |
718 | #define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 | 718 | #define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 |
719 | #define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 | 719 | #define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 |
720 | #define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 | 720 | #define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 |
721 | /* switch section */ | 721 | /* switch section */ |
722 | __le16 switch_id; /* 12bit id combined with flags below */ | 722 | __le16 switch_id; /* 12bit id combined with flags below */ |
723 | #define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 | 723 | #define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 |
724 | #define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) | 724 | #define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) |
725 | #define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 | 725 | #define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 |
726 | #define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 | 726 | #define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 |
727 | #define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 | 727 | #define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 |
728 | u8 sw_reserved[2]; | 728 | u8 sw_reserved[2]; |
729 | /* security section */ | 729 | /* security section */ |
730 | u8 sec_flags; | 730 | u8 sec_flags; |
731 | #define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 | 731 | #define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 |
732 | #define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 | 732 | #define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 |
733 | #define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 | 733 | #define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 |
734 | u8 sec_reserved; | 734 | u8 sec_reserved; |
735 | /* VLAN section */ | 735 | /* VLAN section */ |
736 | __le16 pvid; /* VLANS include priority bits */ | 736 | __le16 pvid; /* VLANS include priority bits */ |
737 | __le16 fcoe_pvid; | 737 | __le16 fcoe_pvid; |
738 | u8 port_vlan_flags; | 738 | u8 port_vlan_flags; |
739 | #define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 | 739 | #define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 |
740 | #define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ | 740 | #define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ |
741 | I40E_AQ_VSI_PVLAN_MODE_SHIFT) | 741 | I40E_AQ_VSI_PVLAN_MODE_SHIFT) |
742 | #define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 | 742 | #define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 |
743 | #define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 | 743 | #define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 |
744 | #define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 | 744 | #define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 |
745 | #define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 | 745 | #define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 |
746 | #define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 | 746 | #define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 |
747 | #define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ | 747 | #define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ |
748 | I40E_AQ_VSI_PVLAN_EMOD_SHIFT) | 748 | I40E_AQ_VSI_PVLAN_EMOD_SHIFT) |
749 | #define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 | 749 | #define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 |
750 | #define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 | 750 | #define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 |
751 | #define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 | 751 | #define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 |
752 | #define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 | 752 | #define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 |
753 | u8 pvlan_reserved[3]; | 753 | u8 pvlan_reserved[3]; |
754 | /* ingress egress up sections */ | 754 | /* ingress egress up sections */ |
755 | __le32 ingress_table; /* bitmap, 3 bits per up */ | 755 | __le32 ingress_table; /* bitmap, 3 bits per up */ |
756 | #define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 | 756 | #define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 |
757 | #define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ | 757 | #define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ |
758 | I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) | 758 | I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) |
759 | #define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 | 759 | #define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 |
760 | #define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ | 760 | #define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ |
761 | I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) | 761 | I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) |
762 | #define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 | 762 | #define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 |
763 | #define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ | 763 | #define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ |
764 | I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) | 764 | I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) |
765 | #define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 | 765 | #define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 |
766 | #define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ | 766 | #define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ |
767 | I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) | 767 | I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) |
768 | #define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 | 768 | #define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 |
769 | #define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ | 769 | #define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ |
770 | I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) | 770 | I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) |
771 | #define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 | 771 | #define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 |
772 | #define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ | 772 | #define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ |
773 | I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) | 773 | I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) |
774 | #define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 | 774 | #define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 |
775 | #define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ | 775 | #define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ |
776 | I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) | 776 | I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) |
777 | #define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 | 777 | #define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 |
778 | #define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ | 778 | #define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ |
779 | I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) | 779 | I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) |
780 | __le32 egress_table; /* same defines as for ingress table */ | 780 | __le32 egress_table; /* same defines as for ingress table */ |
781 | /* cascaded PV section */ | 781 | /* cascaded PV section */ |
782 | __le16 cas_pv_tag; | 782 | __le16 cas_pv_tag; |
783 | u8 cas_pv_flags; | 783 | u8 cas_pv_flags; |
784 | #define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 | 784 | #define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 |
785 | #define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ | 785 | #define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ |
786 | I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) | 786 | I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) |
787 | #define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 | 787 | #define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 |
788 | #define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 | 788 | #define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 |
789 | #define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 | 789 | #define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 |
790 | #define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 | 790 | #define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 |
791 | #define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 | 791 | #define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 |
792 | #define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 | 792 | #define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 |
793 | u8 cas_pv_reserved; | 793 | u8 cas_pv_reserved; |
794 | /* queue mapping section */ | 794 | /* queue mapping section */ |
795 | __le16 mapping_flags; | 795 | __le16 mapping_flags; |
796 | #define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 | 796 | #define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 |
797 | #define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 | 797 | #define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 |
798 | __le16 queue_mapping[16]; | 798 | __le16 queue_mapping[16]; |
799 | #define I40E_AQ_VSI_QUEUE_SHIFT 0x0 | 799 | #define I40E_AQ_VSI_QUEUE_SHIFT 0x0 |
800 | #define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) | 800 | #define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) |
801 | __le16 tc_mapping[8]; | 801 | __le16 tc_mapping[8]; |
802 | #define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 | 802 | #define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 |
803 | #define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ | 803 | #define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ |
804 | I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | 804 | I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
805 | #define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 | 805 | #define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 |
806 | #define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ | 806 | #define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ |
807 | I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) | 807 | I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) |
808 | /* queueing option section */ | 808 | /* queueing option section */ |
809 | u8 queueing_opt_flags; | 809 | u8 queueing_opt_flags; |
810 | #define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 | 810 | #define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 |
811 | #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 | 811 | #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 |
812 | u8 queueing_opt_reserved[3]; | 812 | u8 queueing_opt_reserved[3]; |
813 | /* scheduler section */ | 813 | /* scheduler section */ |
814 | u8 up_enable_bits; | 814 | u8 up_enable_bits; |
815 | u8 sched_reserved; | 815 | u8 sched_reserved; |
816 | /* outer up section */ | 816 | /* outer up section */ |
817 | __le32 outer_up_table; /* same structure and defines as ingress table */ | 817 | __le32 outer_up_table; /* same structure and defines as ingress tbl */ |
818 | u8 cmd_reserved[8]; | 818 | u8 cmd_reserved[8]; |
819 | /* last 32 bytes are written by FW */ | 819 | /* last 32 bytes are written by FW */ |
820 | __le16 qs_handle[8]; | 820 | __le16 qs_handle[8]; |
821 | #define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF | 821 | #define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF |
822 | __le16 stat_counter_idx; | 822 | __le16 stat_counter_idx; |
823 | __le16 sched_id; | 823 | __le16 sched_id; |
824 | u8 resp_reserved[12]; | 824 | u8 resp_reserved[12]; |
825 | }; | 825 | }; |
826 | 826 | ||
827 | I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); | 827 | I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); |
@@ -831,26 +831,26 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); | |||
831 | * (IS_CTRL_PORT only works on add PV) | 831 | * (IS_CTRL_PORT only works on add PV) |
832 | */ | 832 | */ |
833 | struct i40e_aqc_add_update_pv { | 833 | struct i40e_aqc_add_update_pv { |
834 | __le16 command_flags; | 834 | __le16 command_flags; |
835 | #define I40E_AQC_PV_FLAG_PV_TYPE 0x1 | 835 | #define I40E_AQC_PV_FLAG_PV_TYPE 0x1 |
836 | #define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 | 836 | #define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 |
837 | #define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 | 837 | #define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 |
838 | #define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 | 838 | #define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 |
839 | __le16 uplink_seid; | 839 | __le16 uplink_seid; |
840 | __le16 connected_seid; | 840 | __le16 connected_seid; |
841 | u8 reserved[10]; | 841 | u8 reserved[10]; |
842 | }; | 842 | }; |
843 | 843 | ||
844 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); | 844 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); |
845 | 845 | ||
846 | struct i40e_aqc_add_update_pv_completion { | 846 | struct i40e_aqc_add_update_pv_completion { |
847 | /* reserved for update; for add also encodes error if rc == ENOSPC */ | 847 | /* reserved for update; for add also encodes error if rc == ENOSPC */ |
848 | __le16 pv_seid; | 848 | __le16 pv_seid; |
849 | #define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 | 849 | #define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 |
850 | #define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 | 850 | #define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 |
851 | #define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 | 851 | #define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 |
852 | #define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 | 852 | #define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 |
853 | u8 reserved[14]; | 853 | u8 reserved[14]; |
854 | }; | 854 | }; |
855 | 855 | ||
856 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); | 856 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); |
@@ -860,48 +860,48 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); | |||
860 | */ | 860 | */ |
861 | 861 | ||
862 | struct i40e_aqc_get_pv_params_completion { | 862 | struct i40e_aqc_get_pv_params_completion { |
863 | __le16 seid; | 863 | __le16 seid; |
864 | __le16 default_stag; | 864 | __le16 default_stag; |
865 | __le16 pv_flags; /* same flags as add_pv */ | 865 | __le16 pv_flags; /* same flags as add_pv */ |
866 | #define I40E_AQC_GET_PV_PV_TYPE 0x1 | 866 | #define I40E_AQC_GET_PV_PV_TYPE 0x1 |
867 | #define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 | 867 | #define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 |
868 | #define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 | 868 | #define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 |
869 | u8 reserved[8]; | 869 | u8 reserved[8]; |
870 | __le16 default_port_seid; | 870 | __le16 default_port_seid; |
871 | }; | 871 | }; |
872 | 872 | ||
873 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); | 873 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); |
874 | 874 | ||
875 | /* Add VEB (direct 0x0230) */ | 875 | /* Add VEB (direct 0x0230) */ |
876 | struct i40e_aqc_add_veb { | 876 | struct i40e_aqc_add_veb { |
877 | __le16 uplink_seid; | 877 | __le16 uplink_seid; |
878 | __le16 downlink_seid; | 878 | __le16 downlink_seid; |
879 | __le16 veb_flags; | 879 | __le16 veb_flags; |
880 | #define I40E_AQC_ADD_VEB_FLOATING 0x1 | 880 | #define I40E_AQC_ADD_VEB_FLOATING 0x1 |
881 | #define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 | 881 | #define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 |
882 | #define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ | 882 | #define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ |
883 | I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) | 883 | I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) |
884 | #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 | 884 | #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 |
885 | #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 | 885 | #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 |
886 | #define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 | 886 | #define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 |
887 | u8 enable_tcs; | 887 | u8 enable_tcs; |
888 | u8 reserved[9]; | 888 | u8 reserved[9]; |
889 | }; | 889 | }; |
890 | 890 | ||
891 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); | 891 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); |
892 | 892 | ||
893 | struct i40e_aqc_add_veb_completion { | 893 | struct i40e_aqc_add_veb_completion { |
894 | u8 reserved[6]; | 894 | u8 reserved[6]; |
895 | __le16 switch_seid; | 895 | __le16 switch_seid; |
896 | /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ | 896 | /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ |
897 | __le16 veb_seid; | 897 | __le16 veb_seid; |
898 | #define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 | 898 | #define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 |
899 | #define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 | 899 | #define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 |
900 | #define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 | 900 | #define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 |
901 | #define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 | 901 | #define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 |
902 | __le16 statistic_index; | 902 | __le16 statistic_index; |
903 | __le16 vebs_used; | 903 | __le16 vebs_used; |
904 | __le16 vebs_free; | 904 | __le16 vebs_free; |
905 | }; | 905 | }; |
906 | 906 | ||
907 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); | 907 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); |
@@ -910,13 +910,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); | |||
910 | * uses i40e_aqc_switch_seid for the descriptor | 910 | * uses i40e_aqc_switch_seid for the descriptor |
911 | */ | 911 | */ |
912 | struct i40e_aqc_get_veb_parameters_completion { | 912 | struct i40e_aqc_get_veb_parameters_completion { |
913 | __le16 seid; | 913 | __le16 seid; |
914 | __le16 switch_id; | 914 | __le16 switch_id; |
915 | __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ | 915 | __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ |
916 | __le16 statistic_index; | 916 | __le16 statistic_index; |
917 | __le16 vebs_used; | 917 | __le16 vebs_used; |
918 | __le16 vebs_free; | 918 | __le16 vebs_free; |
919 | u8 reserved[4]; | 919 | u8 reserved[4]; |
920 | }; | 920 | }; |
921 | 921 | ||
922 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); | 922 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); |
@@ -929,37 +929,37 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); | |||
929 | 929 | ||
930 | /* used for the command for most vlan commands */ | 930 | /* used for the command for most vlan commands */ |
931 | struct i40e_aqc_macvlan { | 931 | struct i40e_aqc_macvlan { |
932 | __le16 num_addresses; | 932 | __le16 num_addresses; |
933 | __le16 seid[3]; | 933 | __le16 seid[3]; |
934 | #define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 | 934 | #define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 |
935 | #define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ | 935 | #define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ |
936 | I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) | 936 | I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) |
937 | #define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 | 937 | #define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 |
938 | __le32 addr_high; | 938 | __le32 addr_high; |
939 | __le32 addr_low; | 939 | __le32 addr_low; |
940 | }; | 940 | }; |
941 | 941 | ||
942 | I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); | 942 | I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); |
943 | 943 | ||
944 | /* indirect data for command and response */ | 944 | /* indirect data for command and response */ |
945 | struct i40e_aqc_add_macvlan_element_data { | 945 | struct i40e_aqc_add_macvlan_element_data { |
946 | u8 mac_addr[6]; | 946 | u8 mac_addr[6]; |
947 | __le16 vlan_tag; | 947 | __le16 vlan_tag; |
948 | __le16 flags; | 948 | __le16 flags; |
949 | #define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 | 949 | #define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 |
950 | #define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 | 950 | #define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 |
951 | #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 | 951 | #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 |
952 | #define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 | 952 | #define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 |
953 | __le16 queue_number; | 953 | __le16 queue_number; |
954 | #define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 | 954 | #define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 |
955 | #define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ | 955 | #define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ |
956 | I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) | 956 | I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) |
957 | /* response section */ | 957 | /* response section */ |
958 | u8 match_method; | 958 | u8 match_method; |
959 | #define I40E_AQC_MM_PERFECT_MATCH 0x01 | 959 | #define I40E_AQC_MM_PERFECT_MATCH 0x01 |
960 | #define I40E_AQC_MM_HASH_MATCH 0x02 | 960 | #define I40E_AQC_MM_HASH_MATCH 0x02 |
961 | #define I40E_AQC_MM_ERR_NO_RES 0xFF | 961 | #define I40E_AQC_MM_ERR_NO_RES 0xFF |
962 | u8 reserved1[3]; | 962 | u8 reserved1[3]; |
963 | }; | 963 | }; |
964 | 964 | ||
965 | struct i40e_aqc_add_remove_macvlan_completion { | 965 | struct i40e_aqc_add_remove_macvlan_completion { |
@@ -979,19 +979,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); | |||
979 | */ | 979 | */ |
980 | 980 | ||
981 | struct i40e_aqc_remove_macvlan_element_data { | 981 | struct i40e_aqc_remove_macvlan_element_data { |
982 | u8 mac_addr[6]; | 982 | u8 mac_addr[6]; |
983 | __le16 vlan_tag; | 983 | __le16 vlan_tag; |
984 | u8 flags; | 984 | u8 flags; |
985 | #define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 | 985 | #define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 |
986 | #define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 | 986 | #define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 |
987 | #define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 | 987 | #define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 |
988 | #define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 | 988 | #define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 |
989 | u8 reserved[3]; | 989 | u8 reserved[3]; |
990 | /* reply section */ | 990 | /* reply section */ |
991 | u8 error_code; | 991 | u8 error_code; |
992 | #define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 | 992 | #define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 |
993 | #define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF | 993 | #define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF |
994 | u8 reply_reserved[3]; | 994 | u8 reply_reserved[3]; |
995 | }; | 995 | }; |
996 | 996 | ||
997 | /* Add VLAN (indirect 0x0252) | 997 | /* Add VLAN (indirect 0x0252) |
@@ -999,59 +999,58 @@ struct i40e_aqc_remove_macvlan_element_data { | |||
999 | * use the generic i40e_aqc_macvlan for the command | 999 | * use the generic i40e_aqc_macvlan for the command |
1000 | */ | 1000 | */ |
1001 | struct i40e_aqc_add_remove_vlan_element_data { | 1001 | struct i40e_aqc_add_remove_vlan_element_data { |
1002 | __le16 vlan_tag; | 1002 | __le16 vlan_tag; |
1003 | u8 vlan_flags; | 1003 | u8 vlan_flags; |
1004 | /* flags for add VLAN */ | 1004 | /* flags for add VLAN */ |
1005 | #define I40E_AQC_ADD_VLAN_LOCAL 0x1 | 1005 | #define I40E_AQC_ADD_VLAN_LOCAL 0x1 |
1006 | #define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 | 1006 | #define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 |
1007 | #define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \ | 1007 | #define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) |
1008 | I40E_AQC_ADD_PVLAN_TYPE_SHIFT) | 1008 | #define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 |
1009 | #define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 | 1009 | #define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 |
1010 | #define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 | 1010 | #define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 |
1011 | #define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 | 1011 | #define I40E_AQC_VLAN_PTYPE_SHIFT 3 |
1012 | #define I40E_AQC_VLAN_PTYPE_SHIFT 3 | 1012 | #define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) |
1013 | #define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) | 1013 | #define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 |
1014 | #define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 | 1014 | #define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 |
1015 | #define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 | 1015 | #define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 |
1016 | #define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 | 1016 | #define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 |
1017 | #define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 | ||
1018 | /* flags for remove VLAN */ | 1017 | /* flags for remove VLAN */ |
1019 | #define I40E_AQC_REMOVE_VLAN_ALL 0x1 | 1018 | #define I40E_AQC_REMOVE_VLAN_ALL 0x1 |
1020 | u8 reserved; | 1019 | u8 reserved; |
1021 | u8 result; | 1020 | u8 result; |
1022 | /* flags for add VLAN */ | 1021 | /* flags for add VLAN */ |
1023 | #define I40E_AQC_ADD_VLAN_SUCCESS 0x0 | 1022 | #define I40E_AQC_ADD_VLAN_SUCCESS 0x0 |
1024 | #define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE | 1023 | #define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE |
1025 | #define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF | 1024 | #define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF |
1026 | /* flags for remove VLAN */ | 1025 | /* flags for remove VLAN */ |
1027 | #define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 | 1026 | #define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 |
1028 | #define I40E_AQC_REMOVE_VLAN_FAIL 0xFF | 1027 | #define I40E_AQC_REMOVE_VLAN_FAIL 0xFF |
1029 | u8 reserved1[3]; | 1028 | u8 reserved1[3]; |
1030 | }; | 1029 | }; |
1031 | 1030 | ||
1032 | struct i40e_aqc_add_remove_vlan_completion { | 1031 | struct i40e_aqc_add_remove_vlan_completion { |
1033 | u8 reserved[4]; | 1032 | u8 reserved[4]; |
1034 | __le16 vlans_used; | 1033 | __le16 vlans_used; |
1035 | __le16 vlans_free; | 1034 | __le16 vlans_free; |
1036 | __le32 addr_high; | 1035 | __le32 addr_high; |
1037 | __le32 addr_low; | 1036 | __le32 addr_low; |
1038 | }; | 1037 | }; |
1039 | 1038 | ||
1040 | /* Set VSI Promiscuous Modes (direct 0x0254) */ | 1039 | /* Set VSI Promiscuous Modes (direct 0x0254) */ |
1041 | struct i40e_aqc_set_vsi_promiscuous_modes { | 1040 | struct i40e_aqc_set_vsi_promiscuous_modes { |
1042 | __le16 promiscuous_flags; | 1041 | __le16 promiscuous_flags; |
1043 | __le16 valid_flags; | 1042 | __le16 valid_flags; |
1044 | /* flags used for both fields above */ | 1043 | /* flags used for both fields above */ |
1045 | #define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 | 1044 | #define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 |
1046 | #define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 | 1045 | #define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 |
1047 | #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 | 1046 | #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 |
1048 | #define I40E_AQC_SET_VSI_DEFAULT 0x08 | 1047 | #define I40E_AQC_SET_VSI_DEFAULT 0x08 |
1049 | #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 | 1048 | #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 |
1050 | __le16 seid; | 1049 | __le16 seid; |
1051 | #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF | 1050 | #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF |
1052 | __le16 vlan_tag; | 1051 | __le16 vlan_tag; |
1053 | #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 | 1052 | #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 |
1054 | u8 reserved[8]; | 1053 | u8 reserved[8]; |
1055 | }; | 1054 | }; |
1056 | 1055 | ||
1057 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); | 1056 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); |
@@ -1060,23 +1059,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); | |||
1060 | * Uses generic i40e_aqc_add_remove_tag_completion for completion | 1059 | * Uses generic i40e_aqc_add_remove_tag_completion for completion |
1061 | */ | 1060 | */ |
1062 | struct i40e_aqc_add_tag { | 1061 | struct i40e_aqc_add_tag { |
1063 | __le16 flags; | 1062 | __le16 flags; |
1064 | #define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 | 1063 | #define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 |
1065 | __le16 seid; | 1064 | __le16 seid; |
1066 | #define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 | 1065 | #define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 |
1067 | #define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ | 1066 | #define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ |
1068 | I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) | 1067 | I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) |
1069 | __le16 tag; | 1068 | __le16 tag; |
1070 | __le16 queue_number; | 1069 | __le16 queue_number; |
1071 | u8 reserved[8]; | 1070 | u8 reserved[8]; |
1072 | }; | 1071 | }; |
1073 | 1072 | ||
1074 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); | 1073 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); |
1075 | 1074 | ||
1076 | struct i40e_aqc_add_remove_tag_completion { | 1075 | struct i40e_aqc_add_remove_tag_completion { |
1077 | u8 reserved[12]; | 1076 | u8 reserved[12]; |
1078 | __le16 tags_used; | 1077 | __le16 tags_used; |
1079 | __le16 tags_free; | 1078 | __le16 tags_free; |
1080 | }; | 1079 | }; |
1081 | 1080 | ||
1082 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); | 1081 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); |
@@ -1085,12 +1084,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); | |||
1085 | * Uses generic i40e_aqc_add_remove_tag_completion for completion | 1084 | * Uses generic i40e_aqc_add_remove_tag_completion for completion |
1086 | */ | 1085 | */ |
1087 | struct i40e_aqc_remove_tag { | 1086 | struct i40e_aqc_remove_tag { |
1088 | __le16 seid; | 1087 | __le16 seid; |
1089 | #define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 | 1088 | #define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 |
1090 | #define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ | 1089 | #define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ |
1091 | I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) | 1090 | I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) |
1092 | __le16 tag; | 1091 | __le16 tag; |
1093 | u8 reserved[12]; | 1092 | u8 reserved[12]; |
1094 | }; | 1093 | }; |
1095 | 1094 | ||
1096 | /* Add multicast E-Tag (direct 0x0257) | 1095 | /* Add multicast E-Tag (direct 0x0257) |
@@ -1098,22 +1097,22 @@ struct i40e_aqc_remove_tag { | |||
1098 | * and no external data | 1097 | * and no external data |
1099 | */ | 1098 | */ |
1100 | struct i40e_aqc_add_remove_mcast_etag { | 1099 | struct i40e_aqc_add_remove_mcast_etag { |
1101 | __le16 pv_seid; | 1100 | __le16 pv_seid; |
1102 | __le16 etag; | 1101 | __le16 etag; |
1103 | u8 num_unicast_etags; | 1102 | u8 num_unicast_etags; |
1104 | u8 reserved[3]; | 1103 | u8 reserved[3]; |
1105 | __le32 addr_high; /* address of array of 2-byte s-tags */ | 1104 | __le32 addr_high; /* address of array of 2-byte s-tags */ |
1106 | __le32 addr_low; | 1105 | __le32 addr_low; |
1107 | }; | 1106 | }; |
1108 | 1107 | ||
1109 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); | 1108 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); |
1110 | 1109 | ||
1111 | struct i40e_aqc_add_remove_mcast_etag_completion { | 1110 | struct i40e_aqc_add_remove_mcast_etag_completion { |
1112 | u8 reserved[4]; | 1111 | u8 reserved[4]; |
1113 | __le16 mcast_etags_used; | 1112 | __le16 mcast_etags_used; |
1114 | __le16 mcast_etags_free; | 1113 | __le16 mcast_etags_free; |
1115 | __le32 addr_high; | 1114 | __le32 addr_high; |
1116 | __le32 addr_low; | 1115 | __le32 addr_low; |
1117 | 1116 | ||
1118 | }; | 1117 | }; |
1119 | 1118 | ||
@@ -1121,21 +1120,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); | |||
1121 | 1120 | ||
1122 | /* Update S/E-Tag (direct 0x0259) */ | 1121 | /* Update S/E-Tag (direct 0x0259) */ |
1123 | struct i40e_aqc_update_tag { | 1122 | struct i40e_aqc_update_tag { |
1124 | __le16 seid; | 1123 | __le16 seid; |
1125 | #define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 | 1124 | #define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 |
1126 | #define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ | 1125 | #define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ |
1127 | I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) | 1126 | I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) |
1128 | __le16 old_tag; | 1127 | __le16 old_tag; |
1129 | __le16 new_tag; | 1128 | __le16 new_tag; |
1130 | u8 reserved[10]; | 1129 | u8 reserved[10]; |
1131 | }; | 1130 | }; |
1132 | 1131 | ||
1133 | I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); | 1132 | I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); |
1134 | 1133 | ||
1135 | struct i40e_aqc_update_tag_completion { | 1134 | struct i40e_aqc_update_tag_completion { |
1136 | u8 reserved[12]; | 1135 | u8 reserved[12]; |
1137 | __le16 tags_used; | 1136 | __le16 tags_used; |
1138 | __le16 tags_free; | 1137 | __le16 tags_free; |
1139 | }; | 1138 | }; |
1140 | 1139 | ||
1141 | I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); | 1140 | I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); |
@@ -1146,30 +1145,30 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); | |||
1146 | * and the generic direct completion structure | 1145 | * and the generic direct completion structure |
1147 | */ | 1146 | */ |
1148 | struct i40e_aqc_add_remove_control_packet_filter { | 1147 | struct i40e_aqc_add_remove_control_packet_filter { |
1149 | u8 mac[6]; | 1148 | u8 mac[6]; |
1150 | __le16 etype; | 1149 | __le16 etype; |
1151 | __le16 flags; | 1150 | __le16 flags; |
1152 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 | 1151 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 |
1153 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 | 1152 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 |
1154 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 | 1153 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 |
1155 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 | 1154 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 |
1156 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 | 1155 | #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 |
1157 | __le16 seid; | 1156 | __le16 seid; |
1158 | #define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 | 1157 | #define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 |
1159 | #define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ | 1158 | #define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ |
1160 | I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) | 1159 | I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) |
1161 | __le16 queue; | 1160 | __le16 queue; |
1162 | u8 reserved[2]; | 1161 | u8 reserved[2]; |
1163 | }; | 1162 | }; |
1164 | 1163 | ||
1165 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); | 1164 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); |
1166 | 1165 | ||
1167 | struct i40e_aqc_add_remove_control_packet_filter_completion { | 1166 | struct i40e_aqc_add_remove_control_packet_filter_completion { |
1168 | __le16 mac_etype_used; | 1167 | __le16 mac_etype_used; |
1169 | __le16 etype_used; | 1168 | __le16 etype_used; |
1170 | __le16 mac_etype_free; | 1169 | __le16 mac_etype_free; |
1171 | __le16 etype_free; | 1170 | __le16 etype_free; |
1172 | u8 reserved[8]; | 1171 | u8 reserved[8]; |
1173 | }; | 1172 | }; |
1174 | 1173 | ||
1175 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); | 1174 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); |
@@ -1180,23 +1179,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); | |||
1180 | * and the generic indirect completion structure | 1179 | * and the generic indirect completion structure |
1181 | */ | 1180 | */ |
1182 | struct i40e_aqc_add_remove_cloud_filters { | 1181 | struct i40e_aqc_add_remove_cloud_filters { |
1183 | u8 num_filters; | 1182 | u8 num_filters; |
1184 | u8 reserved; | 1183 | u8 reserved; |
1185 | __le16 seid; | 1184 | __le16 seid; |
1186 | #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 | 1185 | #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 |
1187 | #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ | 1186 | #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ |
1188 | I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) | 1187 | I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) |
1189 | u8 reserved2[4]; | 1188 | u8 reserved2[4]; |
1190 | __le32 addr_high; | 1189 | __le32 addr_high; |
1191 | __le32 addr_low; | 1190 | __le32 addr_low; |
1192 | }; | 1191 | }; |
1193 | 1192 | ||
1194 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); | 1193 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); |
1195 | 1194 | ||
1196 | struct i40e_aqc_add_remove_cloud_filters_element_data { | 1195 | struct i40e_aqc_add_remove_cloud_filters_element_data { |
1197 | u8 outer_mac[6]; | 1196 | u8 outer_mac[6]; |
1198 | u8 inner_mac[6]; | 1197 | u8 inner_mac[6]; |
1199 | __le16 inner_vlan; | 1198 | __le16 inner_vlan; |
1200 | union { | 1199 | union { |
1201 | struct { | 1200 | struct { |
1202 | u8 reserved[12]; | 1201 | u8 reserved[12]; |
@@ -1206,52 +1205,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { | |||
1206 | u8 data[16]; | 1205 | u8 data[16]; |
1207 | } v6; | 1206 | } v6; |
1208 | } ipaddr; | 1207 | } ipaddr; |
1209 | __le16 flags; | 1208 | __le16 flags; |
1210 | #define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 | 1209 | #define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 |
1211 | #define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ | 1210 | #define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ |
1212 | I40E_AQC_ADD_CLOUD_FILTER_SHIFT) | 1211 | I40E_AQC_ADD_CLOUD_FILTER_SHIFT) |
1213 | #define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002 | ||
1214 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004 | ||
1215 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007 | ||
1216 | /* 0x0000 reserved */ | 1212 | /* 0x0000 reserved */ |
1217 | #define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 | 1213 | #define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 |
1218 | /* 0x0002 reserved */ | 1214 | /* 0x0002 reserved */ |
1219 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 | 1215 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 |
1220 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 | 1216 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 |
1221 | /* 0x0005 reserved */ | 1217 | /* 0x0005 reserved */ |
1222 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 | 1218 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 |
1223 | /* 0x0007 reserved */ | 1219 | /* 0x0007 reserved */ |
1224 | /* 0x0008 reserved */ | 1220 | /* 0x0008 reserved */ |
1225 | #define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 | 1221 | #define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 |
1226 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A | 1222 | #define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A |
1227 | #define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B | 1223 | #define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B |
1228 | #define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C | 1224 | #define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C |
1229 | 1225 | ||
1230 | #define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 | 1226 | #define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 |
1231 | #define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 | 1227 | #define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 |
1232 | #define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 | 1228 | #define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 |
1233 | #define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 | 1229 | #define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 |
1234 | #define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 | 1230 | #define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 |
1235 | 1231 | ||
1236 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 | 1232 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 |
1237 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 | 1233 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 |
1238 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 | 1234 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 |
1239 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 | 1235 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 |
1240 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 | 1236 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 |
1241 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 | 1237 | #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 |
1242 | 1238 | ||
1243 | __le32 tenant_id; | 1239 | __le32 tenant_id; |
1244 | u8 reserved[4]; | 1240 | u8 reserved[4]; |
1245 | __le16 queue_number; | 1241 | __le16 queue_number; |
1246 | #define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 | 1242 | #define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 |
1247 | #define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ | 1243 | #define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ |
1248 | I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) | 1244 | I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) |
1249 | u8 reserved2[14]; | 1245 | u8 reserved2[14]; |
1250 | /* response section */ | 1246 | /* response section */ |
1251 | u8 allocation_result; | 1247 | u8 allocation_result; |
1252 | #define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 | 1248 | #define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 |
1253 | #define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF | 1249 | #define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF |
1254 | u8 response_reserved[7]; | 1250 | u8 response_reserved[7]; |
1255 | }; | 1251 | }; |
1256 | 1252 | ||
1257 | struct i40e_aqc_remove_cloud_filters_completion { | 1253 | struct i40e_aqc_remove_cloud_filters_completion { |
@@ -1273,14 +1269,14 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); | |||
1273 | struct i40e_aqc_add_delete_mirror_rule { | 1269 | struct i40e_aqc_add_delete_mirror_rule { |
1274 | __le16 seid; | 1270 | __le16 seid; |
1275 | __le16 rule_type; | 1271 | __le16 rule_type; |
1276 | #define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 | 1272 | #define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 |
1277 | #define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ | 1273 | #define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ |
1278 | I40E_AQC_MIRROR_RULE_TYPE_SHIFT) | 1274 | I40E_AQC_MIRROR_RULE_TYPE_SHIFT) |
1279 | #define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 | 1275 | #define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 |
1280 | #define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 | 1276 | #define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 |
1281 | #define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 | 1277 | #define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 |
1282 | #define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 | 1278 | #define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 |
1283 | #define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 | 1279 | #define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 |
1284 | __le16 num_entries; | 1280 | __le16 num_entries; |
1285 | __le16 destination; /* VSI for add, rule id for delete */ | 1281 | __le16 destination; /* VSI for add, rule id for delete */ |
1286 | __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ | 1282 | __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ |
@@ -1290,12 +1286,12 @@ struct i40e_aqc_add_delete_mirror_rule { | |||
1290 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); | 1286 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); |
1291 | 1287 | ||
1292 | struct i40e_aqc_add_delete_mirror_rule_completion { | 1288 | struct i40e_aqc_add_delete_mirror_rule_completion { |
1293 | u8 reserved[2]; | 1289 | u8 reserved[2]; |
1294 | __le16 rule_id; /* only used on add */ | 1290 | __le16 rule_id; /* only used on add */ |
1295 | __le16 mirror_rules_used; | 1291 | __le16 mirror_rules_used; |
1296 | __le16 mirror_rules_free; | 1292 | __le16 mirror_rules_free; |
1297 | __le32 addr_high; | 1293 | __le32 addr_high; |
1298 | __le32 addr_low; | 1294 | __le32 addr_low; |
1299 | }; | 1295 | }; |
1300 | 1296 | ||
1301 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); | 1297 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); |
@@ -1306,11 +1302,11 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); | |||
1306 | * the command and response use the same descriptor structure | 1302 | * the command and response use the same descriptor structure |
1307 | */ | 1303 | */ |
1308 | struct i40e_aqc_pfc_ignore { | 1304 | struct i40e_aqc_pfc_ignore { |
1309 | u8 tc_bitmap; | 1305 | u8 tc_bitmap; |
1310 | u8 command_flags; /* unused on response */ | 1306 | u8 command_flags; /* unused on response */ |
1311 | #define I40E_AQC_PFC_IGNORE_SET 0x80 | 1307 | #define I40E_AQC_PFC_IGNORE_SET 0x80 |
1312 | #define I40E_AQC_PFC_IGNORE_CLEAR 0x0 | 1308 | #define I40E_AQC_PFC_IGNORE_CLEAR 0x0 |
1313 | u8 reserved[14]; | 1309 | u8 reserved[14]; |
1314 | }; | 1310 | }; |
1315 | 1311 | ||
1316 | I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); | 1312 | I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); |
@@ -1325,10 +1321,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); | |||
1325 | * this generic struct to pass the SEID in param0 | 1321 | * this generic struct to pass the SEID in param0 |
1326 | */ | 1322 | */ |
1327 | struct i40e_aqc_tx_sched_ind { | 1323 | struct i40e_aqc_tx_sched_ind { |
1328 | __le16 vsi_seid; | 1324 | __le16 vsi_seid; |
1329 | u8 reserved[6]; | 1325 | u8 reserved[6]; |
1330 | __le32 addr_high; | 1326 | __le32 addr_high; |
1331 | __le32 addr_low; | 1327 | __le32 addr_low; |
1332 | }; | 1328 | }; |
1333 | 1329 | ||
1334 | I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); | 1330 | I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); |
@@ -1340,12 +1336,12 @@ struct i40e_aqc_qs_handles_resp { | |||
1340 | 1336 | ||
1341 | /* Configure VSI BW limits (direct 0x0400) */ | 1337 | /* Configure VSI BW limits (direct 0x0400) */ |
1342 | struct i40e_aqc_configure_vsi_bw_limit { | 1338 | struct i40e_aqc_configure_vsi_bw_limit { |
1343 | __le16 vsi_seid; | 1339 | __le16 vsi_seid; |
1344 | u8 reserved[2]; | 1340 | u8 reserved[2]; |
1345 | __le16 credit; | 1341 | __le16 credit; |
1346 | u8 reserved1[2]; | 1342 | u8 reserved1[2]; |
1347 | u8 max_credit; /* 0-3, limit = 2^max */ | 1343 | u8 max_credit; /* 0-3, limit = 2^max */ |
1348 | u8 reserved2[7]; | 1344 | u8 reserved2[7]; |
1349 | }; | 1345 | }; |
1350 | 1346 | ||
1351 | I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); | 1347 | I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); |
@@ -1354,58 +1350,58 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); | |||
1354 | * responds with i40e_aqc_qs_handles_resp | 1350 | * responds with i40e_aqc_qs_handles_resp |
1355 | */ | 1351 | */ |
1356 | struct i40e_aqc_configure_vsi_ets_sla_bw_data { | 1352 | struct i40e_aqc_configure_vsi_ets_sla_bw_data { |
1357 | u8 tc_valid_bits; | 1353 | u8 tc_valid_bits; |
1358 | u8 reserved[15]; | 1354 | u8 reserved[15]; |
1359 | __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ | 1355 | __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ |
1360 | 1356 | ||
1361 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ | 1357 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ |
1362 | __le16 tc_bw_max[2]; | 1358 | __le16 tc_bw_max[2]; |
1363 | u8 reserved1[28]; | 1359 | u8 reserved1[28]; |
1364 | }; | 1360 | }; |
1365 | 1361 | ||
1366 | /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) | 1362 | /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) |
1367 | * responds with i40e_aqc_qs_handles_resp | 1363 | * responds with i40e_aqc_qs_handles_resp |
1368 | */ | 1364 | */ |
1369 | struct i40e_aqc_configure_vsi_tc_bw_data { | 1365 | struct i40e_aqc_configure_vsi_tc_bw_data { |
1370 | u8 tc_valid_bits; | 1366 | u8 tc_valid_bits; |
1371 | u8 reserved[3]; | 1367 | u8 reserved[3]; |
1372 | u8 tc_bw_credits[8]; | 1368 | u8 tc_bw_credits[8]; |
1373 | u8 reserved1[4]; | 1369 | u8 reserved1[4]; |
1374 | __le16 qs_handles[8]; | 1370 | __le16 qs_handles[8]; |
1375 | }; | 1371 | }; |
1376 | 1372 | ||
1377 | /* Query vsi bw configuration (indirect 0x0408) */ | 1373 | /* Query vsi bw configuration (indirect 0x0408) */ |
1378 | struct i40e_aqc_query_vsi_bw_config_resp { | 1374 | struct i40e_aqc_query_vsi_bw_config_resp { |
1379 | u8 tc_valid_bits; | 1375 | u8 tc_valid_bits; |
1380 | u8 tc_suspended_bits; | 1376 | u8 tc_suspended_bits; |
1381 | u8 reserved[14]; | 1377 | u8 reserved[14]; |
1382 | __le16 qs_handles[8]; | 1378 | __le16 qs_handles[8]; |
1383 | u8 reserved1[4]; | 1379 | u8 reserved1[4]; |
1384 | __le16 port_bw_limit; | 1380 | __le16 port_bw_limit; |
1385 | u8 reserved2[2]; | 1381 | u8 reserved2[2]; |
1386 | u8 max_bw; /* 0-3, limit = 2^max */ | 1382 | u8 max_bw; /* 0-3, limit = 2^max */ |
1387 | u8 reserved3[23]; | 1383 | u8 reserved3[23]; |
1388 | }; | 1384 | }; |
1389 | 1385 | ||
1390 | /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ | 1386 | /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ |
1391 | struct i40e_aqc_query_vsi_ets_sla_config_resp { | 1387 | struct i40e_aqc_query_vsi_ets_sla_config_resp { |
1392 | u8 tc_valid_bits; | 1388 | u8 tc_valid_bits; |
1393 | u8 reserved[3]; | 1389 | u8 reserved[3]; |
1394 | u8 share_credits[8]; | 1390 | u8 share_credits[8]; |
1395 | __le16 credits[8]; | 1391 | __le16 credits[8]; |
1396 | 1392 | ||
1397 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ | 1393 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ |
1398 | __le16 tc_bw_max[2]; | 1394 | __le16 tc_bw_max[2]; |
1399 | }; | 1395 | }; |
1400 | 1396 | ||
1401 | /* Configure Switching Component Bandwidth Limit (direct 0x0410) */ | 1397 | /* Configure Switching Component Bandwidth Limit (direct 0x0410) */ |
1402 | struct i40e_aqc_configure_switching_comp_bw_limit { | 1398 | struct i40e_aqc_configure_switching_comp_bw_limit { |
1403 | __le16 seid; | 1399 | __le16 seid; |
1404 | u8 reserved[2]; | 1400 | u8 reserved[2]; |
1405 | __le16 credit; | 1401 | __le16 credit; |
1406 | u8 reserved1[2]; | 1402 | u8 reserved1[2]; |
1407 | u8 max_bw; /* 0-3, limit = 2^max */ | 1403 | u8 max_bw; /* 0-3, limit = 2^max */ |
1408 | u8 reserved2[7]; | 1404 | u8 reserved2[7]; |
1409 | }; | 1405 | }; |
1410 | 1406 | ||
1411 | I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); | 1407 | I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); |
@@ -1415,75 +1411,75 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); | |||
1415 | * Disable Physical Port ETS (indirect 0x0415) | 1411 | * Disable Physical Port ETS (indirect 0x0415) |
1416 | */ | 1412 | */ |
1417 | struct i40e_aqc_configure_switching_comp_ets_data { | 1413 | struct i40e_aqc_configure_switching_comp_ets_data { |
1418 | u8 reserved[4]; | 1414 | u8 reserved[4]; |
1419 | u8 tc_valid_bits; | 1415 | u8 tc_valid_bits; |
1420 | u8 seepage; | 1416 | u8 seepage; |
1421 | #define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 | 1417 | #define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 |
1422 | u8 tc_strict_priority_flags; | 1418 | u8 tc_strict_priority_flags; |
1423 | u8 reserved1[17]; | 1419 | u8 reserved1[17]; |
1424 | u8 tc_bw_share_credits[8]; | 1420 | u8 tc_bw_share_credits[8]; |
1425 | u8 reserved2[96]; | 1421 | u8 reserved2[96]; |
1426 | }; | 1422 | }; |
1427 | 1423 | ||
1428 | /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ | 1424 | /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ |
1429 | struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { | 1425 | struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { |
1430 | u8 tc_valid_bits; | 1426 | u8 tc_valid_bits; |
1431 | u8 reserved[15]; | 1427 | u8 reserved[15]; |
1432 | __le16 tc_bw_credit[8]; | 1428 | __le16 tc_bw_credit[8]; |
1433 | 1429 | ||
1434 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ | 1430 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ |
1435 | __le16 tc_bw_max[2]; | 1431 | __le16 tc_bw_max[2]; |
1436 | u8 reserved1[28]; | 1432 | u8 reserved1[28]; |
1437 | }; | 1433 | }; |
1438 | 1434 | ||
1439 | /* Configure Switching Component Bandwidth Allocation per Tc | 1435 | /* Configure Switching Component Bandwidth Allocation per Tc |
1440 | * (indirect 0x0417) | 1436 | * (indirect 0x0417) |
1441 | */ | 1437 | */ |
1442 | struct i40e_aqc_configure_switching_comp_bw_config_data { | 1438 | struct i40e_aqc_configure_switching_comp_bw_config_data { |
1443 | u8 tc_valid_bits; | 1439 | u8 tc_valid_bits; |
1444 | u8 reserved[2]; | 1440 | u8 reserved[2]; |
1445 | u8 absolute_credits; /* bool */ | 1441 | u8 absolute_credits; /* bool */ |
1446 | u8 tc_bw_share_credits[8]; | 1442 | u8 tc_bw_share_credits[8]; |
1447 | u8 reserved1[20]; | 1443 | u8 reserved1[20]; |
1448 | }; | 1444 | }; |
1449 | 1445 | ||
1450 | /* Query Switching Component Configuration (indirect 0x0418) */ | 1446 | /* Query Switching Component Configuration (indirect 0x0418) */ |
1451 | struct i40e_aqc_query_switching_comp_ets_config_resp { | 1447 | struct i40e_aqc_query_switching_comp_ets_config_resp { |
1452 | u8 tc_valid_bits; | 1448 | u8 tc_valid_bits; |
1453 | u8 reserved[35]; | 1449 | u8 reserved[35]; |
1454 | __le16 port_bw_limit; | 1450 | __le16 port_bw_limit; |
1455 | u8 reserved1[2]; | 1451 | u8 reserved1[2]; |
1456 | u8 tc_bw_max; /* 0-3, limit = 2^max */ | 1452 | u8 tc_bw_max; /* 0-3, limit = 2^max */ |
1457 | u8 reserved2[23]; | 1453 | u8 reserved2[23]; |
1458 | }; | 1454 | }; |
1459 | 1455 | ||
1460 | /* Query PhysicalPort ETS Configuration (indirect 0x0419) */ | 1456 | /* Query PhysicalPort ETS Configuration (indirect 0x0419) */ |
1461 | struct i40e_aqc_query_port_ets_config_resp { | 1457 | struct i40e_aqc_query_port_ets_config_resp { |
1462 | u8 reserved[4]; | 1458 | u8 reserved[4]; |
1463 | u8 tc_valid_bits; | 1459 | u8 tc_valid_bits; |
1464 | u8 reserved1; | 1460 | u8 reserved1; |
1465 | u8 tc_strict_priority_bits; | 1461 | u8 tc_strict_priority_bits; |
1466 | u8 reserved2; | 1462 | u8 reserved2; |
1467 | u8 tc_bw_share_credits[8]; | 1463 | u8 tc_bw_share_credits[8]; |
1468 | __le16 tc_bw_limits[8]; | 1464 | __le16 tc_bw_limits[8]; |
1469 | 1465 | ||
1470 | /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ | 1466 | /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ |
1471 | __le16 tc_bw_max[2]; | 1467 | __le16 tc_bw_max[2]; |
1472 | u8 reserved3[32]; | 1468 | u8 reserved3[32]; |
1473 | }; | 1469 | }; |
1474 | 1470 | ||
1475 | /* Query Switching Component Bandwidth Allocation per Traffic Type | 1471 | /* Query Switching Component Bandwidth Allocation per Traffic Type |
1476 | * (indirect 0x041A) | 1472 | * (indirect 0x041A) |
1477 | */ | 1473 | */ |
1478 | struct i40e_aqc_query_switching_comp_bw_config_resp { | 1474 | struct i40e_aqc_query_switching_comp_bw_config_resp { |
1479 | u8 tc_valid_bits; | 1475 | u8 tc_valid_bits; |
1480 | u8 reserved[2]; | 1476 | u8 reserved[2]; |
1481 | u8 absolute_credits_enable; /* bool */ | 1477 | u8 absolute_credits_enable; /* bool */ |
1482 | u8 tc_bw_share_credits[8]; | 1478 | u8 tc_bw_share_credits[8]; |
1483 | __le16 tc_bw_limits[8]; | 1479 | __le16 tc_bw_limits[8]; |
1484 | 1480 | ||
1485 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ | 1481 | /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ |
1486 | __le16 tc_bw_max[2]; | 1482 | __le16 tc_bw_max[2]; |
1487 | }; | 1483 | }; |
1488 | 1484 | ||
1489 | /* Suspend/resume port TX traffic | 1485 | /* Suspend/resume port TX traffic |
@@ -1494,37 +1490,37 @@ struct i40e_aqc_query_switching_comp_bw_config_resp { | |||
1494 | * (indirect 0x041D) | 1490 | * (indirect 0x041D) |
1495 | */ | 1491 | */ |
1496 | struct i40e_aqc_configure_partition_bw_data { | 1492 | struct i40e_aqc_configure_partition_bw_data { |
1497 | __le16 pf_valid_bits; | 1493 | __le16 pf_valid_bits; |
1498 | u8 min_bw[16]; /* guaranteed bandwidth */ | 1494 | u8 min_bw[16]; /* guaranteed bandwidth */ |
1499 | u8 max_bw[16]; /* bandwidth limit */ | 1495 | u8 max_bw[16]; /* bandwidth limit */ |
1500 | }; | 1496 | }; |
1501 | 1497 | ||
1502 | /* Get and set the active HMC resource profile and status. | 1498 | /* Get and set the active HMC resource profile and status. |
1503 | * (direct 0x0500) and (direct 0x0501) | 1499 | * (direct 0x0500) and (direct 0x0501) |
1504 | */ | 1500 | */ |
1505 | struct i40e_aq_get_set_hmc_resource_profile { | 1501 | struct i40e_aq_get_set_hmc_resource_profile { |
1506 | u8 pm_profile; | 1502 | u8 pm_profile; |
1507 | u8 pe_vf_enabled; | 1503 | u8 pe_vf_enabled; |
1508 | u8 reserved[14]; | 1504 | u8 reserved[14]; |
1509 | }; | 1505 | }; |
1510 | 1506 | ||
1511 | I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); | 1507 | I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); |
1512 | 1508 | ||
1513 | enum i40e_aq_hmc_profile { | 1509 | enum i40e_aq_hmc_profile { |
1514 | /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ | 1510 | /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ |
1515 | I40E_HMC_PROFILE_DEFAULT = 1, | 1511 | I40E_HMC_PROFILE_DEFAULT = 1, |
1516 | I40E_HMC_PROFILE_FAVOR_VF = 2, | 1512 | I40E_HMC_PROFILE_FAVOR_VF = 2, |
1517 | I40E_HMC_PROFILE_EQUAL = 3, | 1513 | I40E_HMC_PROFILE_EQUAL = 3, |
1518 | }; | 1514 | }; |
1519 | 1515 | ||
1520 | #define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF | 1516 | #define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF |
1521 | #define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F | 1517 | #define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F |
1522 | 1518 | ||
1523 | /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ | 1519 | /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ |
1524 | 1520 | ||
1525 | /* set in param0 for get phy abilities to report qualified modules */ | 1521 | /* set in param0 for get phy abilities to report qualified modules */ |
1526 | #define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 | 1522 | #define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 |
1527 | #define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 | 1523 | #define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 |
1528 | 1524 | ||
1529 | enum i40e_aq_phy_type { | 1525 | enum i40e_aq_phy_type { |
1530 | I40E_PHY_TYPE_SGMII = 0x0, | 1526 | I40E_PHY_TYPE_SGMII = 0x0, |
@@ -1582,147 +1578,147 @@ struct i40e_aqc_module_desc { | |||
1582 | }; | 1578 | }; |
1583 | 1579 | ||
1584 | struct i40e_aq_get_phy_abilities_resp { | 1580 | struct i40e_aq_get_phy_abilities_resp { |
1585 | __le32 phy_type; /* bitmap using the above enum for offsets */ | 1581 | __le32 phy_type; /* bitmap using the above enum for offsets */ |
1586 | u8 link_speed; /* bitmap using the above enum bit patterns */ | 1582 | u8 link_speed; /* bitmap using the above enum bit patterns */ |
1587 | u8 abilities; | 1583 | u8 abilities; |
1588 | #define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 | 1584 | #define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 |
1589 | #define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 | 1585 | #define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 |
1590 | #define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 | 1586 | #define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 |
1591 | #define I40E_AQ_PHY_LINK_ENABLED 0x08 | 1587 | #define I40E_AQ_PHY_LINK_ENABLED 0x08 |
1592 | #define I40E_AQ_PHY_AN_ENABLED 0x10 | 1588 | #define I40E_AQ_PHY_AN_ENABLED 0x10 |
1593 | #define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 | 1589 | #define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 |
1594 | __le16 eee_capability; | 1590 | __le16 eee_capability; |
1595 | #define I40E_AQ_EEE_100BASE_TX 0x0002 | 1591 | #define I40E_AQ_EEE_100BASE_TX 0x0002 |
1596 | #define I40E_AQ_EEE_1000BASE_T 0x0004 | 1592 | #define I40E_AQ_EEE_1000BASE_T 0x0004 |
1597 | #define I40E_AQ_EEE_10GBASE_T 0x0008 | 1593 | #define I40E_AQ_EEE_10GBASE_T 0x0008 |
1598 | #define I40E_AQ_EEE_1000BASE_KX 0x0010 | 1594 | #define I40E_AQ_EEE_1000BASE_KX 0x0010 |
1599 | #define I40E_AQ_EEE_10GBASE_KX4 0x0020 | 1595 | #define I40E_AQ_EEE_10GBASE_KX4 0x0020 |
1600 | #define I40E_AQ_EEE_10GBASE_KR 0x0040 | 1596 | #define I40E_AQ_EEE_10GBASE_KR 0x0040 |
1601 | __le32 eeer_val; | 1597 | __le32 eeer_val; |
1602 | u8 d3_lpan; | 1598 | u8 d3_lpan; |
1603 | #define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 | 1599 | #define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 |
1604 | u8 reserved[3]; | 1600 | u8 reserved[3]; |
1605 | u8 phy_id[4]; | 1601 | u8 phy_id[4]; |
1606 | u8 module_type[3]; | 1602 | u8 module_type[3]; |
1607 | u8 qualified_module_count; | 1603 | u8 qualified_module_count; |
1608 | #define I40E_AQ_PHY_MAX_QMS 16 | 1604 | #define I40E_AQ_PHY_MAX_QMS 16 |
1609 | struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; | 1605 | struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; |
1610 | }; | 1606 | }; |
1611 | 1607 | ||
1612 | /* Set PHY Config (direct 0x0601) */ | 1608 | /* Set PHY Config (direct 0x0601) */ |
1613 | struct i40e_aq_set_phy_config { /* same bits as above in all */ | 1609 | struct i40e_aq_set_phy_config { /* same bits as above in all */ |
1614 | __le32 phy_type; | 1610 | __le32 phy_type; |
1615 | u8 link_speed; | 1611 | u8 link_speed; |
1616 | u8 abilities; | 1612 | u8 abilities; |
1617 | /* bits 0-2 use the values from get_phy_abilities_resp */ | 1613 | /* bits 0-2 use the values from get_phy_abilities_resp */ |
1618 | #define I40E_AQ_PHY_ENABLE_LINK 0x08 | 1614 | #define I40E_AQ_PHY_ENABLE_LINK 0x08 |
1619 | #define I40E_AQ_PHY_ENABLE_AN 0x10 | 1615 | #define I40E_AQ_PHY_ENABLE_AN 0x10 |
1620 | #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 | 1616 | #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 |
1621 | __le16 eee_capability; | 1617 | __le16 eee_capability; |
1622 | __le32 eeer; | 1618 | __le32 eeer; |
1623 | u8 low_power_ctrl; | 1619 | u8 low_power_ctrl; |
1624 | u8 reserved[3]; | 1620 | u8 reserved[3]; |
1625 | }; | 1621 | }; |
1626 | 1622 | ||
1627 | I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); | 1623 | I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); |
1628 | 1624 | ||
1629 | /* Set MAC Config command data structure (direct 0x0603) */ | 1625 | /* Set MAC Config command data structure (direct 0x0603) */ |
1630 | struct i40e_aq_set_mac_config { | 1626 | struct i40e_aq_set_mac_config { |
1631 | __le16 max_frame_size; | 1627 | __le16 max_frame_size; |
1632 | u8 params; | 1628 | u8 params; |
1633 | #define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 | 1629 | #define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 |
1634 | #define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 | 1630 | #define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 |
1635 | #define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 | 1631 | #define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 |
1636 | #define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 | 1632 | #define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 |
1637 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF | 1633 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF |
1638 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 | 1634 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 |
1639 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 | 1635 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 |
1640 | #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 | 1636 | #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 |
1641 | #define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 | 1637 | #define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 |
1642 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 | 1638 | #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 |
1643 | #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 | 1639 | #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 |
1644 | #define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 | 1640 | #define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 |
1645 | #define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 | 1641 | #define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 |
1646 | #define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 | 1642 | #define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 |
1647 | u8 tx_timer_priority; /* bitmap */ | 1643 | u8 tx_timer_priority; /* bitmap */ |
1648 | __le16 tx_timer_value; | 1644 | __le16 tx_timer_value; |
1649 | __le16 fc_refresh_threshold; | 1645 | __le16 fc_refresh_threshold; |
1650 | u8 reserved[8]; | 1646 | u8 reserved[8]; |
1651 | }; | 1647 | }; |
1652 | 1648 | ||
1653 | I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); | 1649 | I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); |
1654 | 1650 | ||
1655 | /* Restart Auto-Negotiation (direct 0x605) */ | 1651 | /* Restart Auto-Negotiation (direct 0x605) */ |
1656 | struct i40e_aqc_set_link_restart_an { | 1652 | struct i40e_aqc_set_link_restart_an { |
1657 | u8 command; | 1653 | u8 command; |
1658 | #define I40E_AQ_PHY_RESTART_AN 0x02 | 1654 | #define I40E_AQ_PHY_RESTART_AN 0x02 |
1659 | #define I40E_AQ_PHY_LINK_ENABLE 0x04 | 1655 | #define I40E_AQ_PHY_LINK_ENABLE 0x04 |
1660 | u8 reserved[15]; | 1656 | u8 reserved[15]; |
1661 | }; | 1657 | }; |
1662 | 1658 | ||
1663 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); | 1659 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); |
1664 | 1660 | ||
1665 | /* Get Link Status cmd & response data structure (direct 0x0607) */ | 1661 | /* Get Link Status cmd & response data structure (direct 0x0607) */ |
1666 | struct i40e_aqc_get_link_status { | 1662 | struct i40e_aqc_get_link_status { |
1667 | __le16 command_flags; /* only field set on command */ | 1663 | __le16 command_flags; /* only field set on command */ |
1668 | #define I40E_AQ_LSE_MASK 0x3 | 1664 | #define I40E_AQ_LSE_MASK 0x3 |
1669 | #define I40E_AQ_LSE_NOP 0x0 | 1665 | #define I40E_AQ_LSE_NOP 0x0 |
1670 | #define I40E_AQ_LSE_DISABLE 0x2 | 1666 | #define I40E_AQ_LSE_DISABLE 0x2 |
1671 | #define I40E_AQ_LSE_ENABLE 0x3 | 1667 | #define I40E_AQ_LSE_ENABLE 0x3 |
1672 | /* only response uses this flag */ | 1668 | /* only response uses this flag */ |
1673 | #define I40E_AQ_LSE_IS_ENABLED 0x1 | 1669 | #define I40E_AQ_LSE_IS_ENABLED 0x1 |
1674 | u8 phy_type; /* i40e_aq_phy_type */ | 1670 | u8 phy_type; /* i40e_aq_phy_type */ |
1675 | u8 link_speed; /* i40e_aq_link_speed */ | 1671 | u8 link_speed; /* i40e_aq_link_speed */ |
1676 | u8 link_info; | 1672 | u8 link_info; |
1677 | #define I40E_AQ_LINK_UP 0x01 | 1673 | #define I40E_AQ_LINK_UP 0x01 |
1678 | #define I40E_AQ_LINK_FAULT 0x02 | 1674 | #define I40E_AQ_LINK_FAULT 0x02 |
1679 | #define I40E_AQ_LINK_FAULT_TX 0x04 | 1675 | #define I40E_AQ_LINK_FAULT_TX 0x04 |
1680 | #define I40E_AQ_LINK_FAULT_RX 0x08 | 1676 | #define I40E_AQ_LINK_FAULT_RX 0x08 |
1681 | #define I40E_AQ_LINK_FAULT_REMOTE 0x10 | 1677 | #define I40E_AQ_LINK_FAULT_REMOTE 0x10 |
1682 | #define I40E_AQ_MEDIA_AVAILABLE 0x40 | 1678 | #define I40E_AQ_MEDIA_AVAILABLE 0x40 |
1683 | #define I40E_AQ_SIGNAL_DETECT 0x80 | 1679 | #define I40E_AQ_SIGNAL_DETECT 0x80 |
1684 | u8 an_info; | 1680 | u8 an_info; |
1685 | #define I40E_AQ_AN_COMPLETED 0x01 | 1681 | #define I40E_AQ_AN_COMPLETED 0x01 |
1686 | #define I40E_AQ_LP_AN_ABILITY 0x02 | 1682 | #define I40E_AQ_LP_AN_ABILITY 0x02 |
1687 | #define I40E_AQ_PD_FAULT 0x04 | 1683 | #define I40E_AQ_PD_FAULT 0x04 |
1688 | #define I40E_AQ_FEC_EN 0x08 | 1684 | #define I40E_AQ_FEC_EN 0x08 |
1689 | #define I40E_AQ_PHY_LOW_POWER 0x10 | 1685 | #define I40E_AQ_PHY_LOW_POWER 0x10 |
1690 | #define I40E_AQ_LINK_PAUSE_TX 0x20 | 1686 | #define I40E_AQ_LINK_PAUSE_TX 0x20 |
1691 | #define I40E_AQ_LINK_PAUSE_RX 0x40 | 1687 | #define I40E_AQ_LINK_PAUSE_RX 0x40 |
1692 | #define I40E_AQ_QUALIFIED_MODULE 0x80 | 1688 | #define I40E_AQ_QUALIFIED_MODULE 0x80 |
1693 | u8 ext_info; | 1689 | u8 ext_info; |
1694 | #define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 | 1690 | #define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 |
1695 | #define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 | 1691 | #define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 |
1696 | #define I40E_AQ_LINK_TX_SHIFT 0x02 | 1692 | #define I40E_AQ_LINK_TX_SHIFT 0x02 |
1697 | #define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) | 1693 | #define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) |
1698 | #define I40E_AQ_LINK_TX_ACTIVE 0x00 | 1694 | #define I40E_AQ_LINK_TX_ACTIVE 0x00 |
1699 | #define I40E_AQ_LINK_TX_DRAINED 0x01 | 1695 | #define I40E_AQ_LINK_TX_DRAINED 0x01 |
1700 | #define I40E_AQ_LINK_TX_FLUSHED 0x03 | 1696 | #define I40E_AQ_LINK_TX_FLUSHED 0x03 |
1701 | #define I40E_AQ_LINK_FORCED_40G 0x10 | 1697 | #define I40E_AQ_LINK_FORCED_40G 0x10 |
1702 | u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ | 1698 | u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ |
1703 | __le16 max_frame_size; | 1699 | __le16 max_frame_size; |
1704 | u8 config; | 1700 | u8 config; |
1705 | #define I40E_AQ_CONFIG_CRC_ENA 0x04 | 1701 | #define I40E_AQ_CONFIG_CRC_ENA 0x04 |
1706 | #define I40E_AQ_CONFIG_PACING_MASK 0x78 | 1702 | #define I40E_AQ_CONFIG_PACING_MASK 0x78 |
1707 | u8 reserved[5]; | 1703 | u8 reserved[5]; |
1708 | }; | 1704 | }; |
1709 | 1705 | ||
1710 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); | 1706 | I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); |
1711 | 1707 | ||
1712 | /* Set event mask command (direct 0x613) */ | 1708 | /* Set event mask command (direct 0x613) */ |
1713 | struct i40e_aqc_set_phy_int_mask { | 1709 | struct i40e_aqc_set_phy_int_mask { |
1714 | u8 reserved[8]; | 1710 | u8 reserved[8]; |
1715 | __le16 event_mask; | 1711 | __le16 event_mask; |
1716 | #define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 | 1712 | #define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 |
1717 | #define I40E_AQ_EVENT_MEDIA_NA 0x0004 | 1713 | #define I40E_AQ_EVENT_MEDIA_NA 0x0004 |
1718 | #define I40E_AQ_EVENT_LINK_FAULT 0x0008 | 1714 | #define I40E_AQ_EVENT_LINK_FAULT 0x0008 |
1719 | #define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 | 1715 | #define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 |
1720 | #define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 | 1716 | #define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 |
1721 | #define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 | 1717 | #define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 |
1722 | #define I40E_AQ_EVENT_AN_COMPLETED 0x0080 | 1718 | #define I40E_AQ_EVENT_AN_COMPLETED 0x0080 |
1723 | #define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 | 1719 | #define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 |
1724 | #define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 | 1720 | #define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 |
1725 | u8 reserved1[6]; | 1721 | u8 reserved1[6]; |
1726 | }; | 1722 | }; |
1727 | 1723 | ||
1728 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); | 1724 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); |
@@ -1732,27 +1728,27 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); | |||
1732 | * Get Link Partner AN advt register (direct 0x0616) | 1728 | * Get Link Partner AN advt register (direct 0x0616) |
1733 | */ | 1729 | */ |
1734 | struct i40e_aqc_an_advt_reg { | 1730 | struct i40e_aqc_an_advt_reg { |
1735 | __le32 local_an_reg0; | 1731 | __le32 local_an_reg0; |
1736 | __le16 local_an_reg1; | 1732 | __le16 local_an_reg1; |
1737 | u8 reserved[10]; | 1733 | u8 reserved[10]; |
1738 | }; | 1734 | }; |
1739 | 1735 | ||
1740 | I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); | 1736 | I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); |
1741 | 1737 | ||
1742 | /* Set Loopback mode (0x0618) */ | 1738 | /* Set Loopback mode (0x0618) */ |
1743 | struct i40e_aqc_set_lb_mode { | 1739 | struct i40e_aqc_set_lb_mode { |
1744 | __le16 lb_mode; | 1740 | __le16 lb_mode; |
1745 | #define I40E_AQ_LB_PHY_LOCAL 0x01 | 1741 | #define I40E_AQ_LB_PHY_LOCAL 0x01 |
1746 | #define I40E_AQ_LB_PHY_REMOTE 0x02 | 1742 | #define I40E_AQ_LB_PHY_REMOTE 0x02 |
1747 | #define I40E_AQ_LB_MAC_LOCAL 0x04 | 1743 | #define I40E_AQ_LB_MAC_LOCAL 0x04 |
1748 | u8 reserved[14]; | 1744 | u8 reserved[14]; |
1749 | }; | 1745 | }; |
1750 | 1746 | ||
1751 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); | 1747 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); |
1752 | 1748 | ||
1753 | /* Set PHY Debug command (0x0622) */ | 1749 | /* Set PHY Debug command (0x0622) */ |
1754 | struct i40e_aqc_set_phy_debug { | 1750 | struct i40e_aqc_set_phy_debug { |
1755 | u8 command_flags; | 1751 | u8 command_flags; |
1756 | #define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 | 1752 | #define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 |
1757 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 | 1753 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 |
1758 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ | 1754 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ |
@@ -1761,15 +1757,15 @@ struct i40e_aqc_set_phy_debug { | |||
1761 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 | 1757 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 |
1762 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 | 1758 | #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 |
1763 | #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 | 1759 | #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 |
1764 | u8 reserved[15]; | 1760 | u8 reserved[15]; |
1765 | }; | 1761 | }; |
1766 | 1762 | ||
1767 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); | 1763 | I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); |
1768 | 1764 | ||
1769 | enum i40e_aq_phy_reg_type { | 1765 | enum i40e_aq_phy_reg_type { |
1770 | I40E_AQC_PHY_REG_INTERNAL = 0x1, | 1766 | I40E_AQC_PHY_REG_INTERNAL = 0x1, |
1771 | I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, | 1767 | I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, |
1772 | I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 | 1768 | I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 |
1773 | }; | 1769 | }; |
1774 | 1770 | ||
1775 | /* NVM Read command (indirect 0x0701) | 1771 | /* NVM Read command (indirect 0x0701) |
@@ -1777,40 +1773,40 @@ enum i40e_aq_phy_reg_type { | |||
1777 | * NVM Update commands (indirect 0x0703) | 1773 | * NVM Update commands (indirect 0x0703) |
1778 | */ | 1774 | */ |
1779 | struct i40e_aqc_nvm_update { | 1775 | struct i40e_aqc_nvm_update { |
1780 | u8 command_flags; | 1776 | u8 command_flags; |
1781 | #define I40E_AQ_NVM_LAST_CMD 0x01 | 1777 | #define I40E_AQ_NVM_LAST_CMD 0x01 |
1782 | #define I40E_AQ_NVM_FLASH_ONLY 0x80 | 1778 | #define I40E_AQ_NVM_FLASH_ONLY 0x80 |
1783 | u8 module_pointer; | 1779 | u8 module_pointer; |
1784 | __le16 length; | 1780 | __le16 length; |
1785 | __le32 offset; | 1781 | __le32 offset; |
1786 | __le32 addr_high; | 1782 | __le32 addr_high; |
1787 | __le32 addr_low; | 1783 | __le32 addr_low; |
1788 | }; | 1784 | }; |
1789 | 1785 | ||
1790 | I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); | 1786 | I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); |
1791 | 1787 | ||
1792 | /* NVM Config Read (indirect 0x0704) */ | 1788 | /* NVM Config Read (indirect 0x0704) */ |
1793 | struct i40e_aqc_nvm_config_read { | 1789 | struct i40e_aqc_nvm_config_read { |
1794 | __le16 cmd_flags; | 1790 | __le16 cmd_flags; |
1795 | #define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 | 1791 | #define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 |
1796 | #define ANVM_READ_SINGLE_FEATURE 0 | 1792 | #define ANVM_READ_SINGLE_FEATURE 0 |
1797 | #define ANVM_READ_MULTIPLE_FEATURES 1 | 1793 | #define ANVM_READ_MULTIPLE_FEATURES 1 |
1798 | __le16 element_count; | 1794 | __le16 element_count; |
1799 | __le16 element_id; /* Feature/field ID */ | 1795 | __le16 element_id; /* Feature/field ID */ |
1800 | u8 reserved[2]; | 1796 | u8 reserved[2]; |
1801 | __le32 address_high; | 1797 | __le32 address_high; |
1802 | __le32 address_low; | 1798 | __le32 address_low; |
1803 | }; | 1799 | }; |
1804 | 1800 | ||
1805 | I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); | 1801 | I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); |
1806 | 1802 | ||
1807 | /* NVM Config Write (indirect 0x0705) */ | 1803 | /* NVM Config Write (indirect 0x0705) */ |
1808 | struct i40e_aqc_nvm_config_write { | 1804 | struct i40e_aqc_nvm_config_write { |
1809 | __le16 cmd_flags; | 1805 | __le16 cmd_flags; |
1810 | __le16 element_count; | 1806 | __le16 element_count; |
1811 | u8 reserved[4]; | 1807 | u8 reserved[4]; |
1812 | __le32 address_high; | 1808 | __le32 address_high; |
1813 | __le32 address_low; | 1809 | __le32 address_low; |
1814 | }; | 1810 | }; |
1815 | 1811 | ||
1816 | I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); | 1812 | I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); |
@@ -1835,10 +1831,10 @@ struct i40e_aqc_nvm_config_data_immediate_field { | |||
1835 | * Send to Peer PF command (indirect 0x0803) | 1831 | * Send to Peer PF command (indirect 0x0803) |
1836 | */ | 1832 | */ |
1837 | struct i40e_aqc_pf_vf_message { | 1833 | struct i40e_aqc_pf_vf_message { |
1838 | __le32 id; | 1834 | __le32 id; |
1839 | u8 reserved[4]; | 1835 | u8 reserved[4]; |
1840 | __le32 addr_high; | 1836 | __le32 addr_high; |
1841 | __le32 addr_low; | 1837 | __le32 addr_low; |
1842 | }; | 1838 | }; |
1843 | 1839 | ||
1844 | I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); | 1840 | I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); |
@@ -1874,22 +1870,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); | |||
1874 | * uses i40e_aq_desc | 1870 | * uses i40e_aq_desc |
1875 | */ | 1871 | */ |
1876 | struct i40e_aqc_alternate_write_done { | 1872 | struct i40e_aqc_alternate_write_done { |
1877 | __le16 cmd_flags; | 1873 | __le16 cmd_flags; |
1878 | #define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 | 1874 | #define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 |
1879 | #define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 | 1875 | #define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 |
1880 | #define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 | 1876 | #define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 |
1881 | #define I40E_AQ_ALTERNATE_RESET_NEEDED 2 | 1877 | #define I40E_AQ_ALTERNATE_RESET_NEEDED 2 |
1882 | u8 reserved[14]; | 1878 | u8 reserved[14]; |
1883 | }; | 1879 | }; |
1884 | 1880 | ||
1885 | I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); | 1881 | I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); |
1886 | 1882 | ||
1887 | /* Set OEM mode (direct 0x0905) */ | 1883 | /* Set OEM mode (direct 0x0905) */ |
1888 | struct i40e_aqc_alternate_set_mode { | 1884 | struct i40e_aqc_alternate_set_mode { |
1889 | __le32 mode; | 1885 | __le32 mode; |
1890 | #define I40E_AQ_ALTERNATE_MODE_NONE 0 | 1886 | #define I40E_AQ_ALTERNATE_MODE_NONE 0 |
1891 | #define I40E_AQ_ALTERNATE_MODE_OEM 1 | 1887 | #define I40E_AQ_ALTERNATE_MODE_OEM 1 |
1892 | u8 reserved[12]; | 1888 | u8 reserved[12]; |
1893 | }; | 1889 | }; |
1894 | 1890 | ||
1895 | I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); | 1891 | I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); |
@@ -1900,33 +1896,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); | |||
1900 | 1896 | ||
1901 | /* Lan Queue Overflow Event (direct, 0x1001) */ | 1897 | /* Lan Queue Overflow Event (direct, 0x1001) */ |
1902 | struct i40e_aqc_lan_overflow { | 1898 | struct i40e_aqc_lan_overflow { |
1903 | __le32 prtdcb_rupto; | 1899 | __le32 prtdcb_rupto; |
1904 | __le32 otx_ctl; | 1900 | __le32 otx_ctl; |
1905 | u8 reserved[8]; | 1901 | u8 reserved[8]; |
1906 | }; | 1902 | }; |
1907 | 1903 | ||
1908 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); | 1904 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); |
1909 | 1905 | ||
1910 | /* Get LLDP MIB (indirect 0x0A00) */ | 1906 | /* Get LLDP MIB (indirect 0x0A00) */ |
1911 | struct i40e_aqc_lldp_get_mib { | 1907 | struct i40e_aqc_lldp_get_mib { |
1912 | u8 type; | 1908 | u8 type; |
1913 | u8 reserved1; | 1909 | u8 reserved1; |
1914 | #define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 | 1910 | #define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 |
1915 | #define I40E_AQ_LLDP_MIB_LOCAL 0x0 | 1911 | #define I40E_AQ_LLDP_MIB_LOCAL 0x0 |
1916 | #define I40E_AQ_LLDP_MIB_REMOTE 0x1 | 1912 | #define I40E_AQ_LLDP_MIB_REMOTE 0x1 |
1917 | #define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 | 1913 | #define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 |
1918 | #define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC | 1914 | #define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC |
1919 | #define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 | 1915 | #define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 |
1920 | #define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 | 1916 | #define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 |
1921 | #define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 | 1917 | #define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 |
1922 | #define I40E_AQ_LLDP_TX_SHIFT 0x4 | 1918 | #define I40E_AQ_LLDP_TX_SHIFT 0x4 |
1923 | #define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) | 1919 | #define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) |
1924 | /* TX pause flags use I40E_AQ_LINK_TX_* above */ | 1920 | /* TX pause flags use I40E_AQ_LINK_TX_* above */ |
1925 | __le16 local_len; | 1921 | __le16 local_len; |
1926 | __le16 remote_len; | 1922 | __le16 remote_len; |
1927 | u8 reserved2[2]; | 1923 | u8 reserved2[2]; |
1928 | __le32 addr_high; | 1924 | __le32 addr_high; |
1929 | __le32 addr_low; | 1925 | __le32 addr_low; |
1930 | }; | 1926 | }; |
1931 | 1927 | ||
1932 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); | 1928 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); |
@@ -1935,12 +1931,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); | |||
1935 | * also used for the event (with type in the command field) | 1931 | * also used for the event (with type in the command field) |
1936 | */ | 1932 | */ |
1937 | struct i40e_aqc_lldp_update_mib { | 1933 | struct i40e_aqc_lldp_update_mib { |
1938 | u8 command; | 1934 | u8 command; |
1939 | #define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 | 1935 | #define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 |
1940 | #define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 | 1936 | #define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 |
1941 | u8 reserved[7]; | 1937 | u8 reserved[7]; |
1942 | __le32 addr_high; | 1938 | __le32 addr_high; |
1943 | __le32 addr_low; | 1939 | __le32 addr_low; |
1944 | }; | 1940 | }; |
1945 | 1941 | ||
1946 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); | 1942 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); |
@@ -1949,35 +1945,35 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); | |||
1949 | * Delete LLDP TLV (indirect 0x0A04) | 1945 | * Delete LLDP TLV (indirect 0x0A04) |
1950 | */ | 1946 | */ |
1951 | struct i40e_aqc_lldp_add_tlv { | 1947 | struct i40e_aqc_lldp_add_tlv { |
1952 | u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ | 1948 | u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ |
1953 | u8 reserved1[1]; | 1949 | u8 reserved1[1]; |
1954 | __le16 len; | 1950 | __le16 len; |
1955 | u8 reserved2[4]; | 1951 | u8 reserved2[4]; |
1956 | __le32 addr_high; | 1952 | __le32 addr_high; |
1957 | __le32 addr_low; | 1953 | __le32 addr_low; |
1958 | }; | 1954 | }; |
1959 | 1955 | ||
1960 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); | 1956 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); |
1961 | 1957 | ||
1962 | /* Update LLDP TLV (indirect 0x0A03) */ | 1958 | /* Update LLDP TLV (indirect 0x0A03) */ |
1963 | struct i40e_aqc_lldp_update_tlv { | 1959 | struct i40e_aqc_lldp_update_tlv { |
1964 | u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ | 1960 | u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ |
1965 | u8 reserved; | 1961 | u8 reserved; |
1966 | __le16 old_len; | 1962 | __le16 old_len; |
1967 | __le16 new_offset; | 1963 | __le16 new_offset; |
1968 | __le16 new_len; | 1964 | __le16 new_len; |
1969 | __le32 addr_high; | 1965 | __le32 addr_high; |
1970 | __le32 addr_low; | 1966 | __le32 addr_low; |
1971 | }; | 1967 | }; |
1972 | 1968 | ||
1973 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); | 1969 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); |
1974 | 1970 | ||
1975 | /* Stop LLDP (direct 0x0A05) */ | 1971 | /* Stop LLDP (direct 0x0A05) */ |
1976 | struct i40e_aqc_lldp_stop { | 1972 | struct i40e_aqc_lldp_stop { |
1977 | u8 command; | 1973 | u8 command; |
1978 | #define I40E_AQ_LLDP_AGENT_STOP 0x0 | 1974 | #define I40E_AQ_LLDP_AGENT_STOP 0x0 |
1979 | #define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 | 1975 | #define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 |
1980 | u8 reserved[15]; | 1976 | u8 reserved[15]; |
1981 | }; | 1977 | }; |
1982 | 1978 | ||
1983 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); | 1979 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); |
@@ -1985,9 +1981,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); | |||
1985 | /* Start LLDP (direct 0x0A06) */ | 1981 | /* Start LLDP (direct 0x0A06) */ |
1986 | 1982 | ||
1987 | struct i40e_aqc_lldp_start { | 1983 | struct i40e_aqc_lldp_start { |
1988 | u8 command; | 1984 | u8 command; |
1989 | #define I40E_AQ_LLDP_AGENT_START 0x1 | 1985 | #define I40E_AQ_LLDP_AGENT_START 0x1 |
1990 | u8 reserved[15]; | 1986 | u8 reserved[15]; |
1991 | }; | 1987 | }; |
1992 | 1988 | ||
1993 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); | 1989 | I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); |
@@ -1998,13 +1994,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); | |||
1998 | 1994 | ||
1999 | /* Add Udp Tunnel command and completion (direct 0x0B00) */ | 1995 | /* Add Udp Tunnel command and completion (direct 0x0B00) */ |
2000 | struct i40e_aqc_add_udp_tunnel { | 1996 | struct i40e_aqc_add_udp_tunnel { |
2001 | __le16 udp_port; | 1997 | __le16 udp_port; |
2002 | u8 reserved0[3]; | 1998 | u8 reserved0[3]; |
2003 | u8 protocol_type; | 1999 | u8 protocol_type; |
2004 | #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 | 2000 | #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 |
2005 | #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 | 2001 | #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 |
2006 | #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 | 2002 | #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 |
2007 | u8 reserved1[10]; | 2003 | u8 reserved1[10]; |
2008 | }; | 2004 | }; |
2009 | 2005 | ||
2010 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); | 2006 | I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); |
@@ -2013,8 +2009,8 @@ struct i40e_aqc_add_udp_tunnel_completion { | |||
2013 | __le16 udp_port; | 2009 | __le16 udp_port; |
2014 | u8 filter_entry_index; | 2010 | u8 filter_entry_index; |
2015 | u8 multiple_pfs; | 2011 | u8 multiple_pfs; |
2016 | #define I40E_AQC_SINGLE_PF 0x0 | 2012 | #define I40E_AQC_SINGLE_PF 0x0 |
2017 | #define I40E_AQC_MULTIPLE_PFS 0x1 | 2013 | #define I40E_AQC_MULTIPLE_PFS 0x1 |
2018 | u8 total_filters; | 2014 | u8 total_filters; |
2019 | u8 reserved[11]; | 2015 | u8 reserved[11]; |
2020 | }; | 2016 | }; |
@@ -2023,23 +2019,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); | |||
2023 | 2019 | ||
2024 | /* remove UDP Tunnel command (0x0B01) */ | 2020 | /* remove UDP Tunnel command (0x0B01) */ |
2025 | struct i40e_aqc_remove_udp_tunnel { | 2021 | struct i40e_aqc_remove_udp_tunnel { |
2026 | u8 reserved[2]; | 2022 | u8 reserved[2]; |
2027 | u8 index; /* 0 to 15 */ | 2023 | u8 index; /* 0 to 15 */ |
2028 | u8 pf_filters; | 2024 | u8 reserved2[13]; |
2029 | u8 total_filters; | ||
2030 | u8 reserved2[11]; | ||
2031 | }; | 2025 | }; |
2032 | 2026 | ||
2033 | I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); | 2027 | I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); |
2034 | 2028 | ||
2035 | struct i40e_aqc_del_udp_tunnel_completion { | 2029 | struct i40e_aqc_del_udp_tunnel_completion { |
2036 | __le16 udp_port; | 2030 | __le16 udp_port; |
2037 | u8 index; /* 0 to 15 */ | 2031 | u8 index; /* 0 to 15 */ |
2038 | u8 multiple_pfs; | 2032 | u8 multiple_pfs; |
2039 | u8 total_filters_used; | 2033 | u8 total_filters_used; |
2040 | u8 reserved; | 2034 | u8 reserved1[11]; |
2041 | u8 tunnels_free; | ||
2042 | u8 reserved1[9]; | ||
2043 | }; | 2035 | }; |
2044 | 2036 | ||
2045 | I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); | 2037 | I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); |
@@ -2068,11 +2060,11 @@ struct i40e_aqc_tunnel_key_structure { | |||
2068 | u8 key1_len; /* 0 to 15 */ | 2060 | u8 key1_len; /* 0 to 15 */ |
2069 | u8 key2_len; /* 0 to 15 */ | 2061 | u8 key2_len; /* 0 to 15 */ |
2070 | u8 flags; | 2062 | u8 flags; |
2071 | #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 | 2063 | #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 |
2072 | /* response flags */ | 2064 | /* response flags */ |
2073 | #define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 | 2065 | #define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 |
2074 | #define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 | 2066 | #define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 |
2075 | #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 | 2067 | #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 |
2076 | u8 network_key_index; | 2068 | u8 network_key_index; |
2077 | #define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 | 2069 | #define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 |
2078 | #define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 | 2070 | #define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 |
@@ -2085,21 +2077,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); | |||
2085 | 2077 | ||
2086 | /* OEM mode commands (direct 0xFE0x) */ | 2078 | /* OEM mode commands (direct 0xFE0x) */ |
2087 | struct i40e_aqc_oem_param_change { | 2079 | struct i40e_aqc_oem_param_change { |
2088 | __le32 param_type; | 2080 | __le32 param_type; |
2089 | #define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 | 2081 | #define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 |
2090 | #define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 | 2082 | #define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 |
2091 | #define I40E_AQ_OEM_PARAM_MAC 2 | 2083 | #define I40E_AQ_OEM_PARAM_MAC 2 |
2092 | __le32 param_value1; | 2084 | __le32 param_value1; |
2093 | u8 param_value2[8]; | 2085 | u8 param_value2[8]; |
2094 | }; | 2086 | }; |
2095 | 2087 | ||
2096 | I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); | 2088 | I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); |
2097 | 2089 | ||
2098 | struct i40e_aqc_oem_state_change { | 2090 | struct i40e_aqc_oem_state_change { |
2099 | __le32 state; | 2091 | __le32 state; |
2100 | #define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 | 2092 | #define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 |
2101 | #define I40E_AQ_OEM_STATE_LINK_UP 0x1 | 2093 | #define I40E_AQ_OEM_STATE_LINK_UP 0x1 |
2102 | u8 reserved[12]; | 2094 | u8 reserved[12]; |
2103 | }; | 2095 | }; |
2104 | 2096 | ||
2105 | I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); | 2097 | I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); |
@@ -2111,18 +2103,18 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); | |||
2111 | /* set test more (0xFF01, internal) */ | 2103 | /* set test more (0xFF01, internal) */ |
2112 | 2104 | ||
2113 | struct i40e_acq_set_test_mode { | 2105 | struct i40e_acq_set_test_mode { |
2114 | u8 mode; | 2106 | u8 mode; |
2115 | #define I40E_AQ_TEST_PARTIAL 0 | 2107 | #define I40E_AQ_TEST_PARTIAL 0 |
2116 | #define I40E_AQ_TEST_FULL 1 | 2108 | #define I40E_AQ_TEST_FULL 1 |
2117 | #define I40E_AQ_TEST_NVM 2 | 2109 | #define I40E_AQ_TEST_NVM 2 |
2118 | u8 reserved[3]; | 2110 | u8 reserved[3]; |
2119 | u8 command; | 2111 | u8 command; |
2120 | #define I40E_AQ_TEST_OPEN 0 | 2112 | #define I40E_AQ_TEST_OPEN 0 |
2121 | #define I40E_AQ_TEST_CLOSE 1 | 2113 | #define I40E_AQ_TEST_CLOSE 1 |
2122 | #define I40E_AQ_TEST_INC 2 | 2114 | #define I40E_AQ_TEST_INC 2 |
2123 | u8 reserved2[3]; | 2115 | u8 reserved2[3]; |
2124 | __le32 address_high; | 2116 | __le32 address_high; |
2125 | __le32 address_low; | 2117 | __le32 address_low; |
2126 | }; | 2118 | }; |
2127 | 2119 | ||
2128 | I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); | 2120 | I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); |
@@ -2175,21 +2167,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); | |||
2175 | #define I40E_AQ_CLUSTER_ID_ALTRAM 11 | 2167 | #define I40E_AQ_CLUSTER_ID_ALTRAM 11 |
2176 | 2168 | ||
2177 | struct i40e_aqc_debug_dump_internals { | 2169 | struct i40e_aqc_debug_dump_internals { |
2178 | u8 cluster_id; | 2170 | u8 cluster_id; |
2179 | u8 table_id; | 2171 | u8 table_id; |
2180 | __le16 data_size; | 2172 | __le16 data_size; |
2181 | __le32 idx; | 2173 | __le32 idx; |
2182 | __le32 address_high; | 2174 | __le32 address_high; |
2183 | __le32 address_low; | 2175 | __le32 address_low; |
2184 | }; | 2176 | }; |
2185 | 2177 | ||
2186 | I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); | 2178 | I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); |
2187 | 2179 | ||
2188 | struct i40e_aqc_debug_modify_internals { | 2180 | struct i40e_aqc_debug_modify_internals { |
2189 | u8 cluster_id; | 2181 | u8 cluster_id; |
2190 | u8 cluster_specific_params[7]; | 2182 | u8 cluster_specific_params[7]; |
2191 | __le32 address_high; | 2183 | __le32 address_high; |
2192 | __le32 address_low; | 2184 | __le32 address_low; |
2193 | }; | 2185 | }; |
2194 | 2186 | ||
2195 | I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); | 2187 | I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); |
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index c51bc7a33bc5..dabe6a4220c6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c | |||
@@ -1494,7 +1494,7 @@ static void i40evf_reset_task(struct work_struct *work) | |||
1494 | 1494 | ||
1495 | while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, | 1495 | while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, |
1496 | &adapter->crit_section)) | 1496 | &adapter->crit_section)) |
1497 | udelay(500); | 1497 | usleep_range(500, 1000); |
1498 | 1498 | ||
1499 | if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { | 1499 | if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { |
1500 | dev_info(&adapter->pdev->dev, "Requesting reset from PF\n"); | 1500 | dev_info(&adapter->pdev->dev, "Requesting reset from PF\n"); |
@@ -1980,7 +1980,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) | |||
1980 | if ((rstat == I40E_VFR_VFACTIVE) || | 1980 | if ((rstat == I40E_VFR_VFACTIVE) || |
1981 | (rstat == I40E_VFR_COMPLETED)) | 1981 | (rstat == I40E_VFR_COMPLETED)) |
1982 | return 0; | 1982 | return 0; |
1983 | udelay(10); | 1983 | usleep_range(10, 20); |
1984 | } | 1984 | } |
1985 | return -EBUSY; | 1985 | return -EBUSY; |
1986 | } | 1986 | } |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index ade067de1689..ccc3ce2e8c8c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -2558,11 +2558,10 @@ static void mvneta_adjust_link(struct net_device *ndev) | |||
2558 | MVNETA_GMAC_FORCE_LINK_DOWN); | 2558 | MVNETA_GMAC_FORCE_LINK_DOWN); |
2559 | mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); | 2559 | mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); |
2560 | mvneta_port_up(pp); | 2560 | mvneta_port_up(pp); |
2561 | netdev_info(pp->dev, "link up\n"); | ||
2562 | } else { | 2561 | } else { |
2563 | mvneta_port_down(pp); | 2562 | mvneta_port_down(pp); |
2564 | netdev_info(pp->dev, "link down\n"); | ||
2565 | } | 2563 | } |
2564 | phy_print_status(phydev); | ||
2566 | } | 2565 | } |
2567 | } | 2566 | } |
2568 | 2567 | ||
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index c3b209cd0660..21ddecef151e 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c | |||
@@ -106,6 +106,7 @@ | |||
106 | #define SDMA_CMD_ERD (1 << 7) | 106 | #define SDMA_CMD_ERD (1 << 7) |
107 | 107 | ||
108 | /* Bit definitions of the Port Config Reg */ | 108 | /* Bit definitions of the Port Config Reg */ |
109 | #define PCR_DUPLEX_FULL (1 << 15) | ||
109 | #define PCR_HS (1 << 12) | 110 | #define PCR_HS (1 << 12) |
110 | #define PCR_EN (1 << 7) | 111 | #define PCR_EN (1 << 7) |
111 | #define PCR_PM (1 << 0) | 112 | #define PCR_PM (1 << 0) |
@@ -113,11 +114,17 @@ | |||
113 | /* Bit definitions of the Port Config Extend Reg */ | 114 | /* Bit definitions of the Port Config Extend Reg */ |
114 | #define PCXR_2BSM (1 << 28) | 115 | #define PCXR_2BSM (1 << 28) |
115 | #define PCXR_DSCP_EN (1 << 21) | 116 | #define PCXR_DSCP_EN (1 << 21) |
117 | #define PCXR_RMII_EN (1 << 20) | ||
118 | #define PCXR_AN_SPEED_DIS (1 << 19) | ||
119 | #define PCXR_SPEED_100 (1 << 18) | ||
116 | #define PCXR_MFL_1518 (0 << 14) | 120 | #define PCXR_MFL_1518 (0 << 14) |
117 | #define PCXR_MFL_1536 (1 << 14) | 121 | #define PCXR_MFL_1536 (1 << 14) |
118 | #define PCXR_MFL_2048 (2 << 14) | 122 | #define PCXR_MFL_2048 (2 << 14) |
119 | #define PCXR_MFL_64K (3 << 14) | 123 | #define PCXR_MFL_64K (3 << 14) |
124 | #define PCXR_FLOWCTL_DIS (1 << 12) | ||
120 | #define PCXR_FLP (1 << 11) | 125 | #define PCXR_FLP (1 << 11) |
126 | #define PCXR_AN_FLOWCTL_DIS (1 << 10) | ||
127 | #define PCXR_AN_DUPLEX_DIS (1 << 9) | ||
121 | #define PCXR_PRIO_TX_OFF 3 | 128 | #define PCXR_PRIO_TX_OFF 3 |
122 | #define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF) | 129 | #define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF) |
123 | 130 | ||
@@ -170,7 +177,6 @@ | |||
170 | #define LINK_UP (1 << 3) | 177 | #define LINK_UP (1 << 3) |
171 | 178 | ||
172 | /* Bit definitions for work to be done */ | 179 | /* Bit definitions for work to be done */ |
173 | #define WORK_LINK (1 << 0) | ||
174 | #define WORK_TX_DONE (1 << 1) | 180 | #define WORK_TX_DONE (1 << 1) |
175 | 181 | ||
176 | /* | 182 | /* |
@@ -197,6 +203,9 @@ struct tx_desc { | |||
197 | struct pxa168_eth_private { | 203 | struct pxa168_eth_private { |
198 | int port_num; /* User Ethernet port number */ | 204 | int port_num; /* User Ethernet port number */ |
199 | int phy_addr; | 205 | int phy_addr; |
206 | int phy_speed; | ||
207 | int phy_duplex; | ||
208 | phy_interface_t phy_intf; | ||
200 | 209 | ||
201 | int rx_resource_err; /* Rx ring resource error flag */ | 210 | int rx_resource_err; /* Rx ring resource error flag */ |
202 | 211 | ||
@@ -269,11 +278,11 @@ enum hash_table_entry { | |||
269 | static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); | 278 | static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); |
270 | static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd); | 279 | static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd); |
271 | static int pxa168_init_hw(struct pxa168_eth_private *pep); | 280 | static int pxa168_init_hw(struct pxa168_eth_private *pep); |
281 | static int pxa168_init_phy(struct net_device *dev); | ||
272 | static void eth_port_reset(struct net_device *dev); | 282 | static void eth_port_reset(struct net_device *dev); |
273 | static void eth_port_start(struct net_device *dev); | 283 | static void eth_port_start(struct net_device *dev); |
274 | static int pxa168_eth_open(struct net_device *dev); | 284 | static int pxa168_eth_open(struct net_device *dev); |
275 | static int pxa168_eth_stop(struct net_device *dev); | 285 | static int pxa168_eth_stop(struct net_device *dev); |
276 | static int ethernet_phy_setup(struct net_device *dev); | ||
277 | 286 | ||
278 | static inline u32 rdl(struct pxa168_eth_private *pep, int offset) | 287 | static inline u32 rdl(struct pxa168_eth_private *pep, int offset) |
279 | { | 288 | { |
@@ -305,26 +314,6 @@ static void abort_dma(struct pxa168_eth_private *pep) | |||
305 | netdev_err(pep->dev, "%s : DMA Stuck\n", __func__); | 314 | netdev_err(pep->dev, "%s : DMA Stuck\n", __func__); |
306 | } | 315 | } |
307 | 316 | ||
308 | static int ethernet_phy_get(struct pxa168_eth_private *pep) | ||
309 | { | ||
310 | unsigned int reg_data; | ||
311 | |||
312 | reg_data = rdl(pep, PHY_ADDRESS); | ||
313 | |||
314 | return (reg_data >> (5 * pep->port_num)) & 0x1f; | ||
315 | } | ||
316 | |||
317 | static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr) | ||
318 | { | ||
319 | u32 reg_data; | ||
320 | int addr_shift = 5 * pep->port_num; | ||
321 | |||
322 | reg_data = rdl(pep, PHY_ADDRESS); | ||
323 | reg_data &= ~(0x1f << addr_shift); | ||
324 | reg_data |= (phy_addr & 0x1f) << addr_shift; | ||
325 | wrl(pep, PHY_ADDRESS, reg_data); | ||
326 | } | ||
327 | |||
328 | static void rxq_refill(struct net_device *dev) | 317 | static void rxq_refill(struct net_device *dev) |
329 | { | 318 | { |
330 | struct pxa168_eth_private *pep = netdev_priv(dev); | 319 | struct pxa168_eth_private *pep = netdev_priv(dev); |
@@ -655,14 +644,7 @@ static void eth_port_start(struct net_device *dev) | |||
655 | struct pxa168_eth_private *pep = netdev_priv(dev); | 644 | struct pxa168_eth_private *pep = netdev_priv(dev); |
656 | int tx_curr_desc, rx_curr_desc; | 645 | int tx_curr_desc, rx_curr_desc; |
657 | 646 | ||
658 | /* Perform PHY reset, if there is a PHY. */ | 647 | phy_start(pep->phy); |
659 | if (pep->phy != NULL) { | ||
660 | struct ethtool_cmd cmd; | ||
661 | |||
662 | pxa168_get_settings(pep->dev, &cmd); | ||
663 | phy_init_hw(pep->phy); | ||
664 | pxa168_set_settings(pep->dev, &cmd); | ||
665 | } | ||
666 | 648 | ||
667 | /* Assignment of Tx CTRP of given queue */ | 649 | /* Assignment of Tx CTRP of given queue */ |
668 | tx_curr_desc = pep->tx_curr_desc_q; | 650 | tx_curr_desc = pep->tx_curr_desc_q; |
@@ -717,6 +699,8 @@ static void eth_port_reset(struct net_device *dev) | |||
717 | val = rdl(pep, PORT_CONFIG); | 699 | val = rdl(pep, PORT_CONFIG); |
718 | val &= ~PCR_EN; | 700 | val &= ~PCR_EN; |
719 | wrl(pep, PORT_CONFIG, val); | 701 | wrl(pep, PORT_CONFIG, val); |
702 | |||
703 | phy_stop(pep->phy); | ||
720 | } | 704 | } |
721 | 705 | ||
722 | /* | 706 | /* |
@@ -884,43 +868,9 @@ static int pxa168_eth_collect_events(struct pxa168_eth_private *pep, | |||
884 | } | 868 | } |
885 | if (icr & ICR_RXBUF) | 869 | if (icr & ICR_RXBUF) |
886 | ret = 1; | 870 | ret = 1; |
887 | if (icr & ICR_MII_CH) { | ||
888 | pep->work_todo |= WORK_LINK; | ||
889 | ret = 1; | ||
890 | } | ||
891 | return ret; | 871 | return ret; |
892 | } | 872 | } |
893 | 873 | ||
894 | static void handle_link_event(struct pxa168_eth_private *pep) | ||
895 | { | ||
896 | struct net_device *dev = pep->dev; | ||
897 | u32 port_status; | ||
898 | int speed; | ||
899 | int duplex; | ||
900 | int fc; | ||
901 | |||
902 | port_status = rdl(pep, PORT_STATUS); | ||
903 | if (!(port_status & LINK_UP)) { | ||
904 | if (netif_carrier_ok(dev)) { | ||
905 | netdev_info(dev, "link down\n"); | ||
906 | netif_carrier_off(dev); | ||
907 | txq_reclaim(dev, 1); | ||
908 | } | ||
909 | return; | ||
910 | } | ||
911 | if (port_status & PORT_SPEED_100) | ||
912 | speed = 100; | ||
913 | else | ||
914 | speed = 10; | ||
915 | |||
916 | duplex = (port_status & FULL_DUPLEX) ? 1 : 0; | ||
917 | fc = (port_status & FLOW_CONTROL_DISABLED) ? 0 : 1; | ||
918 | netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n", | ||
919 | speed, duplex ? "full" : "half", fc ? "en" : "dis"); | ||
920 | if (!netif_carrier_ok(dev)) | ||
921 | netif_carrier_on(dev); | ||
922 | } | ||
923 | |||
924 | static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id) | 874 | static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id) |
925 | { | 875 | { |
926 | struct net_device *dev = (struct net_device *)dev_id; | 876 | struct net_device *dev = (struct net_device *)dev_id; |
@@ -978,8 +928,11 @@ static int set_port_config_ext(struct pxa168_eth_private *pep) | |||
978 | skb_size = PCXR_MFL_64K; | 928 | skb_size = PCXR_MFL_64K; |
979 | 929 | ||
980 | /* Extended Port Configuration */ | 930 | /* Extended Port Configuration */ |
981 | wrl(pep, | 931 | wrl(pep, PORT_CONFIG_EXT, |
982 | PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */ | 932 | PCXR_AN_SPEED_DIS | /* Disable HW AN */ |
933 | PCXR_AN_DUPLEX_DIS | | ||
934 | PCXR_AN_FLOWCTL_DIS | | ||
935 | PCXR_2BSM | /* Two byte prefix aligns IP hdr */ | ||
983 | PCXR_DSCP_EN | /* Enable DSCP in IP */ | 936 | PCXR_DSCP_EN | /* Enable DSCP in IP */ |
984 | skb_size | PCXR_FLP | /* do not force link pass */ | 937 | skb_size | PCXR_FLP | /* do not force link pass */ |
985 | PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */ | 938 | PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */ |
@@ -987,6 +940,69 @@ static int set_port_config_ext(struct pxa168_eth_private *pep) | |||
987 | return 0; | 940 | return 0; |
988 | } | 941 | } |
989 | 942 | ||
943 | static void pxa168_eth_adjust_link(struct net_device *dev) | ||
944 | { | ||
945 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
946 | struct phy_device *phy = pep->phy; | ||
947 | u32 cfg, cfg_o = rdl(pep, PORT_CONFIG); | ||
948 | u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT); | ||
949 | |||
950 | cfg = cfg_o & ~PCR_DUPLEX_FULL; | ||
951 | cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN); | ||
952 | |||
953 | if (phy->interface == PHY_INTERFACE_MODE_RMII) | ||
954 | cfgext |= PCXR_RMII_EN; | ||
955 | if (phy->speed == SPEED_100) | ||
956 | cfgext |= PCXR_SPEED_100; | ||
957 | if (phy->duplex) | ||
958 | cfg |= PCR_DUPLEX_FULL; | ||
959 | if (!phy->pause) | ||
960 | cfgext |= PCXR_FLOWCTL_DIS; | ||
961 | |||
962 | /* Bail out if there has nothing changed */ | ||
963 | if (cfg == cfg_o && cfgext == cfgext_o) | ||
964 | return; | ||
965 | |||
966 | wrl(pep, PORT_CONFIG, cfg); | ||
967 | wrl(pep, PORT_CONFIG_EXT, cfgext); | ||
968 | |||
969 | phy_print_status(phy); | ||
970 | } | ||
971 | |||
972 | static int pxa168_init_phy(struct net_device *dev) | ||
973 | { | ||
974 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
975 | struct ethtool_cmd cmd; | ||
976 | int err; | ||
977 | |||
978 | if (pep->phy) | ||
979 | return 0; | ||
980 | |||
981 | pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); | ||
982 | if (!pep->phy) | ||
983 | return -ENODEV; | ||
984 | |||
985 | err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link, | ||
986 | pep->phy_intf); | ||
987 | if (err) | ||
988 | return err; | ||
989 | |||
990 | err = pxa168_get_settings(dev, &cmd); | ||
991 | if (err) | ||
992 | return err; | ||
993 | |||
994 | cmd.phy_address = pep->phy_addr; | ||
995 | cmd.speed = pep->phy_speed; | ||
996 | cmd.duplex = pep->phy_duplex; | ||
997 | cmd.advertising = PHY_BASIC_FEATURES; | ||
998 | cmd.autoneg = AUTONEG_ENABLE; | ||
999 | |||
1000 | if (cmd.speed != 0) | ||
1001 | cmd.autoneg = AUTONEG_DISABLE; | ||
1002 | |||
1003 | return pxa168_set_settings(dev, &cmd); | ||
1004 | } | ||
1005 | |||
990 | static int pxa168_init_hw(struct pxa168_eth_private *pep) | 1006 | static int pxa168_init_hw(struct pxa168_eth_private *pep) |
991 | { | 1007 | { |
992 | int err = 0; | 1008 | int err = 0; |
@@ -1133,6 +1149,10 @@ static int pxa168_eth_open(struct net_device *dev) | |||
1133 | struct pxa168_eth_private *pep = netdev_priv(dev); | 1149 | struct pxa168_eth_private *pep = netdev_priv(dev); |
1134 | int err; | 1150 | int err; |
1135 | 1151 | ||
1152 | err = pxa168_init_phy(dev); | ||
1153 | if (err) | ||
1154 | return err; | ||
1155 | |||
1136 | err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev); | 1156 | err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev); |
1137 | if (err) { | 1157 | if (err) { |
1138 | dev_err(&dev->dev, "can't assign irq\n"); | 1158 | dev_err(&dev->dev, "can't assign irq\n"); |
@@ -1231,10 +1251,6 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget) | |||
1231 | struct net_device *dev = pep->dev; | 1251 | struct net_device *dev = pep->dev; |
1232 | int work_done = 0; | 1252 | int work_done = 0; |
1233 | 1253 | ||
1234 | if (unlikely(pep->work_todo & WORK_LINK)) { | ||
1235 | pep->work_todo &= ~(WORK_LINK); | ||
1236 | handle_link_event(pep); | ||
1237 | } | ||
1238 | /* | 1254 | /* |
1239 | * We call txq_reclaim every time since in NAPI interupts are disabled | 1255 | * We call txq_reclaim every time since in NAPI interupts are disabled |
1240 | * and due to this we miss the TX_DONE interrupt,which is not updated in | 1256 | * and due to this we miss the TX_DONE interrupt,which is not updated in |
@@ -1357,77 +1373,6 @@ static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, | |||
1357 | return -EOPNOTSUPP; | 1373 | return -EOPNOTSUPP; |
1358 | } | 1374 | } |
1359 | 1375 | ||
1360 | static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr) | ||
1361 | { | ||
1362 | struct mii_bus *bus = pep->smi_bus; | ||
1363 | struct phy_device *phydev; | ||
1364 | int start; | ||
1365 | int num; | ||
1366 | int i; | ||
1367 | |||
1368 | if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) { | ||
1369 | /* Scan entire range */ | ||
1370 | start = ethernet_phy_get(pep); | ||
1371 | num = 32; | ||
1372 | } else { | ||
1373 | /* Use phy addr specific to platform */ | ||
1374 | start = phy_addr & 0x1f; | ||
1375 | num = 1; | ||
1376 | } | ||
1377 | phydev = NULL; | ||
1378 | for (i = 0; i < num; i++) { | ||
1379 | int addr = (start + i) & 0x1f; | ||
1380 | if (bus->phy_map[addr] == NULL) | ||
1381 | mdiobus_scan(bus, addr); | ||
1382 | |||
1383 | if (phydev == NULL) { | ||
1384 | phydev = bus->phy_map[addr]; | ||
1385 | if (phydev != NULL) | ||
1386 | ethernet_phy_set_addr(pep, addr); | ||
1387 | } | ||
1388 | } | ||
1389 | |||
1390 | return phydev; | ||
1391 | } | ||
1392 | |||
1393 | static void phy_init(struct pxa168_eth_private *pep) | ||
1394 | { | ||
1395 | struct phy_device *phy = pep->phy; | ||
1396 | |||
1397 | phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII); | ||
1398 | |||
1399 | if (pep->pd && pep->pd->speed != 0) { | ||
1400 | phy->autoneg = AUTONEG_DISABLE; | ||
1401 | phy->advertising = 0; | ||
1402 | phy->speed = pep->pd->speed; | ||
1403 | phy->duplex = pep->pd->duplex; | ||
1404 | } else { | ||
1405 | phy->autoneg = AUTONEG_ENABLE; | ||
1406 | phy->speed = 0; | ||
1407 | phy->duplex = 0; | ||
1408 | phy->supported &= PHY_BASIC_FEATURES; | ||
1409 | phy->advertising = phy->supported | ADVERTISED_Autoneg; | ||
1410 | } | ||
1411 | |||
1412 | phy_start_aneg(phy); | ||
1413 | } | ||
1414 | |||
1415 | static int ethernet_phy_setup(struct net_device *dev) | ||
1416 | { | ||
1417 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
1418 | |||
1419 | if (pep->pd && pep->pd->init) | ||
1420 | pep->pd->init(); | ||
1421 | |||
1422 | pep->phy = phy_scan(pep, pep->phy_addr & 0x1f); | ||
1423 | if (pep->phy != NULL) | ||
1424 | phy_init(pep); | ||
1425 | |||
1426 | update_hash_table_mac_address(pep, NULL, dev->dev_addr); | ||
1427 | |||
1428 | return 0; | ||
1429 | } | ||
1430 | |||
1431 | static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 1376 | static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
1432 | { | 1377 | { |
1433 | struct pxa168_eth_private *pep = netdev_priv(dev); | 1378 | struct pxa168_eth_private *pep = netdev_priv(dev); |
@@ -1505,16 +1450,14 @@ static int pxa168_eth_probe(struct platform_device *pdev) | |||
1505 | pep = netdev_priv(dev); | 1450 | pep = netdev_priv(dev); |
1506 | pep->dev = dev; | 1451 | pep->dev = dev; |
1507 | pep->clk = clk; | 1452 | pep->clk = clk; |
1453 | |||
1508 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1454 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1509 | if (res == NULL) { | ||
1510 | err = -ENODEV; | ||
1511 | goto err_netdev; | ||
1512 | } | ||
1513 | pep->base = devm_ioremap_resource(&pdev->dev, res); | 1455 | pep->base = devm_ioremap_resource(&pdev->dev, res); |
1514 | if (IS_ERR(pep->base)) { | 1456 | if (IS_ERR(pep->base)) { |
1515 | err = -ENOMEM; | 1457 | err = -ENOMEM; |
1516 | goto err_netdev; | 1458 | goto err_netdev; |
1517 | } | 1459 | } |
1460 | |||
1518 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 1461 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
1519 | BUG_ON(!res); | 1462 | BUG_ON(!res); |
1520 | dev->irq = res->start; | 1463 | dev->irq = res->start; |
@@ -1552,13 +1495,23 @@ static int pxa168_eth_probe(struct platform_device *pdev) | |||
1552 | 1495 | ||
1553 | pep->port_num = pep->pd->port_number; | 1496 | pep->port_num = pep->pd->port_number; |
1554 | pep->phy_addr = pep->pd->phy_addr; | 1497 | pep->phy_addr = pep->pd->phy_addr; |
1498 | pep->phy_speed = pep->pd->speed; | ||
1499 | pep->phy_duplex = pep->pd->duplex; | ||
1500 | pep->phy_intf = pep->pd->intf; | ||
1501 | |||
1502 | if (pep->pd->init) | ||
1503 | pep->pd->init(); | ||
1555 | } else if (pdev->dev.of_node) { | 1504 | } else if (pdev->dev.of_node) { |
1556 | of_property_read_u32(pdev->dev.of_node, "port-id", | 1505 | of_property_read_u32(pdev->dev.of_node, "port-id", |
1557 | &pep->port_num); | 1506 | &pep->port_num); |
1558 | 1507 | ||
1559 | np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); | 1508 | np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); |
1560 | if (np) | 1509 | if (!np) { |
1561 | of_property_read_u32(np, "reg", &pep->phy_addr); | 1510 | dev_err(&pdev->dev, "missing phy-handle\n"); |
1511 | return -EINVAL; | ||
1512 | } | ||
1513 | of_property_read_u32(np, "reg", &pep->phy_addr); | ||
1514 | pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); | ||
1562 | } | 1515 | } |
1563 | 1516 | ||
1564 | /* Hardware supports only 3 ports */ | 1517 | /* Hardware supports only 3 ports */ |
@@ -1588,9 +1541,6 @@ static int pxa168_eth_probe(struct platform_device *pdev) | |||
1588 | goto err_free_mdio; | 1541 | goto err_free_mdio; |
1589 | 1542 | ||
1590 | pxa168_init_hw(pep); | 1543 | pxa168_init_hw(pep); |
1591 | err = ethernet_phy_setup(dev); | ||
1592 | if (err) | ||
1593 | goto err_mdiobus; | ||
1594 | SET_NETDEV_DEV(dev, &pdev->dev); | 1544 | SET_NETDEV_DEV(dev, &pdev->dev); |
1595 | err = register_netdev(dev); | 1545 | err = register_netdev(dev); |
1596 | if (err) | 1546 | if (err) |
@@ -1621,13 +1571,13 @@ static int pxa168_eth_remove(struct platform_device *pdev) | |||
1621 | pep->htpr, pep->htpr_dma); | 1571 | pep->htpr, pep->htpr_dma); |
1622 | pep->htpr = NULL; | 1572 | pep->htpr = NULL; |
1623 | } | 1573 | } |
1574 | if (pep->phy) | ||
1575 | phy_disconnect(pep->phy); | ||
1624 | if (pep->clk) { | 1576 | if (pep->clk) { |
1625 | clk_disable(pep->clk); | 1577 | clk_disable(pep->clk); |
1626 | clk_put(pep->clk); | 1578 | clk_put(pep->clk); |
1627 | pep->clk = NULL; | 1579 | pep->clk = NULL; |
1628 | } | 1580 | } |
1629 | if (pep->phy != NULL) | ||
1630 | phy_detach(pep->phy); | ||
1631 | 1581 | ||
1632 | iounmap(pep->base); | 1582 | iounmap(pep->base); |
1633 | pep->base = NULL; | 1583 | pep->base = NULL; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index b16e1b95566f..916459effcfa 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
@@ -1338,6 +1338,15 @@ static struct mlx4_cmd_info cmd_info[] = { | |||
1338 | .verify = NULL, | 1338 | .verify = NULL, |
1339 | .wrapper = mlx4_QUERY_IF_STAT_wrapper | 1339 | .wrapper = mlx4_QUERY_IF_STAT_wrapper |
1340 | }, | 1340 | }, |
1341 | { | ||
1342 | .opcode = MLX4_CMD_ACCESS_REG, | ||
1343 | .has_inbox = true, | ||
1344 | .has_outbox = true, | ||
1345 | .out_is_imm = false, | ||
1346 | .encode_slave_id = false, | ||
1347 | .verify = NULL, | ||
1348 | .wrapper = NULL, | ||
1349 | }, | ||
1341 | /* Native multicast commands are not available for guests */ | 1350 | /* Native multicast commands are not available for guests */ |
1342 | { | 1351 | { |
1343 | .opcode = MLX4_CMD_QP_ATTACH, | 1352 | .opcode = MLX4_CMD_QP_ATTACH, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index 57dda95b67d8..999014413b1a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c | |||
@@ -35,52 +35,6 @@ | |||
35 | 35 | ||
36 | #include "mlx4_en.h" | 36 | #include "mlx4_en.h" |
37 | 37 | ||
38 | int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter) | ||
39 | { | ||
40 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
41 | struct mlx4_en_dev *mdev = priv->mdev; | ||
42 | int port_up = 0; | ||
43 | int err = 0; | ||
44 | |||
45 | if (priv->hwtstamp_config.tx_type == tx_type && | ||
46 | priv->hwtstamp_config.rx_filter == rx_filter) | ||
47 | return 0; | ||
48 | |||
49 | mutex_lock(&mdev->state_lock); | ||
50 | if (priv->port_up) { | ||
51 | port_up = 1; | ||
52 | mlx4_en_stop_port(dev, 1); | ||
53 | } | ||
54 | |||
55 | mlx4_en_free_resources(priv); | ||
56 | |||
57 | en_warn(priv, "Changing Time Stamp configuration\n"); | ||
58 | |||
59 | priv->hwtstamp_config.tx_type = tx_type; | ||
60 | priv->hwtstamp_config.rx_filter = rx_filter; | ||
61 | |||
62 | if (rx_filter != HWTSTAMP_FILTER_NONE) | ||
63 | dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; | ||
64 | else | ||
65 | dev->features |= NETIF_F_HW_VLAN_CTAG_RX; | ||
66 | |||
67 | err = mlx4_en_alloc_resources(priv); | ||
68 | if (err) { | ||
69 | en_err(priv, "Failed reallocating port resources\n"); | ||
70 | goto out; | ||
71 | } | ||
72 | if (port_up) { | ||
73 | err = mlx4_en_start_port(dev); | ||
74 | if (err) | ||
75 | en_err(priv, "Failed starting port\n"); | ||
76 | } | ||
77 | |||
78 | out: | ||
79 | mutex_unlock(&mdev->state_lock); | ||
80 | netdev_features_change(dev); | ||
81 | return err; | ||
82 | } | ||
83 | |||
84 | /* mlx4_en_read_clock - read raw cycle counter (to be used by time counter) | 38 | /* mlx4_en_read_clock - read raw cycle counter (to be used by time counter) |
85 | */ | 39 | */ |
86 | static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc) | 40 | static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ae83da9cd18a..8ea4d5be7376 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/ethtool.h> | 35 | #include <linux/ethtool.h> |
36 | #include <linux/netdevice.h> | 36 | #include <linux/netdevice.h> |
37 | #include <linux/mlx4/driver.h> | 37 | #include <linux/mlx4/driver.h> |
38 | #include <linux/mlx4/device.h> | ||
38 | #include <linux/in.h> | 39 | #include <linux/in.h> |
39 | #include <net/ip.h> | 40 | #include <net/ip.h> |
40 | 41 | ||
@@ -374,7 +375,302 @@ static void mlx4_en_get_strings(struct net_device *dev, | |||
374 | } | 375 | } |
375 | } | 376 | } |
376 | 377 | ||
377 | static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 378 | static u32 mlx4_en_autoneg_get(struct net_device *dev) |
379 | { | ||
380 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
381 | struct mlx4_en_dev *mdev = priv->mdev; | ||
382 | u32 autoneg = AUTONEG_DISABLE; | ||
383 | |||
384 | if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) && | ||
385 | (priv->port_state.flags & MLX4_EN_PORT_ANE)) | ||
386 | autoneg = AUTONEG_ENABLE; | ||
387 | |||
388 | return autoneg; | ||
389 | } | ||
390 | |||
391 | static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg) | ||
392 | { | ||
393 | u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap); | ||
394 | |||
395 | if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T) | ||
396 | | MLX4_PROT_MASK(MLX4_1000BASE_T) | ||
397 | | MLX4_PROT_MASK(MLX4_100BASE_TX))) { | ||
398 | return SUPPORTED_TP; | ||
399 | } | ||
400 | |||
401 | if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) | ||
402 | | MLX4_PROT_MASK(MLX4_10GBASE_SR) | ||
403 | | MLX4_PROT_MASK(MLX4_56GBASE_SR4) | ||
404 | | MLX4_PROT_MASK(MLX4_40GBASE_CR4) | ||
405 | | MLX4_PROT_MASK(MLX4_40GBASE_SR4) | ||
406 | | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) { | ||
407 | return SUPPORTED_FIBRE; | ||
408 | } | ||
409 | |||
410 | if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) | ||
411 | | MLX4_PROT_MASK(MLX4_40GBASE_KR4) | ||
412 | | MLX4_PROT_MASK(MLX4_20GBASE_KR2) | ||
413 | | MLX4_PROT_MASK(MLX4_10GBASE_KR) | ||
414 | | MLX4_PROT_MASK(MLX4_10GBASE_KX4) | ||
415 | | MLX4_PROT_MASK(MLX4_1000BASE_KX))) { | ||
416 | return SUPPORTED_Backplane; | ||
417 | } | ||
418 | return 0; | ||
419 | } | ||
420 | |||
421 | static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg) | ||
422 | { | ||
423 | u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper); | ||
424 | |||
425 | if (!eth_proto) /* link down */ | ||
426 | eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap); | ||
427 | |||
428 | if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T) | ||
429 | | MLX4_PROT_MASK(MLX4_1000BASE_T) | ||
430 | | MLX4_PROT_MASK(MLX4_100BASE_TX))) { | ||
431 | return PORT_TP; | ||
432 | } | ||
433 | |||
434 | if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR) | ||
435 | | MLX4_PROT_MASK(MLX4_56GBASE_SR4) | ||
436 | | MLX4_PROT_MASK(MLX4_40GBASE_SR4) | ||
437 | | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) { | ||
438 | return PORT_FIBRE; | ||
439 | } | ||
440 | |||
441 | if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) | ||
442 | | MLX4_PROT_MASK(MLX4_56GBASE_CR4) | ||
443 | | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) { | ||
444 | return PORT_DA; | ||
445 | } | ||
446 | |||
447 | if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) | ||
448 | | MLX4_PROT_MASK(MLX4_40GBASE_KR4) | ||
449 | | MLX4_PROT_MASK(MLX4_20GBASE_KR2) | ||
450 | | MLX4_PROT_MASK(MLX4_10GBASE_KR) | ||
451 | | MLX4_PROT_MASK(MLX4_10GBASE_KX4) | ||
452 | | MLX4_PROT_MASK(MLX4_1000BASE_KX))) { | ||
453 | return PORT_NONE; | ||
454 | } | ||
455 | return PORT_OTHER; | ||
456 | } | ||
457 | |||
458 | #define MLX4_LINK_MODES_SZ \ | ||
459 | (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8) | ||
460 | |||
461 | enum ethtool_report { | ||
462 | SUPPORTED = 0, | ||
463 | ADVERTISED = 1, | ||
464 | SPEED = 2 | ||
465 | }; | ||
466 | |||
467 | /* Translates mlx4 link mode to equivalent ethtool Link modes/speed */ | ||
468 | static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = { | ||
469 | [MLX4_100BASE_TX] = { | ||
470 | SUPPORTED_100baseT_Full, | ||
471 | ADVERTISED_100baseT_Full, | ||
472 | SPEED_100 | ||
473 | }, | ||
474 | |||
475 | [MLX4_1000BASE_T] = { | ||
476 | SUPPORTED_1000baseT_Full, | ||
477 | ADVERTISED_1000baseT_Full, | ||
478 | SPEED_1000 | ||
479 | }, | ||
480 | [MLX4_1000BASE_CX_SGMII] = { | ||
481 | SUPPORTED_1000baseKX_Full, | ||
482 | ADVERTISED_1000baseKX_Full, | ||
483 | SPEED_1000 | ||
484 | }, | ||
485 | [MLX4_1000BASE_KX] = { | ||
486 | SUPPORTED_1000baseKX_Full, | ||
487 | ADVERTISED_1000baseKX_Full, | ||
488 | SPEED_1000 | ||
489 | }, | ||
490 | |||
491 | [MLX4_10GBASE_T] = { | ||
492 | SUPPORTED_10000baseT_Full, | ||
493 | ADVERTISED_10000baseT_Full, | ||
494 | SPEED_10000 | ||
495 | }, | ||
496 | [MLX4_10GBASE_CX4] = { | ||
497 | SUPPORTED_10000baseKX4_Full, | ||
498 | ADVERTISED_10000baseKX4_Full, | ||
499 | SPEED_10000 | ||
500 | }, | ||
501 | [MLX4_10GBASE_KX4] = { | ||
502 | SUPPORTED_10000baseKX4_Full, | ||
503 | ADVERTISED_10000baseKX4_Full, | ||
504 | SPEED_10000 | ||
505 | }, | ||
506 | [MLX4_10GBASE_KR] = { | ||
507 | SUPPORTED_10000baseKR_Full, | ||
508 | ADVERTISED_10000baseKR_Full, | ||
509 | SPEED_10000 | ||
510 | }, | ||
511 | [MLX4_10GBASE_CR] = { | ||
512 | SUPPORTED_10000baseKR_Full, | ||
513 | ADVERTISED_10000baseKR_Full, | ||
514 | SPEED_10000 | ||
515 | }, | ||
516 | [MLX4_10GBASE_SR] = { | ||
517 | SUPPORTED_10000baseKR_Full, | ||
518 | ADVERTISED_10000baseKR_Full, | ||
519 | SPEED_10000 | ||
520 | }, | ||
521 | |||
522 | [MLX4_20GBASE_KR2] = { | ||
523 | SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full, | ||
524 | ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full, | ||
525 | SPEED_20000 | ||
526 | }, | ||
527 | |||
528 | [MLX4_40GBASE_CR4] = { | ||
529 | SUPPORTED_40000baseCR4_Full, | ||
530 | ADVERTISED_40000baseCR4_Full, | ||
531 | SPEED_40000 | ||
532 | }, | ||
533 | [MLX4_40GBASE_KR4] = { | ||
534 | SUPPORTED_40000baseKR4_Full, | ||
535 | ADVERTISED_40000baseKR4_Full, | ||
536 | SPEED_40000 | ||
537 | }, | ||
538 | [MLX4_40GBASE_SR4] = { | ||
539 | SUPPORTED_40000baseSR4_Full, | ||
540 | ADVERTISED_40000baseSR4_Full, | ||
541 | SPEED_40000 | ||
542 | }, | ||
543 | |||
544 | [MLX4_56GBASE_KR4] = { | ||
545 | SUPPORTED_56000baseKR4_Full, | ||
546 | ADVERTISED_56000baseKR4_Full, | ||
547 | SPEED_56000 | ||
548 | }, | ||
549 | [MLX4_56GBASE_CR4] = { | ||
550 | SUPPORTED_56000baseCR4_Full, | ||
551 | ADVERTISED_56000baseCR4_Full, | ||
552 | SPEED_56000 | ||
553 | }, | ||
554 | [MLX4_56GBASE_SR4] = { | ||
555 | SUPPORTED_56000baseSR4_Full, | ||
556 | ADVERTISED_56000baseSR4_Full, | ||
557 | SPEED_56000 | ||
558 | }, | ||
559 | }; | ||
560 | |||
561 | static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report) | ||
562 | { | ||
563 | int i; | ||
564 | u32 link_modes = 0; | ||
565 | |||
566 | for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { | ||
567 | if (eth_proto & MLX4_PROT_MASK(i)) | ||
568 | link_modes |= ptys2ethtool_map[i][report]; | ||
569 | } | ||
570 | return link_modes; | ||
571 | } | ||
572 | |||
573 | static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report) | ||
574 | { | ||
575 | int i; | ||
576 | u32 ptys_modes = 0; | ||
577 | |||
578 | for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { | ||
579 | if (ptys2ethtool_map[i][report] & link_modes) | ||
580 | ptys_modes |= 1 << i; | ||
581 | } | ||
582 | return ptys_modes; | ||
583 | } | ||
584 | |||
585 | /* Convert actual speed (SPEED_XXX) to ptys link modes */ | ||
586 | static u32 speed2ptys_link_modes(u32 speed) | ||
587 | { | ||
588 | int i; | ||
589 | u32 ptys_modes = 0; | ||
590 | |||
591 | for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { | ||
592 | if (ptys2ethtool_map[i][SPEED] == speed) | ||
593 | ptys_modes |= 1 << i; | ||
594 | } | ||
595 | return ptys_modes; | ||
596 | } | ||
597 | |||
598 | static int ethtool_get_ptys_settings(struct net_device *dev, | ||
599 | struct ethtool_cmd *cmd) | ||
600 | { | ||
601 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
602 | struct mlx4_ptys_reg ptys_reg; | ||
603 | u32 eth_proto; | ||
604 | int ret; | ||
605 | |||
606 | memset(&ptys_reg, 0, sizeof(ptys_reg)); | ||
607 | ptys_reg.local_port = priv->port; | ||
608 | ptys_reg.proto_mask = MLX4_PTYS_EN; | ||
609 | ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, | ||
610 | MLX4_ACCESS_REG_QUERY, &ptys_reg); | ||
611 | if (ret) { | ||
612 | en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)", | ||
613 | ret); | ||
614 | return ret; | ||
615 | } | ||
616 | en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n", | ||
617 | ptys_reg.proto_mask); | ||
618 | en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n", | ||
619 | be32_to_cpu(ptys_reg.eth_proto_cap)); | ||
620 | en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n", | ||
621 | be32_to_cpu(ptys_reg.eth_proto_admin)); | ||
622 | en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n", | ||
623 | be32_to_cpu(ptys_reg.eth_proto_oper)); | ||
624 | en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n", | ||
625 | be32_to_cpu(ptys_reg.eth_proto_lp_adv)); | ||
626 | |||
627 | cmd->supported = 0; | ||
628 | cmd->advertising = 0; | ||
629 | |||
630 | cmd->supported |= ptys_get_supported_port(&ptys_reg); | ||
631 | |||
632 | eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap); | ||
633 | cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED); | ||
634 | |||
635 | eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin); | ||
636 | cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED); | ||
637 | |||
638 | cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; | ||
639 | cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0; | ||
640 | |||
641 | cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ? | ||
642 | ADVERTISED_Asym_Pause : 0; | ||
643 | |||
644 | cmd->port = ptys_get_active_port(&ptys_reg); | ||
645 | cmd->transceiver = (SUPPORTED_TP & cmd->supported) ? | ||
646 | XCVR_EXTERNAL : XCVR_INTERNAL; | ||
647 | |||
648 | if (mlx4_en_autoneg_get(dev)) { | ||
649 | cmd->supported |= SUPPORTED_Autoneg; | ||
650 | cmd->advertising |= ADVERTISED_Autoneg; | ||
651 | } | ||
652 | |||
653 | cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ? | ||
654 | AUTONEG_ENABLE : AUTONEG_DISABLE; | ||
655 | |||
656 | eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv); | ||
657 | cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED); | ||
658 | |||
659 | cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ? | ||
660 | ADVERTISED_Autoneg : 0; | ||
661 | |||
662 | cmd->phy_address = 0; | ||
663 | cmd->mdio_support = 0; | ||
664 | cmd->maxtxpkt = 0; | ||
665 | cmd->maxrxpkt = 0; | ||
666 | cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; | ||
667 | cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; | ||
668 | |||
669 | return ret; | ||
670 | } | ||
671 | |||
672 | static void ethtool_get_default_settings(struct net_device *dev, | ||
673 | struct ethtool_cmd *cmd) | ||
378 | { | 674 | { |
379 | struct mlx4_en_priv *priv = netdev_priv(dev); | 675 | struct mlx4_en_priv *priv = netdev_priv(dev); |
380 | int trans_type; | 676 | int trans_type; |
@@ -382,18 +678,7 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
382 | cmd->autoneg = AUTONEG_DISABLE; | 678 | cmd->autoneg = AUTONEG_DISABLE; |
383 | cmd->supported = SUPPORTED_10000baseT_Full; | 679 | cmd->supported = SUPPORTED_10000baseT_Full; |
384 | cmd->advertising = ADVERTISED_10000baseT_Full; | 680 | cmd->advertising = ADVERTISED_10000baseT_Full; |
385 | 681 | trans_type = priv->port_state.transceiver; | |
386 | if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) | ||
387 | return -ENOMEM; | ||
388 | |||
389 | trans_type = priv->port_state.transciver; | ||
390 | if (netif_carrier_ok(dev)) { | ||
391 | ethtool_cmd_speed_set(cmd, priv->port_state.link_speed); | ||
392 | cmd->duplex = DUPLEX_FULL; | ||
393 | } else { | ||
394 | ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); | ||
395 | cmd->duplex = DUPLEX_UNKNOWN; | ||
396 | } | ||
397 | 682 | ||
398 | if (trans_type > 0 && trans_type <= 0xC) { | 683 | if (trans_type > 0 && trans_type <= 0xC) { |
399 | cmd->port = PORT_FIBRE; | 684 | cmd->port = PORT_FIBRE; |
@@ -409,17 +694,118 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
409 | cmd->port = -1; | 694 | cmd->port = -1; |
410 | cmd->transceiver = -1; | 695 | cmd->transceiver = -1; |
411 | } | 696 | } |
697 | } | ||
698 | |||
699 | static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
700 | { | ||
701 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
702 | int ret = -EINVAL; | ||
703 | |||
704 | if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) | ||
705 | return -ENOMEM; | ||
706 | |||
707 | en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n", | ||
708 | priv->port_state.flags & MLX4_EN_PORT_ANC, | ||
709 | priv->port_state.flags & MLX4_EN_PORT_ANE); | ||
710 | |||
711 | if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) | ||
712 | ret = ethtool_get_ptys_settings(dev, cmd); | ||
713 | if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */ | ||
714 | ethtool_get_default_settings(dev, cmd); | ||
715 | |||
716 | if (netif_carrier_ok(dev)) { | ||
717 | ethtool_cmd_speed_set(cmd, priv->port_state.link_speed); | ||
718 | cmd->duplex = DUPLEX_FULL; | ||
719 | } else { | ||
720 | ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); | ||
721 | cmd->duplex = DUPLEX_UNKNOWN; | ||
722 | } | ||
412 | return 0; | 723 | return 0; |
413 | } | 724 | } |
414 | 725 | ||
726 | /* Calculate PTYS admin according ethtool speed (SPEED_XXX) */ | ||
727 | static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed, | ||
728 | __be32 proto_cap) | ||
729 | { | ||
730 | __be32 proto_admin = 0; | ||
731 | |||
732 | if (!speed) { /* Speed = 0 ==> Reset Link modes */ | ||
733 | proto_admin = proto_cap; | ||
734 | en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n", | ||
735 | be32_to_cpu(proto_cap)); | ||
736 | } else { | ||
737 | u32 ptys_link_modes = speed2ptys_link_modes(speed); | ||
738 | |||
739 | proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap; | ||
740 | en_info(priv, "Setting Speed to %d\n", speed); | ||
741 | } | ||
742 | return proto_admin; | ||
743 | } | ||
744 | |||
415 | static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 745 | static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
416 | { | 746 | { |
417 | if ((cmd->autoneg == AUTONEG_ENABLE) || | 747 | struct mlx4_en_priv *priv = netdev_priv(dev); |
418 | (ethtool_cmd_speed(cmd) != SPEED_10000) || | 748 | struct mlx4_ptys_reg ptys_reg; |
419 | (cmd->duplex != DUPLEX_FULL)) | 749 | __be32 proto_admin; |
750 | int ret; | ||
751 | |||
752 | u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED); | ||
753 | int speed = ethtool_cmd_speed(cmd); | ||
754 | |||
755 | en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n", | ||
756 | speed, cmd->advertising, cmd->autoneg, cmd->duplex); | ||
757 | |||
758 | if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) || | ||
759 | (cmd->autoneg == AUTONEG_ENABLE) || (cmd->duplex == DUPLEX_HALF)) | ||
420 | return -EINVAL; | 760 | return -EINVAL; |
421 | 761 | ||
422 | /* Nothing to change */ | 762 | memset(&ptys_reg, 0, sizeof(ptys_reg)); |
763 | ptys_reg.local_port = priv->port; | ||
764 | ptys_reg.proto_mask = MLX4_PTYS_EN; | ||
765 | ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, | ||
766 | MLX4_ACCESS_REG_QUERY, &ptys_reg); | ||
767 | if (ret) { | ||
768 | en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n", | ||
769 | ret); | ||
770 | return 0; | ||
771 | } | ||
772 | |||
773 | proto_admin = cpu_to_be32(ptys_adv); | ||
774 | if (speed >= 0 && speed != priv->port_state.link_speed) | ||
775 | /* If speed was set then speed decides :-) */ | ||
776 | proto_admin = speed_set_ptys_admin(priv, speed, | ||
777 | ptys_reg.eth_proto_cap); | ||
778 | |||
779 | proto_admin &= ptys_reg.eth_proto_cap; | ||
780 | |||
781 | if (proto_admin == ptys_reg.eth_proto_admin) | ||
782 | return 0; /* Nothing to change */ | ||
783 | |||
784 | if (!proto_admin) { | ||
785 | en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n"); | ||
786 | return -EINVAL; /* nothing to change due to bad input */ | ||
787 | } | ||
788 | |||
789 | en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n", | ||
790 | be32_to_cpu(proto_admin)); | ||
791 | |||
792 | ptys_reg.eth_proto_admin = proto_admin; | ||
793 | ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE, | ||
794 | &ptys_reg); | ||
795 | if (ret) { | ||
796 | en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)", | ||
797 | be32_to_cpu(ptys_reg.eth_proto_admin), ret); | ||
798 | return ret; | ||
799 | } | ||
800 | |||
801 | en_warn(priv, "Port link mode changed, restarting port...\n"); | ||
802 | mutex_lock(&priv->mdev->state_lock); | ||
803 | if (priv->port_up) { | ||
804 | mlx4_en_stop_port(dev, 1); | ||
805 | if (mlx4_en_start_port(dev)) | ||
806 | en_err(priv, "Failed restarting port %d\n", priv->port); | ||
807 | } | ||
808 | mutex_unlock(&priv->mdev->state_lock); | ||
423 | return 0; | 809 | return 0; |
424 | } | 810 | } |
425 | 811 | ||
@@ -596,6 +982,7 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key) | |||
596 | int err = 0; | 982 | int err = 0; |
597 | 983 | ||
598 | rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num; | 984 | rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num; |
985 | rss_rings = 1 << ilog2(rss_rings); | ||
599 | 986 | ||
600 | while (n--) { | 987 | while (n--) { |
601 | ring_index[n] = rss_map->qps[n % rss_rings].qpn - | 988 | ring_index[n] = rss_map->qps[n % rss_rings].qpn - |
@@ -1309,6 +1696,86 @@ static int mlx4_en_set_tunable(struct net_device *dev, | |||
1309 | return ret; | 1696 | return ret; |
1310 | } | 1697 | } |
1311 | 1698 | ||
1699 | static int mlx4_en_get_module_info(struct net_device *dev, | ||
1700 | struct ethtool_modinfo *modinfo) | ||
1701 | { | ||
1702 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
1703 | struct mlx4_en_dev *mdev = priv->mdev; | ||
1704 | int ret; | ||
1705 | u8 data[4]; | ||
1706 | |||
1707 | /* Read first 2 bytes to get Module & REV ID */ | ||
1708 | ret = mlx4_get_module_info(mdev->dev, priv->port, | ||
1709 | 0/*offset*/, 2/*size*/, data); | ||
1710 | if (ret < 2) | ||
1711 | return -EIO; | ||
1712 | |||
1713 | switch (data[0] /* identifier */) { | ||
1714 | case MLX4_MODULE_ID_QSFP: | ||
1715 | modinfo->type = ETH_MODULE_SFF_8436; | ||
1716 | modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; | ||
1717 | break; | ||
1718 | case MLX4_MODULE_ID_QSFP_PLUS: | ||
1719 | if (data[1] >= 0x3) { /* revision id */ | ||
1720 | modinfo->type = ETH_MODULE_SFF_8636; | ||
1721 | modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; | ||
1722 | } else { | ||
1723 | modinfo->type = ETH_MODULE_SFF_8436; | ||
1724 | modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; | ||
1725 | } | ||
1726 | break; | ||
1727 | case MLX4_MODULE_ID_QSFP28: | ||
1728 | modinfo->type = ETH_MODULE_SFF_8636; | ||
1729 | modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; | ||
1730 | break; | ||
1731 | case MLX4_MODULE_ID_SFP: | ||
1732 | modinfo->type = ETH_MODULE_SFF_8472; | ||
1733 | modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; | ||
1734 | break; | ||
1735 | default: | ||
1736 | return -ENOSYS; | ||
1737 | } | ||
1738 | |||
1739 | return 0; | ||
1740 | } | ||
1741 | |||
1742 | static int mlx4_en_get_module_eeprom(struct net_device *dev, | ||
1743 | struct ethtool_eeprom *ee, | ||
1744 | u8 *data) | ||
1745 | { | ||
1746 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
1747 | struct mlx4_en_dev *mdev = priv->mdev; | ||
1748 | int offset = ee->offset; | ||
1749 | int i = 0, ret; | ||
1750 | |||
1751 | if (ee->len == 0) | ||
1752 | return -EINVAL; | ||
1753 | |||
1754 | memset(data, 0, ee->len); | ||
1755 | |||
1756 | while (i < ee->len) { | ||
1757 | en_dbg(DRV, priv, | ||
1758 | "mlx4_get_module_info i(%d) offset(%d) len(%d)\n", | ||
1759 | i, offset, ee->len - i); | ||
1760 | |||
1761 | ret = mlx4_get_module_info(mdev->dev, priv->port, | ||
1762 | offset, ee->len - i, data + i); | ||
1763 | |||
1764 | if (!ret) /* Done reading */ | ||
1765 | return 0; | ||
1766 | |||
1767 | if (ret < 0) { | ||
1768 | en_err(priv, | ||
1769 | "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", | ||
1770 | i, offset, ee->len - i, ret); | ||
1771 | return 0; | ||
1772 | } | ||
1773 | |||
1774 | i += ret; | ||
1775 | offset += ret; | ||
1776 | } | ||
1777 | return 0; | ||
1778 | } | ||
1312 | 1779 | ||
1313 | const struct ethtool_ops mlx4_en_ethtool_ops = { | 1780 | const struct ethtool_ops mlx4_en_ethtool_ops = { |
1314 | .get_drvinfo = mlx4_en_get_drvinfo, | 1781 | .get_drvinfo = mlx4_en_get_drvinfo, |
@@ -1341,6 +1808,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { | |||
1341 | .get_priv_flags = mlx4_en_get_priv_flags, | 1808 | .get_priv_flags = mlx4_en_get_priv_flags, |
1342 | .get_tunable = mlx4_en_get_tunable, | 1809 | .get_tunable = mlx4_en_get_tunable, |
1343 | .set_tunable = mlx4_en_set_tunable, | 1810 | .set_tunable = mlx4_en_set_tunable, |
1811 | .get_module_info = mlx4_en_get_module_info, | ||
1812 | .get_module_eeprom = mlx4_en_get_module_eeprom | ||
1344 | }; | 1813 | }; |
1345 | 1814 | ||
1346 | 1815 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 2091ae88615d..9f16f754137b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c | |||
@@ -221,15 +221,12 @@ static void *mlx4_en_add(struct mlx4_dev *dev) | |||
221 | { | 221 | { |
222 | struct mlx4_en_dev *mdev; | 222 | struct mlx4_en_dev *mdev; |
223 | int i; | 223 | int i; |
224 | int err; | ||
225 | 224 | ||
226 | printk_once(KERN_INFO "%s", mlx4_en_version); | 225 | printk_once(KERN_INFO "%s", mlx4_en_version); |
227 | 226 | ||
228 | mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); | 227 | mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); |
229 | if (!mdev) { | 228 | if (!mdev) |
230 | err = -ENOMEM; | ||
231 | goto err_free_res; | 229 | goto err_free_res; |
232 | } | ||
233 | 230 | ||
234 | if (mlx4_pd_alloc(dev, &mdev->priv_pdn)) | 231 | if (mlx4_pd_alloc(dev, &mdev->priv_pdn)) |
235 | goto err_free_dev; | 232 | goto err_free_dev; |
@@ -264,8 +261,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev) | |||
264 | } | 261 | } |
265 | 262 | ||
266 | /* Build device profile according to supplied module parameters */ | 263 | /* Build device profile according to supplied module parameters */ |
267 | err = mlx4_en_get_profile(mdev); | 264 | if (mlx4_en_get_profile(mdev)) { |
268 | if (err) { | ||
269 | mlx4_err(mdev, "Bad module parameters, aborting\n"); | 265 | mlx4_err(mdev, "Bad module parameters, aborting\n"); |
270 | goto err_mr; | 266 | goto err_mr; |
271 | } | 267 | } |
@@ -286,10 +282,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev) | |||
286 | * Note: we cannot use the shared workqueue because of deadlocks caused | 282 | * Note: we cannot use the shared workqueue because of deadlocks caused |
287 | * by the rtnl lock */ | 283 | * by the rtnl lock */ |
288 | mdev->workqueue = create_singlethread_workqueue("mlx4_en"); | 284 | mdev->workqueue = create_singlethread_workqueue("mlx4_en"); |
289 | if (!mdev->workqueue) { | 285 | if (!mdev->workqueue) |
290 | err = -ENOMEM; | ||
291 | goto err_mr; | 286 | goto err_mr; |
292 | } | ||
293 | 287 | ||
294 | /* At this stage all non-port specific tasks are complete: | 288 | /* At this stage all non-port specific tasks are complete: |
295 | * mark the card state as up */ | 289 | * mark the card state as up */ |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index f3032fec8fce..0efbae90f1ba 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -575,7 +575,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv) | |||
575 | struct mlx4_mac_entry *entry; | 575 | struct mlx4_mac_entry *entry; |
576 | int index = 0; | 576 | int index = 0; |
577 | int err = 0; | 577 | int err = 0; |
578 | u64 reg_id; | 578 | u64 reg_id = 0; |
579 | int *qpn = &priv->base_qpn; | 579 | int *qpn = &priv->base_qpn; |
580 | u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); | 580 | u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); |
581 | 581 | ||
@@ -1843,8 +1843,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) | |||
1843 | } | 1843 | } |
1844 | local_bh_enable(); | 1844 | local_bh_enable(); |
1845 | 1845 | ||
1846 | while (test_bit(NAPI_STATE_SCHED, &cq->napi.state)) | 1846 | napi_synchronize(&cq->napi); |
1847 | msleep(1); | ||
1848 | mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); | 1847 | mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); |
1849 | mlx4_en_deactivate_cq(priv, cq); | 1848 | mlx4_en_deactivate_cq(priv, cq); |
1850 | 1849 | ||
@@ -2157,7 +2156,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) | |||
2157 | return -ERANGE; | 2156 | return -ERANGE; |
2158 | } | 2157 | } |
2159 | 2158 | ||
2160 | if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) { | 2159 | if (mlx4_en_reset_config(dev, config, dev->features)) { |
2161 | config.tx_type = HWTSTAMP_TX_OFF; | 2160 | config.tx_type = HWTSTAMP_TX_OFF; |
2162 | config.rx_filter = HWTSTAMP_FILTER_NONE; | 2161 | config.rx_filter = HWTSTAMP_FILTER_NONE; |
2163 | } | 2162 | } |
@@ -2190,6 +2189,16 @@ static int mlx4_en_set_features(struct net_device *netdev, | |||
2190 | netdev_features_t features) | 2189 | netdev_features_t features) |
2191 | { | 2190 | { |
2192 | struct mlx4_en_priv *priv = netdev_priv(netdev); | 2191 | struct mlx4_en_priv *priv = netdev_priv(netdev); |
2192 | int ret = 0; | ||
2193 | |||
2194 | if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) { | ||
2195 | en_info(priv, "Turn %s RX vlan strip offload\n", | ||
2196 | (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF"); | ||
2197 | ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, | ||
2198 | features); | ||
2199 | if (ret) | ||
2200 | return ret; | ||
2201 | } | ||
2193 | 2202 | ||
2194 | if (features & NETIF_F_LOOPBACK) | 2203 | if (features & NETIF_F_LOOPBACK) |
2195 | priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); | 2204 | priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); |
@@ -2431,6 +2440,21 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
2431 | 2440 | ||
2432 | priv = netdev_priv(dev); | 2441 | priv = netdev_priv(dev); |
2433 | memset(priv, 0, sizeof(struct mlx4_en_priv)); | 2442 | memset(priv, 0, sizeof(struct mlx4_en_priv)); |
2443 | spin_lock_init(&priv->stats_lock); | ||
2444 | INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); | ||
2445 | INIT_WORK(&priv->watchdog_task, mlx4_en_restart); | ||
2446 | INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); | ||
2447 | INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); | ||
2448 | INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); | ||
2449 | #ifdef CONFIG_MLX4_EN_VXLAN | ||
2450 | INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); | ||
2451 | INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); | ||
2452 | #endif | ||
2453 | #ifdef CONFIG_RFS_ACCEL | ||
2454 | INIT_LIST_HEAD(&priv->filters); | ||
2455 | spin_lock_init(&priv->filters_lock); | ||
2456 | #endif | ||
2457 | |||
2434 | priv->dev = dev; | 2458 | priv->dev = dev; |
2435 | priv->mdev = mdev; | 2459 | priv->mdev = mdev; |
2436 | priv->ddev = &mdev->pdev->dev; | 2460 | priv->ddev = &mdev->pdev->dev; |
@@ -2462,16 +2486,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
2462 | priv->cqe_size = mdev->dev->caps.cqe_size; | 2486 | priv->cqe_size = mdev->dev->caps.cqe_size; |
2463 | priv->mac_index = -1; | 2487 | priv->mac_index = -1; |
2464 | priv->msg_enable = MLX4_EN_MSG_LEVEL; | 2488 | priv->msg_enable = MLX4_EN_MSG_LEVEL; |
2465 | spin_lock_init(&priv->stats_lock); | ||
2466 | INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); | ||
2467 | INIT_WORK(&priv->watchdog_task, mlx4_en_restart); | ||
2468 | INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); | ||
2469 | INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); | ||
2470 | INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); | ||
2471 | #ifdef CONFIG_MLX4_EN_VXLAN | ||
2472 | INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); | ||
2473 | INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); | ||
2474 | #endif | ||
2475 | #ifdef CONFIG_MLX4_EN_DCB | 2489 | #ifdef CONFIG_MLX4_EN_DCB |
2476 | if (!mlx4_is_slave(priv->mdev->dev)) { | 2490 | if (!mlx4_is_slave(priv->mdev->dev)) { |
2477 | if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { | 2491 | if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { |
@@ -2514,11 +2528,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
2514 | if (err) | 2528 | if (err) |
2515 | goto out; | 2529 | goto out; |
2516 | 2530 | ||
2517 | #ifdef CONFIG_RFS_ACCEL | ||
2518 | INIT_LIST_HEAD(&priv->filters); | ||
2519 | spin_lock_init(&priv->filters_lock); | ||
2520 | #endif | ||
2521 | |||
2522 | /* Initialize time stamping config */ | 2531 | /* Initialize time stamping config */ |
2523 | priv->hwtstamp_config.flags = 0; | 2532 | priv->hwtstamp_config.flags = 0; |
2524 | priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; | 2533 | priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; |
@@ -2559,7 +2568,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
2559 | dev->features = dev->hw_features | NETIF_F_HIGHDMA | | 2568 | dev->features = dev->hw_features | NETIF_F_HIGHDMA | |
2560 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | | 2569 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | |
2561 | NETIF_F_HW_VLAN_CTAG_FILTER; | 2570 | NETIF_F_HW_VLAN_CTAG_FILTER; |
2562 | dev->hw_features |= NETIF_F_LOOPBACK; | 2571 | dev->hw_features |= NETIF_F_LOOPBACK | |
2572 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; | ||
2563 | 2573 | ||
2564 | if (mdev->dev->caps.steering_mode == | 2574 | if (mdev->dev->caps.steering_mode == |
2565 | MLX4_STEERING_MODE_DEVICE_MANAGED) | 2575 | MLX4_STEERING_MODE_DEVICE_MANAGED) |
@@ -2633,3 +2643,79 @@ out: | |||
2633 | return err; | 2643 | return err; |
2634 | } | 2644 | } |
2635 | 2645 | ||
2646 | int mlx4_en_reset_config(struct net_device *dev, | ||
2647 | struct hwtstamp_config ts_config, | ||
2648 | netdev_features_t features) | ||
2649 | { | ||
2650 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
2651 | struct mlx4_en_dev *mdev = priv->mdev; | ||
2652 | int port_up = 0; | ||
2653 | int err = 0; | ||
2654 | |||
2655 | if (priv->hwtstamp_config.tx_type == ts_config.tx_type && | ||
2656 | priv->hwtstamp_config.rx_filter == ts_config.rx_filter && | ||
2657 | !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) | ||
2658 | return 0; /* Nothing to change */ | ||
2659 | |||
2660 | if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && | ||
2661 | (features & NETIF_F_HW_VLAN_CTAG_RX) && | ||
2662 | (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) { | ||
2663 | en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n"); | ||
2664 | return -EINVAL; | ||
2665 | } | ||
2666 | |||
2667 | mutex_lock(&mdev->state_lock); | ||
2668 | if (priv->port_up) { | ||
2669 | port_up = 1; | ||
2670 | mlx4_en_stop_port(dev, 1); | ||
2671 | } | ||
2672 | |||
2673 | mlx4_en_free_resources(priv); | ||
2674 | |||
2675 | en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n", | ||
2676 | ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX)); | ||
2677 | |||
2678 | priv->hwtstamp_config.tx_type = ts_config.tx_type; | ||
2679 | priv->hwtstamp_config.rx_filter = ts_config.rx_filter; | ||
2680 | |||
2681 | if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) { | ||
2682 | if (features & NETIF_F_HW_VLAN_CTAG_RX) | ||
2683 | dev->features |= NETIF_F_HW_VLAN_CTAG_RX; | ||
2684 | else | ||
2685 | dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; | ||
2686 | } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) { | ||
2687 | /* RX time-stamping is OFF, update the RX vlan offload | ||
2688 | * to the latest wanted state | ||
2689 | */ | ||
2690 | if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX) | ||
2691 | dev->features |= NETIF_F_HW_VLAN_CTAG_RX; | ||
2692 | else | ||
2693 | dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; | ||
2694 | } | ||
2695 | |||
2696 | /* RX vlan offload and RX time-stamping can't co-exist ! | ||
2697 | * Regardless of the caller's choice, | ||
2698 | * Turn Off RX vlan offload in case of time-stamping is ON | ||
2699 | */ | ||
2700 | if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) { | ||
2701 | if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) | ||
2702 | en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n"); | ||
2703 | dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; | ||
2704 | } | ||
2705 | |||
2706 | err = mlx4_en_alloc_resources(priv); | ||
2707 | if (err) { | ||
2708 | en_err(priv, "Failed reallocating port resources\n"); | ||
2709 | goto out; | ||
2710 | } | ||
2711 | if (port_up) { | ||
2712 | err = mlx4_en_start_port(dev); | ||
2713 | if (err) | ||
2714 | en_err(priv, "Failed starting port\n"); | ||
2715 | } | ||
2716 | |||
2717 | out: | ||
2718 | mutex_unlock(&mdev->state_lock); | ||
2719 | netdev_features_change(dev); | ||
2720 | return err; | ||
2721 | } | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 0a0261d128b9..134b12e17da5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c | |||
@@ -91,21 +91,37 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port) | |||
91 | * already synchronized, no need in locking */ | 91 | * already synchronized, no need in locking */ |
92 | state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK); | 92 | state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK); |
93 | switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) { | 93 | switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) { |
94 | case MLX4_EN_100M_SPEED: | ||
95 | state->link_speed = SPEED_100; | ||
96 | break; | ||
94 | case MLX4_EN_1G_SPEED: | 97 | case MLX4_EN_1G_SPEED: |
95 | state->link_speed = 1000; | 98 | state->link_speed = SPEED_1000; |
96 | break; | 99 | break; |
97 | case MLX4_EN_10G_SPEED_XAUI: | 100 | case MLX4_EN_10G_SPEED_XAUI: |
98 | case MLX4_EN_10G_SPEED_XFI: | 101 | case MLX4_EN_10G_SPEED_XFI: |
99 | state->link_speed = 10000; | 102 | state->link_speed = SPEED_10000; |
103 | break; | ||
104 | case MLX4_EN_20G_SPEED: | ||
105 | state->link_speed = SPEED_20000; | ||
100 | break; | 106 | break; |
101 | case MLX4_EN_40G_SPEED: | 107 | case MLX4_EN_40G_SPEED: |
102 | state->link_speed = 40000; | 108 | state->link_speed = SPEED_40000; |
109 | break; | ||
110 | case MLX4_EN_56G_SPEED: | ||
111 | state->link_speed = SPEED_56000; | ||
103 | break; | 112 | break; |
104 | default: | 113 | default: |
105 | state->link_speed = -1; | 114 | state->link_speed = -1; |
106 | break; | 115 | break; |
107 | } | 116 | } |
108 | state->transciver = qport_context->transceiver; | 117 | |
118 | state->transceiver = qport_context->transceiver; | ||
119 | |||
120 | state->flags = 0; /* Reset and recalculate the port flags */ | ||
121 | state->flags |= (qport_context->link_up & MLX4_EN_ANC_MASK) ? | ||
122 | MLX4_EN_PORT_ANC : 0; | ||
123 | state->flags |= (qport_context->autoneg & MLX4_EN_AUTONEG_MASK) ? | ||
124 | MLX4_EN_PORT_ANE : 0; | ||
109 | 125 | ||
110 | out: | 126 | out: |
111 | mlx4_free_cmd_mailbox(mdev->dev, mailbox); | 127 | mlx4_free_cmd_mailbox(mdev->dev, mailbox); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h index 745090b49d9e..040da4b16b1c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.h +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h | |||
@@ -53,22 +53,49 @@ enum { | |||
53 | MLX4_MCAST_ENABLE = 2, | 53 | MLX4_MCAST_ENABLE = 2, |
54 | }; | 54 | }; |
55 | 55 | ||
56 | enum mlx4_link_mode { | ||
57 | MLX4_1000BASE_CX_SGMII = 0, | ||
58 | MLX4_1000BASE_KX = 1, | ||
59 | MLX4_10GBASE_CX4 = 2, | ||
60 | MLX4_10GBASE_KX4 = 3, | ||
61 | MLX4_10GBASE_KR = 4, | ||
62 | MLX4_20GBASE_KR2 = 5, | ||
63 | MLX4_40GBASE_CR4 = 6, | ||
64 | MLX4_40GBASE_KR4 = 7, | ||
65 | MLX4_56GBASE_KR4 = 8, | ||
66 | MLX4_10GBASE_CR = 12, | ||
67 | MLX4_10GBASE_SR = 13, | ||
68 | MLX4_40GBASE_SR4 = 15, | ||
69 | MLX4_56GBASE_CR4 = 17, | ||
70 | MLX4_56GBASE_SR4 = 18, | ||
71 | MLX4_100BASE_TX = 24, | ||
72 | MLX4_1000BASE_T = 25, | ||
73 | MLX4_10GBASE_T = 26, | ||
74 | }; | ||
75 | |||
76 | #define MLX4_PROT_MASK(link_mode) (1<<link_mode) | ||
77 | |||
56 | enum { | 78 | enum { |
57 | MLX4_EN_1G_SPEED = 0x02, | 79 | MLX4_EN_100M_SPEED = 0x04, |
58 | MLX4_EN_10G_SPEED_XFI = 0x01, | ||
59 | MLX4_EN_10G_SPEED_XAUI = 0x00, | 80 | MLX4_EN_10G_SPEED_XAUI = 0x00, |
81 | MLX4_EN_10G_SPEED_XFI = 0x01, | ||
82 | MLX4_EN_1G_SPEED = 0x02, | ||
83 | MLX4_EN_20G_SPEED = 0x08, | ||
60 | MLX4_EN_40G_SPEED = 0x40, | 84 | MLX4_EN_40G_SPEED = 0x40, |
85 | MLX4_EN_56G_SPEED = 0x20, | ||
61 | MLX4_EN_OTHER_SPEED = 0x0f, | 86 | MLX4_EN_OTHER_SPEED = 0x0f, |
62 | }; | 87 | }; |
63 | 88 | ||
64 | struct mlx4_en_query_port_context { | 89 | struct mlx4_en_query_port_context { |
65 | u8 link_up; | 90 | u8 link_up; |
66 | #define MLX4_EN_LINK_UP_MASK 0x80 | 91 | #define MLX4_EN_LINK_UP_MASK 0x80 |
67 | u8 reserved; | 92 | #define MLX4_EN_ANC_MASK 0x40 |
93 | u8 autoneg; | ||
94 | #define MLX4_EN_AUTONEG_MASK 0x80 | ||
68 | __be16 mtu; | 95 | __be16 mtu; |
69 | u8 reserved2; | 96 | u8 reserved2; |
70 | u8 link_speed; | 97 | u8 link_speed; |
71 | #define MLX4_EN_SPEED_MASK 0x43 | 98 | #define MLX4_EN_SPEED_MASK 0x6f |
72 | u16 reserved3[5]; | 99 | u16 reserved3[5]; |
73 | __be64 mac; | 100 | __be64 mac; |
74 | u8 transceiver; | 101 | u8 transceiver; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 01660c595f5c..c562c1468944 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c | |||
@@ -119,7 +119,6 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, | |||
119 | 119 | ||
120 | out: | 120 | out: |
121 | while (i--) { | 121 | while (i--) { |
122 | frag_info = &priv->frag_info[i]; | ||
123 | if (page_alloc[i].page != ring_alloc[i].page) { | 122 | if (page_alloc[i].page != ring_alloc[i].page) { |
124 | dma_unmap_page(priv->ddev, page_alloc[i].dma, | 123 | dma_unmap_page(priv->ddev, page_alloc[i].dma, |
125 | page_alloc[i].page_size, PCI_DMA_FROMDEVICE); | 124 | page_alloc[i].page_size, PCI_DMA_FROMDEVICE); |
@@ -879,8 +878,8 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq) | |||
879 | struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); | 878 | struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); |
880 | struct mlx4_en_priv *priv = netdev_priv(cq->dev); | 879 | struct mlx4_en_priv *priv = netdev_priv(cq->dev); |
881 | 880 | ||
882 | if (priv->port_up) | 881 | if (likely(priv->port_up)) |
883 | napi_schedule(&cq->napi); | 882 | napi_schedule_irqoff(&cq->napi); |
884 | else | 883 | else |
885 | mlx4_en_arm_cq(priv, cq); | 884 | mlx4_en_arm_cq(priv, cq); |
886 | } | 885 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 49d5afc7cfb8..2d8ee66138e8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c | |||
@@ -129,11 +129,15 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv) | |||
129 | if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) | 129 | if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) |
130 | return -ENOMEM; | 130 | return -ENOMEM; |
131 | 131 | ||
132 | /* The device supports 1G, 10G and 40G speeds */ | 132 | /* The device supports 100M, 1G, 10G, 20G, 40G and 56G speed */ |
133 | if (priv->port_state.link_speed != 1000 && | 133 | if (priv->port_state.link_speed != SPEED_100 && |
134 | priv->port_state.link_speed != 10000 && | 134 | priv->port_state.link_speed != SPEED_1000 && |
135 | priv->port_state.link_speed != 40000) | 135 | priv->port_state.link_speed != SPEED_10000 && |
136 | priv->port_state.link_speed != SPEED_20000 && | ||
137 | priv->port_state.link_speed != SPEED_40000 && | ||
138 | priv->port_state.link_speed != SPEED_56000) | ||
136 | return priv->port_state.link_speed; | 139 | return priv->port_state.link_speed; |
140 | |||
137 | return 0; | 141 | return 0; |
138 | } | 142 | } |
139 | 143 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 454d9fea640e..d0cecbdd9ba8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -479,8 +479,8 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq) | |||
479 | struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); | 479 | struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); |
480 | struct mlx4_en_priv *priv = netdev_priv(cq->dev); | 480 | struct mlx4_en_priv *priv = netdev_priv(cq->dev); |
481 | 481 | ||
482 | if (priv->port_up) | 482 | if (likely(priv->port_up)) |
483 | napi_schedule(&cq->napi); | 483 | napi_schedule_irqoff(&cq->napi); |
484 | else | 484 | else |
485 | mlx4_en_arm_cq(priv, cq); | 485 | mlx4_en_arm_cq(priv, cq); |
486 | } | 486 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 2e88a235e26b..72289ef5ebbe 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
@@ -139,7 +139,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) | |||
139 | [10] = "TCP/IP offloads/flow-steering for VXLAN support", | 139 | [10] = "TCP/IP offloads/flow-steering for VXLAN support", |
140 | [11] = "MAD DEMUX (Secure-Host) support", | 140 | [11] = "MAD DEMUX (Secure-Host) support", |
141 | [12] = "Large cache line (>64B) CQE stride support", | 141 | [12] = "Large cache line (>64B) CQE stride support", |
142 | [13] = "Large cache line (>64B) EQE stride support" | 142 | [13] = "Large cache line (>64B) EQE stride support", |
143 | [14] = "Ethernet protocol control support", | ||
144 | [15] = "Ethernet Backplane autoneg support" | ||
143 | }; | 145 | }; |
144 | int i; | 146 | int i; |
145 | 147 | ||
@@ -560,6 +562,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
560 | #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 | 562 | #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 |
561 | #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 | 563 | #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 |
562 | #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a | 564 | #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a |
565 | #define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET 0x7a | ||
563 | #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 | 566 | #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 |
564 | #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 | 567 | #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 |
565 | #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 | 568 | #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 |
@@ -573,6 +576,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
573 | #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 | 576 | #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 |
574 | #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 | 577 | #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 |
575 | #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 | 578 | #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 |
579 | #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c | ||
576 | #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d | 580 | #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d |
577 | #define QUERY_DEV_CAP_VXLAN 0x9e | 581 | #define QUERY_DEV_CAP_VXLAN 0x9e |
578 | #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 | 582 | #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 |
@@ -737,15 +741,19 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
737 | MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); | 741 | MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); |
738 | dev_cap->max_rq_desc_sz = size; | 742 | dev_cap->max_rq_desc_sz = size; |
739 | MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); | 743 | MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); |
744 | if (field & (1 << 5)) | ||
745 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL; | ||
740 | if (field & (1 << 6)) | 746 | if (field & (1 << 6)) |
741 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE; | 747 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE; |
742 | if (field & (1 << 7)) | 748 | if (field & (1 << 7)) |
743 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; | 749 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; |
744 | |||
745 | MLX4_GET(dev_cap->bmme_flags, outbox, | 750 | MLX4_GET(dev_cap->bmme_flags, outbox, |
746 | QUERY_DEV_CAP_BMME_FLAGS_OFFSET); | 751 | QUERY_DEV_CAP_BMME_FLAGS_OFFSET); |
747 | MLX4_GET(dev_cap->reserved_lkey, outbox, | 752 | MLX4_GET(dev_cap->reserved_lkey, outbox, |
748 | QUERY_DEV_CAP_RSVD_LKEY_OFFSET); | 753 | QUERY_DEV_CAP_RSVD_LKEY_OFFSET); |
754 | MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); | ||
755 | if (field32 & (1 << 0)) | ||
756 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; | ||
749 | MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); | 757 | MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); |
750 | if (field & 1<<6) | 758 | if (field & 1<<6) |
751 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; | 759 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; |
@@ -2144,3 +2152,114 @@ out: | |||
2144 | mlx4_free_cmd_mailbox(dev, mailbox); | 2152 | mlx4_free_cmd_mailbox(dev, mailbox); |
2145 | return err; | 2153 | return err; |
2146 | } | 2154 | } |
2155 | |||
2156 | /* Access Reg commands */ | ||
2157 | enum mlx4_access_reg_masks { | ||
2158 | MLX4_ACCESS_REG_STATUS_MASK = 0x7f, | ||
2159 | MLX4_ACCESS_REG_METHOD_MASK = 0x7f, | ||
2160 | MLX4_ACCESS_REG_LEN_MASK = 0x7ff | ||
2161 | }; | ||
2162 | |||
2163 | struct mlx4_access_reg { | ||
2164 | __be16 constant1; | ||
2165 | u8 status; | ||
2166 | u8 resrvd1; | ||
2167 | __be16 reg_id; | ||
2168 | u8 method; | ||
2169 | u8 constant2; | ||
2170 | __be32 resrvd2[2]; | ||
2171 | __be16 len_const; | ||
2172 | __be16 resrvd3; | ||
2173 | #define MLX4_ACCESS_REG_HEADER_SIZE (20) | ||
2174 | u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE]; | ||
2175 | } __attribute__((__packed__)); | ||
2176 | |||
2177 | /** | ||
2178 | * mlx4_ACCESS_REG - Generic access reg command. | ||
2179 | * @dev: mlx4_dev. | ||
2180 | * @reg_id: register ID to access. | ||
2181 | * @method: Access method Read/Write. | ||
2182 | * @reg_len: register length to Read/Write in bytes. | ||
2183 | * @reg_data: reg_data pointer to Read/Write From/To. | ||
2184 | * | ||
2185 | * Access ConnectX registers FW command. | ||
2186 | * Returns 0 on success and copies outbox mlx4_access_reg data | ||
2187 | * field into reg_data or a negative error code. | ||
2188 | */ | ||
2189 | static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id, | ||
2190 | enum mlx4_access_reg_method method, | ||
2191 | u16 reg_len, void *reg_data) | ||
2192 | { | ||
2193 | struct mlx4_cmd_mailbox *inbox, *outbox; | ||
2194 | struct mlx4_access_reg *inbuf, *outbuf; | ||
2195 | int err; | ||
2196 | |||
2197 | inbox = mlx4_alloc_cmd_mailbox(dev); | ||
2198 | if (IS_ERR(inbox)) | ||
2199 | return PTR_ERR(inbox); | ||
2200 | |||
2201 | outbox = mlx4_alloc_cmd_mailbox(dev); | ||
2202 | if (IS_ERR(outbox)) { | ||
2203 | mlx4_free_cmd_mailbox(dev, inbox); | ||
2204 | return PTR_ERR(outbox); | ||
2205 | } | ||
2206 | |||
2207 | inbuf = inbox->buf; | ||
2208 | outbuf = outbox->buf; | ||
2209 | |||
2210 | inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4); | ||
2211 | inbuf->constant2 = 0x1; | ||
2212 | inbuf->reg_id = cpu_to_be16(reg_id); | ||
2213 | inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK; | ||
2214 | |||
2215 | reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data))); | ||
2216 | inbuf->len_const = | ||
2217 | cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) | | ||
2218 | ((0x3) << 12)); | ||
2219 | |||
2220 | memcpy(inbuf->reg_data, reg_data, reg_len); | ||
2221 | err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0, | ||
2222 | MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, | ||
2223 | MLX4_CMD_NATIVE); | ||
2224 | if (err) | ||
2225 | goto out; | ||
2226 | |||
2227 | if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) { | ||
2228 | err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK; | ||
2229 | mlx4_err(dev, | ||
2230 | "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n", | ||
2231 | reg_id, err); | ||
2232 | goto out; | ||
2233 | } | ||
2234 | |||
2235 | memcpy(reg_data, outbuf->reg_data, reg_len); | ||
2236 | out: | ||
2237 | mlx4_free_cmd_mailbox(dev, inbox); | ||
2238 | mlx4_free_cmd_mailbox(dev, outbox); | ||
2239 | return err; | ||
2240 | } | ||
2241 | |||
2242 | /* ConnectX registers IDs */ | ||
2243 | enum mlx4_reg_id { | ||
2244 | MLX4_REG_ID_PTYS = 0x5004, | ||
2245 | }; | ||
2246 | |||
2247 | /** | ||
2248 | * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed) | ||
2249 | * register | ||
2250 | * @dev: mlx4_dev. | ||
2251 | * @method: Access method Read/Write. | ||
2252 | * @ptys_reg: PTYS register data pointer. | ||
2253 | * | ||
2254 | * Access ConnectX PTYS register, to Read/Write Port Type/Speed | ||
2255 | * configuration | ||
2256 | * Returns 0 on success or a negative error code. | ||
2257 | */ | ||
2258 | int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, | ||
2259 | enum mlx4_access_reg_method method, | ||
2260 | struct mlx4_ptys_reg *ptys_reg) | ||
2261 | { | ||
2262 | return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS, | ||
2263 | method, sizeof(*ptys_reg), ptys_reg); | ||
2264 | } | ||
2265 | EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG); | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 8fef65840b3b..6beb4d34dce0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
@@ -421,10 +421,16 @@ struct mlx4_en_rss_map { | |||
421 | enum mlx4_qp_state indir_state; | 421 | enum mlx4_qp_state indir_state; |
422 | }; | 422 | }; |
423 | 423 | ||
424 | enum mlx4_en_port_flag { | ||
425 | MLX4_EN_PORT_ANC = 1<<0, /* Auto-negotiation complete */ | ||
426 | MLX4_EN_PORT_ANE = 1<<1, /* Auto-negotiation enabled */ | ||
427 | }; | ||
428 | |||
424 | struct mlx4_en_port_state { | 429 | struct mlx4_en_port_state { |
425 | int link_state; | 430 | int link_state; |
426 | int link_speed; | 431 | int link_speed; |
427 | int transciver; | 432 | int transceiver; |
433 | u32 flags; | ||
428 | }; | 434 | }; |
429 | 435 | ||
430 | struct mlx4_en_pkt_stats { | 436 | struct mlx4_en_pkt_stats { |
@@ -829,6 +835,13 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv); | |||
829 | void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); | 835 | void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); |
830 | void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); | 836 | void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); |
831 | 837 | ||
838 | #define DEV_FEATURE_CHANGED(dev, new_features, feature) \ | ||
839 | ((dev->features & feature) ^ (new_features & feature)) | ||
840 | |||
841 | int mlx4_en_reset_config(struct net_device *dev, | ||
842 | struct hwtstamp_config ts_config, | ||
843 | netdev_features_t new_features); | ||
844 | |||
832 | /* | 845 | /* |
833 | * Functions for time stamping | 846 | * Functions for time stamping |
834 | */ | 847 | */ |
@@ -838,9 +851,6 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev, | |||
838 | u64 timestamp); | 851 | u64 timestamp); |
839 | void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev); | 852 | void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev); |
840 | void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev); | 853 | void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev); |
841 | int mlx4_en_timestamp_config(struct net_device *dev, | ||
842 | int tx_type, | ||
843 | int rx_filter); | ||
844 | 854 | ||
845 | /* Globals | 855 | /* Globals |
846 | */ | 856 | */ |
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 94eeb2c7d7e4..30eb1ead0fe6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c | |||
@@ -1311,3 +1311,159 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, | |||
1311 | return 0; | 1311 | return 0; |
1312 | } | 1312 | } |
1313 | EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); | 1313 | EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); |
1314 | |||
1315 | /* Cable Module Info */ | ||
1316 | #define MODULE_INFO_MAX_READ 48 | ||
1317 | |||
1318 | #define I2C_ADDR_LOW 0x50 | ||
1319 | #define I2C_ADDR_HIGH 0x51 | ||
1320 | #define I2C_PAGE_SIZE 256 | ||
1321 | |||
1322 | /* Module Info Data */ | ||
1323 | struct mlx4_cable_info { | ||
1324 | u8 i2c_addr; | ||
1325 | u8 page_num; | ||
1326 | __be16 dev_mem_address; | ||
1327 | __be16 reserved1; | ||
1328 | __be16 size; | ||
1329 | __be32 reserved2[2]; | ||
1330 | u8 data[MODULE_INFO_MAX_READ]; | ||
1331 | }; | ||
1332 | |||
1333 | enum cable_info_err { | ||
1334 | CABLE_INF_INV_PORT = 0x1, | ||
1335 | CABLE_INF_OP_NOSUP = 0x2, | ||
1336 | CABLE_INF_NOT_CONN = 0x3, | ||
1337 | CABLE_INF_NO_EEPRM = 0x4, | ||
1338 | CABLE_INF_PAGE_ERR = 0x5, | ||
1339 | CABLE_INF_INV_ADDR = 0x6, | ||
1340 | CABLE_INF_I2C_ADDR = 0x7, | ||
1341 | CABLE_INF_QSFP_VIO = 0x8, | ||
1342 | CABLE_INF_I2C_BUSY = 0x9, | ||
1343 | }; | ||
1344 | |||
1345 | #define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF) | ||
1346 | |||
1347 | static inline const char *cable_info_mad_err_str(u16 mad_status) | ||
1348 | { | ||
1349 | u8 err = MAD_STATUS_2_CABLE_ERR(mad_status); | ||
1350 | |||
1351 | switch (err) { | ||
1352 | case CABLE_INF_INV_PORT: | ||
1353 | return "invalid port selected"; | ||
1354 | case CABLE_INF_OP_NOSUP: | ||
1355 | return "operation not supported for this port (the port is of type CX4 or internal)"; | ||
1356 | case CABLE_INF_NOT_CONN: | ||
1357 | return "cable is not connected"; | ||
1358 | case CABLE_INF_NO_EEPRM: | ||
1359 | return "the connected cable has no EPROM (passive copper cable)"; | ||
1360 | case CABLE_INF_PAGE_ERR: | ||
1361 | return "page number is greater than 15"; | ||
1362 | case CABLE_INF_INV_ADDR: | ||
1363 | return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)"; | ||
1364 | case CABLE_INF_I2C_ADDR: | ||
1365 | return "invalid I2C slave address"; | ||
1366 | case CABLE_INF_QSFP_VIO: | ||
1367 | return "at least one cable violates the QSFP specification and ignores the modsel signal"; | ||
1368 | case CABLE_INF_I2C_BUSY: | ||
1369 | return "I2C bus is constantly busy"; | ||
1370 | } | ||
1371 | return "Unknown Error"; | ||
1372 | } | ||
1373 | |||
1374 | /** | ||
1375 | * mlx4_get_module_info - Read cable module eeprom data | ||
1376 | * @dev: mlx4_dev. | ||
1377 | * @port: port number. | ||
1378 | * @offset: byte offset in eeprom to start reading data from. | ||
1379 | * @size: num of bytes to read. | ||
1380 | * @data: output buffer to put the requested data into. | ||
1381 | * | ||
1382 | * Reads cable module eeprom data, puts the outcome data into | ||
1383 | * data pointer paramer. | ||
1384 | * Returns num of read bytes on success or a negative error | ||
1385 | * code. | ||
1386 | */ | ||
1387 | int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, | ||
1388 | u16 offset, u16 size, u8 *data) | ||
1389 | { | ||
1390 | struct mlx4_cmd_mailbox *inbox, *outbox; | ||
1391 | struct mlx4_mad_ifc *inmad, *outmad; | ||
1392 | struct mlx4_cable_info *cable_info; | ||
1393 | u16 i2c_addr; | ||
1394 | int ret; | ||
1395 | |||
1396 | if (size > MODULE_INFO_MAX_READ) | ||
1397 | size = MODULE_INFO_MAX_READ; | ||
1398 | |||
1399 | inbox = mlx4_alloc_cmd_mailbox(dev); | ||
1400 | if (IS_ERR(inbox)) | ||
1401 | return PTR_ERR(inbox); | ||
1402 | |||
1403 | outbox = mlx4_alloc_cmd_mailbox(dev); | ||
1404 | if (IS_ERR(outbox)) { | ||
1405 | mlx4_free_cmd_mailbox(dev, inbox); | ||
1406 | return PTR_ERR(outbox); | ||
1407 | } | ||
1408 | |||
1409 | inmad = (struct mlx4_mad_ifc *)(inbox->buf); | ||
1410 | outmad = (struct mlx4_mad_ifc *)(outbox->buf); | ||
1411 | |||
1412 | inmad->method = 0x1; /* Get */ | ||
1413 | inmad->class_version = 0x1; | ||
1414 | inmad->mgmt_class = 0x1; | ||
1415 | inmad->base_version = 0x1; | ||
1416 | inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */ | ||
1417 | |||
1418 | if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE) | ||
1419 | /* Cross pages reads are not allowed | ||
1420 | * read until offset 256 in low page | ||
1421 | */ | ||
1422 | size -= offset + size - I2C_PAGE_SIZE; | ||
1423 | |||
1424 | i2c_addr = I2C_ADDR_LOW; | ||
1425 | if (offset >= I2C_PAGE_SIZE) { | ||
1426 | /* Reset offset to high page */ | ||
1427 | i2c_addr = I2C_ADDR_HIGH; | ||
1428 | offset -= I2C_PAGE_SIZE; | ||
1429 | } | ||
1430 | |||
1431 | cable_info = (struct mlx4_cable_info *)inmad->data; | ||
1432 | cable_info->dev_mem_address = cpu_to_be16(offset); | ||
1433 | cable_info->page_num = 0; | ||
1434 | cable_info->i2c_addr = i2c_addr; | ||
1435 | cable_info->size = cpu_to_be16(size); | ||
1436 | |||
1437 | ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3, | ||
1438 | MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, | ||
1439 | MLX4_CMD_NATIVE); | ||
1440 | if (ret) | ||
1441 | goto out; | ||
1442 | |||
1443 | if (be16_to_cpu(outmad->status)) { | ||
1444 | /* Mad returned with bad status */ | ||
1445 | ret = be16_to_cpu(outmad->status); | ||
1446 | mlx4_warn(dev, | ||
1447 | "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n", | ||
1448 | 0xFF60, port, i2c_addr, offset, size, | ||
1449 | ret, cable_info_mad_err_str(ret)); | ||
1450 | |||
1451 | if (i2c_addr == I2C_ADDR_HIGH && | ||
1452 | MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR) | ||
1453 | /* Some SFP cables do not support i2c slave | ||
1454 | * address 0x51 (high page), abort silently. | ||
1455 | */ | ||
1456 | ret = 0; | ||
1457 | else | ||
1458 | ret = -ret; | ||
1459 | goto out; | ||
1460 | } | ||
1461 | cable_info = (struct mlx4_cable_info *)outmad->data; | ||
1462 | memcpy(data, cable_info->data, size); | ||
1463 | ret = size; | ||
1464 | out: | ||
1465 | mlx4_free_cmd_mailbox(dev, inbox); | ||
1466 | mlx4_free_cmd_mailbox(dev, outbox); | ||
1467 | return ret; | ||
1468 | } | ||
1469 | EXPORT_SYMBOL(mlx4_get_module_info); | ||
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c index 9e4ddbba7036..66c2d50d5b8d 100644 --- a/drivers/net/ethernet/natsemi/macsonic.c +++ b/drivers/net/ethernet/natsemi/macsonic.c | |||
@@ -326,13 +326,9 @@ static int mac_onboard_sonic_probe(struct net_device *dev) | |||
326 | macintosh_config->ident == MAC_MODEL_P588 || | 326 | macintosh_config->ident == MAC_MODEL_P588 || |
327 | macintosh_config->ident == MAC_MODEL_P575 || | 327 | macintosh_config->ident == MAC_MODEL_P575 || |
328 | macintosh_config->ident == MAC_MODEL_C610) { | 328 | macintosh_config->ident == MAC_MODEL_C610) { |
329 | unsigned long flags; | ||
330 | int card_present; | 329 | int card_present; |
331 | 330 | ||
332 | local_irq_save(flags); | ||
333 | card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS); | 331 | card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS); |
334 | local_irq_restore(flags); | ||
335 | |||
336 | if (!card_present) { | 332 | if (!card_present) { |
337 | printk("none.\n"); | 333 | printk("none.\n"); |
338 | return -ENODEV; | 334 | return -ENODEV; |
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h index 040b13739947..32497f0e537c 100644 --- a/drivers/net/ethernet/realtek/atp.h +++ b/drivers/net/ethernet/realtek/atp.h | |||
@@ -6,10 +6,10 @@ | |||
6 | 6 | ||
7 | /* The header prepended to received packets. */ | 7 | /* The header prepended to received packets. */ |
8 | struct rx_header { | 8 | struct rx_header { |
9 | ushort pad; /* Pad. */ | 9 | ushort pad; /* Pad. */ |
10 | ushort rx_count; | 10 | ushort rx_count; |
11 | ushort rx_status; /* Unknown bit assignments :-<. */ | 11 | ushort rx_status; /* Unknown bit assignments :-<. */ |
12 | ushort cur_addr; /* Apparently the current buffer address(?) */ | 12 | ushort cur_addr; /* Apparently the current buffer address(?) */ |
13 | }; | 13 | }; |
14 | 14 | ||
15 | #define PAR_DATA 0 | 15 | #define PAR_DATA 0 |
@@ -29,22 +29,25 @@ struct rx_header { | |||
29 | #define RdAddr 0xC0 | 29 | #define RdAddr 0xC0 |
30 | #define HNib 0x10 | 30 | #define HNib 0x10 |
31 | 31 | ||
32 | enum page0_regs | 32 | enum page0_regs { |
33 | { | 33 | /* The first six registers hold |
34 | /* The first six registers hold the ethernet physical station address. */ | 34 | * the ethernet physical station address. |
35 | PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5, | 35 | */ |
36 | TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */ | 36 | PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5, |
37 | TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */ | 37 | TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */ |
38 | ISR = 10, IMR = 11, /* Interrupt status and mask. */ | 38 | TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */ |
39 | CMR1 = 12, /* Command register 1. */ | 39 | ISR = 10, IMR = 11, /* Interrupt status and mask. */ |
40 | CMR2 = 13, /* Command register 2. */ | 40 | CMR1 = 12, /* Command register 1. */ |
41 | MODSEL = 14, /* Mode select register. */ | 41 | CMR2 = 13, /* Command register 2. */ |
42 | MAR = 14, /* Memory address register (?). */ | 42 | MODSEL = 14, /* Mode select register. */ |
43 | CMR2_h = 0x1d, }; | 43 | MAR = 14, /* Memory address register (?). */ |
44 | 44 | CMR2_h = 0x1d, | |
45 | enum eepage_regs | 45 | }; |
46 | { PROM_CMD = 6, PROM_DATA = 7 }; /* Note that PROM_CMD is in the "high" bits. */ | ||
47 | 46 | ||
47 | enum eepage_regs { | ||
48 | PROM_CMD = 6, | ||
49 | PROM_DATA = 7 /* Note that PROM_CMD is in the "high" bits. */ | ||
50 | }; | ||
48 | 51 | ||
49 | #define ISR_TxOK 0x01 | 52 | #define ISR_TxOK 0x01 |
50 | #define ISR_RxOK 0x04 | 53 | #define ISR_RxOK 0x04 |
@@ -72,141 +75,146 @@ enum eepage_regs | |||
72 | #define CMR2h_Normal 2 /* Accept physical and broadcast address. */ | 75 | #define CMR2h_Normal 2 /* Accept physical and broadcast address. */ |
73 | #define CMR2h_PROMISC 3 /* Promiscuous mode. */ | 76 | #define CMR2h_PROMISC 3 /* Promiscuous mode. */ |
74 | 77 | ||
75 | /* An inline function used below: it differs from inb() by explicitly return an unsigned | 78 | /* An inline function used below: it differs from inb() by explicitly |
76 | char, saving a truncation. */ | 79 | * return an unsigned char, saving a truncation. |
80 | */ | ||
77 | static inline unsigned char inbyte(unsigned short port) | 81 | static inline unsigned char inbyte(unsigned short port) |
78 | { | 82 | { |
79 | unsigned char _v; | 83 | unsigned char _v; |
80 | __asm__ __volatile__ ("inb %w1,%b0" :"=a" (_v):"d" (port)); | 84 | |
81 | return _v; | 85 | __asm__ __volatile__ ("inb %w1,%b0" : "=a" (_v) : "d" (port)); |
86 | return _v; | ||
82 | } | 87 | } |
83 | 88 | ||
84 | /* Read register OFFSET. | 89 | /* Read register OFFSET. |
85 | This command should always be terminated with read_end(). */ | 90 | * This command should always be terminated with read_end(). |
91 | */ | ||
86 | static inline unsigned char read_nibble(short port, unsigned char offset) | 92 | static inline unsigned char read_nibble(short port, unsigned char offset) |
87 | { | 93 | { |
88 | unsigned char retval; | 94 | unsigned char retval; |
89 | outb(EOC+offset, port + PAR_DATA); | 95 | |
90 | outb(RdAddr+offset, port + PAR_DATA); | 96 | outb(EOC+offset, port + PAR_DATA); |
91 | inbyte(port + PAR_STATUS); /* Settling time delay */ | 97 | outb(RdAddr+offset, port + PAR_DATA); |
92 | retval = inbyte(port + PAR_STATUS); | 98 | inbyte(port + PAR_STATUS); /* Settling time delay */ |
93 | outb(EOC+offset, port + PAR_DATA); | 99 | retval = inbyte(port + PAR_STATUS); |
94 | 100 | outb(EOC+offset, port + PAR_DATA); | |
95 | return retval; | 101 | |
102 | return retval; | ||
96 | } | 103 | } |
97 | 104 | ||
98 | /* Functions for bulk data read. The interrupt line is always disabled. */ | 105 | /* Functions for bulk data read. The interrupt line is always disabled. */ |
99 | /* Get a byte using read mode 0, reading data from the control lines. */ | 106 | /* Get a byte using read mode 0, reading data from the control lines. */ |
100 | static inline unsigned char read_byte_mode0(short ioaddr) | 107 | static inline unsigned char read_byte_mode0(short ioaddr) |
101 | { | 108 | { |
102 | unsigned char low_nib; | 109 | unsigned char low_nib; |
103 | 110 | ||
104 | outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); | 111 | outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); |
105 | inbyte(ioaddr + PAR_STATUS); | 112 | inbyte(ioaddr + PAR_STATUS); |
106 | low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; | 113 | low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; |
107 | outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL); | 114 | outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL); |
108 | inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ | 115 | inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ |
109 | inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ | 116 | inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ |
110 | return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); | 117 | return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); |
111 | } | 118 | } |
112 | 119 | ||
113 | /* The same as read_byte_mode0(), but does multiple inb()s for stability. */ | 120 | /* The same as read_byte_mode0(), but does multiple inb()s for stability. */ |
114 | static inline unsigned char read_byte_mode2(short ioaddr) | 121 | static inline unsigned char read_byte_mode2(short ioaddr) |
115 | { | 122 | { |
116 | unsigned char low_nib; | 123 | unsigned char low_nib; |
117 | 124 | ||
118 | outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); | 125 | outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); |
119 | inbyte(ioaddr + PAR_STATUS); | 126 | inbyte(ioaddr + PAR_STATUS); |
120 | low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; | 127 | low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; |
121 | outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL); | 128 | outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL); |
122 | inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ | 129 | inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ |
123 | return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); | 130 | return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); |
124 | } | 131 | } |
125 | 132 | ||
126 | /* Read a byte through the data register. */ | 133 | /* Read a byte through the data register. */ |
127 | static inline unsigned char read_byte_mode4(short ioaddr) | 134 | static inline unsigned char read_byte_mode4(short ioaddr) |
128 | { | 135 | { |
129 | unsigned char low_nib; | 136 | unsigned char low_nib; |
130 | 137 | ||
131 | outb(RdAddr | MAR, ioaddr + PAR_DATA); | 138 | outb(RdAddr | MAR, ioaddr + PAR_DATA); |
132 | low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; | 139 | low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; |
133 | outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA); | 140 | outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA); |
134 | return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); | 141 | return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); |
135 | } | 142 | } |
136 | 143 | ||
137 | /* Read a byte through the data register, double reading to allow settling. */ | 144 | /* Read a byte through the data register, double reading to allow settling. */ |
138 | static inline unsigned char read_byte_mode6(short ioaddr) | 145 | static inline unsigned char read_byte_mode6(short ioaddr) |
139 | { | 146 | { |
140 | unsigned char low_nib; | 147 | unsigned char low_nib; |
141 | 148 | ||
142 | outb(RdAddr | MAR, ioaddr + PAR_DATA); | 149 | outb(RdAddr | MAR, ioaddr + PAR_DATA); |
143 | inbyte(ioaddr + PAR_STATUS); | 150 | inbyte(ioaddr + PAR_STATUS); |
144 | low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; | 151 | low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; |
145 | outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA); | 152 | outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA); |
146 | inbyte(ioaddr + PAR_STATUS); | 153 | inbyte(ioaddr + PAR_STATUS); |
147 | return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); | 154 | return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); |
148 | } | 155 | } |
149 | 156 | ||
150 | static inline void | 157 | static inline void |
151 | write_reg(short port, unsigned char reg, unsigned char value) | 158 | write_reg(short port, unsigned char reg, unsigned char value) |
152 | { | 159 | { |
153 | unsigned char outval; | 160 | unsigned char outval; |
154 | outb(EOC | reg, port + PAR_DATA); | 161 | |
155 | outval = WrAddr | reg; | 162 | outb(EOC | reg, port + PAR_DATA); |
156 | outb(outval, port + PAR_DATA); | 163 | outval = WrAddr | reg; |
157 | outb(outval, port + PAR_DATA); /* Double write for PS/2. */ | 164 | outb(outval, port + PAR_DATA); |
158 | 165 | outb(outval, port + PAR_DATA); /* Double write for PS/2. */ | |
159 | outval &= 0xf0; | 166 | |
160 | outval |= value; | 167 | outval &= 0xf0; |
161 | outb(outval, port + PAR_DATA); | 168 | outval |= value; |
162 | outval &= 0x1f; | 169 | outb(outval, port + PAR_DATA); |
163 | outb(outval, port + PAR_DATA); | 170 | outval &= 0x1f; |
164 | outb(outval, port + PAR_DATA); | 171 | outb(outval, port + PAR_DATA); |
165 | 172 | outb(outval, port + PAR_DATA); | |
166 | outb(EOC | outval, port + PAR_DATA); | 173 | |
174 | outb(EOC | outval, port + PAR_DATA); | ||
167 | } | 175 | } |
168 | 176 | ||
169 | static inline void | 177 | static inline void |
170 | write_reg_high(short port, unsigned char reg, unsigned char value) | 178 | write_reg_high(short port, unsigned char reg, unsigned char value) |
171 | { | 179 | { |
172 | unsigned char outval = EOC | HNib | reg; | 180 | unsigned char outval = EOC | HNib | reg; |
173 | 181 | ||
174 | outb(outval, port + PAR_DATA); | 182 | outb(outval, port + PAR_DATA); |
175 | outval &= WrAddr | HNib | 0x0f; | 183 | outval &= WrAddr | HNib | 0x0f; |
176 | outb(outval, port + PAR_DATA); | 184 | outb(outval, port + PAR_DATA); |
177 | outb(outval, port + PAR_DATA); /* Double write for PS/2. */ | 185 | outb(outval, port + PAR_DATA); /* Double write for PS/2. */ |
178 | 186 | ||
179 | outval = WrAddr | HNib | value; | 187 | outval = WrAddr | HNib | value; |
180 | outb(outval, port + PAR_DATA); | 188 | outb(outval, port + PAR_DATA); |
181 | outval &= HNib | 0x0f; /* HNib | value */ | 189 | outval &= HNib | 0x0f; /* HNib | value */ |
182 | outb(outval, port + PAR_DATA); | 190 | outb(outval, port + PAR_DATA); |
183 | outb(outval, port + PAR_DATA); | 191 | outb(outval, port + PAR_DATA); |
184 | 192 | ||
185 | outb(EOC | HNib | outval, port + PAR_DATA); | 193 | outb(EOC | HNib | outval, port + PAR_DATA); |
186 | } | 194 | } |
187 | 195 | ||
188 | /* Write a byte out using nibble mode. The low nibble is written first. */ | 196 | /* Write a byte out using nibble mode. The low nibble is written first. */ |
189 | static inline void | 197 | static inline void |
190 | write_reg_byte(short port, unsigned char reg, unsigned char value) | 198 | write_reg_byte(short port, unsigned char reg, unsigned char value) |
191 | { | 199 | { |
192 | unsigned char outval; | 200 | unsigned char outval; |
193 | outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */ | 201 | |
194 | outval = WrAddr | reg; | 202 | outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */ |
195 | outb(outval, port + PAR_DATA); | 203 | outval = WrAddr | reg; |
196 | outb(outval, port + PAR_DATA); /* Double write for PS/2. */ | 204 | outb(outval, port + PAR_DATA); |
197 | 205 | outb(outval, port + PAR_DATA); /* Double write for PS/2. */ | |
198 | outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA); | 206 | |
199 | outb(value & 0x0f, port + PAR_DATA); | 207 | outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA); |
200 | value >>= 4; | 208 | outb(value & 0x0f, port + PAR_DATA); |
201 | outb(value, port + PAR_DATA); | 209 | value >>= 4; |
202 | outb(0x10 | value, port + PAR_DATA); | 210 | outb(value, port + PAR_DATA); |
203 | outb(0x10 | value, port + PAR_DATA); | 211 | outb(0x10 | value, port + PAR_DATA); |
204 | 212 | outb(0x10 | value, port + PAR_DATA); | |
205 | outb(EOC | value, port + PAR_DATA); /* Reset the address register. */ | 213 | |
214 | outb(EOC | value, port + PAR_DATA); /* Reset the address register. */ | ||
206 | } | 215 | } |
207 | 216 | ||
208 | /* | 217 | /* Bulk data writes to the packet buffer. The interrupt line remains enabled. |
209 | * Bulk data writes to the packet buffer. The interrupt line remains enabled. | ||
210 | * The first, faster method uses only the dataport (data modes 0, 2 & 4). | 218 | * The first, faster method uses only the dataport (data modes 0, 2 & 4). |
211 | * The second (backup) method uses data and control regs (modes 1, 3 & 5). | 219 | * The second (backup) method uses data and control regs (modes 1, 3 & 5). |
212 | * It should only be needed when there is skew between the individual data | 220 | * It should only be needed when there is skew between the individual data |
@@ -214,28 +222,28 @@ write_reg_byte(short port, unsigned char reg, unsigned char value) | |||
214 | */ | 222 | */ |
215 | static inline void write_byte_mode0(short ioaddr, unsigned char value) | 223 | static inline void write_byte_mode0(short ioaddr, unsigned char value) |
216 | { | 224 | { |
217 | outb(value & 0x0f, ioaddr + PAR_DATA); | 225 | outb(value & 0x0f, ioaddr + PAR_DATA); |
218 | outb((value>>4) | 0x10, ioaddr + PAR_DATA); | 226 | outb((value>>4) | 0x10, ioaddr + PAR_DATA); |
219 | } | 227 | } |
220 | 228 | ||
221 | static inline void write_byte_mode1(short ioaddr, unsigned char value) | 229 | static inline void write_byte_mode1(short ioaddr, unsigned char value) |
222 | { | 230 | { |
223 | outb(value & 0x0f, ioaddr + PAR_DATA); | 231 | outb(value & 0x0f, ioaddr + PAR_DATA); |
224 | outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL); | 232 | outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL); |
225 | outb((value>>4) | 0x10, ioaddr + PAR_DATA); | 233 | outb((value>>4) | 0x10, ioaddr + PAR_DATA); |
226 | outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL); | 234 | outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL); |
227 | } | 235 | } |
228 | 236 | ||
229 | /* Write 16bit VALUE to the packet buffer: the same as above just doubled. */ | 237 | /* Write 16bit VALUE to the packet buffer: the same as above just doubled. */ |
230 | static inline void write_word_mode0(short ioaddr, unsigned short value) | 238 | static inline void write_word_mode0(short ioaddr, unsigned short value) |
231 | { | 239 | { |
232 | outb(value & 0x0f, ioaddr + PAR_DATA); | 240 | outb(value & 0x0f, ioaddr + PAR_DATA); |
233 | value >>= 4; | 241 | value >>= 4; |
234 | outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA); | 242 | outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA); |
235 | value >>= 4; | 243 | value >>= 4; |
236 | outb(value & 0x0f, ioaddr + PAR_DATA); | 244 | outb(value & 0x0f, ioaddr + PAR_DATA); |
237 | value >>= 4; | 245 | value >>= 4; |
238 | outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA); | 246 | outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA); |
239 | } | 247 | } |
240 | 248 | ||
241 | /* EEPROM_Ctrl bits. */ | 249 | /* EEPROM_Ctrl bits. */ |
@@ -248,10 +256,10 @@ static inline void write_word_mode0(short ioaddr, unsigned short value) | |||
248 | 256 | ||
249 | /* Delay between EEPROM clock transitions. */ | 257 | /* Delay between EEPROM clock transitions. */ |
250 | #define eeprom_delay(ticks) \ | 258 | #define eeprom_delay(ticks) \ |
251 | do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0) | 259 | do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; } } while (0) |
252 | 260 | ||
253 | /* The EEPROM commands include the alway-set leading bit. */ | 261 | /* The EEPROM commands include the alway-set leading bit. */ |
254 | #define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17) | 262 | #define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17) |
255 | #define EE_READ(offset) (((6 << 6) + (offset)) << 17) | 263 | #define EE_READ(offset) (((6 << 6) + (offset)) << 17) |
256 | #define EE_ERASE(offset) (((7 << 6) + (offset)) << 17) | 264 | #define EE_ERASE(offset) (((7 << 6) + (offset)) << 17) |
257 | #define EE_CMD_SIZE 27 /* The command+address+data size. */ | 265 | #define EE_CMD_SIZE 27 /* The command+address+data size. */ |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 60e9c2cd051e..dbe860650bfe 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -2769,10 +2769,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
2769 | 2769 | ||
2770 | /* get base addr */ | 2770 | /* get base addr */ |
2771 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2771 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2772 | if (unlikely(res == NULL)) { | ||
2773 | dev_err(&pdev->dev, "invalid resource\n"); | ||
2774 | return -EINVAL; | ||
2775 | } | ||
2776 | 2772 | ||
2777 | ndev = alloc_etherdev(sizeof(struct sh_eth_private)); | 2773 | ndev = alloc_etherdev(sizeof(struct sh_eth_private)); |
2778 | if (!ndev) | 2774 | if (!ndev) |
@@ -2781,8 +2777,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
2781 | pm_runtime_enable(&pdev->dev); | 2777 | pm_runtime_enable(&pdev->dev); |
2782 | pm_runtime_get_sync(&pdev->dev); | 2778 | pm_runtime_get_sync(&pdev->dev); |
2783 | 2779 | ||
2784 | /* The sh Ether-specific entries in the device structure. */ | ||
2785 | ndev->base_addr = res->start; | ||
2786 | devno = pdev->id; | 2780 | devno = pdev->id; |
2787 | if (devno < 0) | 2781 | if (devno < 0) |
2788 | devno = 0; | 2782 | devno = 0; |
@@ -2806,6 +2800,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
2806 | goto out_release; | 2800 | goto out_release; |
2807 | } | 2801 | } |
2808 | 2802 | ||
2803 | ndev->base_addr = res->start; | ||
2804 | |||
2809 | spin_lock_init(&mdp->lock); | 2805 | spin_lock_init(&mdp->lock); |
2810 | mdp->pdev = pdev; | 2806 | mdp->pdev = pdev; |
2811 | 2807 | ||
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index b147d469a799..7fd6e275d1c2 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | |||
@@ -90,9 +90,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev) | |||
90 | 90 | ||
91 | /* Get memory resource */ | 91 | /* Get memory resource */ |
92 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 92 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
93 | if (!res) | ||
94 | goto err_out; | ||
95 | |||
96 | addr = devm_ioremap_resource(dev, res); | 93 | addr = devm_ioremap_resource(dev, res); |
97 | if (IS_ERR(addr)) | 94 | if (IS_ERR(addr)) |
98 | return PTR_ERR(addr); | 95 | return PTR_ERR(addr); |
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 3652afd3ec78..e7bb63b2d525 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c | |||
@@ -40,6 +40,8 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver"); | |||
40 | MODULE_LICENSE("GPL"); | 40 | MODULE_LICENSE("GPL"); |
41 | MODULE_VERSION(DRV_MODULE_VERSION); | 41 | MODULE_VERSION(DRV_MODULE_VERSION); |
42 | 42 | ||
43 | #define VNET_MAX_TXQS 16 | ||
44 | |||
43 | /* Heuristic for the number of times to exponentially backoff and | 45 | /* Heuristic for the number of times to exponentially backoff and |
44 | * retry sending an LDC trigger when EAGAIN is encountered | 46 | * retry sending an LDC trigger when EAGAIN is encountered |
45 | */ | 47 | */ |
@@ -311,9 +313,7 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len, | |||
311 | 313 | ||
312 | dev->stats.rx_packets++; | 314 | dev->stats.rx_packets++; |
313 | dev->stats.rx_bytes += len; | 315 | dev->stats.rx_bytes += len; |
314 | 316 | napi_gro_receive(&port->napi, skb); | |
315 | netif_rx(skb); | ||
316 | |||
317 | return 0; | 317 | return 0; |
318 | 318 | ||
319 | out_free_skb: | 319 | out_free_skb: |
@@ -430,6 +430,7 @@ static int vnet_walk_rx_one(struct vnet_port *port, | |||
430 | struct vio_driver_state *vio = &port->vio; | 430 | struct vio_driver_state *vio = &port->vio; |
431 | int err; | 431 | int err; |
432 | 432 | ||
433 | BUG_ON(desc == NULL); | ||
433 | if (IS_ERR(desc)) | 434 | if (IS_ERR(desc)) |
434 | return PTR_ERR(desc); | 435 | return PTR_ERR(desc); |
435 | 436 | ||
@@ -456,10 +457,11 @@ static int vnet_walk_rx_one(struct vnet_port *port, | |||
456 | } | 457 | } |
457 | 458 | ||
458 | static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, | 459 | static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, |
459 | u32 start, u32 end) | 460 | u32 start, u32 end, int *npkts, int budget) |
460 | { | 461 | { |
461 | struct vio_driver_state *vio = &port->vio; | 462 | struct vio_driver_state *vio = &port->vio; |
462 | int ack_start = -1, ack_end = -1; | 463 | int ack_start = -1, ack_end = -1; |
464 | bool send_ack = true; | ||
463 | 465 | ||
464 | end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr); | 466 | end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr); |
465 | 467 | ||
@@ -471,6 +473,7 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, | |||
471 | return err; | 473 | return err; |
472 | if (err != 0) | 474 | if (err != 0) |
473 | break; | 475 | break; |
476 | (*npkts)++; | ||
474 | if (ack_start == -1) | 477 | if (ack_start == -1) |
475 | ack_start = start; | 478 | ack_start = start; |
476 | ack_end = start; | 479 | ack_end = start; |
@@ -482,13 +485,26 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, | |||
482 | return err; | 485 | return err; |
483 | ack_start = -1; | 486 | ack_start = -1; |
484 | } | 487 | } |
488 | if ((*npkts) >= budget) { | ||
489 | send_ack = false; | ||
490 | break; | ||
491 | } | ||
485 | } | 492 | } |
486 | if (unlikely(ack_start == -1)) | 493 | if (unlikely(ack_start == -1)) |
487 | ack_start = ack_end = prev_idx(start, dr); | 494 | ack_start = ack_end = prev_idx(start, dr); |
488 | return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED); | 495 | if (send_ack) { |
496 | port->napi_resume = false; | ||
497 | return vnet_send_ack(port, dr, ack_start, ack_end, | ||
498 | VIO_DRING_STOPPED); | ||
499 | } else { | ||
500 | port->napi_resume = true; | ||
501 | port->napi_stop_idx = ack_end; | ||
502 | return 1; | ||
503 | } | ||
489 | } | 504 | } |
490 | 505 | ||
491 | static int vnet_rx(struct vnet_port *port, void *msgbuf) | 506 | static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts, |
507 | int budget) | ||
492 | { | 508 | { |
493 | struct vio_dring_data *pkt = msgbuf; | 509 | struct vio_dring_data *pkt = msgbuf; |
494 | struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; | 510 | struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; |
@@ -505,11 +521,13 @@ static int vnet_rx(struct vnet_port *port, void *msgbuf) | |||
505 | return 0; | 521 | return 0; |
506 | } | 522 | } |
507 | 523 | ||
508 | dr->rcv_nxt++; | 524 | if (!port->napi_resume) |
525 | dr->rcv_nxt++; | ||
509 | 526 | ||
510 | /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ | 527 | /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ |
511 | 528 | ||
512 | return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx); | 529 | return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx, |
530 | npkts, budget); | ||
513 | } | 531 | } |
514 | 532 | ||
515 | static int idx_is_pending(struct vio_dring_state *dr, u32 end) | 533 | static int idx_is_pending(struct vio_dring_state *dr, u32 end) |
@@ -535,6 +553,8 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) | |||
535 | struct vnet *vp; | 553 | struct vnet *vp; |
536 | u32 end; | 554 | u32 end; |
537 | struct vio_net_desc *desc; | 555 | struct vio_net_desc *desc; |
556 | struct netdev_queue *txq; | ||
557 | |||
538 | if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) | 558 | if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) |
539 | return 0; | 559 | return 0; |
540 | 560 | ||
@@ -542,9 +562,12 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) | |||
542 | if (unlikely(!idx_is_pending(dr, end))) | 562 | if (unlikely(!idx_is_pending(dr, end))) |
543 | return 0; | 563 | return 0; |
544 | 564 | ||
565 | vp = port->vp; | ||
566 | dev = vp->dev; | ||
545 | /* sync for race conditions with vnet_start_xmit() and tell xmit it | 567 | /* sync for race conditions with vnet_start_xmit() and tell xmit it |
546 | * is time to send a trigger. | 568 | * is time to send a trigger. |
547 | */ | 569 | */ |
570 | netif_tx_lock(dev); | ||
548 | dr->cons = next_idx(end, dr); | 571 | dr->cons = next_idx(end, dr); |
549 | desc = vio_dring_entry(dr, dr->cons); | 572 | desc = vio_dring_entry(dr, dr->cons); |
550 | if (desc->hdr.state == VIO_DESC_READY && port->start_cons) { | 573 | if (desc->hdr.state == VIO_DESC_READY && port->start_cons) { |
@@ -559,11 +582,10 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) | |||
559 | } else { | 582 | } else { |
560 | port->start_cons = true; | 583 | port->start_cons = true; |
561 | } | 584 | } |
585 | netif_tx_unlock(dev); | ||
562 | 586 | ||
563 | 587 | txq = netdev_get_tx_queue(dev, port->q_index); | |
564 | vp = port->vp; | 588 | if (unlikely(netif_tx_queue_stopped(txq) && |
565 | dev = vp->dev; | ||
566 | if (unlikely(netif_queue_stopped(dev) && | ||
567 | vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) | 589 | vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) |
568 | return 1; | 590 | return 1; |
569 | 591 | ||
@@ -591,58 +613,62 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf) | |||
591 | return 0; | 613 | return 0; |
592 | } | 614 | } |
593 | 615 | ||
594 | static void maybe_tx_wakeup(unsigned long param) | 616 | /* Got back a STOPPED LDC message on port. If the queue is stopped, |
617 | * wake it up so that we'll send out another START message at the | ||
618 | * next TX. | ||
619 | */ | ||
620 | static void maybe_tx_wakeup(struct vnet_port *port) | ||
595 | { | 621 | { |
596 | struct vnet *vp = (struct vnet *)param; | 622 | struct netdev_queue *txq; |
597 | struct net_device *dev = vp->dev; | ||
598 | 623 | ||
599 | netif_tx_lock(dev); | 624 | txq = netdev_get_tx_queue(port->vp->dev, port->q_index); |
600 | if (likely(netif_queue_stopped(dev))) { | 625 | __netif_tx_lock(txq, smp_processor_id()); |
601 | struct vnet_port *port; | 626 | if (likely(netif_tx_queue_stopped(txq))) { |
602 | int wake = 1; | 627 | struct vio_dring_state *dr; |
603 | |||
604 | list_for_each_entry(port, &vp->port_list, list) { | ||
605 | struct vio_dring_state *dr; | ||
606 | 628 | ||
607 | dr = &port->vio.drings[VIO_DRIVER_TX_RING]; | 629 | dr = &port->vio.drings[VIO_DRIVER_TX_RING]; |
608 | if (vnet_tx_dring_avail(dr) < | 630 | netif_tx_wake_queue(txq); |
609 | VNET_TX_WAKEUP_THRESH(dr)) { | ||
610 | wake = 0; | ||
611 | break; | ||
612 | } | ||
613 | } | ||
614 | if (wake) | ||
615 | netif_wake_queue(dev); | ||
616 | } | 631 | } |
617 | netif_tx_unlock(dev); | 632 | __netif_tx_unlock(txq); |
618 | } | 633 | } |
619 | 634 | ||
620 | static void vnet_event(void *arg, int event) | 635 | static inline bool port_is_up(struct vnet_port *vnet) |
636 | { | ||
637 | struct vio_driver_state *vio = &vnet->vio; | ||
638 | |||
639 | return !!(vio->hs_state & VIO_HS_COMPLETE); | ||
640 | } | ||
641 | |||
642 | static int vnet_event_napi(struct vnet_port *port, int budget) | ||
621 | { | 643 | { |
622 | struct vnet_port *port = arg; | ||
623 | struct vio_driver_state *vio = &port->vio; | 644 | struct vio_driver_state *vio = &port->vio; |
624 | unsigned long flags; | ||
625 | int tx_wakeup, err; | 645 | int tx_wakeup, err; |
646 | int npkts = 0; | ||
647 | int event = (port->rx_event & LDC_EVENT_RESET); | ||
626 | 648 | ||
627 | spin_lock_irqsave(&vio->lock, flags); | 649 | ldc_ctrl: |
628 | |||
629 | if (unlikely(event == LDC_EVENT_RESET || | 650 | if (unlikely(event == LDC_EVENT_RESET || |
630 | event == LDC_EVENT_UP)) { | 651 | event == LDC_EVENT_UP)) { |
631 | vio_link_state_change(vio, event); | 652 | vio_link_state_change(vio, event); |
632 | spin_unlock_irqrestore(&vio->lock, flags); | ||
633 | 653 | ||
634 | if (event == LDC_EVENT_RESET) { | 654 | if (event == LDC_EVENT_RESET) { |
635 | port->rmtu = 0; | 655 | port->rmtu = 0; |
636 | vio_port_up(vio); | 656 | vio_port_up(vio); |
637 | } | 657 | } |
638 | return; | 658 | port->rx_event = 0; |
659 | return 0; | ||
639 | } | 660 | } |
661 | /* We may have multiple LDC events in rx_event. Unroll send_events() */ | ||
662 | event = (port->rx_event & LDC_EVENT_UP); | ||
663 | port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP); | ||
664 | if (event == LDC_EVENT_UP) | ||
665 | goto ldc_ctrl; | ||
666 | event = port->rx_event; | ||
667 | if (!(event & LDC_EVENT_DATA_READY)) | ||
668 | return 0; | ||
640 | 669 | ||
641 | if (unlikely(event != LDC_EVENT_DATA_READY)) { | 670 | /* we dont expect any other bits than RESET, UP, DATA_READY */ |
642 | pr_warn("Unexpected LDC event %d\n", event); | 671 | BUG_ON(event != LDC_EVENT_DATA_READY); |
643 | spin_unlock_irqrestore(&vio->lock, flags); | ||
644 | return; | ||
645 | } | ||
646 | 672 | ||
647 | tx_wakeup = err = 0; | 673 | tx_wakeup = err = 0; |
648 | while (1) { | 674 | while (1) { |
@@ -651,6 +677,21 @@ static void vnet_event(void *arg, int event) | |||
651 | u64 raw[8]; | 677 | u64 raw[8]; |
652 | } msgbuf; | 678 | } msgbuf; |
653 | 679 | ||
680 | if (port->napi_resume) { | ||
681 | struct vio_dring_data *pkt = | ||
682 | (struct vio_dring_data *)&msgbuf; | ||
683 | struct vio_dring_state *dr = | ||
684 | &port->vio.drings[VIO_DRIVER_RX_RING]; | ||
685 | |||
686 | pkt->tag.type = VIO_TYPE_DATA; | ||
687 | pkt->tag.stype = VIO_SUBTYPE_INFO; | ||
688 | pkt->tag.stype_env = VIO_DRING_DATA; | ||
689 | pkt->seq = dr->rcv_nxt; | ||
690 | pkt->start_idx = next_idx(port->napi_stop_idx, dr); | ||
691 | pkt->end_idx = -1; | ||
692 | goto napi_resume; | ||
693 | } | ||
694 | ldc_read: | ||
654 | err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); | 695 | err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); |
655 | if (unlikely(err < 0)) { | 696 | if (unlikely(err < 0)) { |
656 | if (err == -ECONNRESET) | 697 | if (err == -ECONNRESET) |
@@ -667,10 +708,22 @@ static void vnet_event(void *arg, int event) | |||
667 | err = vio_validate_sid(vio, &msgbuf.tag); | 708 | err = vio_validate_sid(vio, &msgbuf.tag); |
668 | if (err < 0) | 709 | if (err < 0) |
669 | break; | 710 | break; |
670 | 711 | napi_resume: | |
671 | if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { | 712 | if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { |
672 | if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { | 713 | if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { |
673 | err = vnet_rx(port, &msgbuf); | 714 | if (!port_is_up(port)) { |
715 | /* failures like handshake_failure() | ||
716 | * may have cleaned up dring, but | ||
717 | * NAPI polling may bring us here. | ||
718 | */ | ||
719 | err = -ECONNRESET; | ||
720 | break; | ||
721 | } | ||
722 | err = vnet_rx(port, &msgbuf, &npkts, budget); | ||
723 | if (npkts >= budget) | ||
724 | break; | ||
725 | if (npkts == 0 && err != -ECONNRESET) | ||
726 | goto ldc_read; | ||
674 | } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { | 727 | } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { |
675 | err = vnet_ack(port, &msgbuf); | 728 | err = vnet_ack(port, &msgbuf); |
676 | if (err > 0) | 729 | if (err > 0) |
@@ -691,15 +744,34 @@ static void vnet_event(void *arg, int event) | |||
691 | if (err == -ECONNRESET) | 744 | if (err == -ECONNRESET) |
692 | break; | 745 | break; |
693 | } | 746 | } |
694 | spin_unlock(&vio->lock); | ||
695 | /* Kick off a tasklet to wake the queue. We cannot call | ||
696 | * maybe_tx_wakeup directly here because we could deadlock on | ||
697 | * netif_tx_lock() with dev_watchdog() | ||
698 | */ | ||
699 | if (unlikely(tx_wakeup && err != -ECONNRESET)) | 747 | if (unlikely(tx_wakeup && err != -ECONNRESET)) |
700 | tasklet_schedule(&port->vp->vnet_tx_wakeup); | 748 | maybe_tx_wakeup(port); |
749 | return npkts; | ||
750 | } | ||
751 | |||
752 | static int vnet_poll(struct napi_struct *napi, int budget) | ||
753 | { | ||
754 | struct vnet_port *port = container_of(napi, struct vnet_port, napi); | ||
755 | struct vio_driver_state *vio = &port->vio; | ||
756 | int processed = vnet_event_napi(port, budget); | ||
757 | |||
758 | if (processed < budget) { | ||
759 | napi_complete(napi); | ||
760 | port->rx_event &= ~LDC_EVENT_DATA_READY; | ||
761 | vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); | ||
762 | } | ||
763 | return processed; | ||
764 | } | ||
765 | |||
766 | static void vnet_event(void *arg, int event) | ||
767 | { | ||
768 | struct vnet_port *port = arg; | ||
769 | struct vio_driver_state *vio = &port->vio; | ||
770 | |||
771 | port->rx_event |= event; | ||
772 | vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED); | ||
773 | napi_schedule(&port->napi); | ||
701 | 774 | ||
702 | local_irq_restore(flags); | ||
703 | } | 775 | } |
704 | 776 | ||
705 | static int __vnet_tx_trigger(struct vnet_port *port, u32 start) | 777 | static int __vnet_tx_trigger(struct vnet_port *port, u32 start) |
@@ -746,26 +818,19 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start) | |||
746 | return err; | 818 | return err; |
747 | } | 819 | } |
748 | 820 | ||
749 | static inline bool port_is_up(struct vnet_port *vnet) | ||
750 | { | ||
751 | struct vio_driver_state *vio = &vnet->vio; | ||
752 | |||
753 | return !!(vio->hs_state & VIO_HS_COMPLETE); | ||
754 | } | ||
755 | |||
756 | struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) | 821 | struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) |
757 | { | 822 | { |
758 | unsigned int hash = vnet_hashfn(skb->data); | 823 | unsigned int hash = vnet_hashfn(skb->data); |
759 | struct hlist_head *hp = &vp->port_hash[hash]; | 824 | struct hlist_head *hp = &vp->port_hash[hash]; |
760 | struct vnet_port *port; | 825 | struct vnet_port *port; |
761 | 826 | ||
762 | hlist_for_each_entry(port, hp, hash) { | 827 | hlist_for_each_entry_rcu(port, hp, hash) { |
763 | if (!port_is_up(port)) | 828 | if (!port_is_up(port)) |
764 | continue; | 829 | continue; |
765 | if (ether_addr_equal(port->raddr, skb->data)) | 830 | if (ether_addr_equal(port->raddr, skb->data)) |
766 | return port; | 831 | return port; |
767 | } | 832 | } |
768 | list_for_each_entry(port, &vp->port_list, list) { | 833 | list_for_each_entry_rcu(port, &vp->port_list, list) { |
769 | if (!port->switch_port) | 834 | if (!port->switch_port) |
770 | continue; | 835 | continue; |
771 | if (!port_is_up(port)) | 836 | if (!port_is_up(port)) |
@@ -775,18 +840,6 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) | |||
775 | return NULL; | 840 | return NULL; |
776 | } | 841 | } |
777 | 842 | ||
778 | struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb) | ||
779 | { | ||
780 | struct vnet_port *ret; | ||
781 | unsigned long flags; | ||
782 | |||
783 | spin_lock_irqsave(&vp->lock, flags); | ||
784 | ret = __tx_port_find(vp, skb); | ||
785 | spin_unlock_irqrestore(&vp->lock, flags); | ||
786 | |||
787 | return ret; | ||
788 | } | ||
789 | |||
790 | static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, | 843 | static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, |
791 | unsigned *pending) | 844 | unsigned *pending) |
792 | { | 845 | { |
@@ -847,11 +900,10 @@ static void vnet_clean_timer_expire(unsigned long port0) | |||
847 | struct vnet_port *port = (struct vnet_port *)port0; | 900 | struct vnet_port *port = (struct vnet_port *)port0; |
848 | struct sk_buff *freeskbs; | 901 | struct sk_buff *freeskbs; |
849 | unsigned pending; | 902 | unsigned pending; |
850 | unsigned long flags; | ||
851 | 903 | ||
852 | spin_lock_irqsave(&port->vio.lock, flags); | 904 | netif_tx_lock(port->vp->dev); |
853 | freeskbs = vnet_clean_tx_ring(port, &pending); | 905 | freeskbs = vnet_clean_tx_ring(port, &pending); |
854 | spin_unlock_irqrestore(&port->vio.lock, flags); | 906 | netif_tx_unlock(port->vp->dev); |
855 | 907 | ||
856 | vnet_free_skbs(freeskbs); | 908 | vnet_free_skbs(freeskbs); |
857 | 909 | ||
@@ -898,28 +950,39 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart, | |||
898 | return skb; | 950 | return skb; |
899 | } | 951 | } |
900 | 952 | ||
953 | static u16 | ||
954 | vnet_select_queue(struct net_device *dev, struct sk_buff *skb, | ||
955 | void *accel_priv, select_queue_fallback_t fallback) | ||
956 | { | ||
957 | struct vnet *vp = netdev_priv(dev); | ||
958 | struct vnet_port *port = __tx_port_find(vp, skb); | ||
959 | |||
960 | return port->q_index; | ||
961 | } | ||
962 | |||
901 | static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) | 963 | static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) |
902 | { | 964 | { |
903 | struct vnet *vp = netdev_priv(dev); | 965 | struct vnet *vp = netdev_priv(dev); |
904 | struct vnet_port *port = tx_port_find(vp, skb); | 966 | struct vnet_port *port = NULL; |
905 | struct vio_dring_state *dr; | 967 | struct vio_dring_state *dr; |
906 | struct vio_net_desc *d; | 968 | struct vio_net_desc *d; |
907 | unsigned long flags; | ||
908 | unsigned int len; | 969 | unsigned int len; |
909 | struct sk_buff *freeskbs = NULL; | 970 | struct sk_buff *freeskbs = NULL; |
910 | int i, err, txi; | 971 | int i, err, txi; |
911 | void *start = NULL; | 972 | void *start = NULL; |
912 | int nlen = 0; | 973 | int nlen = 0; |
913 | unsigned pending = 0; | 974 | unsigned pending = 0; |
914 | 975 | struct netdev_queue *txq; | |
915 | if (unlikely(!port)) | ||
916 | goto out_dropped; | ||
917 | 976 | ||
918 | skb = vnet_skb_shape(skb, &start, &nlen); | 977 | skb = vnet_skb_shape(skb, &start, &nlen); |
919 | |||
920 | if (unlikely(!skb)) | 978 | if (unlikely(!skb)) |
921 | goto out_dropped; | 979 | goto out_dropped; |
922 | 980 | ||
981 | rcu_read_lock(); | ||
982 | port = __tx_port_find(vp, skb); | ||
983 | if (unlikely(!port)) | ||
984 | goto out_dropped; | ||
985 | |||
923 | if (skb->len > port->rmtu) { | 986 | if (skb->len > port->rmtu) { |
924 | unsigned long localmtu = port->rmtu - ETH_HLEN; | 987 | unsigned long localmtu = port->rmtu - ETH_HLEN; |
925 | 988 | ||
@@ -937,6 +1000,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
937 | fl4.saddr = ip_hdr(skb)->saddr; | 1000 | fl4.saddr = ip_hdr(skb)->saddr; |
938 | 1001 | ||
939 | rt = ip_route_output_key(dev_net(dev), &fl4); | 1002 | rt = ip_route_output_key(dev_net(dev), &fl4); |
1003 | rcu_read_unlock(); | ||
940 | if (!IS_ERR(rt)) { | 1004 | if (!IS_ERR(rt)) { |
941 | skb_dst_set(skb, &rt->dst); | 1005 | skb_dst_set(skb, &rt->dst); |
942 | icmp_send(skb, ICMP_DEST_UNREACH, | 1006 | icmp_send(skb, ICMP_DEST_UNREACH, |
@@ -951,18 +1015,18 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
951 | goto out_dropped; | 1015 | goto out_dropped; |
952 | } | 1016 | } |
953 | 1017 | ||
954 | spin_lock_irqsave(&port->vio.lock, flags); | ||
955 | |||
956 | dr = &port->vio.drings[VIO_DRIVER_TX_RING]; | 1018 | dr = &port->vio.drings[VIO_DRIVER_TX_RING]; |
1019 | i = skb_get_queue_mapping(skb); | ||
1020 | txq = netdev_get_tx_queue(dev, i); | ||
957 | if (unlikely(vnet_tx_dring_avail(dr) < 1)) { | 1021 | if (unlikely(vnet_tx_dring_avail(dr) < 1)) { |
958 | if (!netif_queue_stopped(dev)) { | 1022 | if (!netif_tx_queue_stopped(txq)) { |
959 | netif_stop_queue(dev); | 1023 | netif_tx_stop_queue(txq); |
960 | 1024 | ||
961 | /* This is a hard error, log it. */ | 1025 | /* This is a hard error, log it. */ |
962 | netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); | 1026 | netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); |
963 | dev->stats.tx_errors++; | 1027 | dev->stats.tx_errors++; |
964 | } | 1028 | } |
965 | spin_unlock_irqrestore(&port->vio.lock, flags); | 1029 | rcu_read_unlock(); |
966 | return NETDEV_TX_BUSY; | 1030 | return NETDEV_TX_BUSY; |
967 | } | 1031 | } |
968 | 1032 | ||
@@ -986,7 +1050,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
986 | (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); | 1050 | (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); |
987 | if (err < 0) { | 1051 | if (err < 0) { |
988 | netdev_info(dev, "tx buffer map error %d\n", err); | 1052 | netdev_info(dev, "tx buffer map error %d\n", err); |
989 | goto out_dropped_unlock; | 1053 | goto out_dropped; |
990 | } | 1054 | } |
991 | port->tx_bufs[txi].ncookies = err; | 1055 | port->tx_bufs[txi].ncookies = err; |
992 | 1056 | ||
@@ -1039,7 +1103,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1039 | netdev_info(dev, "TX trigger error %d\n", err); | 1103 | netdev_info(dev, "TX trigger error %d\n", err); |
1040 | d->hdr.state = VIO_DESC_FREE; | 1104 | d->hdr.state = VIO_DESC_FREE; |
1041 | dev->stats.tx_carrier_errors++; | 1105 | dev->stats.tx_carrier_errors++; |
1042 | goto out_dropped_unlock; | 1106 | goto out_dropped; |
1043 | } | 1107 | } |
1044 | 1108 | ||
1045 | ldc_start_done: | 1109 | ldc_start_done: |
@@ -1050,31 +1114,29 @@ ldc_start_done: | |||
1050 | 1114 | ||
1051 | dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); | 1115 | dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); |
1052 | if (unlikely(vnet_tx_dring_avail(dr) < 1)) { | 1116 | if (unlikely(vnet_tx_dring_avail(dr) < 1)) { |
1053 | netif_stop_queue(dev); | 1117 | netif_tx_stop_queue(txq); |
1054 | if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) | 1118 | if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) |
1055 | netif_wake_queue(dev); | 1119 | netif_tx_wake_queue(txq); |
1056 | } | 1120 | } |
1057 | 1121 | ||
1058 | spin_unlock_irqrestore(&port->vio.lock, flags); | 1122 | (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); |
1123 | rcu_read_unlock(); | ||
1059 | 1124 | ||
1060 | vnet_free_skbs(freeskbs); | 1125 | vnet_free_skbs(freeskbs); |
1061 | 1126 | ||
1062 | (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); | ||
1063 | |||
1064 | return NETDEV_TX_OK; | 1127 | return NETDEV_TX_OK; |
1065 | 1128 | ||
1066 | out_dropped_unlock: | ||
1067 | spin_unlock_irqrestore(&port->vio.lock, flags); | ||
1068 | |||
1069 | out_dropped: | 1129 | out_dropped: |
1070 | if (skb) | ||
1071 | dev_kfree_skb(skb); | ||
1072 | vnet_free_skbs(freeskbs); | ||
1073 | if (pending) | 1130 | if (pending) |
1074 | (void)mod_timer(&port->clean_timer, | 1131 | (void)mod_timer(&port->clean_timer, |
1075 | jiffies + VNET_CLEAN_TIMEOUT); | 1132 | jiffies + VNET_CLEAN_TIMEOUT); |
1076 | else if (port) | 1133 | else if (port) |
1077 | del_timer(&port->clean_timer); | 1134 | del_timer(&port->clean_timer); |
1135 | if (port) | ||
1136 | rcu_read_unlock(); | ||
1137 | if (skb) | ||
1138 | dev_kfree_skb(skb); | ||
1139 | vnet_free_skbs(freeskbs); | ||
1078 | dev->stats.tx_dropped++; | 1140 | dev->stats.tx_dropped++; |
1079 | return NETDEV_TX_OK; | 1141 | return NETDEV_TX_OK; |
1080 | } | 1142 | } |
@@ -1087,14 +1149,14 @@ static void vnet_tx_timeout(struct net_device *dev) | |||
1087 | static int vnet_open(struct net_device *dev) | 1149 | static int vnet_open(struct net_device *dev) |
1088 | { | 1150 | { |
1089 | netif_carrier_on(dev); | 1151 | netif_carrier_on(dev); |
1090 | netif_start_queue(dev); | 1152 | netif_tx_start_all_queues(dev); |
1091 | 1153 | ||
1092 | return 0; | 1154 | return 0; |
1093 | } | 1155 | } |
1094 | 1156 | ||
1095 | static int vnet_close(struct net_device *dev) | 1157 | static int vnet_close(struct net_device *dev) |
1096 | { | 1158 | { |
1097 | netif_stop_queue(dev); | 1159 | netif_tx_stop_all_queues(dev); |
1098 | netif_carrier_off(dev); | 1160 | netif_carrier_off(dev); |
1099 | 1161 | ||
1100 | return 0; | 1162 | return 0; |
@@ -1204,18 +1266,17 @@ static void vnet_set_rx_mode(struct net_device *dev) | |||
1204 | { | 1266 | { |
1205 | struct vnet *vp = netdev_priv(dev); | 1267 | struct vnet *vp = netdev_priv(dev); |
1206 | struct vnet_port *port; | 1268 | struct vnet_port *port; |
1207 | unsigned long flags; | ||
1208 | 1269 | ||
1209 | spin_lock_irqsave(&vp->lock, flags); | 1270 | rcu_read_lock(); |
1210 | if (!list_empty(&vp->port_list)) { | 1271 | list_for_each_entry_rcu(port, &vp->port_list, list) { |
1211 | port = list_entry(vp->port_list.next, struct vnet_port, list); | ||
1212 | 1272 | ||
1213 | if (port->switch_port) { | 1273 | if (port->switch_port) { |
1214 | __update_mc_list(vp, dev); | 1274 | __update_mc_list(vp, dev); |
1215 | __send_mc_list(vp, port); | 1275 | __send_mc_list(vp, port); |
1276 | break; | ||
1216 | } | 1277 | } |
1217 | } | 1278 | } |
1218 | spin_unlock_irqrestore(&vp->lock, flags); | 1279 | rcu_read_unlock(); |
1219 | } | 1280 | } |
1220 | 1281 | ||
1221 | static int vnet_change_mtu(struct net_device *dev, int new_mtu) | 1282 | static int vnet_change_mtu(struct net_device *dev, int new_mtu) |
@@ -1342,6 +1403,21 @@ err_out: | |||
1342 | return err; | 1403 | return err; |
1343 | } | 1404 | } |
1344 | 1405 | ||
1406 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1407 | static void vnet_poll_controller(struct net_device *dev) | ||
1408 | { | ||
1409 | struct vnet *vp = netdev_priv(dev); | ||
1410 | struct vnet_port *port; | ||
1411 | unsigned long flags; | ||
1412 | |||
1413 | spin_lock_irqsave(&vp->lock, flags); | ||
1414 | if (!list_empty(&vp->port_list)) { | ||
1415 | port = list_entry(vp->port_list.next, struct vnet_port, list); | ||
1416 | napi_schedule(&port->napi); | ||
1417 | } | ||
1418 | spin_unlock_irqrestore(&vp->lock, flags); | ||
1419 | } | ||
1420 | #endif | ||
1345 | static LIST_HEAD(vnet_list); | 1421 | static LIST_HEAD(vnet_list); |
1346 | static DEFINE_MUTEX(vnet_list_mutex); | 1422 | static DEFINE_MUTEX(vnet_list_mutex); |
1347 | 1423 | ||
@@ -1354,6 +1430,10 @@ static const struct net_device_ops vnet_ops = { | |||
1354 | .ndo_tx_timeout = vnet_tx_timeout, | 1430 | .ndo_tx_timeout = vnet_tx_timeout, |
1355 | .ndo_change_mtu = vnet_change_mtu, | 1431 | .ndo_change_mtu = vnet_change_mtu, |
1356 | .ndo_start_xmit = vnet_start_xmit, | 1432 | .ndo_start_xmit = vnet_start_xmit, |
1433 | .ndo_select_queue = vnet_select_queue, | ||
1434 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1435 | .ndo_poll_controller = vnet_poll_controller, | ||
1436 | #endif | ||
1357 | }; | 1437 | }; |
1358 | 1438 | ||
1359 | static struct vnet *vnet_new(const u64 *local_mac) | 1439 | static struct vnet *vnet_new(const u64 *local_mac) |
@@ -1362,7 +1442,7 @@ static struct vnet *vnet_new(const u64 *local_mac) | |||
1362 | struct vnet *vp; | 1442 | struct vnet *vp; |
1363 | int err, i; | 1443 | int err, i; |
1364 | 1444 | ||
1365 | dev = alloc_etherdev(sizeof(*vp)); | 1445 | dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1); |
1366 | if (!dev) | 1446 | if (!dev) |
1367 | return ERR_PTR(-ENOMEM); | 1447 | return ERR_PTR(-ENOMEM); |
1368 | dev->needed_headroom = VNET_PACKET_SKIP + 8; | 1448 | dev->needed_headroom = VNET_PACKET_SKIP + 8; |
@@ -1374,7 +1454,6 @@ static struct vnet *vnet_new(const u64 *local_mac) | |||
1374 | vp = netdev_priv(dev); | 1454 | vp = netdev_priv(dev); |
1375 | 1455 | ||
1376 | spin_lock_init(&vp->lock); | 1456 | spin_lock_init(&vp->lock); |
1377 | tasklet_init(&vp->vnet_tx_wakeup, maybe_tx_wakeup, (unsigned long)vp); | ||
1378 | vp->dev = dev; | 1457 | vp->dev = dev; |
1379 | 1458 | ||
1380 | INIT_LIST_HEAD(&vp->port_list); | 1459 | INIT_LIST_HEAD(&vp->port_list); |
@@ -1434,7 +1513,6 @@ static void vnet_cleanup(void) | |||
1434 | vp = list_first_entry(&vnet_list, struct vnet, list); | 1513 | vp = list_first_entry(&vnet_list, struct vnet, list); |
1435 | list_del(&vp->list); | 1514 | list_del(&vp->list); |
1436 | dev = vp->dev; | 1515 | dev = vp->dev; |
1437 | tasklet_kill(&vp->vnet_tx_wakeup); | ||
1438 | /* vio_unregister_driver() should have cleaned up port_list */ | 1516 | /* vio_unregister_driver() should have cleaned up port_list */ |
1439 | BUG_ON(!list_empty(&vp->port_list)); | 1517 | BUG_ON(!list_empty(&vp->port_list)); |
1440 | unregister_netdev(dev); | 1518 | unregister_netdev(dev); |
@@ -1489,6 +1567,25 @@ static void print_version(void) | |||
1489 | 1567 | ||
1490 | const char *remote_macaddr_prop = "remote-mac-address"; | 1568 | const char *remote_macaddr_prop = "remote-mac-address"; |
1491 | 1569 | ||
1570 | static void | ||
1571 | vnet_port_add_txq(struct vnet_port *port) | ||
1572 | { | ||
1573 | struct vnet *vp = port->vp; | ||
1574 | int n; | ||
1575 | |||
1576 | n = vp->nports++; | ||
1577 | n = n & (VNET_MAX_TXQS - 1); | ||
1578 | port->q_index = n; | ||
1579 | netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index)); | ||
1580 | } | ||
1581 | |||
1582 | static void | ||
1583 | vnet_port_rm_txq(struct vnet_port *port) | ||
1584 | { | ||
1585 | port->vp->nports--; | ||
1586 | netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index)); | ||
1587 | } | ||
1588 | |||
1492 | static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) | 1589 | static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) |
1493 | { | 1590 | { |
1494 | struct mdesc_handle *hp; | 1591 | struct mdesc_handle *hp; |
@@ -1536,6 +1633,8 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
1536 | if (err) | 1633 | if (err) |
1537 | goto err_out_free_port; | 1634 | goto err_out_free_port; |
1538 | 1635 | ||
1636 | netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT); | ||
1637 | |||
1539 | err = vnet_port_alloc_tx_bufs(port); | 1638 | err = vnet_port_alloc_tx_bufs(port); |
1540 | if (err) | 1639 | if (err) |
1541 | goto err_out_free_ldc; | 1640 | goto err_out_free_ldc; |
@@ -1550,10 +1649,12 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
1550 | 1649 | ||
1551 | spin_lock_irqsave(&vp->lock, flags); | 1650 | spin_lock_irqsave(&vp->lock, flags); |
1552 | if (switch_port) | 1651 | if (switch_port) |
1553 | list_add(&port->list, &vp->port_list); | 1652 | list_add_rcu(&port->list, &vp->port_list); |
1554 | else | 1653 | else |
1555 | list_add_tail(&port->list, &vp->port_list); | 1654 | list_add_tail_rcu(&port->list, &vp->port_list); |
1556 | hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]); | 1655 | hlist_add_head_rcu(&port->hash, |
1656 | &vp->port_hash[vnet_hashfn(port->raddr)]); | ||
1657 | vnet_port_add_txq(port); | ||
1557 | spin_unlock_irqrestore(&vp->lock, flags); | 1658 | spin_unlock_irqrestore(&vp->lock, flags); |
1558 | 1659 | ||
1559 | dev_set_drvdata(&vdev->dev, port); | 1660 | dev_set_drvdata(&vdev->dev, port); |
@@ -1564,6 +1665,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
1564 | setup_timer(&port->clean_timer, vnet_clean_timer_expire, | 1665 | setup_timer(&port->clean_timer, vnet_clean_timer_expire, |
1565 | (unsigned long)port); | 1666 | (unsigned long)port); |
1566 | 1667 | ||
1668 | napi_enable(&port->napi); | ||
1567 | vio_port_up(&port->vio); | 1669 | vio_port_up(&port->vio); |
1568 | 1670 | ||
1569 | mdesc_release(hp); | 1671 | mdesc_release(hp); |
@@ -1571,6 +1673,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
1571 | return 0; | 1673 | return 0; |
1572 | 1674 | ||
1573 | err_out_free_ldc: | 1675 | err_out_free_ldc: |
1676 | netif_napi_del(&port->napi); | ||
1574 | vio_ldc_free(&port->vio); | 1677 | vio_ldc_free(&port->vio); |
1575 | 1678 | ||
1576 | err_out_free_port: | 1679 | err_out_free_port: |
@@ -1586,17 +1689,18 @@ static int vnet_port_remove(struct vio_dev *vdev) | |||
1586 | struct vnet_port *port = dev_get_drvdata(&vdev->dev); | 1689 | struct vnet_port *port = dev_get_drvdata(&vdev->dev); |
1587 | 1690 | ||
1588 | if (port) { | 1691 | if (port) { |
1589 | struct vnet *vp = port->vp; | ||
1590 | unsigned long flags; | ||
1591 | 1692 | ||
1592 | del_timer_sync(&port->vio.timer); | 1693 | del_timer_sync(&port->vio.timer); |
1593 | del_timer_sync(&port->clean_timer); | ||
1594 | 1694 | ||
1595 | spin_lock_irqsave(&vp->lock, flags); | 1695 | napi_disable(&port->napi); |
1596 | list_del(&port->list); | 1696 | |
1597 | hlist_del(&port->hash); | 1697 | list_del_rcu(&port->list); |
1598 | spin_unlock_irqrestore(&vp->lock, flags); | 1698 | hlist_del_rcu(&port->hash); |
1599 | 1699 | ||
1700 | synchronize_rcu(); | ||
1701 | del_timer_sync(&port->clean_timer); | ||
1702 | vnet_port_rm_txq(port); | ||
1703 | netif_napi_del(&port->napi); | ||
1600 | vnet_port_free_tx_bufs(port); | 1704 | vnet_port_free_tx_bufs(port); |
1601 | vio_ldc_free(&port->vio); | 1705 | vio_ldc_free(&port->vio); |
1602 | 1706 | ||
diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h index c91104542619..cd5d343ea232 100644 --- a/drivers/net/ethernet/sun/sunvnet.h +++ b/drivers/net/ethernet/sun/sunvnet.h | |||
@@ -56,6 +56,12 @@ struct vnet_port { | |||
56 | struct timer_list clean_timer; | 56 | struct timer_list clean_timer; |
57 | 57 | ||
58 | u64 rmtu; | 58 | u64 rmtu; |
59 | |||
60 | struct napi_struct napi; | ||
61 | u32 napi_stop_idx; | ||
62 | bool napi_resume; | ||
63 | int rx_event; | ||
64 | u16 q_index; | ||
59 | }; | 65 | }; |
60 | 66 | ||
61 | static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio) | 67 | static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio) |
@@ -97,7 +103,7 @@ struct vnet { | |||
97 | struct list_head list; | 103 | struct list_head list; |
98 | u64 local_mac; | 104 | u64 local_mac; |
99 | 105 | ||
100 | struct tasklet_struct vnet_tx_wakeup; | 106 | int nports; |
101 | }; | 107 | }; |
102 | 108 | ||
103 | #endif /* _SUNVNET_H */ | 109 | #endif /* _SUNVNET_H */ |
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 0f56b1c0e082..70a930ac4fa9 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c | |||
@@ -638,14 +638,12 @@ static int w5100_hw_probe(struct platform_device *pdev) | |||
638 | } | 638 | } |
639 | 639 | ||
640 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 640 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
641 | if (!mem) | ||
642 | return -ENXIO; | ||
643 | mem_size = resource_size(mem); | ||
644 | |||
645 | priv->base = devm_ioremap_resource(&pdev->dev, mem); | 641 | priv->base = devm_ioremap_resource(&pdev->dev, mem); |
646 | if (IS_ERR(priv->base)) | 642 | if (IS_ERR(priv->base)) |
647 | return PTR_ERR(priv->base); | 643 | return PTR_ERR(priv->base); |
648 | 644 | ||
645 | mem_size = resource_size(mem); | ||
646 | |||
649 | spin_lock_init(&priv->reg_lock); | 647 | spin_lock_init(&priv->reg_lock); |
650 | priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE; | 648 | priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE; |
651 | if (priv->indirect) { | 649 | if (priv->indirect) { |
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index f961f14a0473..7974b7d90fcc 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c | |||
@@ -558,14 +558,12 @@ static int w5300_hw_probe(struct platform_device *pdev) | |||
558 | } | 558 | } |
559 | 559 | ||
560 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 560 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
561 | if (!mem) | ||
562 | return -ENXIO; | ||
563 | mem_size = resource_size(mem); | ||
564 | |||
565 | priv->base = devm_ioremap_resource(&pdev->dev, mem); | 561 | priv->base = devm_ioremap_resource(&pdev->dev, mem); |
566 | if (IS_ERR(priv->base)) | 562 | if (IS_ERR(priv->base)) |
567 | return PTR_ERR(priv->base); | 563 | return PTR_ERR(priv->base); |
568 | 564 | ||
565 | mem_size = resource_size(mem); | ||
566 | |||
569 | spin_lock_init(&priv->reg_lock); | 567 | spin_lock_init(&priv->reg_lock); |
570 | priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE; | 568 | priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE; |
571 | if (priv->indirect) { | 569 | if (priv->indirect) { |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 78ec33f5100b..3295e4ee9dbb 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -193,7 +193,9 @@ static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb) | |||
193 | struct flow_keys flow; | 193 | struct flow_keys flow; |
194 | int data_len; | 194 | int data_len; |
195 | 195 | ||
196 | if (!skb_flow_dissect(skb, &flow) || flow.n_proto != htons(ETH_P_IP)) | 196 | if (!skb_flow_dissect(skb, &flow) || |
197 | !(flow.n_proto == htons(ETH_P_IP) || | ||
198 | flow.n_proto == htons(ETH_P_IPV6))) | ||
197 | return false; | 199 | return false; |
198 | 200 | ||
199 | if (flow.ip_proto == IPPROTO_TCP) | 201 | if (flow.ip_proto == IPPROTO_TCP) |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 2b86f0b6f6d1..ccce6f24b009 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
@@ -728,7 +728,8 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) | |||
728 | rssp->hdr.size = sizeof(struct ndis_recv_scale_param); | 728 | rssp->hdr.size = sizeof(struct ndis_recv_scale_param); |
729 | rssp->flag = 0; | 729 | rssp->flag = 0; |
730 | rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 | | 730 | rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 | |
731 | NDIS_HASH_TCP_IPV4; | 731 | NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 | |
732 | NDIS_HASH_TCP_IPV6; | ||
732 | rssp->indirect_tabsize = 4*ITAB_NUM; | 733 | rssp->indirect_tabsize = 4*ITAB_NUM; |
733 | rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param); | 734 | rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param); |
734 | rssp->hashkey_size = HASH_KEYLEN; | 735 | rssp->hashkey_size = HASH_KEYLEN; |
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 225c033b08f3..bb4d780c0838 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
@@ -54,6 +54,9 @@ | |||
54 | #define MII_M1145_PHY_EXT_CR 0x14 | 54 | #define MII_M1145_PHY_EXT_CR 0x14 |
55 | #define MII_M1145_RGMII_RX_DELAY 0x0080 | 55 | #define MII_M1145_RGMII_RX_DELAY 0x0080 |
56 | #define MII_M1145_RGMII_TX_DELAY 0x0002 | 56 | #define MII_M1145_RGMII_TX_DELAY 0x0002 |
57 | #define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4 | ||
58 | #define MII_M1145_HWCFG_MODE_MASK 0xf | ||
59 | #define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000 | ||
57 | 60 | ||
58 | #define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4 | 61 | #define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4 |
59 | #define MII_M1145_HWCFG_MODE_MASK 0xf | 62 | #define MII_M1145_HWCFG_MODE_MASK 0xf |
@@ -123,6 +126,9 @@ | |||
123 | 126 | ||
124 | #define MII_M1116R_CONTROL_REG_MAC 21 | 127 | #define MII_M1116R_CONTROL_REG_MAC 21 |
125 | 128 | ||
129 | #define MII_88E3016_PHY_SPEC_CTRL 0x10 | ||
130 | #define MII_88E3016_DISABLE_SCRAMBLER 0x0200 | ||
131 | #define MII_88E3016_AUTO_MDIX_CROSSOVER 0x0030 | ||
126 | 132 | ||
127 | MODULE_DESCRIPTION("Marvell PHY driver"); | 133 | MODULE_DESCRIPTION("Marvell PHY driver"); |
128 | MODULE_AUTHOR("Andy Fleming"); | 134 | MODULE_AUTHOR("Andy Fleming"); |
@@ -439,6 +445,25 @@ static int m88e1116r_config_init(struct phy_device *phydev) | |||
439 | return 0; | 445 | return 0; |
440 | } | 446 | } |
441 | 447 | ||
448 | static int m88e3016_config_init(struct phy_device *phydev) | ||
449 | { | ||
450 | int reg; | ||
451 | |||
452 | /* Enable Scrambler and Auto-Crossover */ | ||
453 | reg = phy_read(phydev, MII_88E3016_PHY_SPEC_CTRL); | ||
454 | if (reg < 0) | ||
455 | return reg; | ||
456 | |||
457 | reg &= ~MII_88E3016_DISABLE_SCRAMBLER; | ||
458 | reg |= MII_88E3016_AUTO_MDIX_CROSSOVER; | ||
459 | |||
460 | reg = phy_write(phydev, MII_88E3016_PHY_SPEC_CTRL, reg); | ||
461 | if (reg < 0) | ||
462 | return reg; | ||
463 | |||
464 | return 0; | ||
465 | } | ||
466 | |||
442 | static int m88e1111_config_init(struct phy_device *phydev) | 467 | static int m88e1111_config_init(struct phy_device *phydev) |
443 | { | 468 | { |
444 | int err; | 469 | int err; |
@@ -625,6 +650,7 @@ static int m88e1149_config_init(struct phy_device *phydev) | |||
625 | static int m88e1145_config_init(struct phy_device *phydev) | 650 | static int m88e1145_config_init(struct phy_device *phydev) |
626 | { | 651 | { |
627 | int err; | 652 | int err; |
653 | int temp; | ||
628 | 654 | ||
629 | /* Take care of errata E0 & E1 */ | 655 | /* Take care of errata E0 & E1 */ |
630 | err = phy_write(phydev, 0x1d, 0x001b); | 656 | err = phy_write(phydev, 0x1d, 0x001b); |
@@ -682,7 +708,7 @@ static int m88e1145_config_init(struct phy_device *phydev) | |||
682 | } | 708 | } |
683 | 709 | ||
684 | if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { | 710 | if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { |
685 | int temp = phy_read(phydev, MII_M1145_PHY_EXT_SR); | 711 | temp = phy_read(phydev, MII_M1145_PHY_EXT_SR); |
686 | if (temp < 0) | 712 | if (temp < 0) |
687 | return temp; | 713 | return temp; |
688 | 714 | ||
@@ -789,6 +815,12 @@ static int marvell_read_status(struct phy_device *phydev) | |||
789 | return 0; | 815 | return 0; |
790 | } | 816 | } |
791 | 817 | ||
818 | static int marvell_aneg_done(struct phy_device *phydev) | ||
819 | { | ||
820 | int retval = phy_read(phydev, MII_M1011_PHY_STATUS); | ||
821 | return (retval < 0) ? retval : (retval & MII_M1011_PHY_STATUS_RESOLVED); | ||
822 | } | ||
823 | |||
792 | static int m88e1121_did_interrupt(struct phy_device *phydev) | 824 | static int m88e1121_did_interrupt(struct phy_device *phydev) |
793 | { | 825 | { |
794 | int imask; | 826 | int imask; |
@@ -1069,6 +1101,23 @@ static struct phy_driver marvell_drivers[] = { | |||
1069 | .suspend = &genphy_suspend, | 1101 | .suspend = &genphy_suspend, |
1070 | .driver = { .owner = THIS_MODULE }, | 1102 | .driver = { .owner = THIS_MODULE }, |
1071 | }, | 1103 | }, |
1104 | { | ||
1105 | .phy_id = MARVELL_PHY_ID_88E3016, | ||
1106 | .phy_id_mask = MARVELL_PHY_ID_MASK, | ||
1107 | .name = "Marvell 88E3016", | ||
1108 | .features = PHY_BASIC_FEATURES, | ||
1109 | .flags = PHY_HAS_INTERRUPT, | ||
1110 | .config_aneg = &genphy_config_aneg, | ||
1111 | .config_init = &m88e3016_config_init, | ||
1112 | .aneg_done = &marvell_aneg_done, | ||
1113 | .read_status = &marvell_read_status, | ||
1114 | .ack_interrupt = &marvell_ack_interrupt, | ||
1115 | .config_intr = &marvell_config_intr, | ||
1116 | .did_interrupt = &m88e1121_did_interrupt, | ||
1117 | .resume = &genphy_resume, | ||
1118 | .suspend = &genphy_suspend, | ||
1119 | .driver = { .owner = THIS_MODULE }, | ||
1120 | }, | ||
1072 | }; | 1121 | }; |
1073 | 1122 | ||
1074 | static int __init marvell_init(void) | 1123 | static int __init marvell_init(void) |
@@ -1098,6 +1147,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { | |||
1098 | { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK }, | 1147 | { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK }, |
1099 | { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK }, | 1148 | { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK }, |
1100 | { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK }, | 1149 | { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK }, |
1150 | { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, | ||
1101 | { } | 1151 | { } |
1102 | }; | 1152 | }; |
1103 | 1153 | ||
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index c6554c7a8147..8ded08e027fb 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -486,7 +486,7 @@ struct tally_counter { | |||
486 | __le64 rx_broadcast; | 486 | __le64 rx_broadcast; |
487 | __le32 rx_multicast; | 487 | __le32 rx_multicast; |
488 | __le16 tx_aborted; | 488 | __le16 tx_aborted; |
489 | __le16 tx_underun; | 489 | __le16 tx_underrun; |
490 | }; | 490 | }; |
491 | 491 | ||
492 | struct rx_desc { | 492 | struct rx_desc { |
@@ -690,6 +690,9 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, | |||
690 | } | 690 | } |
691 | } | 691 | } |
692 | 692 | ||
693 | if (ret == -ENODEV) | ||
694 | set_bit(RTL8152_UNPLUG, &tp->flags); | ||
695 | |||
693 | return ret; | 696 | return ret; |
694 | } | 697 | } |
695 | 698 | ||
@@ -757,6 +760,9 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen, | |||
757 | } | 760 | } |
758 | 761 | ||
759 | error1: | 762 | error1: |
763 | if (ret == -ENODEV) | ||
764 | set_bit(RTL8152_UNPLUG, &tp->flags); | ||
765 | |||
760 | return ret; | 766 | return ret; |
761 | } | 767 | } |
762 | 768 | ||
@@ -1083,6 +1089,7 @@ static void read_bulk_callback(struct urb *urb) | |||
1083 | 1089 | ||
1084 | result = r8152_submit_rx(tp, agg, GFP_ATOMIC); | 1090 | result = r8152_submit_rx(tp, agg, GFP_ATOMIC); |
1085 | if (result == -ENODEV) { | 1091 | if (result == -ENODEV) { |
1092 | set_bit(RTL8152_UNPLUG, &tp->flags); | ||
1086 | netif_device_detach(tp->netdev); | 1093 | netif_device_detach(tp->netdev); |
1087 | } else if (result) { | 1094 | } else if (result) { |
1088 | spin_lock(&tp->rx_lock); | 1095 | spin_lock(&tp->rx_lock); |
@@ -1190,11 +1197,13 @@ static void intr_callback(struct urb *urb) | |||
1190 | 1197 | ||
1191 | resubmit: | 1198 | resubmit: |
1192 | res = usb_submit_urb(urb, GFP_ATOMIC); | 1199 | res = usb_submit_urb(urb, GFP_ATOMIC); |
1193 | if (res == -ENODEV) | 1200 | if (res == -ENODEV) { |
1201 | set_bit(RTL8152_UNPLUG, &tp->flags); | ||
1194 | netif_device_detach(tp->netdev); | 1202 | netif_device_detach(tp->netdev); |
1195 | else if (res) | 1203 | } else if (res) { |
1196 | netif_err(tp, intr, tp->netdev, | 1204 | netif_err(tp, intr, tp->netdev, |
1197 | "can't resubmit intr, status %d\n", res); | 1205 | "can't resubmit intr, status %d\n", res); |
1206 | } | ||
1198 | } | 1207 | } |
1199 | 1208 | ||
1200 | static inline void *rx_agg_align(void *data) | 1209 | static inline void *rx_agg_align(void *data) |
@@ -1758,6 +1767,7 @@ static void tx_bottom(struct r8152 *tp) | |||
1758 | struct net_device *netdev = tp->netdev; | 1767 | struct net_device *netdev = tp->netdev; |
1759 | 1768 | ||
1760 | if (res == -ENODEV) { | 1769 | if (res == -ENODEV) { |
1770 | set_bit(RTL8152_UNPLUG, &tp->flags); | ||
1761 | netif_device_detach(netdev); | 1771 | netif_device_detach(netdev); |
1762 | } else { | 1772 | } else { |
1763 | struct net_device_stats *stats = &netdev->stats; | 1773 | struct net_device_stats *stats = &netdev->stats; |
@@ -3427,7 +3437,7 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev, | |||
3427 | data[9] = le64_to_cpu(tally.rx_broadcast); | 3437 | data[9] = le64_to_cpu(tally.rx_broadcast); |
3428 | data[10] = le32_to_cpu(tally.rx_multicast); | 3438 | data[10] = le32_to_cpu(tally.rx_multicast); |
3429 | data[11] = le16_to_cpu(tally.tx_aborted); | 3439 | data[11] = le16_to_cpu(tally.tx_aborted); |
3430 | data[12] = le16_to_cpu(tally.tx_underun); | 3440 | data[12] = le16_to_cpu(tally.tx_underrun); |
3431 | } | 3441 | } |
3432 | 3442 | ||
3433 | static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data) | 3443 | static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data) |
@@ -3565,11 +3575,33 @@ out: | |||
3565 | return ret; | 3575 | return ret; |
3566 | } | 3576 | } |
3567 | 3577 | ||
3578 | static int rtl8152_nway_reset(struct net_device *dev) | ||
3579 | { | ||
3580 | struct r8152 *tp = netdev_priv(dev); | ||
3581 | int ret; | ||
3582 | |||
3583 | ret = usb_autopm_get_interface(tp->intf); | ||
3584 | if (ret < 0) | ||
3585 | goto out; | ||
3586 | |||
3587 | mutex_lock(&tp->control); | ||
3588 | |||
3589 | ret = mii_nway_restart(&tp->mii); | ||
3590 | |||
3591 | mutex_unlock(&tp->control); | ||
3592 | |||
3593 | usb_autopm_put_interface(tp->intf); | ||
3594 | |||
3595 | out: | ||
3596 | return ret; | ||
3597 | } | ||
3598 | |||
3568 | static struct ethtool_ops ops = { | 3599 | static struct ethtool_ops ops = { |
3569 | .get_drvinfo = rtl8152_get_drvinfo, | 3600 | .get_drvinfo = rtl8152_get_drvinfo, |
3570 | .get_settings = rtl8152_get_settings, | 3601 | .get_settings = rtl8152_get_settings, |
3571 | .set_settings = rtl8152_set_settings, | 3602 | .set_settings = rtl8152_set_settings, |
3572 | .get_link = ethtool_op_get_link, | 3603 | .get_link = ethtool_op_get_link, |
3604 | .nway_reset = rtl8152_nway_reset, | ||
3573 | .get_msglevel = rtl8152_get_msglevel, | 3605 | .get_msglevel = rtl8152_get_msglevel, |
3574 | .set_msglevel = rtl8152_set_msglevel, | 3606 | .set_msglevel = rtl8152_set_msglevel, |
3575 | .get_wol = rtl8152_get_wol, | 3607 | .get_wol = rtl8152_get_wol, |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 895fe84011e7..a6a32d337bbb 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -235,10 +235,10 @@ static void xenvif_down(struct xenvif *vif) | |||
235 | 235 | ||
236 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { | 236 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { |
237 | queue = &vif->queues[queue_index]; | 237 | queue = &vif->queues[queue_index]; |
238 | napi_disable(&queue->napi); | ||
239 | disable_irq(queue->tx_irq); | 238 | disable_irq(queue->tx_irq); |
240 | if (queue->tx_irq != queue->rx_irq) | 239 | if (queue->tx_irq != queue->rx_irq) |
241 | disable_irq(queue->rx_irq); | 240 | disable_irq(queue->rx_irq); |
241 | napi_disable(&queue->napi); | ||
242 | del_timer_sync(&queue->credit_timeout); | 242 | del_timer_sync(&queue->credit_timeout); |
243 | } | 243 | } |
244 | } | 244 | } |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 6563f0713fc0..45755f9aa3f9 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
@@ -1550,7 +1550,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s | |||
1550 | unsigned int len; | 1550 | unsigned int len; |
1551 | 1551 | ||
1552 | BUG_ON(i >= MAX_SKB_FRAGS); | 1552 | BUG_ON(i >= MAX_SKB_FRAGS); |
1553 | page = alloc_page(GFP_ATOMIC|__GFP_COLD); | 1553 | page = alloc_page(GFP_ATOMIC); |
1554 | if (!page) { | 1554 | if (!page) { |
1555 | int j; | 1555 | int j; |
1556 | skb->truesize += skb->data_len; | 1556 | skb->truesize += skb->data_len; |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index cca871346a0f..88a70f5ed594 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -77,7 +77,9 @@ struct netfront_cb { | |||
77 | 77 | ||
78 | #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) | 78 | #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) |
79 | #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) | 79 | #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) |
80 | #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256) | 80 | |
81 | /* Minimum number of Rx slots (includes slot for GSO metadata). */ | ||
82 | #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1) | ||
81 | 83 | ||
82 | /* Queue name is interface name with "-qNNN" appended */ | 84 | /* Queue name is interface name with "-qNNN" appended */ |
83 | #define QUEUE_NAME_SIZE (IFNAMSIZ + 6) | 85 | #define QUEUE_NAME_SIZE (IFNAMSIZ + 6) |
@@ -137,13 +139,6 @@ struct netfront_queue { | |||
137 | struct xen_netif_rx_front_ring rx; | 139 | struct xen_netif_rx_front_ring rx; |
138 | int rx_ring_ref; | 140 | int rx_ring_ref; |
139 | 141 | ||
140 | /* Receive-ring batched refills. */ | ||
141 | #define RX_MIN_TARGET 8 | ||
142 | #define RX_DFL_MIN_TARGET 64 | ||
143 | #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) | ||
144 | unsigned rx_min_target, rx_max_target, rx_target; | ||
145 | struct sk_buff_head rx_batch; | ||
146 | |||
147 | struct timer_list rx_refill_timer; | 142 | struct timer_list rx_refill_timer; |
148 | 143 | ||
149 | struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; | 144 | struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; |
@@ -251,7 +246,7 @@ static void rx_refill_timeout(unsigned long data) | |||
251 | static int netfront_tx_slot_available(struct netfront_queue *queue) | 246 | static int netfront_tx_slot_available(struct netfront_queue *queue) |
252 | { | 247 | { |
253 | return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < | 248 | return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < |
254 | (TX_MAX_TARGET - MAX_SKB_FRAGS - 2); | 249 | (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2); |
255 | } | 250 | } |
256 | 251 | ||
257 | static void xennet_maybe_wake_tx(struct netfront_queue *queue) | 252 | static void xennet_maybe_wake_tx(struct netfront_queue *queue) |
@@ -265,77 +260,55 @@ static void xennet_maybe_wake_tx(struct netfront_queue *queue) | |||
265 | netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); | 260 | netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); |
266 | } | 261 | } |
267 | 262 | ||
268 | static void xennet_alloc_rx_buffers(struct netfront_queue *queue) | 263 | |
264 | static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) | ||
269 | { | 265 | { |
270 | unsigned short id; | ||
271 | struct sk_buff *skb; | 266 | struct sk_buff *skb; |
272 | struct page *page; | 267 | struct page *page; |
273 | int i, batch_target, notify; | ||
274 | RING_IDX req_prod = queue->rx.req_prod_pvt; | ||
275 | grant_ref_t ref; | ||
276 | unsigned long pfn; | ||
277 | void *vaddr; | ||
278 | struct xen_netif_rx_request *req; | ||
279 | 268 | ||
280 | if (unlikely(!netif_carrier_ok(queue->info->netdev))) | 269 | skb = __netdev_alloc_skb(queue->info->netdev, |
281 | return; | 270 | RX_COPY_THRESHOLD + NET_IP_ALIGN, |
271 | GFP_ATOMIC | __GFP_NOWARN); | ||
272 | if (unlikely(!skb)) | ||
273 | return NULL; | ||
282 | 274 | ||
283 | /* | 275 | page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); |
284 | * Allocate skbuffs greedily, even though we batch updates to the | 276 | if (!page) { |
285 | * receive ring. This creates a less bursty demand on the memory | 277 | kfree_skb(skb); |
286 | * allocator, so should reduce the chance of failed allocation requests | 278 | return NULL; |
287 | * both for ourself and for other kernel subsystems. | ||
288 | */ | ||
289 | batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons); | ||
290 | for (i = skb_queue_len(&queue->rx_batch); i < batch_target; i++) { | ||
291 | skb = __netdev_alloc_skb(queue->info->netdev, | ||
292 | RX_COPY_THRESHOLD + NET_IP_ALIGN, | ||
293 | GFP_ATOMIC | __GFP_NOWARN); | ||
294 | if (unlikely(!skb)) | ||
295 | goto no_skb; | ||
296 | |||
297 | /* Align ip header to a 16 bytes boundary */ | ||
298 | skb_reserve(skb, NET_IP_ALIGN); | ||
299 | |||
300 | page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); | ||
301 | if (!page) { | ||
302 | kfree_skb(skb); | ||
303 | no_skb: | ||
304 | /* Could not allocate any skbuffs. Try again later. */ | ||
305 | mod_timer(&queue->rx_refill_timer, | ||
306 | jiffies + (HZ/10)); | ||
307 | |||
308 | /* Any skbuffs queued for refill? Force them out. */ | ||
309 | if (i != 0) | ||
310 | goto refill; | ||
311 | break; | ||
312 | } | ||
313 | |||
314 | skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); | ||
315 | __skb_queue_tail(&queue->rx_batch, skb); | ||
316 | } | 279 | } |
280 | skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); | ||
281 | |||
282 | /* Align ip header to a 16 bytes boundary */ | ||
283 | skb_reserve(skb, NET_IP_ALIGN); | ||
284 | skb->dev = queue->info->netdev; | ||
285 | |||
286 | return skb; | ||
287 | } | ||
288 | |||
317 | 289 | ||
318 | /* Is the batch large enough to be worthwhile? */ | 290 | static void xennet_alloc_rx_buffers(struct netfront_queue *queue) |
319 | if (i < (queue->rx_target/2)) { | 291 | { |
320 | if (req_prod > queue->rx.sring->req_prod) | 292 | RING_IDX req_prod = queue->rx.req_prod_pvt; |
321 | goto push; | 293 | int notify; |
294 | |||
295 | if (unlikely(!netif_carrier_ok(queue->info->netdev))) | ||
322 | return; | 296 | return; |
323 | } | ||
324 | 297 | ||
325 | /* Adjust our fill target if we risked running out of buffers. */ | 298 | for (req_prod = queue->rx.req_prod_pvt; |
326 | if (((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) && | 299 | req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE; |
327 | ((queue->rx_target *= 2) > queue->rx_max_target)) | 300 | req_prod++) { |
328 | queue->rx_target = queue->rx_max_target; | 301 | struct sk_buff *skb; |
302 | unsigned short id; | ||
303 | grant_ref_t ref; | ||
304 | unsigned long pfn; | ||
305 | struct xen_netif_rx_request *req; | ||
329 | 306 | ||
330 | refill: | 307 | skb = xennet_alloc_one_rx_buffer(queue); |
331 | for (i = 0; ; i++) { | 308 | if (!skb) |
332 | skb = __skb_dequeue(&queue->rx_batch); | ||
333 | if (skb == NULL) | ||
334 | break; | 309 | break; |
335 | 310 | ||
336 | skb->dev = queue->info->netdev; | 311 | id = xennet_rxidx(req_prod); |
337 | |||
338 | id = xennet_rxidx(req_prod + i); | ||
339 | 312 | ||
340 | BUG_ON(queue->rx_skbs[id]); | 313 | BUG_ON(queue->rx_skbs[id]); |
341 | queue->rx_skbs[id] = skb; | 314 | queue->rx_skbs[id] = skb; |
@@ -345,9 +318,8 @@ no_skb: | |||
345 | queue->grant_rx_ref[id] = ref; | 318 | queue->grant_rx_ref[id] = ref; |
346 | 319 | ||
347 | pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); | 320 | pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); |
348 | vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0])); | ||
349 | 321 | ||
350 | req = RING_GET_REQUEST(&queue->rx, req_prod + i); | 322 | req = RING_GET_REQUEST(&queue->rx, req_prod); |
351 | gnttab_grant_foreign_access_ref(ref, | 323 | gnttab_grant_foreign_access_ref(ref, |
352 | queue->info->xbdev->otherend_id, | 324 | queue->info->xbdev->otherend_id, |
353 | pfn_to_mfn(pfn), | 325 | pfn_to_mfn(pfn), |
@@ -357,11 +329,16 @@ no_skb: | |||
357 | req->gref = ref; | 329 | req->gref = ref; |
358 | } | 330 | } |
359 | 331 | ||
332 | queue->rx.req_prod_pvt = req_prod; | ||
333 | |||
334 | /* Not enough requests? Try again later. */ | ||
335 | if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) { | ||
336 | mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); | ||
337 | return; | ||
338 | } | ||
339 | |||
360 | wmb(); /* barrier so backend seens requests */ | 340 | wmb(); /* barrier so backend seens requests */ |
361 | 341 | ||
362 | /* Above is a suitable barrier to ensure backend will see requests. */ | ||
363 | queue->rx.req_prod_pvt = req_prod + i; | ||
364 | push: | ||
365 | RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); | 342 | RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); |
366 | if (notify) | 343 | if (notify) |
367 | notify_remote_via_irq(queue->rx_irq); | 344 | notify_remote_via_irq(queue->rx_irq); |
@@ -1070,13 +1047,6 @@ err: | |||
1070 | 1047 | ||
1071 | work_done -= handle_incoming_queue(queue, &rxq); | 1048 | work_done -= handle_incoming_queue(queue, &rxq); |
1072 | 1049 | ||
1073 | /* If we get a callback with very few responses, reduce fill target. */ | ||
1074 | /* NB. Note exponential increase, linear decrease. */ | ||
1075 | if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) > | ||
1076 | ((3*queue->rx_target) / 4)) && | ||
1077 | (--queue->rx_target < queue->rx_min_target)) | ||
1078 | queue->rx_target = queue->rx_min_target; | ||
1079 | |||
1080 | xennet_alloc_rx_buffers(queue); | 1050 | xennet_alloc_rx_buffers(queue); |
1081 | 1051 | ||
1082 | if (work_done < budget) { | 1052 | if (work_done < budget) { |
@@ -1643,11 +1613,6 @@ static int xennet_init_queue(struct netfront_queue *queue) | |||
1643 | spin_lock_init(&queue->tx_lock); | 1613 | spin_lock_init(&queue->tx_lock); |
1644 | spin_lock_init(&queue->rx_lock); | 1614 | spin_lock_init(&queue->rx_lock); |
1645 | 1615 | ||
1646 | skb_queue_head_init(&queue->rx_batch); | ||
1647 | queue->rx_target = RX_DFL_MIN_TARGET; | ||
1648 | queue->rx_min_target = RX_DFL_MIN_TARGET; | ||
1649 | queue->rx_max_target = RX_MAX_TARGET; | ||
1650 | |||
1651 | init_timer(&queue->rx_refill_timer); | 1616 | init_timer(&queue->rx_refill_timer); |
1652 | queue->rx_refill_timer.data = (unsigned long)queue; | 1617 | queue->rx_refill_timer.data = (unsigned long)queue; |
1653 | queue->rx_refill_timer.function = rx_refill_timeout; | 1618 | queue->rx_refill_timer.function = rx_refill_timeout; |
@@ -1670,7 +1635,7 @@ static int xennet_init_queue(struct netfront_queue *queue) | |||
1670 | } | 1635 | } |
1671 | 1636 | ||
1672 | /* A grant for every tx ring slot */ | 1637 | /* A grant for every tx ring slot */ |
1673 | if (gnttab_alloc_grant_references(TX_MAX_TARGET, | 1638 | if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, |
1674 | &queue->gref_tx_head) < 0) { | 1639 | &queue->gref_tx_head) < 0) { |
1675 | pr_alert("can't alloc tx grant refs\n"); | 1640 | pr_alert("can't alloc tx grant refs\n"); |
1676 | err = -ENOMEM; | 1641 | err = -ENOMEM; |
@@ -1678,7 +1643,7 @@ static int xennet_init_queue(struct netfront_queue *queue) | |||
1678 | } | 1643 | } |
1679 | 1644 | ||
1680 | /* A grant for every rx ring slot */ | 1645 | /* A grant for every rx ring slot */ |
1681 | if (gnttab_alloc_grant_references(RX_MAX_TARGET, | 1646 | if (gnttab_alloc_grant_references(NET_RX_RING_SIZE, |
1682 | &queue->gref_rx_head) < 0) { | 1647 | &queue->gref_rx_head) < 0) { |
1683 | pr_alert("can't alloc rx grant refs\n"); | 1648 | pr_alert("can't alloc rx grant refs\n"); |
1684 | err = -ENOMEM; | 1649 | err = -ENOMEM; |
@@ -2146,83 +2111,18 @@ static const struct ethtool_ops xennet_ethtool_ops = | |||
2146 | }; | 2111 | }; |
2147 | 2112 | ||
2148 | #ifdef CONFIG_SYSFS | 2113 | #ifdef CONFIG_SYSFS |
2149 | static ssize_t show_rxbuf_min(struct device *dev, | 2114 | static ssize_t show_rxbuf(struct device *dev, |
2150 | struct device_attribute *attr, char *buf) | 2115 | struct device_attribute *attr, char *buf) |
2151 | { | ||
2152 | struct net_device *netdev = to_net_dev(dev); | ||
2153 | struct netfront_info *info = netdev_priv(netdev); | ||
2154 | unsigned int num_queues = netdev->real_num_tx_queues; | ||
2155 | |||
2156 | if (num_queues) | ||
2157 | return sprintf(buf, "%u\n", info->queues[0].rx_min_target); | ||
2158 | else | ||
2159 | return sprintf(buf, "%u\n", RX_MIN_TARGET); | ||
2160 | } | ||
2161 | |||
2162 | static ssize_t store_rxbuf_min(struct device *dev, | ||
2163 | struct device_attribute *attr, | ||
2164 | const char *buf, size_t len) | ||
2165 | { | 2116 | { |
2166 | struct net_device *netdev = to_net_dev(dev); | 2117 | return sprintf(buf, "%lu\n", NET_RX_RING_SIZE); |
2167 | struct netfront_info *np = netdev_priv(netdev); | ||
2168 | unsigned int num_queues = netdev->real_num_tx_queues; | ||
2169 | char *endp; | ||
2170 | unsigned long target; | ||
2171 | unsigned int i; | ||
2172 | struct netfront_queue *queue; | ||
2173 | |||
2174 | if (!capable(CAP_NET_ADMIN)) | ||
2175 | return -EPERM; | ||
2176 | |||
2177 | target = simple_strtoul(buf, &endp, 0); | ||
2178 | if (endp == buf) | ||
2179 | return -EBADMSG; | ||
2180 | |||
2181 | if (target < RX_MIN_TARGET) | ||
2182 | target = RX_MIN_TARGET; | ||
2183 | if (target > RX_MAX_TARGET) | ||
2184 | target = RX_MAX_TARGET; | ||
2185 | |||
2186 | for (i = 0; i < num_queues; ++i) { | ||
2187 | queue = &np->queues[i]; | ||
2188 | spin_lock_bh(&queue->rx_lock); | ||
2189 | if (target > queue->rx_max_target) | ||
2190 | queue->rx_max_target = target; | ||
2191 | queue->rx_min_target = target; | ||
2192 | if (target > queue->rx_target) | ||
2193 | queue->rx_target = target; | ||
2194 | |||
2195 | xennet_alloc_rx_buffers(queue); | ||
2196 | |||
2197 | spin_unlock_bh(&queue->rx_lock); | ||
2198 | } | ||
2199 | return len; | ||
2200 | } | ||
2201 | |||
2202 | static ssize_t show_rxbuf_max(struct device *dev, | ||
2203 | struct device_attribute *attr, char *buf) | ||
2204 | { | ||
2205 | struct net_device *netdev = to_net_dev(dev); | ||
2206 | struct netfront_info *info = netdev_priv(netdev); | ||
2207 | unsigned int num_queues = netdev->real_num_tx_queues; | ||
2208 | |||
2209 | if (num_queues) | ||
2210 | return sprintf(buf, "%u\n", info->queues[0].rx_max_target); | ||
2211 | else | ||
2212 | return sprintf(buf, "%u\n", RX_MAX_TARGET); | ||
2213 | } | 2118 | } |
2214 | 2119 | ||
2215 | static ssize_t store_rxbuf_max(struct device *dev, | 2120 | static ssize_t store_rxbuf(struct device *dev, |
2216 | struct device_attribute *attr, | 2121 | struct device_attribute *attr, |
2217 | const char *buf, size_t len) | 2122 | const char *buf, size_t len) |
2218 | { | 2123 | { |
2219 | struct net_device *netdev = to_net_dev(dev); | ||
2220 | struct netfront_info *np = netdev_priv(netdev); | ||
2221 | unsigned int num_queues = netdev->real_num_tx_queues; | ||
2222 | char *endp; | 2124 | char *endp; |
2223 | unsigned long target; | 2125 | unsigned long target; |
2224 | unsigned int i = 0; | ||
2225 | struct netfront_queue *queue = NULL; | ||
2226 | 2126 | ||
2227 | if (!capable(CAP_NET_ADMIN)) | 2127 | if (!capable(CAP_NET_ADMIN)) |
2228 | return -EPERM; | 2128 | return -EPERM; |
@@ -2231,44 +2131,15 @@ static ssize_t store_rxbuf_max(struct device *dev, | |||
2231 | if (endp == buf) | 2131 | if (endp == buf) |
2232 | return -EBADMSG; | 2132 | return -EBADMSG; |
2233 | 2133 | ||
2234 | if (target < RX_MIN_TARGET) | 2134 | /* rxbuf_min and rxbuf_max are no longer configurable. */ |
2235 | target = RX_MIN_TARGET; | ||
2236 | if (target > RX_MAX_TARGET) | ||
2237 | target = RX_MAX_TARGET; | ||
2238 | |||
2239 | for (i = 0; i < num_queues; ++i) { | ||
2240 | queue = &np->queues[i]; | ||
2241 | spin_lock_bh(&queue->rx_lock); | ||
2242 | if (target < queue->rx_min_target) | ||
2243 | queue->rx_min_target = target; | ||
2244 | queue->rx_max_target = target; | ||
2245 | if (target < queue->rx_target) | ||
2246 | queue->rx_target = target; | ||
2247 | |||
2248 | xennet_alloc_rx_buffers(queue); | ||
2249 | 2135 | ||
2250 | spin_unlock_bh(&queue->rx_lock); | ||
2251 | } | ||
2252 | return len; | 2136 | return len; |
2253 | } | 2137 | } |
2254 | 2138 | ||
2255 | static ssize_t show_rxbuf_cur(struct device *dev, | ||
2256 | struct device_attribute *attr, char *buf) | ||
2257 | { | ||
2258 | struct net_device *netdev = to_net_dev(dev); | ||
2259 | struct netfront_info *info = netdev_priv(netdev); | ||
2260 | unsigned int num_queues = netdev->real_num_tx_queues; | ||
2261 | |||
2262 | if (num_queues) | ||
2263 | return sprintf(buf, "%u\n", info->queues[0].rx_target); | ||
2264 | else | ||
2265 | return sprintf(buf, "0\n"); | ||
2266 | } | ||
2267 | |||
2268 | static struct device_attribute xennet_attrs[] = { | 2139 | static struct device_attribute xennet_attrs[] = { |
2269 | __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), | 2140 | __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf), |
2270 | __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), | 2141 | __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf), |
2271 | __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), | 2142 | __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL), |
2272 | }; | 2143 | }; |
2273 | 2144 | ||
2274 | static int xennet_sysfs_addif(struct net_device *netdev) | 2145 | static int xennet_sysfs_addif(struct net_device *netdev) |
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index 8b3f55991805..f1b5111bbaba 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig | |||
@@ -71,7 +71,7 @@ config CLAW | |||
71 | config QETH | 71 | config QETH |
72 | def_tristate y | 72 | def_tristate y |
73 | prompt "Gigabit Ethernet device support" | 73 | prompt "Gigabit Ethernet device support" |
74 | depends on CCW && NETDEVICES && IP_MULTICAST && QDIO | 74 | depends on CCW && NETDEVICES && IP_MULTICAST && QDIO && ETHERNET |
75 | help | 75 | help |
76 | This driver supports the IBM System z OSA Express adapters | 76 | This driver supports the IBM System z OSA Express adapters |
77 | in QDIO mode (all media types), HiperSockets interfaces and z/VM | 77 | in QDIO mode (all media types), HiperSockets interfaces and z/VM |
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c index 6bcfbbb20f04..47773c4d235a 100644 --- a/drivers/s390/net/ctcm_sysfs.c +++ b/drivers/s390/net/ctcm_sysfs.c | |||
@@ -44,8 +44,8 @@ static ssize_t ctcm_buffer_write(struct device *dev, | |||
44 | return -ENODEV; | 44 | return -ENODEV; |
45 | } | 45 | } |
46 | 46 | ||
47 | rc = sscanf(buf, "%u", &bs1); | 47 | rc = kstrtouint(buf, 0, &bs1); |
48 | if (rc != 1) | 48 | if (rc) |
49 | goto einval; | 49 | goto einval; |
50 | if (bs1 > CTCM_BUFSIZE_LIMIT) | 50 | if (bs1 > CTCM_BUFSIZE_LIMIT) |
51 | goto einval; | 51 | goto einval; |
@@ -151,8 +151,8 @@ static ssize_t ctcm_proto_store(struct device *dev, | |||
151 | 151 | ||
152 | if (!priv) | 152 | if (!priv) |
153 | return -ENODEV; | 153 | return -ENODEV; |
154 | rc = sscanf(buf, "%d", &value); | 154 | rc = kstrtoint(buf, 0, &value); |
155 | if ((rc != 1) || | 155 | if (rc || |
156 | !((value == CTCM_PROTO_S390) || | 156 | !((value == CTCM_PROTO_S390) || |
157 | (value == CTCM_PROTO_LINUX) || | 157 | (value == CTCM_PROTO_LINUX) || |
158 | (value == CTCM_PROTO_MPC) || | 158 | (value == CTCM_PROTO_MPC) || |
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 0a7d87c372b8..92190aa20b9f 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -1943,15 +1943,16 @@ static ssize_t | |||
1943 | lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1943 | lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
1944 | { | 1944 | { |
1945 | struct lcs_card *card; | 1945 | struct lcs_card *card; |
1946 | int value, rc; | 1946 | int rc; |
1947 | s16 value; | ||
1947 | 1948 | ||
1948 | card = dev_get_drvdata(dev); | 1949 | card = dev_get_drvdata(dev); |
1949 | 1950 | ||
1950 | if (!card) | 1951 | if (!card) |
1951 | return 0; | 1952 | return 0; |
1952 | 1953 | ||
1953 | rc = sscanf(buf, "%d", &value); | 1954 | rc = kstrtos16(buf, 0, &value); |
1954 | if (rc != 1) | 1955 | if (rc) |
1955 | return -EINVAL; | 1956 | return -EINVAL; |
1956 | /* TODO: sanity checks */ | 1957 | /* TODO: sanity checks */ |
1957 | card->portno = value; | 1958 | card->portno = value; |
@@ -2007,8 +2008,8 @@ lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char | |||
2007 | if (!card) | 2008 | if (!card) |
2008 | return 0; | 2009 | return 0; |
2009 | 2010 | ||
2010 | rc = sscanf(buf, "%u", &value); | 2011 | rc = kstrtouint(buf, 0, &value); |
2011 | if (rc != 1) | 2012 | if (rc) |
2012 | return -EINVAL; | 2013 | return -EINVAL; |
2013 | /* TODO: sanity checks */ | 2014 | /* TODO: sanity checks */ |
2014 | card->lancmd_timeout = value; | 2015 | card->lancmd_timeout = value; |
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index e7646ce3d659..7a8bb9f78e76 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -380,11 +380,6 @@ enum qeth_header_ids { | |||
380 | #define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20 | 380 | #define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20 |
381 | #define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/ | 381 | #define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/ |
382 | 382 | ||
383 | static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) | ||
384 | { | ||
385 | return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); | ||
386 | } | ||
387 | |||
388 | enum qeth_qdio_buffer_states { | 383 | enum qeth_qdio_buffer_states { |
389 | /* | 384 | /* |
390 | * inbound: read out by driver; owned by hardware in order to be filled | 385 | * inbound: read out by driver; owned by hardware in order to be filled |
@@ -843,13 +838,6 @@ struct qeth_trap_id { | |||
843 | /*some helper functions*/ | 838 | /*some helper functions*/ |
844 | #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") | 839 | #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") |
845 | 840 | ||
846 | static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev) | ||
847 | { | ||
848 | struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *) | ||
849 | dev_get_drvdata(&cdev->dev))->dev); | ||
850 | return card; | ||
851 | } | ||
852 | |||
853 | static inline int qeth_get_micros(void) | 841 | static inline int qeth_get_micros(void) |
854 | { | 842 | { |
855 | return (int) (get_tod_clock() >> 12); | 843 | return (int) (get_tod_clock() >> 12); |
@@ -894,7 +882,6 @@ const char *qeth_get_cardname_short(struct qeth_card *); | |||
894 | int qeth_realloc_buffer_pool(struct qeth_card *, int); | 882 | int qeth_realloc_buffer_pool(struct qeth_card *, int); |
895 | int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); | 883 | int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); |
896 | void qeth_core_free_discipline(struct qeth_card *); | 884 | void qeth_core_free_discipline(struct qeth_card *); |
897 | void qeth_buffer_reclaim_work(struct work_struct *); | ||
898 | 885 | ||
899 | /* exports for qeth discipline device drivers */ | 886 | /* exports for qeth discipline device drivers */ |
900 | extern struct qeth_card_list_struct qeth_core_card_list; | 887 | extern struct qeth_card_list_struct qeth_core_card_list; |
@@ -913,7 +900,6 @@ int qeth_core_hardsetup_card(struct qeth_card *); | |||
913 | void qeth_print_status_message(struct qeth_card *); | 900 | void qeth_print_status_message(struct qeth_card *); |
914 | int qeth_init_qdio_queues(struct qeth_card *); | 901 | int qeth_init_qdio_queues(struct qeth_card *); |
915 | int qeth_send_startlan(struct qeth_card *); | 902 | int qeth_send_startlan(struct qeth_card *); |
916 | int qeth_send_stoplan(struct qeth_card *); | ||
917 | int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, | 903 | int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, |
918 | int (*reply_cb) | 904 | int (*reply_cb) |
919 | (struct qeth_card *, struct qeth_reply *, unsigned long), | 905 | (struct qeth_card *, struct qeth_reply *, unsigned long), |
@@ -954,8 +940,6 @@ int qeth_snmp_command(struct qeth_card *, char __user *); | |||
954 | int qeth_query_oat_command(struct qeth_card *, char __user *); | 940 | int qeth_query_oat_command(struct qeth_card *, char __user *); |
955 | int qeth_query_switch_attributes(struct qeth_card *card, | 941 | int qeth_query_switch_attributes(struct qeth_card *card, |
956 | struct qeth_switch_info *sw_info); | 942 | struct qeth_switch_info *sw_info); |
957 | int qeth_query_card_info(struct qeth_card *card, | ||
958 | struct carrier_info *carrier_info); | ||
959 | int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, | 943 | int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, |
960 | int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), | 944 | int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), |
961 | void *reply_param); | 945 | void *reply_param); |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index fd22c811cbe1..f407e3763432 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -718,6 +718,13 @@ static int qeth_check_idx_response(struct qeth_card *card, | |||
718 | return 0; | 718 | return 0; |
719 | } | 719 | } |
720 | 720 | ||
721 | static struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev) | ||
722 | { | ||
723 | struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *) | ||
724 | dev_get_drvdata(&cdev->dev))->dev); | ||
725 | return card; | ||
726 | } | ||
727 | |||
721 | static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob, | 728 | static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob, |
722 | __u32 len) | 729 | __u32 len) |
723 | { | 730 | { |
@@ -1431,6 +1438,7 @@ static void qeth_start_kernel_thread(struct work_struct *work) | |||
1431 | } | 1438 | } |
1432 | } | 1439 | } |
1433 | 1440 | ||
1441 | static void qeth_buffer_reclaim_work(struct work_struct *); | ||
1434 | static int qeth_setup_card(struct qeth_card *card) | 1442 | static int qeth_setup_card(struct qeth_card *card) |
1435 | { | 1443 | { |
1436 | 1444 | ||
@@ -3232,7 +3240,7 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf, | |||
3232 | } | 3240 | } |
3233 | EXPORT_SYMBOL_GPL(qeth_check_qdio_errors); | 3241 | EXPORT_SYMBOL_GPL(qeth_check_qdio_errors); |
3234 | 3242 | ||
3235 | void qeth_buffer_reclaim_work(struct work_struct *work) | 3243 | static void qeth_buffer_reclaim_work(struct work_struct *work) |
3236 | { | 3244 | { |
3237 | struct qeth_card *card = container_of(work, struct qeth_card, | 3245 | struct qeth_card *card = container_of(work, struct qeth_card, |
3238 | buffer_reclaim_work.work); | 3246 | buffer_reclaim_work.work); |
@@ -4126,7 +4134,7 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, | |||
4126 | 4134 | ||
4127 | qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); | 4135 | qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); |
4128 | if (cmd->hdr.return_code) { | 4136 | if (cmd->hdr.return_code) { |
4129 | QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code); | 4137 | QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); |
4130 | setparms->data.mode = SET_PROMISC_MODE_OFF; | 4138 | setparms->data.mode = SET_PROMISC_MODE_OFF; |
4131 | } | 4139 | } |
4132 | card->info.promisc_mode = setparms->data.mode; | 4140 | card->info.promisc_mode = setparms->data.mode; |
@@ -4493,13 +4501,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card, | |||
4493 | snmp = &cmd->data.setadapterparms.data.snmp; | 4501 | snmp = &cmd->data.setadapterparms.data.snmp; |
4494 | 4502 | ||
4495 | if (cmd->hdr.return_code) { | 4503 | if (cmd->hdr.return_code) { |
4496 | QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code); | 4504 | QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); |
4497 | return 0; | 4505 | return 0; |
4498 | } | 4506 | } |
4499 | if (cmd->data.setadapterparms.hdr.return_code) { | 4507 | if (cmd->data.setadapterparms.hdr.return_code) { |
4500 | cmd->hdr.return_code = | 4508 | cmd->hdr.return_code = |
4501 | cmd->data.setadapterparms.hdr.return_code; | 4509 | cmd->data.setadapterparms.hdr.return_code; |
4502 | QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code); | 4510 | QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); |
4503 | return 0; | 4511 | return 0; |
4504 | } | 4512 | } |
4505 | data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); | 4513 | data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); |
@@ -4717,7 +4725,7 @@ static int qeth_query_card_info_cb(struct qeth_card *card, | |||
4717 | return 0; | 4725 | return 0; |
4718 | } | 4726 | } |
4719 | 4727 | ||
4720 | int qeth_query_card_info(struct qeth_card *card, | 4728 | static int qeth_query_card_info(struct qeth_card *card, |
4721 | struct carrier_info *carrier_info) | 4729 | struct carrier_info *carrier_info) |
4722 | { | 4730 | { |
4723 | struct qeth_cmd_buffer *iob; | 4731 | struct qeth_cmd_buffer *iob; |
@@ -4730,7 +4738,6 @@ int qeth_query_card_info(struct qeth_card *card, | |||
4730 | return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, | 4738 | return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, |
4731 | (void *)carrier_info); | 4739 | (void *)carrier_info); |
4732 | } | 4740 | } |
4733 | EXPORT_SYMBOL_GPL(qeth_query_card_info); | ||
4734 | 4741 | ||
4735 | static inline int qeth_get_qdio_q_format(struct qeth_card *card) | 4742 | static inline int qeth_get_qdio_q_format(struct qeth_card *card) |
4736 | { | 4743 | { |
@@ -5113,6 +5120,11 @@ static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer, | |||
5113 | return 0; | 5120 | return 0; |
5114 | } | 5121 | } |
5115 | 5122 | ||
5123 | static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) | ||
5124 | { | ||
5125 | return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); | ||
5126 | } | ||
5127 | |||
5116 | struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, | 5128 | struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, |
5117 | struct qeth_qdio_buffer *qethbuffer, | 5129 | struct qeth_qdio_buffer *qethbuffer, |
5118 | struct qdio_buffer_element **__element, int *__offset, | 5130 | struct qdio_buffer_element **__element, int *__offset, |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index c2679bfe7f66..d02cd1a67943 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -1512,7 +1512,7 @@ static void qeth_bridge_state_change(struct qeth_card *card, | |||
1512 | 1512 | ||
1513 | QETH_CARD_TEXT(card, 2, "brstchng"); | 1513 | QETH_CARD_TEXT(card, 2, "brstchng"); |
1514 | if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { | 1514 | if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { |
1515 | QETH_CARD_TEXT_(card, 2, "BPsz%.8d", qports->entry_length); | 1515 | QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length); |
1516 | return; | 1516 | return; |
1517 | } | 1517 | } |
1518 | extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries; | 1518 | extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries; |
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index 29c1c00e3a0f..551a4b4c03fd 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h | |||
@@ -42,10 +42,6 @@ struct qeth_ipato_entry { | |||
42 | }; | 42 | }; |
43 | 43 | ||
44 | 44 | ||
45 | void qeth_l3_ipaddr4_to_string(const __u8 *, char *); | ||
46 | int qeth_l3_string_to_ipaddr4(const char *, __u8 *); | ||
47 | void qeth_l3_ipaddr6_to_string(const __u8 *, char *); | ||
48 | int qeth_l3_string_to_ipaddr6(const char *, __u8 *); | ||
49 | void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *); | 45 | void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *); |
50 | int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *); | 46 | int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *); |
51 | int qeth_l3_create_device_attributes(struct device *); | 47 | int qeth_l3_create_device_attributes(struct device *); |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index afebb9709763..625227ad16ee 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -55,12 +55,12 @@ static int qeth_l3_isxdigit(char *buf) | |||
55 | return 1; | 55 | return 1; |
56 | } | 56 | } |
57 | 57 | ||
58 | void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf) | 58 | static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf) |
59 | { | 59 | { |
60 | sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]); | 60 | sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]); |
61 | } | 61 | } |
62 | 62 | ||
63 | int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr) | 63 | static int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr) |
64 | { | 64 | { |
65 | int count = 0, rc = 0; | 65 | int count = 0, rc = 0; |
66 | unsigned int in[4]; | 66 | unsigned int in[4]; |
@@ -78,12 +78,12 @@ int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr) | |||
78 | return 0; | 78 | return 0; |
79 | } | 79 | } |
80 | 80 | ||
81 | void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf) | 81 | static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf) |
82 | { | 82 | { |
83 | sprintf(buf, "%pI6", addr); | 83 | sprintf(buf, "%pI6", addr); |
84 | } | 84 | } |
85 | 85 | ||
86 | int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr) | 86 | static int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr) |
87 | { | 87 | { |
88 | const char *end, *end_tmp, *start; | 88 | const char *end, *end_tmp, *start; |
89 | __u16 *in; | 89 | __u16 *in; |
@@ -2502,7 +2502,7 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) | |||
2502 | rc = -EFAULT; | 2502 | rc = -EFAULT; |
2503 | goto free_and_out; | 2503 | goto free_and_out; |
2504 | } | 2504 | } |
2505 | QETH_CARD_TEXT_(card, 4, "qacts"); | 2505 | QETH_CARD_TEXT(card, 4, "qacts"); |
2506 | } | 2506 | } |
2507 | free_and_out: | 2507 | free_and_out: |
2508 | kfree(qinfo.udata); | 2508 | kfree(qinfo.udata); |
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ff560537dd61..7121a2e97ce2 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -42,6 +42,7 @@ struct ipv6_devconf { | |||
42 | __s32 accept_ra_from_local; | 42 | __s32 accept_ra_from_local; |
43 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 43 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
44 | __s32 optimistic_dad; | 44 | __s32 optimistic_dad; |
45 | __s32 use_optimistic; | ||
45 | #endif | 46 | #endif |
46 | #ifdef CONFIG_IPV6_MROUTE | 47 | #ifdef CONFIG_IPV6_MROUTE |
47 | __s32 mc_forwarding; | 48 | __s32 mc_forwarding; |
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 8e9a029e093d..e6982ac3200d 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #define MARVELL_PHY_ID_88E1318S 0x01410e90 | 16 | #define MARVELL_PHY_ID_88E1318S 0x01410e90 |
17 | #define MARVELL_PHY_ID_88E1116R 0x01410e40 | 17 | #define MARVELL_PHY_ID_88E1116R 0x01410e40 |
18 | #define MARVELL_PHY_ID_88E1510 0x01410dd0 | 18 | #define MARVELL_PHY_ID_88E1510 0x01410dd0 |
19 | #define MARVELL_PHY_ID_88E3016 0x01410e60 | ||
19 | 20 | ||
20 | /* struct phy_device dev_flags definitions */ | 21 | /* struct phy_device dev_flags definitions */ |
21 | #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 | 22 | #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 |
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 379c02648ab3..ff5f5deb3dcf 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h | |||
@@ -67,6 +67,8 @@ enum { | |||
67 | MLX4_CMD_MAP_ICM_AUX = 0xffc, | 67 | MLX4_CMD_MAP_ICM_AUX = 0xffc, |
68 | MLX4_CMD_UNMAP_ICM_AUX = 0xffb, | 68 | MLX4_CMD_UNMAP_ICM_AUX = 0xffb, |
69 | MLX4_CMD_SET_ICM_SIZE = 0xffd, | 69 | MLX4_CMD_SET_ICM_SIZE = 0xffd, |
70 | MLX4_CMD_ACCESS_REG = 0x3b, | ||
71 | |||
70 | /*master notify fw on finish for slave's flr*/ | 72 | /*master notify fw on finish for slave's flr*/ |
71 | MLX4_CMD_INFORM_FLR_DONE = 0x5b, | 73 | MLX4_CMD_INFORM_FLR_DONE = 0x5b, |
72 | MLX4_CMD_GET_OP_REQ = 0x59, | 74 | MLX4_CMD_GET_OP_REQ = 0x59, |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 37e4404d0227..e4c136ebe79b 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -186,7 +186,9 @@ enum { | |||
186 | MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, | 186 | MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, |
187 | MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, | 187 | MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, |
188 | MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, | 188 | MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, |
189 | MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13 | 189 | MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13, |
190 | MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14, | ||
191 | MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15 | ||
190 | }; | 192 | }; |
191 | 193 | ||
192 | enum { | 194 | enum { |
@@ -379,6 +381,13 @@ enum { | |||
379 | #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ | 381 | #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ |
380 | MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) | 382 | MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) |
381 | 383 | ||
384 | enum mlx4_module_id { | ||
385 | MLX4_MODULE_ID_SFP = 0x3, | ||
386 | MLX4_MODULE_ID_QSFP = 0xC, | ||
387 | MLX4_MODULE_ID_QSFP_PLUS = 0xD, | ||
388 | MLX4_MODULE_ID_QSFP28 = 0x11, | ||
389 | }; | ||
390 | |||
382 | static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) | 391 | static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) |
383 | { | 392 | { |
384 | return (major << 32) | (minor << 16) | subminor; | 393 | return (major << 32) | (minor << 16) | subminor; |
@@ -799,6 +808,26 @@ struct mlx4_init_port_param { | |||
799 | u64 si_guid; | 808 | u64 si_guid; |
800 | }; | 809 | }; |
801 | 810 | ||
811 | #define MAD_IFC_DATA_SZ 192 | ||
812 | /* MAD IFC Mailbox */ | ||
813 | struct mlx4_mad_ifc { | ||
814 | u8 base_version; | ||
815 | u8 mgmt_class; | ||
816 | u8 class_version; | ||
817 | u8 method; | ||
818 | __be16 status; | ||
819 | __be16 class_specific; | ||
820 | __be64 tid; | ||
821 | __be16 attr_id; | ||
822 | __be16 resv; | ||
823 | __be32 attr_mod; | ||
824 | __be64 mkey; | ||
825 | __be16 dr_slid; | ||
826 | __be16 dr_dlid; | ||
827 | u8 reserved[28]; | ||
828 | u8 data[MAD_IFC_DATA_SZ]; | ||
829 | } __packed; | ||
830 | |||
802 | #define mlx4_foreach_port(port, dev, type) \ | 831 | #define mlx4_foreach_port(port, dev, type) \ |
803 | for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ | 832 | for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ |
804 | if ((type) == (dev)->caps.port_mask[(port)]) | 833 | if ((type) == (dev)->caps.port_mask[(port)]) |
@@ -1283,10 +1312,50 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, | |||
1283 | u64 iova, u64 size, int npages, | 1312 | u64 iova, u64 size, int npages, |
1284 | int page_shift, struct mlx4_mpt_entry *mpt_entry); | 1313 | int page_shift, struct mlx4_mpt_entry *mpt_entry); |
1285 | 1314 | ||
1315 | int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, | ||
1316 | u16 offset, u16 size, u8 *data); | ||
1317 | |||
1286 | /* Returns true if running in low memory profile (kdump kernel) */ | 1318 | /* Returns true if running in low memory profile (kdump kernel) */ |
1287 | static inline bool mlx4_low_memory_profile(void) | 1319 | static inline bool mlx4_low_memory_profile(void) |
1288 | { | 1320 | { |
1289 | return is_kdump_kernel(); | 1321 | return is_kdump_kernel(); |
1290 | } | 1322 | } |
1291 | 1323 | ||
1324 | /* ACCESS REG commands */ | ||
1325 | enum mlx4_access_reg_method { | ||
1326 | MLX4_ACCESS_REG_QUERY = 0x1, | ||
1327 | MLX4_ACCESS_REG_WRITE = 0x2, | ||
1328 | }; | ||
1329 | |||
1330 | /* ACCESS PTYS Reg command */ | ||
1331 | enum mlx4_ptys_proto { | ||
1332 | MLX4_PTYS_IB = 1<<0, | ||
1333 | MLX4_PTYS_EN = 1<<2, | ||
1334 | }; | ||
1335 | |||
1336 | struct mlx4_ptys_reg { | ||
1337 | u8 resrvd1; | ||
1338 | u8 local_port; | ||
1339 | u8 resrvd2; | ||
1340 | u8 proto_mask; | ||
1341 | __be32 resrvd3[2]; | ||
1342 | __be32 eth_proto_cap; | ||
1343 | __be16 ib_width_cap; | ||
1344 | __be16 ib_speed_cap; | ||
1345 | __be32 resrvd4; | ||
1346 | __be32 eth_proto_admin; | ||
1347 | __be16 ib_width_admin; | ||
1348 | __be16 ib_speed_admin; | ||
1349 | __be32 resrvd5; | ||
1350 | __be32 eth_proto_oper; | ||
1351 | __be16 ib_width_oper; | ||
1352 | __be16 ib_speed_oper; | ||
1353 | __be32 resrvd6; | ||
1354 | __be32 eth_proto_lp_adv; | ||
1355 | } __packed; | ||
1356 | |||
1357 | int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, | ||
1358 | enum mlx4_access_reg_method method, | ||
1359 | struct mlx4_ptys_reg *ptys_reg); | ||
1360 | |||
1292 | #endif /* MLX4_DEVICE_H */ | 1361 | #endif /* MLX4_DEVICE_H */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 74fd5d37f15a..c85e06512246 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -386,6 +386,7 @@ typedef enum rx_handler_result rx_handler_result_t; | |||
386 | typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); | 386 | typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); |
387 | 387 | ||
388 | void __napi_schedule(struct napi_struct *n); | 388 | void __napi_schedule(struct napi_struct *n); |
389 | void __napi_schedule_irqoff(struct napi_struct *n); | ||
389 | 390 | ||
390 | static inline bool napi_disable_pending(struct napi_struct *n) | 391 | static inline bool napi_disable_pending(struct napi_struct *n) |
391 | { | 392 | { |
@@ -420,6 +421,18 @@ static inline void napi_schedule(struct napi_struct *n) | |||
420 | __napi_schedule(n); | 421 | __napi_schedule(n); |
421 | } | 422 | } |
422 | 423 | ||
424 | /** | ||
425 | * napi_schedule_irqoff - schedule NAPI poll | ||
426 | * @n: napi context | ||
427 | * | ||
428 | * Variant of napi_schedule(), assuming hard irqs are masked. | ||
429 | */ | ||
430 | static inline void napi_schedule_irqoff(struct napi_struct *n) | ||
431 | { | ||
432 | if (napi_schedule_prep(n)) | ||
433 | __napi_schedule_irqoff(n); | ||
434 | } | ||
435 | |||
423 | /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ | 436 | /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ |
424 | static inline bool napi_reschedule(struct napi_struct *napi) | 437 | static inline bool napi_reschedule(struct napi_struct *napi) |
425 | { | 438 | { |
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h index 18d75e795606..e1ab6e86cdb3 100644 --- a/include/linux/pxa168_eth.h +++ b/include/linux/pxa168_eth.h | |||
@@ -4,6 +4,8 @@ | |||
4 | #ifndef __LINUX_PXA168_ETH_H | 4 | #ifndef __LINUX_PXA168_ETH_H |
5 | #define __LINUX_PXA168_ETH_H | 5 | #define __LINUX_PXA168_ETH_H |
6 | 6 | ||
7 | #include <linux/phy.h> | ||
8 | |||
7 | struct pxa168_eth_platform_data { | 9 | struct pxa168_eth_platform_data { |
8 | int port_number; | 10 | int port_number; |
9 | int phy_addr; | 11 | int phy_addr; |
@@ -13,6 +15,7 @@ struct pxa168_eth_platform_data { | |||
13 | */ | 15 | */ |
14 | int speed; /* 0, SPEED_10, SPEED_100 */ | 16 | int speed; /* 0, SPEED_10, SPEED_100 */ |
15 | int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ | 17 | int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ |
18 | phy_interface_t intf; | ||
16 | 19 | ||
17 | /* | 20 | /* |
18 | * Override default RX/TX queue sizes if nonzero. | 21 | * Override default RX/TX queue sizes if nonzero. |
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index c2dee7deefa8..f566b8567892 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
@@ -204,10 +204,10 @@ struct tcp_sock { | |||
204 | 204 | ||
205 | u16 urg_data; /* Saved octet of OOB data and control flags */ | 205 | u16 urg_data; /* Saved octet of OOB data and control flags */ |
206 | u8 ecn_flags; /* ECN status bits. */ | 206 | u8 ecn_flags; /* ECN status bits. */ |
207 | u8 reordering; /* Packet reordering metric. */ | 207 | u8 keepalive_probes; /* num of allowed keep alive probes */ |
208 | u32 reordering; /* Packet reordering metric. */ | ||
208 | u32 snd_up; /* Urgent pointer */ | 209 | u32 snd_up; /* Urgent pointer */ |
209 | 210 | ||
210 | u8 keepalive_probes; /* num of allowed keep alive probes */ | ||
211 | /* | 211 | /* |
212 | * Options received (usually on last packet, some only on SYN packets). | 212 | * Options received (usually on last packet, some only on SYN packets). |
213 | */ | 213 | */ |
diff --git a/include/net/dsa.h b/include/net/dsa.h index b76559293535..ed3c34bbb67a 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h | |||
@@ -38,6 +38,9 @@ struct dsa_chip_data { | |||
38 | struct device *host_dev; | 38 | struct device *host_dev; |
39 | int sw_addr; | 39 | int sw_addr; |
40 | 40 | ||
41 | /* set to size of eeprom if supported by the switch */ | ||
42 | int eeprom_len; | ||
43 | |||
41 | /* Device tree node pointer for this specific switch chip | 44 | /* Device tree node pointer for this specific switch chip |
42 | * used during switch setup in case additional properties | 45 | * used during switch setup in case additional properties |
43 | * and resources needs to be used | 46 | * and resources needs to be used |
@@ -139,6 +142,14 @@ struct dsa_switch { | |||
139 | */ | 142 | */ |
140 | struct device *master_dev; | 143 | struct device *master_dev; |
141 | 144 | ||
145 | #ifdef CONFIG_NET_DSA_HWMON | ||
146 | /* | ||
147 | * Hardware monitoring information | ||
148 | */ | ||
149 | char hwmon_name[IFNAMSIZ + 8]; | ||
150 | struct device *hwmon_dev; | ||
151 | #endif | ||
152 | |||
142 | /* | 153 | /* |
143 | * Slave mii_bus and devices for the individual ports. | 154 | * Slave mii_bus and devices for the individual ports. |
144 | */ | 155 | */ |
@@ -242,6 +253,28 @@ struct dsa_switch_driver { | |||
242 | struct ethtool_eee *e); | 253 | struct ethtool_eee *e); |
243 | int (*get_eee)(struct dsa_switch *ds, int port, | 254 | int (*get_eee)(struct dsa_switch *ds, int port, |
244 | struct ethtool_eee *e); | 255 | struct ethtool_eee *e); |
256 | |||
257 | #ifdef CONFIG_NET_DSA_HWMON | ||
258 | /* Hardware monitoring */ | ||
259 | int (*get_temp)(struct dsa_switch *ds, int *temp); | ||
260 | int (*get_temp_limit)(struct dsa_switch *ds, int *temp); | ||
261 | int (*set_temp_limit)(struct dsa_switch *ds, int temp); | ||
262 | int (*get_temp_alarm)(struct dsa_switch *ds, bool *alarm); | ||
263 | #endif | ||
264 | |||
265 | /* EEPROM access */ | ||
266 | int (*get_eeprom_len)(struct dsa_switch *ds); | ||
267 | int (*get_eeprom)(struct dsa_switch *ds, | ||
268 | struct ethtool_eeprom *eeprom, u8 *data); | ||
269 | int (*set_eeprom)(struct dsa_switch *ds, | ||
270 | struct ethtool_eeprom *eeprom, u8 *data); | ||
271 | |||
272 | /* | ||
273 | * Register access. | ||
274 | */ | ||
275 | int (*get_regs_len)(struct dsa_switch *ds, int port); | ||
276 | void (*get_regs)(struct dsa_switch *ds, int port, | ||
277 | struct ethtool_regs *regs, void *p); | ||
245 | }; | 278 | }; |
246 | 279 | ||
247 | void register_switch_driver(struct dsa_switch_driver *type); | 280 | void register_switch_driver(struct dsa_switch_driver *type); |
diff --git a/include/net/ipx.h b/include/net/ipx.h index 0143180fecc9..320f47b64a7a 100644 --- a/include/net/ipx.h +++ b/include/net/ipx.h | |||
@@ -42,6 +42,9 @@ struct ipxhdr { | |||
42 | struct ipx_address ipx_source __packed; | 42 | struct ipx_address ipx_source __packed; |
43 | }; | 43 | }; |
44 | 44 | ||
45 | /* From af_ipx.c */ | ||
46 | extern int sysctl_ipx_pprop_broadcasting; | ||
47 | |||
45 | static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb) | 48 | static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb) |
46 | { | 49 | { |
47 | return (struct ipxhdr *)skb_transport_header(skb); | 50 | return (struct ipxhdr *)skb_transport_header(skb); |
diff --git a/include/net/neighbour.h b/include/net/neighbour.h index f60558d0254c..dedfb188b1a7 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h | |||
@@ -69,7 +69,7 @@ struct neigh_parms { | |||
69 | struct net *net; | 69 | struct net *net; |
70 | #endif | 70 | #endif |
71 | struct net_device *dev; | 71 | struct net_device *dev; |
72 | struct neigh_parms *next; | 72 | struct list_head list; |
73 | int (*neigh_setup)(struct neighbour *); | 73 | int (*neigh_setup)(struct neighbour *); |
74 | void (*neigh_cleanup)(struct neighbour *); | 74 | void (*neigh_cleanup)(struct neighbour *); |
75 | struct neigh_table *tbl; | 75 | struct neigh_table *tbl; |
@@ -203,6 +203,7 @@ struct neigh_table { | |||
203 | void (*proxy_redo)(struct sk_buff *skb); | 203 | void (*proxy_redo)(struct sk_buff *skb); |
204 | char *id; | 204 | char *id; |
205 | struct neigh_parms parms; | 205 | struct neigh_parms parms; |
206 | struct list_head parms_list; | ||
206 | int gc_interval; | 207 | int gc_interval; |
207 | int gc_thresh1; | 208 | int gc_thresh1; |
208 | int gc_thresh2; | 209 | int gc_thresh2; |
diff --git a/include/net/netlink.h b/include/net/netlink.h index 7b903e1bdbbb..64158353ecb2 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h | |||
@@ -1185,4 +1185,14 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype, | |||
1185 | #define nla_for_each_nested(pos, nla, rem) \ | 1185 | #define nla_for_each_nested(pos, nla, rem) \ |
1186 | nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem) | 1186 | nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem) |
1187 | 1187 | ||
1188 | /** | ||
1189 | * nla_is_last - Test if attribute is last in stream | ||
1190 | * @nla: attribute to test | ||
1191 | * @rem: bytes remaining in stream | ||
1192 | */ | ||
1193 | static inline bool nla_is_last(const struct nlattr *nla, int rem) | ||
1194 | { | ||
1195 | return nla->nla_len == rem; | ||
1196 | } | ||
1197 | |||
1188 | #endif | 1198 | #endif |
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 4ff3f67be62c..806e3b5b3351 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -1116,7 +1116,6 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len, | |||
1116 | sctp_scope_t sctp_scope(const union sctp_addr *); | 1116 | sctp_scope_t sctp_scope(const union sctp_addr *); |
1117 | int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope); | 1117 | int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope); |
1118 | int sctp_is_any(struct sock *sk, const union sctp_addr *addr); | 1118 | int sctp_is_any(struct sock *sk, const union sctp_addr *addr); |
1119 | int sctp_addr_is_valid(const union sctp_addr *addr); | ||
1120 | int sctp_is_ep_boundall(struct sock *sk); | 1119 | int sctp_is_ep_boundall(struct sock *sk); |
1121 | 1120 | ||
1122 | 1121 | ||
diff --git a/include/net/tcp.h b/include/net/tcp.h index 4062b4f0d121..3a35b1500359 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -55,9 +55,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); | |||
55 | #define MAX_TCP_HEADER (128 + MAX_HEADER) | 55 | #define MAX_TCP_HEADER (128 + MAX_HEADER) |
56 | #define MAX_TCP_OPTION_SPACE 40 | 56 | #define MAX_TCP_OPTION_SPACE 40 |
57 | 57 | ||
58 | /* | 58 | /* |
59 | * Never offer a window over 32767 without using window scaling. Some | 59 | * Never offer a window over 32767 without using window scaling. Some |
60 | * poor stacks do signed 16bit maths! | 60 | * poor stacks do signed 16bit maths! |
61 | */ | 61 | */ |
62 | #define MAX_TCP_WINDOW 32767U | 62 | #define MAX_TCP_WINDOW 32767U |
63 | 63 | ||
@@ -70,9 +70,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); | |||
70 | /* After receiving this amount of duplicate ACKs fast retransmit starts. */ | 70 | /* After receiving this amount of duplicate ACKs fast retransmit starts. */ |
71 | #define TCP_FASTRETRANS_THRESH 3 | 71 | #define TCP_FASTRETRANS_THRESH 3 |
72 | 72 | ||
73 | /* Maximal reordering. */ | ||
74 | #define TCP_MAX_REORDERING 127 | ||
75 | |||
76 | /* Maximal number of ACKs sent quickly to accelerate slow-start. */ | 73 | /* Maximal number of ACKs sent quickly to accelerate slow-start. */ |
77 | #define TCP_MAX_QUICKACKS 16U | 74 | #define TCP_MAX_QUICKACKS 16U |
78 | 75 | ||
@@ -167,7 +164,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); | |||
167 | /* | 164 | /* |
168 | * TCP option | 165 | * TCP option |
169 | */ | 166 | */ |
170 | 167 | ||
171 | #define TCPOPT_NOP 1 /* Padding */ | 168 | #define TCPOPT_NOP 1 /* Padding */ |
172 | #define TCPOPT_EOL 0 /* End of options */ | 169 | #define TCPOPT_EOL 0 /* End of options */ |
173 | #define TCPOPT_MSS 2 /* Segment size negotiating */ | 170 | #define TCPOPT_MSS 2 /* Segment size negotiating */ |
@@ -252,6 +249,7 @@ extern int sysctl_tcp_abort_on_overflow; | |||
252 | extern int sysctl_tcp_max_orphans; | 249 | extern int sysctl_tcp_max_orphans; |
253 | extern int sysctl_tcp_fack; | 250 | extern int sysctl_tcp_fack; |
254 | extern int sysctl_tcp_reordering; | 251 | extern int sysctl_tcp_reordering; |
252 | extern int sysctl_tcp_max_reordering; | ||
255 | extern int sysctl_tcp_dsack; | 253 | extern int sysctl_tcp_dsack; |
256 | extern long sysctl_tcp_mem[3]; | 254 | extern long sysctl_tcp_mem[3]; |
257 | extern int sysctl_tcp_wmem[3]; | 255 | extern int sysctl_tcp_wmem[3]; |
@@ -1104,16 +1102,16 @@ static inline int tcp_win_from_space(int space) | |||
1104 | space - (space>>sysctl_tcp_adv_win_scale); | 1102 | space - (space>>sysctl_tcp_adv_win_scale); |
1105 | } | 1103 | } |
1106 | 1104 | ||
1107 | /* Note: caller must be prepared to deal with negative returns */ | 1105 | /* Note: caller must be prepared to deal with negative returns */ |
1108 | static inline int tcp_space(const struct sock *sk) | 1106 | static inline int tcp_space(const struct sock *sk) |
1109 | { | 1107 | { |
1110 | return tcp_win_from_space(sk->sk_rcvbuf - | 1108 | return tcp_win_from_space(sk->sk_rcvbuf - |
1111 | atomic_read(&sk->sk_rmem_alloc)); | 1109 | atomic_read(&sk->sk_rmem_alloc)); |
1112 | } | 1110 | } |
1113 | 1111 | ||
1114 | static inline int tcp_full_space(const struct sock *sk) | 1112 | static inline int tcp_full_space(const struct sock *sk) |
1115 | { | 1113 | { |
1116 | return tcp_win_from_space(sk->sk_rcvbuf); | 1114 | return tcp_win_from_space(sk->sk_rcvbuf); |
1117 | } | 1115 | } |
1118 | 1116 | ||
1119 | static inline void tcp_openreq_init(struct request_sock *req, | 1117 | static inline void tcp_openreq_init(struct request_sock *req, |
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 99b43056a6fe..eb2095b42fbb 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h | |||
@@ -1213,6 +1213,10 @@ enum ethtool_sfeatures_retval_bits { | |||
1213 | #define SUPPORTED_40000baseCR4_Full (1 << 24) | 1213 | #define SUPPORTED_40000baseCR4_Full (1 << 24) |
1214 | #define SUPPORTED_40000baseSR4_Full (1 << 25) | 1214 | #define SUPPORTED_40000baseSR4_Full (1 << 25) |
1215 | #define SUPPORTED_40000baseLR4_Full (1 << 26) | 1215 | #define SUPPORTED_40000baseLR4_Full (1 << 26) |
1216 | #define SUPPORTED_56000baseKR4_Full (1 << 27) | ||
1217 | #define SUPPORTED_56000baseCR4_Full (1 << 28) | ||
1218 | #define SUPPORTED_56000baseSR4_Full (1 << 29) | ||
1219 | #define SUPPORTED_56000baseLR4_Full (1 << 30) | ||
1216 | 1220 | ||
1217 | #define ADVERTISED_10baseT_Half (1 << 0) | 1221 | #define ADVERTISED_10baseT_Half (1 << 0) |
1218 | #define ADVERTISED_10baseT_Full (1 << 1) | 1222 | #define ADVERTISED_10baseT_Full (1 << 1) |
@@ -1241,6 +1245,10 @@ enum ethtool_sfeatures_retval_bits { | |||
1241 | #define ADVERTISED_40000baseCR4_Full (1 << 24) | 1245 | #define ADVERTISED_40000baseCR4_Full (1 << 24) |
1242 | #define ADVERTISED_40000baseSR4_Full (1 << 25) | 1246 | #define ADVERTISED_40000baseSR4_Full (1 << 25) |
1243 | #define ADVERTISED_40000baseLR4_Full (1 << 26) | 1247 | #define ADVERTISED_40000baseLR4_Full (1 << 26) |
1248 | #define ADVERTISED_56000baseKR4_Full (1 << 27) | ||
1249 | #define ADVERTISED_56000baseCR4_Full (1 << 28) | ||
1250 | #define ADVERTISED_56000baseSR4_Full (1 << 29) | ||
1251 | #define ADVERTISED_56000baseLR4_Full (1 << 30) | ||
1244 | 1252 | ||
1245 | /* The following are all involved in forcing a particular link | 1253 | /* The following are all involved in forcing a particular link |
1246 | * mode for the device for setting things. When getting the | 1254 | * mode for the device for setting things. When getting the |
@@ -1248,12 +1256,16 @@ enum ethtool_sfeatures_retval_bits { | |||
1248 | * it was forced up into this mode or autonegotiated. | 1256 | * it was forced up into this mode or autonegotiated. |
1249 | */ | 1257 | */ |
1250 | 1258 | ||
1251 | /* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */ | 1259 | /* The forced speed, 10Mb, 100Mb, gigabit, [2.5|10|20|40|56]GbE. */ |
1252 | #define SPEED_10 10 | 1260 | #define SPEED_10 10 |
1253 | #define SPEED_100 100 | 1261 | #define SPEED_100 100 |
1254 | #define SPEED_1000 1000 | 1262 | #define SPEED_1000 1000 |
1255 | #define SPEED_2500 2500 | 1263 | #define SPEED_2500 2500 |
1256 | #define SPEED_10000 10000 | 1264 | #define SPEED_10000 10000 |
1265 | #define SPEED_20000 20000 | ||
1266 | #define SPEED_40000 40000 | ||
1267 | #define SPEED_56000 56000 | ||
1268 | |||
1257 | #define SPEED_UNKNOWN -1 | 1269 | #define SPEED_UNKNOWN -1 |
1258 | 1270 | ||
1259 | /* Duplex, half or full. */ | 1271 | /* Duplex, half or full. */ |
@@ -1343,6 +1355,10 @@ enum ethtool_sfeatures_retval_bits { | |||
1343 | #define ETH_MODULE_SFF_8079_LEN 256 | 1355 | #define ETH_MODULE_SFF_8079_LEN 256 |
1344 | #define ETH_MODULE_SFF_8472 0x2 | 1356 | #define ETH_MODULE_SFF_8472 0x2 |
1345 | #define ETH_MODULE_SFF_8472_LEN 512 | 1357 | #define ETH_MODULE_SFF_8472_LEN 512 |
1358 | #define ETH_MODULE_SFF_8636 0x3 | ||
1359 | #define ETH_MODULE_SFF_8636_LEN 256 | ||
1360 | #define ETH_MODULE_SFF_8436 0x4 | ||
1361 | #define ETH_MODULE_SFF_8436_LEN 256 | ||
1346 | 1362 | ||
1347 | /* Reset flags */ | 1363 | /* Reset flags */ |
1348 | /* The reset() operation must clear the flags for the components which | 1364 | /* The reset() operation must clear the flags for the components which |
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 0bdb77e16875..7072d8325016 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h | |||
@@ -243,6 +243,7 @@ enum { | |||
243 | IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave */ | 243 | IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave */ |
244 | IFLA_BRPORT_LEARNING, /* mac learning */ | 244 | IFLA_BRPORT_LEARNING, /* mac learning */ |
245 | IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */ | 245 | IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */ |
246 | IFLA_BRPORT_PROXYARP, /* proxy ARP */ | ||
246 | __IFLA_BRPORT_MAX | 247 | __IFLA_BRPORT_MAX |
247 | }; | 248 | }; |
248 | #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) | 249 | #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) |
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index efa2666f4b8a..e863d088b9a5 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h | |||
@@ -164,6 +164,7 @@ enum { | |||
164 | DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL, | 164 | DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL, |
165 | DEVCONF_SUPPRESS_FRAG_NDISC, | 165 | DEVCONF_SUPPRESS_FRAG_NDISC, |
166 | DEVCONF_ACCEPT_RA_FROM_LOCAL, | 166 | DEVCONF_ACCEPT_RA_FROM_LOCAL, |
167 | DEVCONF_USE_OPTIMISTIC, | ||
167 | DEVCONF_MAX | 168 | DEVCONF_MAX |
168 | }; | 169 | }; |
169 | 170 | ||
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9f81818f2941..b6a1f7c14a67 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -153,22 +153,19 @@ struct reg_state { | |||
153 | 153 | ||
154 | enum bpf_stack_slot_type { | 154 | enum bpf_stack_slot_type { |
155 | STACK_INVALID, /* nothing was stored in this stack slot */ | 155 | STACK_INVALID, /* nothing was stored in this stack slot */ |
156 | STACK_SPILL, /* 1st byte of register spilled into stack */ | 156 | STACK_SPILL, /* register spilled into stack */ |
157 | STACK_SPILL_PART, /* other 7 bytes of register spill */ | ||
158 | STACK_MISC /* BPF program wrote some data into this slot */ | 157 | STACK_MISC /* BPF program wrote some data into this slot */ |
159 | }; | 158 | }; |
160 | 159 | ||
161 | struct bpf_stack_slot { | 160 | #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ |
162 | enum bpf_stack_slot_type stype; | ||
163 | struct reg_state reg_st; | ||
164 | }; | ||
165 | 161 | ||
166 | /* state of the program: | 162 | /* state of the program: |
167 | * type of all registers and stack info | 163 | * type of all registers and stack info |
168 | */ | 164 | */ |
169 | struct verifier_state { | 165 | struct verifier_state { |
170 | struct reg_state regs[MAX_BPF_REG]; | 166 | struct reg_state regs[MAX_BPF_REG]; |
171 | struct bpf_stack_slot stack[MAX_BPF_STACK]; | 167 | u8 stack_slot_type[MAX_BPF_STACK]; |
168 | struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE]; | ||
172 | }; | 169 | }; |
173 | 170 | ||
174 | /* linked list of verifier states used to prune search */ | 171 | /* linked list of verifier states used to prune search */ |
@@ -259,10 +256,10 @@ static void print_verifier_state(struct verifier_env *env) | |||
259 | env->cur_state.regs[i].map_ptr->key_size, | 256 | env->cur_state.regs[i].map_ptr->key_size, |
260 | env->cur_state.regs[i].map_ptr->value_size); | 257 | env->cur_state.regs[i].map_ptr->value_size); |
261 | } | 258 | } |
262 | for (i = 0; i < MAX_BPF_STACK; i++) { | 259 | for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { |
263 | if (env->cur_state.stack[i].stype == STACK_SPILL) | 260 | if (env->cur_state.stack_slot_type[i] == STACK_SPILL) |
264 | verbose(" fp%d=%s", -MAX_BPF_STACK + i, | 261 | verbose(" fp%d=%s", -MAX_BPF_STACK + i, |
265 | reg_type_str[env->cur_state.stack[i].reg_st.type]); | 262 | reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]); |
266 | } | 263 | } |
267 | verbose("\n"); | 264 | verbose("\n"); |
268 | } | 265 | } |
@@ -539,8 +536,10 @@ static int bpf_size_to_bytes(int bpf_size) | |||
539 | static int check_stack_write(struct verifier_state *state, int off, int size, | 536 | static int check_stack_write(struct verifier_state *state, int off, int size, |
540 | int value_regno) | 537 | int value_regno) |
541 | { | 538 | { |
542 | struct bpf_stack_slot *slot; | ||
543 | int i; | 539 | int i; |
540 | /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, | ||
541 | * so it's aligned access and [off, off + size) are within stack limits | ||
542 | */ | ||
544 | 543 | ||
545 | if (value_regno >= 0 && | 544 | if (value_regno >= 0 && |
546 | (state->regs[value_regno].type == PTR_TO_MAP_VALUE || | 545 | (state->regs[value_regno].type == PTR_TO_MAP_VALUE || |
@@ -548,30 +547,24 @@ static int check_stack_write(struct verifier_state *state, int off, int size, | |||
548 | state->regs[value_regno].type == PTR_TO_CTX)) { | 547 | state->regs[value_regno].type == PTR_TO_CTX)) { |
549 | 548 | ||
550 | /* register containing pointer is being spilled into stack */ | 549 | /* register containing pointer is being spilled into stack */ |
551 | if (size != 8) { | 550 | if (size != BPF_REG_SIZE) { |
552 | verbose("invalid size of register spill\n"); | 551 | verbose("invalid size of register spill\n"); |
553 | return -EACCES; | 552 | return -EACCES; |
554 | } | 553 | } |
555 | 554 | ||
556 | slot = &state->stack[MAX_BPF_STACK + off]; | ||
557 | slot->stype = STACK_SPILL; | ||
558 | /* save register state */ | 555 | /* save register state */ |
559 | slot->reg_st = state->regs[value_regno]; | 556 | state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = |
560 | for (i = 1; i < 8; i++) { | 557 | state->regs[value_regno]; |
561 | slot = &state->stack[MAX_BPF_STACK + off + i]; | ||
562 | slot->stype = STACK_SPILL_PART; | ||
563 | slot->reg_st.type = UNKNOWN_VALUE; | ||
564 | slot->reg_st.map_ptr = NULL; | ||
565 | } | ||
566 | } else { | ||
567 | 558 | ||
559 | for (i = 0; i < BPF_REG_SIZE; i++) | ||
560 | state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL; | ||
561 | } else { | ||
568 | /* regular write of data into stack */ | 562 | /* regular write of data into stack */ |
569 | for (i = 0; i < size; i++) { | 563 | state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = |
570 | slot = &state->stack[MAX_BPF_STACK + off + i]; | 564 | (struct reg_state) {}; |
571 | slot->stype = STACK_MISC; | 565 | |
572 | slot->reg_st.type = UNKNOWN_VALUE; | 566 | for (i = 0; i < size; i++) |
573 | slot->reg_st.map_ptr = NULL; | 567 | state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC; |
574 | } | ||
575 | } | 568 | } |
576 | return 0; | 569 | return 0; |
577 | } | 570 | } |
@@ -579,19 +572,18 @@ static int check_stack_write(struct verifier_state *state, int off, int size, | |||
579 | static int check_stack_read(struct verifier_state *state, int off, int size, | 572 | static int check_stack_read(struct verifier_state *state, int off, int size, |
580 | int value_regno) | 573 | int value_regno) |
581 | { | 574 | { |
575 | u8 *slot_type; | ||
582 | int i; | 576 | int i; |
583 | struct bpf_stack_slot *slot; | ||
584 | 577 | ||
585 | slot = &state->stack[MAX_BPF_STACK + off]; | 578 | slot_type = &state->stack_slot_type[MAX_BPF_STACK + off]; |
586 | 579 | ||
587 | if (slot->stype == STACK_SPILL) { | 580 | if (slot_type[0] == STACK_SPILL) { |
588 | if (size != 8) { | 581 | if (size != BPF_REG_SIZE) { |
589 | verbose("invalid size of register spill\n"); | 582 | verbose("invalid size of register spill\n"); |
590 | return -EACCES; | 583 | return -EACCES; |
591 | } | 584 | } |
592 | for (i = 1; i < 8; i++) { | 585 | for (i = 1; i < BPF_REG_SIZE; i++) { |
593 | if (state->stack[MAX_BPF_STACK + off + i].stype != | 586 | if (slot_type[i] != STACK_SPILL) { |
594 | STACK_SPILL_PART) { | ||
595 | verbose("corrupted spill memory\n"); | 587 | verbose("corrupted spill memory\n"); |
596 | return -EACCES; | 588 | return -EACCES; |
597 | } | 589 | } |
@@ -599,12 +591,12 @@ static int check_stack_read(struct verifier_state *state, int off, int size, | |||
599 | 591 | ||
600 | if (value_regno >= 0) | 592 | if (value_regno >= 0) |
601 | /* restore register state from stack */ | 593 | /* restore register state from stack */ |
602 | state->regs[value_regno] = slot->reg_st; | 594 | state->regs[value_regno] = |
595 | state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE]; | ||
603 | return 0; | 596 | return 0; |
604 | } else { | 597 | } else { |
605 | for (i = 0; i < size; i++) { | 598 | for (i = 0; i < size; i++) { |
606 | if (state->stack[MAX_BPF_STACK + off + i].stype != | 599 | if (slot_type[i] != STACK_MISC) { |
607 | STACK_MISC) { | ||
608 | verbose("invalid read from stack off %d+%d size %d\n", | 600 | verbose("invalid read from stack off %d+%d size %d\n", |
609 | off, i, size); | 601 | off, i, size); |
610 | return -EACCES; | 602 | return -EACCES; |
@@ -747,7 +739,7 @@ static int check_stack_boundary(struct verifier_env *env, | |||
747 | } | 739 | } |
748 | 740 | ||
749 | for (i = 0; i < access_size; i++) { | 741 | for (i = 0; i < access_size; i++) { |
750 | if (state->stack[MAX_BPF_STACK + off + i].stype != STACK_MISC) { | 742 | if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) { |
751 | verbose("invalid indirect read from stack off %d+%d size %d\n", | 743 | verbose("invalid indirect read from stack off %d+%d size %d\n", |
752 | off, i, access_size); | 744 | off, i, access_size); |
753 | return -EACCES; | 745 | return -EACCES; |
@@ -1417,12 +1409,33 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur) | |||
1417 | } | 1409 | } |
1418 | 1410 | ||
1419 | for (i = 0; i < MAX_BPF_STACK; i++) { | 1411 | for (i = 0; i < MAX_BPF_STACK; i++) { |
1420 | if (memcmp(&old->stack[i], &cur->stack[i], | 1412 | if (old->stack_slot_type[i] == STACK_INVALID) |
1421 | sizeof(old->stack[0])) != 0) { | 1413 | continue; |
1422 | if (old->stack[i].stype == STACK_INVALID) | 1414 | if (old->stack_slot_type[i] != cur->stack_slot_type[i]) |
1423 | continue; | 1415 | /* Ex: old explored (safe) state has STACK_SPILL in |
1416 | * this stack slot, but current has has STACK_MISC -> | ||
1417 | * this verifier states are not equivalent, | ||
1418 | * return false to continue verification of this path | ||
1419 | */ | ||
1424 | return false; | 1420 | return false; |
1425 | } | 1421 | if (i % BPF_REG_SIZE) |
1422 | continue; | ||
1423 | if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE], | ||
1424 | &cur->spilled_regs[i / BPF_REG_SIZE], | ||
1425 | sizeof(old->spilled_regs[0]))) | ||
1426 | /* when explored and current stack slot types are | ||
1427 | * the same, check that stored pointers types | ||
1428 | * are the same as well. | ||
1429 | * Ex: explored safe path could have stored | ||
1430 | * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8} | ||
1431 | * but current path has stored: | ||
1432 | * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16} | ||
1433 | * such verifier states are not equivalent. | ||
1434 | * return false to continue verification of this path | ||
1435 | */ | ||
1436 | return false; | ||
1437 | else | ||
1438 | continue; | ||
1426 | } | 1439 | } |
1427 | return true; | 1440 | return true; |
1428 | } | 1441 | } |
diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 23e070bcf72d..3f167d2eeb94 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c | |||
@@ -1756,6 +1756,49 @@ static struct bpf_test tests[] = { | |||
1756 | { }, | 1756 | { }, |
1757 | { { 0, 1 } } | 1757 | { { 0, 1 } } |
1758 | }, | 1758 | }, |
1759 | { | ||
1760 | "nmap reduced", | ||
1761 | .u.insns_int = { | ||
1762 | BPF_MOV64_REG(R6, R1), | ||
1763 | BPF_LD_ABS(BPF_H, 12), | ||
1764 | BPF_JMP_IMM(BPF_JNE, R0, 0x806, 28), | ||
1765 | BPF_LD_ABS(BPF_H, 12), | ||
1766 | BPF_JMP_IMM(BPF_JNE, R0, 0x806, 26), | ||
1767 | BPF_MOV32_IMM(R0, 18), | ||
1768 | BPF_STX_MEM(BPF_W, R10, R0, -64), | ||
1769 | BPF_LDX_MEM(BPF_W, R7, R10, -64), | ||
1770 | BPF_LD_IND(BPF_W, R7, 14), | ||
1771 | BPF_STX_MEM(BPF_W, R10, R0, -60), | ||
1772 | BPF_MOV32_IMM(R0, 280971478), | ||
1773 | BPF_STX_MEM(BPF_W, R10, R0, -56), | ||
1774 | BPF_LDX_MEM(BPF_W, R7, R10, -56), | ||
1775 | BPF_LDX_MEM(BPF_W, R0, R10, -60), | ||
1776 | BPF_ALU32_REG(BPF_SUB, R0, R7), | ||
1777 | BPF_JMP_IMM(BPF_JNE, R0, 0, 15), | ||
1778 | BPF_LD_ABS(BPF_H, 12), | ||
1779 | BPF_JMP_IMM(BPF_JNE, R0, 0x806, 13), | ||
1780 | BPF_MOV32_IMM(R0, 22), | ||
1781 | BPF_STX_MEM(BPF_W, R10, R0, -56), | ||
1782 | BPF_LDX_MEM(BPF_W, R7, R10, -56), | ||
1783 | BPF_LD_IND(BPF_H, R7, 14), | ||
1784 | BPF_STX_MEM(BPF_W, R10, R0, -52), | ||
1785 | BPF_MOV32_IMM(R0, 17366), | ||
1786 | BPF_STX_MEM(BPF_W, R10, R0, -48), | ||
1787 | BPF_LDX_MEM(BPF_W, R7, R10, -48), | ||
1788 | BPF_LDX_MEM(BPF_W, R0, R10, -52), | ||
1789 | BPF_ALU32_REG(BPF_SUB, R0, R7), | ||
1790 | BPF_JMP_IMM(BPF_JNE, R0, 0, 2), | ||
1791 | BPF_MOV32_IMM(R0, 256), | ||
1792 | BPF_EXIT_INSN(), | ||
1793 | BPF_MOV32_IMM(R0, 0), | ||
1794 | BPF_EXIT_INSN(), | ||
1795 | }, | ||
1796 | INTERNAL, | ||
1797 | { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0, | ||
1798 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
1799 | 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6}, | ||
1800 | { { 38, 256 } } | ||
1801 | }, | ||
1759 | }; | 1802 | }; |
1760 | 1803 | ||
1761 | static struct net_device dev; | 1804 | static struct net_device dev; |
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 44cb786b925a..f96933a823e3 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c | |||
@@ -184,6 +184,11 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb, | |||
184 | /* Do not flood unicast traffic to ports that turn it off */ | 184 | /* Do not flood unicast traffic to ports that turn it off */ |
185 | if (unicast && !(p->flags & BR_FLOOD)) | 185 | if (unicast && !(p->flags & BR_FLOOD)) |
186 | continue; | 186 | continue; |
187 | |||
188 | /* Do not flood to ports that enable proxy ARP */ | ||
189 | if (p->flags & BR_PROXYARP) | ||
190 | continue; | ||
191 | |||
187 | prev = maybe_deliver(prev, p, skb, __packet_hook); | 192 | prev = maybe_deliver(prev, p, skb, __packet_hook); |
188 | if (IS_ERR(prev)) | 193 | if (IS_ERR(prev)) |
189 | goto out; | 194 | goto out; |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 6fd5522df696..1f1de715197c 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/netdevice.h> | 16 | #include <linux/netdevice.h> |
17 | #include <linux/etherdevice.h> | 17 | #include <linux/etherdevice.h> |
18 | #include <linux/netfilter_bridge.h> | 18 | #include <linux/netfilter_bridge.h> |
19 | #include <linux/neighbour.h> | ||
20 | #include <net/arp.h> | ||
19 | #include <linux/export.h> | 21 | #include <linux/export.h> |
20 | #include <linux/rculist.h> | 22 | #include <linux/rculist.h> |
21 | #include "br_private.h" | 23 | #include "br_private.h" |
@@ -57,6 +59,60 @@ static int br_pass_frame_up(struct sk_buff *skb) | |||
57 | netif_receive_skb); | 59 | netif_receive_skb); |
58 | } | 60 | } |
59 | 61 | ||
62 | static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br, | ||
63 | u16 vid) | ||
64 | { | ||
65 | struct net_device *dev = br->dev; | ||
66 | struct neighbour *n; | ||
67 | struct arphdr *parp; | ||
68 | u8 *arpptr, *sha; | ||
69 | __be32 sip, tip; | ||
70 | |||
71 | if (dev->flags & IFF_NOARP) | ||
72 | return; | ||
73 | |||
74 | if (!pskb_may_pull(skb, arp_hdr_len(dev))) { | ||
75 | dev->stats.tx_dropped++; | ||
76 | return; | ||
77 | } | ||
78 | parp = arp_hdr(skb); | ||
79 | |||
80 | if (parp->ar_pro != htons(ETH_P_IP) || | ||
81 | parp->ar_op != htons(ARPOP_REQUEST) || | ||
82 | parp->ar_hln != dev->addr_len || | ||
83 | parp->ar_pln != 4) | ||
84 | return; | ||
85 | |||
86 | arpptr = (u8 *)parp + sizeof(struct arphdr); | ||
87 | sha = arpptr; | ||
88 | arpptr += dev->addr_len; /* sha */ | ||
89 | memcpy(&sip, arpptr, sizeof(sip)); | ||
90 | arpptr += sizeof(sip); | ||
91 | arpptr += dev->addr_len; /* tha */ | ||
92 | memcpy(&tip, arpptr, sizeof(tip)); | ||
93 | |||
94 | if (ipv4_is_loopback(tip) || | ||
95 | ipv4_is_multicast(tip)) | ||
96 | return; | ||
97 | |||
98 | n = neigh_lookup(&arp_tbl, &tip, dev); | ||
99 | if (n) { | ||
100 | struct net_bridge_fdb_entry *f; | ||
101 | |||
102 | if (!(n->nud_state & NUD_VALID)) { | ||
103 | neigh_release(n); | ||
104 | return; | ||
105 | } | ||
106 | |||
107 | f = __br_fdb_get(br, n->ha, vid); | ||
108 | if (f) | ||
109 | arp_send(ARPOP_REPLY, ETH_P_ARP, sip, skb->dev, tip, | ||
110 | sha, n->ha, sha); | ||
111 | |||
112 | neigh_release(n); | ||
113 | } | ||
114 | } | ||
115 | |||
60 | /* note: already called with rcu_read_lock */ | 116 | /* note: already called with rcu_read_lock */ |
61 | int br_handle_frame_finish(struct sk_buff *skb) | 117 | int br_handle_frame_finish(struct sk_buff *skb) |
62 | { | 118 | { |
@@ -98,6 +154,10 @@ int br_handle_frame_finish(struct sk_buff *skb) | |||
98 | dst = NULL; | 154 | dst = NULL; |
99 | 155 | ||
100 | if (is_broadcast_ether_addr(dest)) { | 156 | if (is_broadcast_ether_addr(dest)) { |
157 | if (p->flags & BR_PROXYARP && | ||
158 | skb->protocol == htons(ETH_P_ARP)) | ||
159 | br_do_proxy_arp(skb, br, vid); | ||
160 | |||
101 | skb2 = skb; | 161 | skb2 = skb; |
102 | unicast = false; | 162 | unicast = false; |
103 | } else if (is_multicast_ether_addr(dest)) { | 163 | } else if (is_multicast_ether_addr(dest)) { |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 2ff9706647f2..86c239b06f6e 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -60,7 +60,8 @@ static int br_port_fill_attrs(struct sk_buff *skb, | |||
60 | nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) || | 60 | nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) || |
61 | nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) || | 61 | nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) || |
62 | nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) || | 62 | nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) || |
63 | nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD))) | 63 | nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) || |
64 | nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP))) | ||
64 | return -EMSGSIZE; | 65 | return -EMSGSIZE; |
65 | 66 | ||
66 | return 0; | 67 | return 0; |
@@ -332,6 +333,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) | |||
332 | br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); | 333 | br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); |
333 | br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING); | 334 | br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING); |
334 | br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD); | 335 | br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD); |
336 | br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP); | ||
335 | 337 | ||
336 | if (tb[IFLA_BRPORT_COST]) { | 338 | if (tb[IFLA_BRPORT_COST]) { |
337 | err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); | 339 | err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 4d783d071305..8f3f08140258 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -172,6 +172,7 @@ struct net_bridge_port | |||
172 | #define BR_FLOOD 0x00000040 | 172 | #define BR_FLOOD 0x00000040 |
173 | #define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING) | 173 | #define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING) |
174 | #define BR_PROMISC 0x00000080 | 174 | #define BR_PROMISC 0x00000080 |
175 | #define BR_PROXYARP 0x00000100 | ||
175 | 176 | ||
176 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | 177 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING |
177 | struct bridge_mcast_own_query ip4_own_query; | 178 | struct bridge_mcast_own_query ip4_own_query; |
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index e561cd59b8a6..2de5d91199e8 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c | |||
@@ -170,6 +170,7 @@ BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD); | |||
170 | BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK); | 170 | BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK); |
171 | BRPORT_ATTR_FLAG(learning, BR_LEARNING); | 171 | BRPORT_ATTR_FLAG(learning, BR_LEARNING); |
172 | BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD); | 172 | BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD); |
173 | BRPORT_ATTR_FLAG(proxyarp, BR_PROXYARP); | ||
173 | 174 | ||
174 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | 175 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING |
175 | static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf) | 176 | static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf) |
@@ -213,6 +214,7 @@ static const struct brport_attribute *brport_attrs[] = { | |||
213 | &brport_attr_multicast_router, | 214 | &brport_attr_multicast_router, |
214 | &brport_attr_multicast_fast_leave, | 215 | &brport_attr_multicast_fast_leave, |
215 | #endif | 216 | #endif |
217 | &brport_attr_proxyarp, | ||
216 | NULL | 218 | NULL |
217 | }; | 219 | }; |
218 | 220 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index 945bbd001359..ebf778df58cd 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -4376,7 +4376,8 @@ static int process_backlog(struct napi_struct *napi, int quota) | |||
4376 | * __napi_schedule - schedule for receive | 4376 | * __napi_schedule - schedule for receive |
4377 | * @n: entry to schedule | 4377 | * @n: entry to schedule |
4378 | * | 4378 | * |
4379 | * The entry's receive function will be scheduled to run | 4379 | * The entry's receive function will be scheduled to run. |
4380 | * Consider using __napi_schedule_irqoff() if hard irqs are masked. | ||
4380 | */ | 4381 | */ |
4381 | void __napi_schedule(struct napi_struct *n) | 4382 | void __napi_schedule(struct napi_struct *n) |
4382 | { | 4383 | { |
@@ -4388,6 +4389,18 @@ void __napi_schedule(struct napi_struct *n) | |||
4388 | } | 4389 | } |
4389 | EXPORT_SYMBOL(__napi_schedule); | 4390 | EXPORT_SYMBOL(__napi_schedule); |
4390 | 4391 | ||
4392 | /** | ||
4393 | * __napi_schedule_irqoff - schedule for receive | ||
4394 | * @n: entry to schedule | ||
4395 | * | ||
4396 | * Variant of __napi_schedule() assuming hard irqs are masked | ||
4397 | */ | ||
4398 | void __napi_schedule_irqoff(struct napi_struct *n) | ||
4399 | { | ||
4400 | ____napi_schedule(this_cpu_ptr(&softnet_data), n); | ||
4401 | } | ||
4402 | EXPORT_SYMBOL(__napi_schedule_irqoff); | ||
4403 | |||
4391 | void __napi_complete(struct napi_struct *n) | 4404 | void __napi_complete(struct napi_struct *n) |
4392 | { | 4405 | { |
4393 | BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); | 4406 | BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ef31fef25e5a..edd04116ecb7 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -773,7 +773,7 @@ static void neigh_periodic_work(struct work_struct *work) | |||
773 | if (time_after(jiffies, tbl->last_rand + 300 * HZ)) { | 773 | if (time_after(jiffies, tbl->last_rand + 300 * HZ)) { |
774 | struct neigh_parms *p; | 774 | struct neigh_parms *p; |
775 | tbl->last_rand = jiffies; | 775 | tbl->last_rand = jiffies; |
776 | for (p = &tbl->parms; p; p = p->next) | 776 | list_for_each_entry(p, &tbl->parms_list, list) |
777 | p->reachable_time = | 777 | p->reachable_time = |
778 | neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); | 778 | neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); |
779 | } | 779 | } |
@@ -1446,7 +1446,7 @@ static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl, | |||
1446 | { | 1446 | { |
1447 | struct neigh_parms *p; | 1447 | struct neigh_parms *p; |
1448 | 1448 | ||
1449 | for (p = &tbl->parms; p; p = p->next) { | 1449 | list_for_each_entry(p, &tbl->parms_list, list) { |
1450 | if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) || | 1450 | if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) || |
1451 | (!p->dev && !ifindex && net_eq(net, &init_net))) | 1451 | (!p->dev && !ifindex && net_eq(net, &init_net))) |
1452 | return p; | 1452 | return p; |
@@ -1481,8 +1481,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev, | |||
1481 | } | 1481 | } |
1482 | 1482 | ||
1483 | write_lock_bh(&tbl->lock); | 1483 | write_lock_bh(&tbl->lock); |
1484 | p->next = tbl->parms.next; | 1484 | list_add(&p->list, &tbl->parms.list); |
1485 | tbl->parms.next = p; | ||
1486 | write_unlock_bh(&tbl->lock); | 1485 | write_unlock_bh(&tbl->lock); |
1487 | 1486 | ||
1488 | neigh_parms_data_state_cleanall(p); | 1487 | neigh_parms_data_state_cleanall(p); |
@@ -1501,24 +1500,15 @@ static void neigh_rcu_free_parms(struct rcu_head *head) | |||
1501 | 1500 | ||
1502 | void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms) | 1501 | void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms) |
1503 | { | 1502 | { |
1504 | struct neigh_parms **p; | ||
1505 | |||
1506 | if (!parms || parms == &tbl->parms) | 1503 | if (!parms || parms == &tbl->parms) |
1507 | return; | 1504 | return; |
1508 | write_lock_bh(&tbl->lock); | 1505 | write_lock_bh(&tbl->lock); |
1509 | for (p = &tbl->parms.next; *p; p = &(*p)->next) { | 1506 | list_del(&parms->list); |
1510 | if (*p == parms) { | 1507 | parms->dead = 1; |
1511 | *p = parms->next; | ||
1512 | parms->dead = 1; | ||
1513 | write_unlock_bh(&tbl->lock); | ||
1514 | if (parms->dev) | ||
1515 | dev_put(parms->dev); | ||
1516 | call_rcu(&parms->rcu_head, neigh_rcu_free_parms); | ||
1517 | return; | ||
1518 | } | ||
1519 | } | ||
1520 | write_unlock_bh(&tbl->lock); | 1508 | write_unlock_bh(&tbl->lock); |
1521 | neigh_dbg(1, "%s: not found\n", __func__); | 1509 | if (parms->dev) |
1510 | dev_put(parms->dev); | ||
1511 | call_rcu(&parms->rcu_head, neigh_rcu_free_parms); | ||
1522 | } | 1512 | } |
1523 | EXPORT_SYMBOL(neigh_parms_release); | 1513 | EXPORT_SYMBOL(neigh_parms_release); |
1524 | 1514 | ||
@@ -1535,6 +1525,8 @@ static void neigh_table_init_no_netlink(struct neigh_table *tbl) | |||
1535 | unsigned long now = jiffies; | 1525 | unsigned long now = jiffies; |
1536 | unsigned long phsize; | 1526 | unsigned long phsize; |
1537 | 1527 | ||
1528 | INIT_LIST_HEAD(&tbl->parms_list); | ||
1529 | list_add(&tbl->parms.list, &tbl->parms_list); | ||
1538 | write_pnet(&tbl->parms.net, &init_net); | 1530 | write_pnet(&tbl->parms.net, &init_net); |
1539 | atomic_set(&tbl->parms.refcnt, 1); | 1531 | atomic_set(&tbl->parms.refcnt, 1); |
1540 | tbl->parms.reachable_time = | 1532 | tbl->parms.reachable_time = |
@@ -2154,7 +2146,9 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) | |||
2154 | NLM_F_MULTI) <= 0) | 2146 | NLM_F_MULTI) <= 0) |
2155 | break; | 2147 | break; |
2156 | 2148 | ||
2157 | for (nidx = 0, p = tbl->parms.next; p; p = p->next) { | 2149 | nidx = 0; |
2150 | p = list_next_entry(&tbl->parms, list); | ||
2151 | list_for_each_entry_from(p, &tbl->parms_list, list) { | ||
2158 | if (!net_eq(neigh_parms_net(p), net)) | 2152 | if (!net_eq(neigh_parms_net(p), net)) |
2159 | continue; | 2153 | continue; |
2160 | 2154 | ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c16615bfb61e..e48e5c02e877 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -3099,6 +3099,16 @@ perform_csum_check: | |||
3099 | * (see validate_xmit_skb_list() for example) | 3099 | * (see validate_xmit_skb_list() for example) |
3100 | */ | 3100 | */ |
3101 | segs->prev = tail; | 3101 | segs->prev = tail; |
3102 | |||
3103 | /* Following permits correct backpressure, for protocols | ||
3104 | * using skb_set_owner_w(). | ||
3105 | * Idea is to tranfert ownership from head_skb to last segment. | ||
3106 | */ | ||
3107 | if (head_skb->destructor == sock_wfree) { | ||
3108 | swap(tail->truesize, head_skb->truesize); | ||
3109 | swap(tail->destructor, head_skb->destructor); | ||
3110 | swap(tail->sk, head_skb->sk); | ||
3111 | } | ||
3102 | return segs; | 3112 | return segs; |
3103 | 3113 | ||
3104 | err: | 3114 | err: |
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index a585fd6352eb..5f8ac404535b 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig | |||
@@ -11,6 +11,17 @@ config NET_DSA | |||
11 | 11 | ||
12 | if NET_DSA | 12 | if NET_DSA |
13 | 13 | ||
14 | config NET_DSA_HWMON | ||
15 | bool "Distributed Switch Architecture HWMON support" | ||
16 | default y | ||
17 | depends on HWMON && !(NET_DSA=y && HWMON=m) | ||
18 | ---help--- | ||
19 | Say Y if you want to expose thermal sensor data on switches supported | ||
20 | by the Distributed Switch Architecture. | ||
21 | |||
22 | Some of those switches contain thermal sensors. This data is available | ||
23 | via the hwmon sysfs interface and exposes the onboard sensors. | ||
24 | |||
14 | # tagging formats | 25 | # tagging formats |
15 | config NET_DSA_TAG_BRCM | 26 | config NET_DSA_TAG_BRCM |
16 | bool | 27 | bool |
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 6317b41c99b0..dd646a8025cb 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c | |||
@@ -9,6 +9,9 @@ | |||
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/ctype.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/hwmon.h> | ||
12 | #include <linux/list.h> | 15 | #include <linux/list.h> |
13 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
14 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
@@ -17,6 +20,7 @@ | |||
17 | #include <linux/of.h> | 20 | #include <linux/of.h> |
18 | #include <linux/of_mdio.h> | 21 | #include <linux/of_mdio.h> |
19 | #include <linux/of_platform.h> | 22 | #include <linux/of_platform.h> |
23 | #include <linux/sysfs.h> | ||
20 | #include "dsa_priv.h" | 24 | #include "dsa_priv.h" |
21 | 25 | ||
22 | char dsa_driver_version[] = "0.1"; | 26 | char dsa_driver_version[] = "0.1"; |
@@ -71,6 +75,104 @@ dsa_switch_probe(struct device *host_dev, int sw_addr, char **_name) | |||
71 | return ret; | 75 | return ret; |
72 | } | 76 | } |
73 | 77 | ||
78 | /* hwmon support ************************************************************/ | ||
79 | |||
80 | #ifdef CONFIG_NET_DSA_HWMON | ||
81 | |||
82 | static ssize_t temp1_input_show(struct device *dev, | ||
83 | struct device_attribute *attr, char *buf) | ||
84 | { | ||
85 | struct dsa_switch *ds = dev_get_drvdata(dev); | ||
86 | int temp, ret; | ||
87 | |||
88 | ret = ds->drv->get_temp(ds, &temp); | ||
89 | if (ret < 0) | ||
90 | return ret; | ||
91 | |||
92 | return sprintf(buf, "%d\n", temp * 1000); | ||
93 | } | ||
94 | static DEVICE_ATTR_RO(temp1_input); | ||
95 | |||
96 | static ssize_t temp1_max_show(struct device *dev, | ||
97 | struct device_attribute *attr, char *buf) | ||
98 | { | ||
99 | struct dsa_switch *ds = dev_get_drvdata(dev); | ||
100 | int temp, ret; | ||
101 | |||
102 | ret = ds->drv->get_temp_limit(ds, &temp); | ||
103 | if (ret < 0) | ||
104 | return ret; | ||
105 | |||
106 | return sprintf(buf, "%d\n", temp * 1000); | ||
107 | } | ||
108 | |||
109 | static ssize_t temp1_max_store(struct device *dev, | ||
110 | struct device_attribute *attr, const char *buf, | ||
111 | size_t count) | ||
112 | { | ||
113 | struct dsa_switch *ds = dev_get_drvdata(dev); | ||
114 | int temp, ret; | ||
115 | |||
116 | ret = kstrtoint(buf, 0, &temp); | ||
117 | if (ret < 0) | ||
118 | return ret; | ||
119 | |||
120 | ret = ds->drv->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000)); | ||
121 | if (ret < 0) | ||
122 | return ret; | ||
123 | |||
124 | return count; | ||
125 | } | ||
126 | static DEVICE_ATTR(temp1_max, S_IRUGO, temp1_max_show, temp1_max_store); | ||
127 | |||
128 | static ssize_t temp1_max_alarm_show(struct device *dev, | ||
129 | struct device_attribute *attr, char *buf) | ||
130 | { | ||
131 | struct dsa_switch *ds = dev_get_drvdata(dev); | ||
132 | bool alarm; | ||
133 | int ret; | ||
134 | |||
135 | ret = ds->drv->get_temp_alarm(ds, &alarm); | ||
136 | if (ret < 0) | ||
137 | return ret; | ||
138 | |||
139 | return sprintf(buf, "%d\n", alarm); | ||
140 | } | ||
141 | static DEVICE_ATTR_RO(temp1_max_alarm); | ||
142 | |||
143 | static struct attribute *dsa_hwmon_attrs[] = { | ||
144 | &dev_attr_temp1_input.attr, /* 0 */ | ||
145 | &dev_attr_temp1_max.attr, /* 1 */ | ||
146 | &dev_attr_temp1_max_alarm.attr, /* 2 */ | ||
147 | NULL | ||
148 | }; | ||
149 | |||
150 | static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj, | ||
151 | struct attribute *attr, int index) | ||
152 | { | ||
153 | struct device *dev = container_of(kobj, struct device, kobj); | ||
154 | struct dsa_switch *ds = dev_get_drvdata(dev); | ||
155 | struct dsa_switch_driver *drv = ds->drv; | ||
156 | umode_t mode = attr->mode; | ||
157 | |||
158 | if (index == 1) { | ||
159 | if (!drv->get_temp_limit) | ||
160 | mode = 0; | ||
161 | else if (drv->set_temp_limit) | ||
162 | mode |= S_IWUSR; | ||
163 | } else if (index == 2 && !drv->get_temp_alarm) { | ||
164 | mode = 0; | ||
165 | } | ||
166 | return mode; | ||
167 | } | ||
168 | |||
169 | static const struct attribute_group dsa_hwmon_group = { | ||
170 | .attrs = dsa_hwmon_attrs, | ||
171 | .is_visible = dsa_hwmon_attrs_visible, | ||
172 | }; | ||
173 | __ATTRIBUTE_GROUPS(dsa_hwmon); | ||
174 | |||
175 | #endif /* CONFIG_NET_DSA_HWMON */ | ||
74 | 176 | ||
75 | /* basic switch operations **************************************************/ | 177 | /* basic switch operations **************************************************/ |
76 | static struct dsa_switch * | 178 | static struct dsa_switch * |
@@ -228,6 +330,31 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, | |||
228 | ds->ports[i] = slave_dev; | 330 | ds->ports[i] = slave_dev; |
229 | } | 331 | } |
230 | 332 | ||
333 | #ifdef CONFIG_NET_DSA_HWMON | ||
334 | /* If the switch provides a temperature sensor, | ||
335 | * register with hardware monitoring subsystem. | ||
336 | * Treat registration error as non-fatal and ignore it. | ||
337 | */ | ||
338 | if (drv->get_temp) { | ||
339 | const char *netname = netdev_name(dst->master_netdev); | ||
340 | char hname[IFNAMSIZ + 1]; | ||
341 | int i, j; | ||
342 | |||
343 | /* Create valid hwmon 'name' attribute */ | ||
344 | for (i = j = 0; i < IFNAMSIZ && netname[i]; i++) { | ||
345 | if (isalnum(netname[i])) | ||
346 | hname[j++] = netname[i]; | ||
347 | } | ||
348 | hname[j] = '\0'; | ||
349 | scnprintf(ds->hwmon_name, sizeof(ds->hwmon_name), "%s_dsa%d", | ||
350 | hname, index); | ||
351 | ds->hwmon_dev = hwmon_device_register_with_groups(NULL, | ||
352 | ds->hwmon_name, ds, dsa_hwmon_groups); | ||
353 | if (IS_ERR(ds->hwmon_dev)) | ||
354 | ds->hwmon_dev = NULL; | ||
355 | } | ||
356 | #endif /* CONFIG_NET_DSA_HWMON */ | ||
357 | |||
231 | return ds; | 358 | return ds; |
232 | 359 | ||
233 | out_free: | 360 | out_free: |
@@ -239,6 +366,10 @@ out: | |||
239 | 366 | ||
240 | static void dsa_switch_destroy(struct dsa_switch *ds) | 367 | static void dsa_switch_destroy(struct dsa_switch *ds) |
241 | { | 368 | { |
369 | #ifdef CONFIG_NET_DSA_HWMON | ||
370 | if (ds->hwmon_dev) | ||
371 | hwmon_device_unregister(ds->hwmon_dev); | ||
372 | #endif | ||
242 | } | 373 | } |
243 | 374 | ||
244 | #ifdef CONFIG_PM_SLEEP | 375 | #ifdef CONFIG_PM_SLEEP |
@@ -447,6 +578,7 @@ static int dsa_of_probe(struct platform_device *pdev) | |||
447 | const char *port_name; | 578 | const char *port_name; |
448 | int chip_index, port_index; | 579 | int chip_index, port_index; |
449 | const unsigned int *sw_addr, *port_reg; | 580 | const unsigned int *sw_addr, *port_reg; |
581 | u32 eeprom_len; | ||
450 | int ret; | 582 | int ret; |
451 | 583 | ||
452 | mdio = of_parse_phandle(np, "dsa,mii-bus", 0); | 584 | mdio = of_parse_phandle(np, "dsa,mii-bus", 0); |
@@ -498,6 +630,9 @@ static int dsa_of_probe(struct platform_device *pdev) | |||
498 | if (cd->sw_addr > PHY_MAX_ADDR) | 630 | if (cd->sw_addr > PHY_MAX_ADDR) |
499 | continue; | 631 | continue; |
500 | 632 | ||
633 | if (!of_property_read_u32(np, "eeprom-length", &eeprom_len)) | ||
634 | cd->eeprom_len = eeprom_len; | ||
635 | |||
501 | for_each_available_child_of_node(child, port) { | 636 | for_each_available_child_of_node(child, port) { |
502 | port_reg = of_get_property(port, "reg", NULL); | 637 | port_reg = of_get_property(port, "reg", NULL); |
503 | if (!port_reg) | 638 | if (!port_reg) |
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 6d1817449c36..474f2962590a 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -249,6 +249,27 @@ static void dsa_slave_get_drvinfo(struct net_device *dev, | |||
249 | strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); | 249 | strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); |
250 | } | 250 | } |
251 | 251 | ||
252 | static int dsa_slave_get_regs_len(struct net_device *dev) | ||
253 | { | ||
254 | struct dsa_slave_priv *p = netdev_priv(dev); | ||
255 | struct dsa_switch *ds = p->parent; | ||
256 | |||
257 | if (ds->drv->get_regs_len) | ||
258 | return ds->drv->get_regs_len(ds, p->port); | ||
259 | |||
260 | return -EOPNOTSUPP; | ||
261 | } | ||
262 | |||
263 | static void | ||
264 | dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) | ||
265 | { | ||
266 | struct dsa_slave_priv *p = netdev_priv(dev); | ||
267 | struct dsa_switch *ds = p->parent; | ||
268 | |||
269 | if (ds->drv->get_regs) | ||
270 | ds->drv->get_regs(ds, p->port, regs, _p); | ||
271 | } | ||
272 | |||
252 | static int dsa_slave_nway_reset(struct net_device *dev) | 273 | static int dsa_slave_nway_reset(struct net_device *dev) |
253 | { | 274 | { |
254 | struct dsa_slave_priv *p = netdev_priv(dev); | 275 | struct dsa_slave_priv *p = netdev_priv(dev); |
@@ -271,6 +292,44 @@ static u32 dsa_slave_get_link(struct net_device *dev) | |||
271 | return -EOPNOTSUPP; | 292 | return -EOPNOTSUPP; |
272 | } | 293 | } |
273 | 294 | ||
295 | static int dsa_slave_get_eeprom_len(struct net_device *dev) | ||
296 | { | ||
297 | struct dsa_slave_priv *p = netdev_priv(dev); | ||
298 | struct dsa_switch *ds = p->parent; | ||
299 | |||
300 | if (ds->pd->eeprom_len) | ||
301 | return ds->pd->eeprom_len; | ||
302 | |||
303 | if (ds->drv->get_eeprom_len) | ||
304 | return ds->drv->get_eeprom_len(ds); | ||
305 | |||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | static int dsa_slave_get_eeprom(struct net_device *dev, | ||
310 | struct ethtool_eeprom *eeprom, u8 *data) | ||
311 | { | ||
312 | struct dsa_slave_priv *p = netdev_priv(dev); | ||
313 | struct dsa_switch *ds = p->parent; | ||
314 | |||
315 | if (ds->drv->get_eeprom) | ||
316 | return ds->drv->get_eeprom(ds, eeprom, data); | ||
317 | |||
318 | return -EOPNOTSUPP; | ||
319 | } | ||
320 | |||
321 | static int dsa_slave_set_eeprom(struct net_device *dev, | ||
322 | struct ethtool_eeprom *eeprom, u8 *data) | ||
323 | { | ||
324 | struct dsa_slave_priv *p = netdev_priv(dev); | ||
325 | struct dsa_switch *ds = p->parent; | ||
326 | |||
327 | if (ds->drv->set_eeprom) | ||
328 | return ds->drv->set_eeprom(ds, eeprom, data); | ||
329 | |||
330 | return -EOPNOTSUPP; | ||
331 | } | ||
332 | |||
274 | static void dsa_slave_get_strings(struct net_device *dev, | 333 | static void dsa_slave_get_strings(struct net_device *dev, |
275 | uint32_t stringset, uint8_t *data) | 334 | uint32_t stringset, uint8_t *data) |
276 | { | 335 | { |
@@ -385,8 +444,13 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = { | |||
385 | .get_settings = dsa_slave_get_settings, | 444 | .get_settings = dsa_slave_get_settings, |
386 | .set_settings = dsa_slave_set_settings, | 445 | .set_settings = dsa_slave_set_settings, |
387 | .get_drvinfo = dsa_slave_get_drvinfo, | 446 | .get_drvinfo = dsa_slave_get_drvinfo, |
447 | .get_regs_len = dsa_slave_get_regs_len, | ||
448 | .get_regs = dsa_slave_get_regs, | ||
388 | .nway_reset = dsa_slave_nway_reset, | 449 | .nway_reset = dsa_slave_nway_reset, |
389 | .get_link = dsa_slave_get_link, | 450 | .get_link = dsa_slave_get_link, |
451 | .get_eeprom_len = dsa_slave_get_eeprom_len, | ||
452 | .get_eeprom = dsa_slave_get_eeprom, | ||
453 | .set_eeprom = dsa_slave_set_eeprom, | ||
390 | .get_strings = dsa_slave_get_strings, | 454 | .get_strings = dsa_slave_get_strings, |
391 | .get_ethtool_stats = dsa_slave_get_ethtool_stats, | 455 | .get_ethtool_stats = dsa_slave_get_ethtool_stats, |
392 | .get_sset_count = dsa_slave_get_sset_count, | 456 | .get_sset_count = dsa_slave_get_sset_count, |
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index ce90c8bdc658..2dab27063273 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c | |||
@@ -63,8 +63,6 @@ static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev) | |||
63 | dsa_header[3] = 0x00; | 63 | dsa_header[3] = 0x00; |
64 | } | 64 | } |
65 | 65 | ||
66 | skb->protocol = htons(ETH_P_DSA); | ||
67 | |||
68 | skb->dev = p->parent->dst->master_netdev; | 66 | skb->dev = p->parent->dst->master_netdev; |
69 | dev_queue_xmit(skb); | 67 | dev_queue_xmit(skb); |
70 | 68 | ||
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index 94fcce778679..9aeda596f7ec 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c | |||
@@ -76,8 +76,6 @@ static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev) | |||
76 | edsa_header[7] = 0x00; | 76 | edsa_header[7] = 0x00; |
77 | } | 77 | } |
78 | 78 | ||
79 | skb->protocol = htons(ETH_P_EDSA); | ||
80 | |||
81 | skb->dev = p->parent->dst->master_netdev; | 79 | skb->dev = p->parent->dst->master_netdev; |
82 | dev_queue_xmit(skb); | 80 | dev_queue_xmit(skb); |
83 | 81 | ||
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index 115fdca34077..e268f9db8893 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c | |||
@@ -57,8 +57,6 @@ static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) | |||
57 | trailer[2] = 0x10; | 57 | trailer[2] = 0x10; |
58 | trailer[3] = 0x00; | 58 | trailer[3] = 0x00; |
59 | 59 | ||
60 | nskb->protocol = htons(ETH_P_TRAILER); | ||
61 | |||
62 | nskb->dev = p->parent->dst->master_netdev; | 60 | nskb->dev = p->parent->dst->master_netdev; |
63 | dev_queue_xmit(nskb); | 61 | dev_queue_xmit(nskb); |
64 | 62 | ||
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c index 065cd94c640c..91861fe77ed1 100644 --- a/net/ipv4/geneve.c +++ b/net/ipv4/geneve.c | |||
@@ -104,7 +104,7 @@ static void geneve_build_header(struct genevehdr *geneveh, | |||
104 | memcpy(geneveh->options, options, options_len); | 104 | memcpy(geneveh->options, options, options_len); |
105 | } | 105 | } |
106 | 106 | ||
107 | /* Transmit a fully formated Geneve frame. | 107 | /* Transmit a fully formatted Geneve frame. |
108 | * | 108 | * |
109 | * When calling this function. The skb->data should point | 109 | * When calling this function. The skb->data should point |
110 | * to the geneve header which is fully formed. | 110 | * to the geneve header which is fully formed. |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 648fa1490ea7..a896da50f398 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
@@ -498,7 +498,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
498 | struct arphdr *rarp; | 498 | struct arphdr *rarp; |
499 | unsigned char *rarp_ptr; | 499 | unsigned char *rarp_ptr; |
500 | __be32 sip, tip; | 500 | __be32 sip, tip; |
501 | unsigned char *sha, *tha; /* s for "source", t for "target" */ | 501 | unsigned char *tha; /* t for "target" */ |
502 | struct ic_device *d; | 502 | struct ic_device *d; |
503 | 503 | ||
504 | if (!net_eq(dev_net(dev), &init_net)) | 504 | if (!net_eq(dev_net(dev), &init_net)) |
@@ -549,7 +549,6 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
549 | goto drop_unlock; /* should never happen */ | 549 | goto drop_unlock; /* should never happen */ |
550 | 550 | ||
551 | /* Extract variable-width fields */ | 551 | /* Extract variable-width fields */ |
552 | sha = rarp_ptr; | ||
553 | rarp_ptr += dev->addr_len; | 552 | rarp_ptr += dev->addr_len; |
554 | memcpy(&sip, rarp_ptr, 4); | 553 | memcpy(&sip, rarp_ptr, 4); |
555 | rarp_ptr += 4; | 554 | rarp_ptr += 4; |
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 32b98d0207b4..4ac7bcaf2f46 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -275,8 +275,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) | |||
275 | if (!sysctl_tcp_syncookies || !th->ack || th->rst) | 275 | if (!sysctl_tcp_syncookies || !th->ack || th->rst) |
276 | goto out; | 276 | goto out; |
277 | 277 | ||
278 | if (tcp_synq_no_recent_overflow(sk) || | 278 | if (tcp_synq_no_recent_overflow(sk)) |
279 | (mss = __cookie_v4_check(ip_hdr(skb), th, cookie)) == 0) { | 279 | goto out; |
280 | |||
281 | mss = __cookie_v4_check(ip_hdr(skb), th, cookie); | ||
282 | if (mss == 0) { | ||
280 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); | 283 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); |
281 | goto out; | 284 | goto out; |
282 | } | 285 | } |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index b3c53c8b331e..e0ee384a448f 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -496,6 +496,13 @@ static struct ctl_table ipv4_table[] = { | |||
496 | .proc_handler = proc_dointvec | 496 | .proc_handler = proc_dointvec |
497 | }, | 497 | }, |
498 | { | 498 | { |
499 | .procname = "tcp_max_reordering", | ||
500 | .data = &sysctl_tcp_max_reordering, | ||
501 | .maxlen = sizeof(int), | ||
502 | .mode = 0644, | ||
503 | .proc_handler = proc_dointvec | ||
504 | }, | ||
505 | { | ||
499 | .procname = "tcp_dsack", | 506 | .procname = "tcp_dsack", |
500 | .data = &sysctl_tcp_dsack, | 507 | .data = &sysctl_tcp_dsack, |
501 | .maxlen = sizeof(int), | 508 | .maxlen = sizeof(int), |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a12b455928e5..4e4617e90417 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -81,6 +81,7 @@ int sysctl_tcp_window_scaling __read_mostly = 1; | |||
81 | int sysctl_tcp_sack __read_mostly = 1; | 81 | int sysctl_tcp_sack __read_mostly = 1; |
82 | int sysctl_tcp_fack __read_mostly = 1; | 82 | int sysctl_tcp_fack __read_mostly = 1; |
83 | int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; | 83 | int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; |
84 | int sysctl_tcp_max_reordering __read_mostly = 300; | ||
84 | EXPORT_SYMBOL(sysctl_tcp_reordering); | 85 | EXPORT_SYMBOL(sysctl_tcp_reordering); |
85 | int sysctl_tcp_dsack __read_mostly = 1; | 86 | int sysctl_tcp_dsack __read_mostly = 1; |
86 | int sysctl_tcp_app_win __read_mostly = 31; | 87 | int sysctl_tcp_app_win __read_mostly = 31; |
@@ -833,7 +834,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric, | |||
833 | if (metric > tp->reordering) { | 834 | if (metric > tp->reordering) { |
834 | int mib_idx; | 835 | int mib_idx; |
835 | 836 | ||
836 | tp->reordering = min(TCP_MAX_REORDERING, metric); | 837 | tp->reordering = min(sysctl_tcp_max_reordering, metric); |
837 | 838 | ||
838 | /* This exciting event is worth to be remembered. 8) */ | 839 | /* This exciting event is worth to be remembered. 8) */ |
839 | if (ts) | 840 | if (ts) |
@@ -5028,7 +5029,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, | |||
5028 | /* step 3: check security and precedence [ignored] */ | 5029 | /* step 3: check security and precedence [ignored] */ |
5029 | 5030 | ||
5030 | /* step 4: Check for a SYN | 5031 | /* step 4: Check for a SYN |
5031 | * RFC 5691 4.2 : Send a challenge ack | 5032 | * RFC 5961 4.2 : Send a challenge ack |
5032 | */ | 5033 | */ |
5033 | if (th->syn) { | 5034 | if (th->syn) { |
5034 | syn_challenge: | 5035 | syn_challenge: |
@@ -5865,7 +5866,7 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family) | |||
5865 | * If we receive a SYN packet with these bits set, it means a | 5866 | * If we receive a SYN packet with these bits set, it means a |
5866 | * network is playing bad games with TOS bits. In order to | 5867 | * network is playing bad games with TOS bits. In order to |
5867 | * avoid possible false congestion notifications, we disable | 5868 | * avoid possible false congestion notifications, we disable |
5868 | * TCP ECN negociation. | 5869 | * TCP ECN negotiation. |
5869 | * | 5870 | * |
5870 | * Exception: tcp_ca wants ECN. This is required for DCTCP | 5871 | * Exception: tcp_ca wants ECN. This is required for DCTCP |
5871 | * congestion control; it requires setting ECT on all packets, | 5872 | * congestion control; it requires setting ECT on all packets, |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 0169ccf5aa4f..06e897832a7a 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1170,6 +1170,9 @@ enum { | |||
1170 | IPV6_SADDR_RULE_PRIVACY, | 1170 | IPV6_SADDR_RULE_PRIVACY, |
1171 | IPV6_SADDR_RULE_ORCHID, | 1171 | IPV6_SADDR_RULE_ORCHID, |
1172 | IPV6_SADDR_RULE_PREFIX, | 1172 | IPV6_SADDR_RULE_PREFIX, |
1173 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | ||
1174 | IPV6_SADDR_RULE_NOT_OPTIMISTIC, | ||
1175 | #endif | ||
1173 | IPV6_SADDR_RULE_MAX | 1176 | IPV6_SADDR_RULE_MAX |
1174 | }; | 1177 | }; |
1175 | 1178 | ||
@@ -1197,6 +1200,15 @@ static inline int ipv6_saddr_preferred(int type) | |||
1197 | return 0; | 1200 | return 0; |
1198 | } | 1201 | } |
1199 | 1202 | ||
1203 | static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev) | ||
1204 | { | ||
1205 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | ||
1206 | return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic; | ||
1207 | #else | ||
1208 | return false; | ||
1209 | #endif | ||
1210 | } | ||
1211 | |||
1200 | static int ipv6_get_saddr_eval(struct net *net, | 1212 | static int ipv6_get_saddr_eval(struct net *net, |
1201 | struct ipv6_saddr_score *score, | 1213 | struct ipv6_saddr_score *score, |
1202 | struct ipv6_saddr_dst *dst, | 1214 | struct ipv6_saddr_dst *dst, |
@@ -1257,10 +1269,16 @@ static int ipv6_get_saddr_eval(struct net *net, | |||
1257 | score->scopedist = ret; | 1269 | score->scopedist = ret; |
1258 | break; | 1270 | break; |
1259 | case IPV6_SADDR_RULE_PREFERRED: | 1271 | case IPV6_SADDR_RULE_PREFERRED: |
1272 | { | ||
1260 | /* Rule 3: Avoid deprecated and optimistic addresses */ | 1273 | /* Rule 3: Avoid deprecated and optimistic addresses */ |
1274 | u8 avoid = IFA_F_DEPRECATED; | ||
1275 | |||
1276 | if (!ipv6_use_optimistic_addr(score->ifa->idev)) | ||
1277 | avoid |= IFA_F_OPTIMISTIC; | ||
1261 | ret = ipv6_saddr_preferred(score->addr_type) || | 1278 | ret = ipv6_saddr_preferred(score->addr_type) || |
1262 | !(score->ifa->flags & (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC)); | 1279 | !(score->ifa->flags & avoid); |
1263 | break; | 1280 | break; |
1281 | } | ||
1264 | #ifdef CONFIG_IPV6_MIP6 | 1282 | #ifdef CONFIG_IPV6_MIP6 |
1265 | case IPV6_SADDR_RULE_HOA: | 1283 | case IPV6_SADDR_RULE_HOA: |
1266 | { | 1284 | { |
@@ -1306,6 +1324,14 @@ static int ipv6_get_saddr_eval(struct net *net, | |||
1306 | ret = score->ifa->prefix_len; | 1324 | ret = score->ifa->prefix_len; |
1307 | score->matchlen = ret; | 1325 | score->matchlen = ret; |
1308 | break; | 1326 | break; |
1327 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | ||
1328 | case IPV6_SADDR_RULE_NOT_OPTIMISTIC: | ||
1329 | /* Optimistic addresses still have lower precedence than other | ||
1330 | * preferred addresses. | ||
1331 | */ | ||
1332 | ret = !(score->ifa->flags & IFA_F_OPTIMISTIC); | ||
1333 | break; | ||
1334 | #endif | ||
1309 | default: | 1335 | default: |
1310 | ret = 0; | 1336 | ret = 0; |
1311 | } | 1337 | } |
@@ -2315,8 +2341,8 @@ ok: | |||
2315 | else | 2341 | else |
2316 | stored_lft = 0; | 2342 | stored_lft = 0; |
2317 | if (!update_lft && !create && stored_lft) { | 2343 | if (!update_lft && !create && stored_lft) { |
2318 | const u32 minimum_lft = min( | 2344 | const u32 minimum_lft = min_t(u32, |
2319 | stored_lft, (u32)MIN_VALID_LIFETIME); | 2345 | stored_lft, MIN_VALID_LIFETIME); |
2320 | valid_lft = max(valid_lft, minimum_lft); | 2346 | valid_lft = max(valid_lft, minimum_lft); |
2321 | 2347 | ||
2322 | /* RFC4862 Section 5.5.3e: | 2348 | /* RFC4862 Section 5.5.3e: |
@@ -3222,8 +3248,15 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) | |||
3222 | * Optimistic nodes can start receiving | 3248 | * Optimistic nodes can start receiving |
3223 | * Frames right away | 3249 | * Frames right away |
3224 | */ | 3250 | */ |
3225 | if (ifp->flags & IFA_F_OPTIMISTIC) | 3251 | if (ifp->flags & IFA_F_OPTIMISTIC) { |
3226 | ip6_ins_rt(ifp->rt); | 3252 | ip6_ins_rt(ifp->rt); |
3253 | if (ipv6_use_optimistic_addr(idev)) { | ||
3254 | /* Because optimistic nodes can use this address, | ||
3255 | * notify listeners. If DAD fails, RTM_DELADDR is sent. | ||
3256 | */ | ||
3257 | ipv6_ifa_notify(RTM_NEWADDR, ifp); | ||
3258 | } | ||
3259 | } | ||
3227 | 3260 | ||
3228 | addrconf_dad_kick(ifp); | 3261 | addrconf_dad_kick(ifp); |
3229 | out: | 3262 | out: |
@@ -4330,6 +4363,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, | |||
4330 | array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route; | 4363 | array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route; |
4331 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 4364 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
4332 | array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad; | 4365 | array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad; |
4366 | array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic; | ||
4333 | #endif | 4367 | #endif |
4334 | #ifdef CONFIG_IPV6_MROUTE | 4368 | #ifdef CONFIG_IPV6_MROUTE |
4335 | array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding; | 4369 | array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding; |
@@ -5156,6 +5190,14 @@ static struct addrconf_sysctl_table | |||
5156 | .proc_handler = proc_dointvec, | 5190 | .proc_handler = proc_dointvec, |
5157 | 5191 | ||
5158 | }, | 5192 | }, |
5193 | { | ||
5194 | .procname = "use_optimistic", | ||
5195 | .data = &ipv6_devconf.use_optimistic, | ||
5196 | .maxlen = sizeof(int), | ||
5197 | .mode = 0644, | ||
5198 | .proc_handler = proc_dointvec, | ||
5199 | |||
5200 | }, | ||
5159 | #endif | 5201 | #endif |
5160 | #ifdef CONFIG_IPV6_MROUTE | 5202 | #ifdef CONFIG_IPV6_MROUTE |
5161 | { | 5203 | { |
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index bfde361b6134..601d896f22d0 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c | |||
@@ -47,7 +47,7 @@ | |||
47 | #include <net/xfrm.h> | 47 | #include <net/xfrm.h> |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | #include <asm/uaccess.h> | 50 | #include <linux/uaccess.h> |
51 | 51 | ||
52 | /* | 52 | /* |
53 | * Parsing tlv encoded headers. | 53 | * Parsing tlv encoded headers. |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 97ae70077a4f..62c1037d9e83 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -1009,4 +1009,3 @@ struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net) | |||
1009 | return table; | 1009 | return table; |
1010 | } | 1010 | } |
1011 | #endif | 1011 | #endif |
1012 | |||
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 9409887fb664..8c97cd1048c2 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -477,6 +477,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, | |||
477 | int rel_msg = 0; | 477 | int rel_msg = 0; |
478 | u8 rel_type = ICMPV6_DEST_UNREACH; | 478 | u8 rel_type = ICMPV6_DEST_UNREACH; |
479 | u8 rel_code = ICMPV6_ADDR_UNREACH; | 479 | u8 rel_code = ICMPV6_ADDR_UNREACH; |
480 | u8 tproto; | ||
480 | __u32 rel_info = 0; | 481 | __u32 rel_info = 0; |
481 | __u16 len; | 482 | __u16 len; |
482 | int err = -ENOENT; | 483 | int err = -ENOENT; |
@@ -490,7 +491,8 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, | |||
490 | &ipv6h->saddr)) == NULL) | 491 | &ipv6h->saddr)) == NULL) |
491 | goto out; | 492 | goto out; |
492 | 493 | ||
493 | if (t->parms.proto != ipproto && t->parms.proto != 0) | 494 | tproto = ACCESS_ONCE(t->parms.proto); |
495 | if (tproto != ipproto && tproto != 0) | ||
494 | goto out; | 496 | goto out; |
495 | 497 | ||
496 | err = 0; | 498 | err = 0; |
@@ -791,6 +793,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, | |||
791 | { | 793 | { |
792 | struct ip6_tnl *t; | 794 | struct ip6_tnl *t; |
793 | const struct ipv6hdr *ipv6h = ipv6_hdr(skb); | 795 | const struct ipv6hdr *ipv6h = ipv6_hdr(skb); |
796 | u8 tproto; | ||
794 | int err; | 797 | int err; |
795 | 798 | ||
796 | rcu_read_lock(); | 799 | rcu_read_lock(); |
@@ -799,7 +802,8 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, | |||
799 | &ipv6h->daddr)) != NULL) { | 802 | &ipv6h->daddr)) != NULL) { |
800 | struct pcpu_sw_netstats *tstats; | 803 | struct pcpu_sw_netstats *tstats; |
801 | 804 | ||
802 | if (t->parms.proto != ipproto && t->parms.proto != 0) { | 805 | tproto = ACCESS_ONCE(t->parms.proto); |
806 | if (tproto != ipproto && tproto != 0) { | ||
803 | rcu_read_unlock(); | 807 | rcu_read_unlock(); |
804 | goto discard; | 808 | goto discard; |
805 | } | 809 | } |
@@ -1078,9 +1082,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1078 | struct flowi6 fl6; | 1082 | struct flowi6 fl6; |
1079 | __u8 dsfield; | 1083 | __u8 dsfield; |
1080 | __u32 mtu; | 1084 | __u32 mtu; |
1085 | u8 tproto; | ||
1081 | int err; | 1086 | int err; |
1082 | 1087 | ||
1083 | if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) || | 1088 | tproto = ACCESS_ONCE(t->parms.proto); |
1089 | if ((tproto != IPPROTO_IPIP && tproto != 0) || | ||
1084 | !ip6_tnl_xmit_ctl(t)) | 1090 | !ip6_tnl_xmit_ctl(t)) |
1085 | return -1; | 1091 | return -1; |
1086 | 1092 | ||
@@ -1120,9 +1126,11 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1120 | struct flowi6 fl6; | 1126 | struct flowi6 fl6; |
1121 | __u8 dsfield; | 1127 | __u8 dsfield; |
1122 | __u32 mtu; | 1128 | __u32 mtu; |
1129 | u8 tproto; | ||
1123 | int err; | 1130 | int err; |
1124 | 1131 | ||
1125 | if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) || | 1132 | tproto = ACCESS_ONCE(t->parms.proto); |
1133 | if ((tproto != IPPROTO_IPV6 && tproto != 0) || | ||
1126 | !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h)) | 1134 | !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h)) |
1127 | return -1; | 1135 | return -1; |
1128 | 1136 | ||
@@ -1285,6 +1293,14 @@ static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) | |||
1285 | return err; | 1293 | return err; |
1286 | } | 1294 | } |
1287 | 1295 | ||
1296 | static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) | ||
1297 | { | ||
1298 | /* for default tnl0 device allow to change only the proto */ | ||
1299 | t->parms.proto = p->proto; | ||
1300 | netdev_state_change(t->dev); | ||
1301 | return 0; | ||
1302 | } | ||
1303 | |||
1288 | static void | 1304 | static void |
1289 | ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u) | 1305 | ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u) |
1290 | { | 1306 | { |
@@ -1384,7 +1400,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1384 | break; | 1400 | break; |
1385 | ip6_tnl_parm_from_user(&p1, &p); | 1401 | ip6_tnl_parm_from_user(&p1, &p); |
1386 | t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL); | 1402 | t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL); |
1387 | if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) { | 1403 | if (cmd == SIOCCHGTUNNEL) { |
1388 | if (t != NULL) { | 1404 | if (t != NULL) { |
1389 | if (t->dev != dev) { | 1405 | if (t->dev != dev) { |
1390 | err = -EEXIST; | 1406 | err = -EEXIST; |
@@ -1392,8 +1408,10 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1392 | } | 1408 | } |
1393 | } else | 1409 | } else |
1394 | t = netdev_priv(dev); | 1410 | t = netdev_priv(dev); |
1395 | 1411 | if (dev == ip6n->fb_tnl_dev) | |
1396 | err = ip6_tnl_update(t, &p1); | 1412 | err = ip6_tnl0_update(t, &p1); |
1413 | else | ||
1414 | err = ip6_tnl_update(t, &p1); | ||
1397 | } | 1415 | } |
1398 | if (t) { | 1416 | if (t) { |
1399 | err = 0; | 1417 | err = 0; |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 0171f08325c3..467f310dbbb3 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -2090,7 +2090,7 @@ static void ip6_mr_forward(struct net *net, struct mr6_table *mrt, | |||
2090 | if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) { | 2090 | if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) { |
2091 | struct mfc6_cache *cache_proxy; | 2091 | struct mfc6_cache *cache_proxy; |
2092 | 2092 | ||
2093 | /* For an (*,G) entry, we only check that the incomming | 2093 | /* For an (*,G) entry, we only check that the incoming |
2094 | * interface is part of the static tree. | 2094 | * interface is part of the static tree. |
2095 | */ | 2095 | */ |
2096 | cache_proxy = ip6mr_cache_find_any_parent(mrt, vif); | 2096 | cache_proxy = ip6mr_cache_find_any_parent(mrt, vif); |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 896af8807979..075a0fb400e7 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -548,7 +548,8 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, | |||
548 | if (!rp->checksum) | 548 | if (!rp->checksum) |
549 | goto send; | 549 | goto send; |
550 | 550 | ||
551 | if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) | 551 | skb = skb_peek(&sk->sk_write_queue); |
552 | if (!skb) | ||
552 | goto out; | 553 | goto out; |
553 | 554 | ||
554 | offset = rp->offset; | 555 | offset = rp->offset; |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 1a157ca2ebc1..51ab096ae574 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -69,7 +69,7 @@ struct ip6frag_skb_cb { | |||
69 | 69 | ||
70 | #define FRAG6_CB(skb) ((struct ip6frag_skb_cb *)((skb)->cb)) | 70 | #define FRAG6_CB(skb) ((struct ip6frag_skb_cb *)((skb)->cb)) |
71 | 71 | ||
72 | static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h) | 72 | static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h) |
73 | { | 73 | { |
74 | return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK); | 74 | return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK); |
75 | } | 75 | } |
@@ -178,7 +178,7 @@ static void ip6_frag_expire(unsigned long data) | |||
178 | ip6_expire_frag_queue(net, fq, &ip6_frags); | 178 | ip6_expire_frag_queue(net, fq, &ip6_frags); |
179 | } | 179 | } |
180 | 180 | ||
181 | static __inline__ struct frag_queue * | 181 | static struct frag_queue * |
182 | fq_find(struct net *net, __be32 id, const struct in6_addr *src, | 182 | fq_find(struct net *net, __be32 id, const struct in6_addr *src, |
183 | const struct in6_addr *dst, u8 ecn) | 183 | const struct in6_addr *dst, u8 ecn) |
184 | { | 184 | { |
@@ -684,21 +684,21 @@ static void ip6_frags_sysctl_unregister(void) | |||
684 | unregister_net_sysctl_table(ip6_ctl_header); | 684 | unregister_net_sysctl_table(ip6_ctl_header); |
685 | } | 685 | } |
686 | #else | 686 | #else |
687 | static inline int ip6_frags_ns_sysctl_register(struct net *net) | 687 | static int ip6_frags_ns_sysctl_register(struct net *net) |
688 | { | 688 | { |
689 | return 0; | 689 | return 0; |
690 | } | 690 | } |
691 | 691 | ||
692 | static inline void ip6_frags_ns_sysctl_unregister(struct net *net) | 692 | static void ip6_frags_ns_sysctl_unregister(struct net *net) |
693 | { | 693 | { |
694 | } | 694 | } |
695 | 695 | ||
696 | static inline int ip6_frags_sysctl_register(void) | 696 | static int ip6_frags_sysctl_register(void) |
697 | { | 697 | { |
698 | return 0; | 698 | return 0; |
699 | } | 699 | } |
700 | 700 | ||
701 | static inline void ip6_frags_sysctl_unregister(void) | 701 | static void ip6_frags_sysctl_unregister(void) |
702 | { | 702 | { |
703 | } | 703 | } |
704 | #endif | 704 | #endif |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index a318dd89b6d9..c91083156edb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -772,23 +772,22 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, | |||
772 | } | 772 | } |
773 | #endif | 773 | #endif |
774 | 774 | ||
775 | #define BACKTRACK(__net, saddr) \ | 775 | static struct fib6_node* fib6_backtrack(struct fib6_node *fn, |
776 | do { \ | 776 | struct in6_addr *saddr) |
777 | if (rt == __net->ipv6.ip6_null_entry) { \ | 777 | { |
778 | struct fib6_node *pn; \ | 778 | struct fib6_node *pn; |
779 | while (1) { \ | 779 | while (1) { |
780 | if (fn->fn_flags & RTN_TL_ROOT) \ | 780 | if (fn->fn_flags & RTN_TL_ROOT) |
781 | goto out; \ | 781 | return NULL; |
782 | pn = fn->parent; \ | 782 | pn = fn->parent; |
783 | if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \ | 783 | if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) |
784 | fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \ | 784 | fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); |
785 | else \ | 785 | else |
786 | fn = pn; \ | 786 | fn = pn; |
787 | if (fn->fn_flags & RTN_RTINFO) \ | 787 | if (fn->fn_flags & RTN_RTINFO) |
788 | goto restart; \ | 788 | return fn; |
789 | } \ | 789 | } |
790 | } \ | 790 | } |
791 | } while (0) | ||
792 | 791 | ||
793 | static struct rt6_info *ip6_pol_route_lookup(struct net *net, | 792 | static struct rt6_info *ip6_pol_route_lookup(struct net *net, |
794 | struct fib6_table *table, | 793 | struct fib6_table *table, |
@@ -804,8 +803,11 @@ restart: | |||
804 | rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags); | 803 | rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags); |
805 | if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0) | 804 | if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0) |
806 | rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags); | 805 | rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags); |
807 | BACKTRACK(net, &fl6->saddr); | 806 | if (rt == net->ipv6.ip6_null_entry) { |
808 | out: | 807 | fn = fib6_backtrack(fn, &fl6->saddr); |
808 | if (fn) | ||
809 | goto restart; | ||
810 | } | ||
809 | dst_use(&rt->dst, jiffies); | 811 | dst_use(&rt->dst, jiffies); |
810 | read_unlock_bh(&table->tb6_lock); | 812 | read_unlock_bh(&table->tb6_lock); |
811 | return rt; | 813 | return rt; |
@@ -915,33 +917,48 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, | |||
915 | static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif, | 917 | static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif, |
916 | struct flowi6 *fl6, int flags) | 918 | struct flowi6 *fl6, int flags) |
917 | { | 919 | { |
918 | struct fib6_node *fn; | 920 | struct fib6_node *fn, *saved_fn; |
919 | struct rt6_info *rt, *nrt; | 921 | struct rt6_info *rt, *nrt; |
920 | int strict = 0; | 922 | int strict = 0; |
921 | int attempts = 3; | 923 | int attempts = 3; |
922 | int err; | 924 | int err; |
923 | int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE; | ||
924 | 925 | ||
925 | strict |= flags & RT6_LOOKUP_F_IFACE; | 926 | strict |= flags & RT6_LOOKUP_F_IFACE; |
927 | if (net->ipv6.devconf_all->forwarding == 0) | ||
928 | strict |= RT6_LOOKUP_F_REACHABLE; | ||
926 | 929 | ||
927 | relookup: | 930 | redo_fib6_lookup_lock: |
928 | read_lock_bh(&table->tb6_lock); | 931 | read_lock_bh(&table->tb6_lock); |
929 | 932 | ||
930 | restart_2: | ||
931 | fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); | 933 | fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); |
934 | saved_fn = fn; | ||
932 | 935 | ||
933 | restart: | 936 | redo_rt6_select: |
934 | rt = rt6_select(fn, oif, strict | reachable); | 937 | rt = rt6_select(fn, oif, strict); |
935 | if (rt->rt6i_nsiblings) | 938 | if (rt->rt6i_nsiblings) |
936 | rt = rt6_multipath_select(rt, fl6, oif, strict | reachable); | 939 | rt = rt6_multipath_select(rt, fl6, oif, strict); |
937 | BACKTRACK(net, &fl6->saddr); | 940 | if (rt == net->ipv6.ip6_null_entry) { |
938 | if (rt == net->ipv6.ip6_null_entry || | 941 | fn = fib6_backtrack(fn, &fl6->saddr); |
939 | rt->rt6i_flags & RTF_CACHE) | 942 | if (fn) |
940 | goto out; | 943 | goto redo_rt6_select; |
944 | else if (strict & RT6_LOOKUP_F_REACHABLE) { | ||
945 | /* also consider unreachable route */ | ||
946 | strict &= ~RT6_LOOKUP_F_REACHABLE; | ||
947 | fn = saved_fn; | ||
948 | goto redo_rt6_select; | ||
949 | } else { | ||
950 | dst_hold(&rt->dst); | ||
951 | read_unlock_bh(&table->tb6_lock); | ||
952 | goto out2; | ||
953 | } | ||
954 | } | ||
941 | 955 | ||
942 | dst_hold(&rt->dst); | 956 | dst_hold(&rt->dst); |
943 | read_unlock_bh(&table->tb6_lock); | 957 | read_unlock_bh(&table->tb6_lock); |
944 | 958 | ||
959 | if (rt->rt6i_flags & RTF_CACHE) | ||
960 | goto out2; | ||
961 | |||
945 | if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY))) | 962 | if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY))) |
946 | nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); | 963 | nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); |
947 | else if (!(rt->dst.flags & DST_HOST)) | 964 | else if (!(rt->dst.flags & DST_HOST)) |
@@ -967,15 +984,8 @@ restart: | |||
967 | * released someone could insert this route. Relookup. | 984 | * released someone could insert this route. Relookup. |
968 | */ | 985 | */ |
969 | ip6_rt_put(rt); | 986 | ip6_rt_put(rt); |
970 | goto relookup; | 987 | goto redo_fib6_lookup_lock; |
971 | 988 | ||
972 | out: | ||
973 | if (reachable) { | ||
974 | reachable = 0; | ||
975 | goto restart_2; | ||
976 | } | ||
977 | dst_hold(&rt->dst); | ||
978 | read_unlock_bh(&table->tb6_lock); | ||
979 | out2: | 989 | out2: |
980 | rt->dst.lastuse = jiffies; | 990 | rt->dst.lastuse = jiffies; |
981 | rt->dst.__use++; | 991 | rt->dst.__use++; |
@@ -1235,10 +1245,12 @@ restart: | |||
1235 | rt = net->ipv6.ip6_null_entry; | 1245 | rt = net->ipv6.ip6_null_entry; |
1236 | else if (rt->dst.error) { | 1246 | else if (rt->dst.error) { |
1237 | rt = net->ipv6.ip6_null_entry; | 1247 | rt = net->ipv6.ip6_null_entry; |
1238 | goto out; | 1248 | } else if (rt == net->ipv6.ip6_null_entry) { |
1249 | fn = fib6_backtrack(fn, &fl6->saddr); | ||
1250 | if (fn) | ||
1251 | goto restart; | ||
1239 | } | 1252 | } |
1240 | BACKTRACK(net, &fl6->saddr); | 1253 | |
1241 | out: | ||
1242 | dst_hold(&rt->dst); | 1254 | dst_hold(&rt->dst); |
1243 | 1255 | ||
1244 | read_unlock_bh(&table->tb6_lock); | 1256 | read_unlock_bh(&table->tb6_lock); |
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 2f25cb6347ca..be291baa2ec2 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c | |||
@@ -171,8 +171,11 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
171 | if (!sysctl_tcp_syncookies || !th->ack || th->rst) | 171 | if (!sysctl_tcp_syncookies || !th->ack || th->rst) |
172 | goto out; | 172 | goto out; |
173 | 173 | ||
174 | if (tcp_synq_no_recent_overflow(sk) || | 174 | if (tcp_synq_no_recent_overflow(sk)) |
175 | (mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie)) == 0) { | 175 | goto out; |
176 | |||
177 | mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie); | ||
178 | if (mss == 0) { | ||
176 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); | 179 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); |
177 | goto out; | 180 | goto out; |
178 | } | 181 | } |
@@ -269,4 +272,3 @@ out_free: | |||
269 | reqsk_free(req); | 272 | reqsk_free(req); |
270 | return NULL; | 273 | return NULL; |
271 | } | 274 | } |
272 | |||
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 91729b807c7d..313ef4644069 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c | |||
@@ -306,7 +306,7 @@ void ipxitf_down(struct ipx_interface *intrfc) | |||
306 | spin_unlock_bh(&ipx_interfaces_lock); | 306 | spin_unlock_bh(&ipx_interfaces_lock); |
307 | } | 307 | } |
308 | 308 | ||
309 | static __inline__ void __ipxitf_put(struct ipx_interface *intrfc) | 309 | static void __ipxitf_put(struct ipx_interface *intrfc) |
310 | { | 310 | { |
311 | if (atomic_dec_and_test(&intrfc->refcnt)) | 311 | if (atomic_dec_and_test(&intrfc->refcnt)) |
312 | __ipxitf_down(intrfc); | 312 | __ipxitf_down(intrfc); |
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c index e15c16a517e7..c1d247ebe916 100644 --- a/net/ipx/ipx_proc.c +++ b/net/ipx/ipx_proc.c | |||
@@ -45,7 +45,7 @@ static int ipx_seq_interface_show(struct seq_file *seq, void *v) | |||
45 | } | 45 | } |
46 | 46 | ||
47 | i = list_entry(v, struct ipx_interface, node); | 47 | i = list_entry(v, struct ipx_interface, node); |
48 | seq_printf(seq, "%08lX ", (unsigned long int)ntohl(i->if_netnum)); | 48 | seq_printf(seq, "%08X ", ntohl(i->if_netnum)); |
49 | seq_printf(seq, "%02X%02X%02X%02X%02X%02X ", | 49 | seq_printf(seq, "%02X%02X%02X%02X%02X%02X ", |
50 | i->if_node[0], i->if_node[1], i->if_node[2], | 50 | i->if_node[0], i->if_node[1], i->if_node[2], |
51 | i->if_node[3], i->if_node[4], i->if_node[5]); | 51 | i->if_node[3], i->if_node[4], i->if_node[5]); |
@@ -87,10 +87,10 @@ static int ipx_seq_route_show(struct seq_file *seq, void *v) | |||
87 | 87 | ||
88 | rt = list_entry(v, struct ipx_route, node); | 88 | rt = list_entry(v, struct ipx_route, node); |
89 | 89 | ||
90 | seq_printf(seq, "%08lX ", (unsigned long int)ntohl(rt->ir_net)); | 90 | seq_printf(seq, "%08X ", ntohl(rt->ir_net)); |
91 | if (rt->ir_routed) | 91 | if (rt->ir_routed) |
92 | seq_printf(seq, "%08lX %02X%02X%02X%02X%02X%02X\n", | 92 | seq_printf(seq, "%08X %02X%02X%02X%02X%02X%02X\n", |
93 | (long unsigned int)ntohl(rt->ir_intrfc->if_netnum), | 93 | ntohl(rt->ir_intrfc->if_netnum), |
94 | rt->ir_router_node[0], rt->ir_router_node[1], | 94 | rt->ir_router_node[0], rt->ir_router_node[1], |
95 | rt->ir_router_node[2], rt->ir_router_node[3], | 95 | rt->ir_router_node[2], rt->ir_router_node[3], |
96 | rt->ir_router_node[4], rt->ir_router_node[5]); | 96 | rt->ir_router_node[4], rt->ir_router_node[5]); |
@@ -194,19 +194,19 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v) | |||
194 | s = v; | 194 | s = v; |
195 | ipxs = ipx_sk(s); | 195 | ipxs = ipx_sk(s); |
196 | #ifdef CONFIG_IPX_INTERN | 196 | #ifdef CONFIG_IPX_INTERN |
197 | seq_printf(seq, "%08lX:%02X%02X%02X%02X%02X%02X:%04X ", | 197 | seq_printf(seq, "%08X:%02X%02X%02X%02X%02X%02X:%04X ", |
198 | (unsigned long)ntohl(ipxs->intrfc->if_netnum), | 198 | ntohl(ipxs->intrfc->if_netnum), |
199 | ipxs->node[0], ipxs->node[1], ipxs->node[2], ipxs->node[3], | 199 | ipxs->node[0], ipxs->node[1], ipxs->node[2], ipxs->node[3], |
200 | ipxs->node[4], ipxs->node[5], ntohs(ipxs->port)); | 200 | ipxs->node[4], ipxs->node[5], ntohs(ipxs->port)); |
201 | #else | 201 | #else |
202 | seq_printf(seq, "%08lX:%04X ", (unsigned long) ntohl(ipxs->intrfc->if_netnum), | 202 | seq_printf(seq, "%08X:%04X ", ntohl(ipxs->intrfc->if_netnum), |
203 | ntohs(ipxs->port)); | 203 | ntohs(ipxs->port)); |
204 | #endif /* CONFIG_IPX_INTERN */ | 204 | #endif /* CONFIG_IPX_INTERN */ |
205 | if (s->sk_state != TCP_ESTABLISHED) | 205 | if (s->sk_state != TCP_ESTABLISHED) |
206 | seq_printf(seq, "%-28s", "Not_Connected"); | 206 | seq_printf(seq, "%-28s", "Not_Connected"); |
207 | else { | 207 | else { |
208 | seq_printf(seq, "%08lX:%02X%02X%02X%02X%02X%02X:%04X ", | 208 | seq_printf(seq, "%08X:%02X%02X%02X%02X%02X%02X:%04X ", |
209 | (unsigned long)ntohl(ipxs->dest_addr.net), | 209 | ntohl(ipxs->dest_addr.net), |
210 | ipxs->dest_addr.node[0], ipxs->dest_addr.node[1], | 210 | ipxs->dest_addr.node[0], ipxs->dest_addr.node[1], |
211 | ipxs->dest_addr.node[2], ipxs->dest_addr.node[3], | 211 | ipxs->dest_addr.node[2], ipxs->dest_addr.node[3], |
212 | ipxs->dest_addr.node[4], ipxs->dest_addr.node[5], | 212 | ipxs->dest_addr.node[4], ipxs->dest_addr.node[5], |
diff --git a/net/ipx/sysctl_net_ipx.c b/net/ipx/sysctl_net_ipx.c index ad7c03dedaab..0dafcc561ed6 100644 --- a/net/ipx/sysctl_net_ipx.c +++ b/net/ipx/sysctl_net_ipx.c | |||
@@ -9,14 +9,12 @@ | |||
9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
10 | #include <linux/sysctl.h> | 10 | #include <linux/sysctl.h> |
11 | #include <net/net_namespace.h> | 11 | #include <net/net_namespace.h> |
12 | #include <net/ipx.h> | ||
12 | 13 | ||
13 | #ifndef CONFIG_SYSCTL | 14 | #ifndef CONFIG_SYSCTL |
14 | #error This file should not be compiled without CONFIG_SYSCTL defined | 15 | #error This file should not be compiled without CONFIG_SYSCTL defined |
15 | #endif | 16 | #endif |
16 | 17 | ||
17 | /* From af_ipx.c */ | ||
18 | extern int sysctl_ipx_pprop_broadcasting; | ||
19 | |||
20 | static struct ctl_table ipx_table[] = { | 18 | static struct ctl_table ipx_table[] = { |
21 | { | 19 | { |
22 | .procname = "ipx_pprop_broadcasting", | 20 | .procname = "ipx_pprop_broadcasting", |
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c index 3cdaa046c1bc..fc60d9d738b5 100644 --- a/net/lapb/lapb_iface.c +++ b/net/lapb/lapb_iface.c | |||
@@ -73,6 +73,7 @@ static void __lapb_remove_cb(struct lapb_cb *lapb) | |||
73 | lapb_put(lapb); | 73 | lapb_put(lapb); |
74 | } | 74 | } |
75 | } | 75 | } |
76 | EXPORT_SYMBOL(lapb_register); | ||
76 | 77 | ||
77 | /* | 78 | /* |
78 | * Add a socket to the bound sockets list. | 79 | * Add a socket to the bound sockets list. |
@@ -195,6 +196,7 @@ out: | |||
195 | write_unlock_bh(&lapb_list_lock); | 196 | write_unlock_bh(&lapb_list_lock); |
196 | return rc; | 197 | return rc; |
197 | } | 198 | } |
199 | EXPORT_SYMBOL(lapb_unregister); | ||
198 | 200 | ||
199 | int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms) | 201 | int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms) |
200 | { | 202 | { |
@@ -227,6 +229,7 @@ int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms) | |||
227 | out: | 229 | out: |
228 | return rc; | 230 | return rc; |
229 | } | 231 | } |
232 | EXPORT_SYMBOL(lapb_getparms); | ||
230 | 233 | ||
231 | int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms) | 234 | int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms) |
232 | { | 235 | { |
@@ -262,6 +265,7 @@ out_put: | |||
262 | out: | 265 | out: |
263 | return rc; | 266 | return rc; |
264 | } | 267 | } |
268 | EXPORT_SYMBOL(lapb_setparms); | ||
265 | 269 | ||
266 | int lapb_connect_request(struct net_device *dev) | 270 | int lapb_connect_request(struct net_device *dev) |
267 | { | 271 | { |
@@ -290,6 +294,7 @@ out_put: | |||
290 | out: | 294 | out: |
291 | return rc; | 295 | return rc; |
292 | } | 296 | } |
297 | EXPORT_SYMBOL(lapb_connect_request); | ||
293 | 298 | ||
294 | int lapb_disconnect_request(struct net_device *dev) | 299 | int lapb_disconnect_request(struct net_device *dev) |
295 | { | 300 | { |
@@ -334,6 +339,7 @@ out_put: | |||
334 | out: | 339 | out: |
335 | return rc; | 340 | return rc; |
336 | } | 341 | } |
342 | EXPORT_SYMBOL(lapb_disconnect_request); | ||
337 | 343 | ||
338 | int lapb_data_request(struct net_device *dev, struct sk_buff *skb) | 344 | int lapb_data_request(struct net_device *dev, struct sk_buff *skb) |
339 | { | 345 | { |
@@ -355,6 +361,7 @@ out_put: | |||
355 | out: | 361 | out: |
356 | return rc; | 362 | return rc; |
357 | } | 363 | } |
364 | EXPORT_SYMBOL(lapb_data_request); | ||
358 | 365 | ||
359 | int lapb_data_received(struct net_device *dev, struct sk_buff *skb) | 366 | int lapb_data_received(struct net_device *dev, struct sk_buff *skb) |
360 | { | 367 | { |
@@ -369,6 +376,7 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb) | |||
369 | 376 | ||
370 | return rc; | 377 | return rc; |
371 | } | 378 | } |
379 | EXPORT_SYMBOL(lapb_data_received); | ||
372 | 380 | ||
373 | void lapb_connect_confirmation(struct lapb_cb *lapb, int reason) | 381 | void lapb_connect_confirmation(struct lapb_cb *lapb, int reason) |
374 | { | 382 | { |
@@ -415,15 +423,6 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb) | |||
415 | return used; | 423 | return used; |
416 | } | 424 | } |
417 | 425 | ||
418 | EXPORT_SYMBOL(lapb_register); | ||
419 | EXPORT_SYMBOL(lapb_unregister); | ||
420 | EXPORT_SYMBOL(lapb_getparms); | ||
421 | EXPORT_SYMBOL(lapb_setparms); | ||
422 | EXPORT_SYMBOL(lapb_connect_request); | ||
423 | EXPORT_SYMBOL(lapb_disconnect_request); | ||
424 | EXPORT_SYMBOL(lapb_data_request); | ||
425 | EXPORT_SYMBOL(lapb_data_received); | ||
426 | |||
427 | static int __init lapb_init(void) | 426 | static int __init lapb_init(void) |
428 | { | 427 | { |
429 | return 0; | 428 | return 0; |
diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c index 25c31c0a3fdb..6daf391b3e84 100644 --- a/net/llc/llc_if.c +++ b/net/llc/llc_if.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/netdevice.h> | 17 | #include <linux/netdevice.h> |
18 | #include <asm/errno.h> | 18 | #include <linux/errno.h> |
19 | #include <net/llc_if.h> | 19 | #include <net/llc_if.h> |
20 | #include <net/llc_sap.h> | 20 | #include <net/llc_sap.h> |
21 | #include <net/llc_s_ev.h> | 21 | #include <net/llc_s_ev.h> |
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index ba3bb8203b99..2a9673e39ca1 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig | |||
@@ -29,11 +29,11 @@ config OPENVSWITCH | |||
29 | If unsure, say N. | 29 | If unsure, say N. |
30 | 30 | ||
31 | config OPENVSWITCH_GRE | 31 | config OPENVSWITCH_GRE |
32 | bool "Open vSwitch GRE tunneling support" | 32 | tristate "Open vSwitch GRE tunneling support" |
33 | depends on INET | 33 | depends on INET |
34 | depends on OPENVSWITCH | 34 | depends on OPENVSWITCH |
35 | depends on NET_IPGRE_DEMUX && !(OPENVSWITCH=y && NET_IPGRE_DEMUX=m) | 35 | depends on NET_IPGRE_DEMUX |
36 | default y | 36 | default OPENVSWITCH |
37 | ---help--- | 37 | ---help--- |
38 | If you say Y here, then the Open vSwitch will be able create GRE | 38 | If you say Y here, then the Open vSwitch will be able create GRE |
39 | vport. | 39 | vport. |
@@ -43,11 +43,11 @@ config OPENVSWITCH_GRE | |||
43 | If unsure, say Y. | 43 | If unsure, say Y. |
44 | 44 | ||
45 | config OPENVSWITCH_VXLAN | 45 | config OPENVSWITCH_VXLAN |
46 | bool "Open vSwitch VXLAN tunneling support" | 46 | tristate "Open vSwitch VXLAN tunneling support" |
47 | depends on INET | 47 | depends on INET |
48 | depends on OPENVSWITCH | 48 | depends on OPENVSWITCH |
49 | depends on VXLAN && !(OPENVSWITCH=y && VXLAN=m) | 49 | depends on VXLAN |
50 | default y | 50 | default OPENVSWITCH |
51 | ---help--- | 51 | ---help--- |
52 | If you say Y here, then the Open vSwitch will be able create vxlan vport. | 52 | If you say Y here, then the Open vSwitch will be able create vxlan vport. |
53 | 53 | ||
@@ -56,11 +56,11 @@ config OPENVSWITCH_VXLAN | |||
56 | If unsure, say Y. | 56 | If unsure, say Y. |
57 | 57 | ||
58 | config OPENVSWITCH_GENEVE | 58 | config OPENVSWITCH_GENEVE |
59 | bool "Open vSwitch Geneve tunneling support" | 59 | tristate "Open vSwitch Geneve tunneling support" |
60 | depends on INET | 60 | depends on INET |
61 | depends on OPENVSWITCH | 61 | depends on OPENVSWITCH |
62 | depends on GENEVE && !(OPENVSWITCH=y && GENEVE=m) | 62 | depends on GENEVE |
63 | default y | 63 | default OPENVSWITCH |
64 | ---help--- | 64 | ---help--- |
65 | If you say Y here, then the Open vSwitch will be able create geneve vport. | 65 | If you say Y here, then the Open vSwitch will be able create geneve vport. |
66 | 66 | ||
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile index 9a33a273c375..91b9478413ef 100644 --- a/net/openvswitch/Makefile +++ b/net/openvswitch/Makefile | |||
@@ -15,14 +15,6 @@ openvswitch-y := \ | |||
15 | vport-internal_dev.o \ | 15 | vport-internal_dev.o \ |
16 | vport-netdev.o | 16 | vport-netdev.o |
17 | 17 | ||
18 | ifneq ($(CONFIG_OPENVSWITCH_GENEVE),) | 18 | obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o |
19 | openvswitch-y += vport-geneve.o | 19 | obj-$(CONFIG_OPENVSWITCH_VXLAN) += vport-vxlan.o |
20 | endif | 20 | obj-$(CONFIG_OPENVSWITCH_GRE) += vport-gre.o |
21 | |||
22 | ifneq ($(CONFIG_OPENVSWITCH_VXLAN),) | ||
23 | openvswitch-y += vport-vxlan.o | ||
24 | endif | ||
25 | |||
26 | ifneq ($(CONFIG_OPENVSWITCH_GRE),) | ||
27 | openvswitch-y += vport-gre.o | ||
28 | endif | ||
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 006886dbee36..922c133b1933 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
@@ -504,11 +504,6 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb, | |||
504 | return ovs_dp_upcall(dp, skb, &upcall); | 504 | return ovs_dp_upcall(dp, skb, &upcall); |
505 | } | 505 | } |
506 | 506 | ||
507 | static bool last_action(const struct nlattr *a, int rem) | ||
508 | { | ||
509 | return a->nla_len == rem; | ||
510 | } | ||
511 | |||
512 | static int sample(struct datapath *dp, struct sk_buff *skb, | 507 | static int sample(struct datapath *dp, struct sk_buff *skb, |
513 | struct sw_flow_key *key, const struct nlattr *attr) | 508 | struct sw_flow_key *key, const struct nlattr *attr) |
514 | { | 509 | { |
@@ -543,7 +538,7 @@ static int sample(struct datapath *dp, struct sk_buff *skb, | |||
543 | * user space. This skb will be consumed by its caller. | 538 | * user space. This skb will be consumed by its caller. |
544 | */ | 539 | */ |
545 | if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE && | 540 | if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE && |
546 | last_action(a, rem))) | 541 | nla_is_last(a, rem))) |
547 | return output_userspace(dp, skb, key, a); | 542 | return output_userspace(dp, skb, key, a); |
548 | 543 | ||
549 | skb = skb_clone(skb, GFP_ATOMIC); | 544 | skb = skb_clone(skb, GFP_ATOMIC); |
@@ -633,7 +628,7 @@ static int execute_recirc(struct datapath *dp, struct sk_buff *skb, | |||
633 | if (err) | 628 | if (err) |
634 | return err; | 629 | return err; |
635 | 630 | ||
636 | if (!last_action(a, rem)) { | 631 | if (!nla_is_last(a, rem)) { |
637 | /* Recirc action is the not the last action | 632 | /* Recirc action is the not the last action |
638 | * of the action list, need to clone the skb. | 633 | * of the action list, need to clone the skb. |
639 | */ | 634 | */ |
@@ -707,7 +702,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, | |||
707 | 702 | ||
708 | case OVS_ACTION_ATTR_RECIRC: | 703 | case OVS_ACTION_ATTR_RECIRC: |
709 | err = execute_recirc(dp, skb, key, a, rem); | 704 | err = execute_recirc(dp, skb, key, a, rem); |
710 | if (last_action(a, rem)) { | 705 | if (nla_is_last(a, rem)) { |
711 | /* If this is the last action, the skb has | 706 | /* If this is the last action, the skb has |
712 | * been consumed or freed. | 707 | * been consumed or freed. |
713 | * Return immediately. | 708 | * Return immediately. |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e6d7255183eb..f18302f32049 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -59,6 +59,7 @@ | |||
59 | #include "vport-netdev.h" | 59 | #include "vport-netdev.h" |
60 | 60 | ||
61 | int ovs_net_id __read_mostly; | 61 | int ovs_net_id __read_mostly; |
62 | EXPORT_SYMBOL(ovs_net_id); | ||
62 | 63 | ||
63 | static struct genl_family dp_packet_genl_family; | 64 | static struct genl_family dp_packet_genl_family; |
64 | static struct genl_family dp_flow_genl_family; | 65 | static struct genl_family dp_flow_genl_family; |
@@ -130,6 +131,7 @@ int lockdep_ovsl_is_held(void) | |||
130 | else | 131 | else |
131 | return 1; | 132 | return 1; |
132 | } | 133 | } |
134 | EXPORT_SYMBOL(lockdep_ovsl_is_held); | ||
133 | #endif | 135 | #endif |
134 | 136 | ||
135 | static struct vport *new_vport(const struct vport_parms *); | 137 | static struct vport *new_vport(const struct vport_parms *); |
@@ -1764,6 +1766,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
1764 | return -ENOMEM; | 1766 | return -ENOMEM; |
1765 | 1767 | ||
1766 | ovs_lock(); | 1768 | ovs_lock(); |
1769 | restart: | ||
1767 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); | 1770 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); |
1768 | err = -ENODEV; | 1771 | err = -ENODEV; |
1769 | if (!dp) | 1772 | if (!dp) |
@@ -1795,8 +1798,11 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
1795 | 1798 | ||
1796 | vport = new_vport(&parms); | 1799 | vport = new_vport(&parms); |
1797 | err = PTR_ERR(vport); | 1800 | err = PTR_ERR(vport); |
1798 | if (IS_ERR(vport)) | 1801 | if (IS_ERR(vport)) { |
1802 | if (err == -EAGAIN) | ||
1803 | goto restart; | ||
1799 | goto exit_unlock_free; | 1804 | goto exit_unlock_free; |
1805 | } | ||
1800 | 1806 | ||
1801 | err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, | 1807 | err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, |
1802 | info->snd_seq, 0, OVS_VPORT_CMD_NEW); | 1808 | info->snd_seq, 0, OVS_VPORT_CMD_NEW); |
@@ -2112,12 +2118,18 @@ static int __init dp_init(void) | |||
2112 | if (err) | 2118 | if (err) |
2113 | goto error_netns_exit; | 2119 | goto error_netns_exit; |
2114 | 2120 | ||
2121 | err = ovs_netdev_init(); | ||
2122 | if (err) | ||
2123 | goto error_unreg_notifier; | ||
2124 | |||
2115 | err = dp_register_genl(); | 2125 | err = dp_register_genl(); |
2116 | if (err < 0) | 2126 | if (err < 0) |
2117 | goto error_unreg_notifier; | 2127 | goto error_unreg_netdev; |
2118 | 2128 | ||
2119 | return 0; | 2129 | return 0; |
2120 | 2130 | ||
2131 | error_unreg_netdev: | ||
2132 | ovs_netdev_exit(); | ||
2121 | error_unreg_notifier: | 2133 | error_unreg_notifier: |
2122 | unregister_netdevice_notifier(&ovs_dp_device_notifier); | 2134 | unregister_netdevice_notifier(&ovs_dp_device_notifier); |
2123 | error_netns_exit: | 2135 | error_netns_exit: |
@@ -2137,6 +2149,7 @@ error: | |||
2137 | static void dp_cleanup(void) | 2149 | static void dp_cleanup(void) |
2138 | { | 2150 | { |
2139 | dp_unregister_genl(ARRAY_SIZE(dp_genl_families)); | 2151 | dp_unregister_genl(ARRAY_SIZE(dp_genl_families)); |
2152 | ovs_netdev_exit(); | ||
2140 | unregister_netdevice_notifier(&ovs_dp_device_notifier); | 2153 | unregister_netdevice_notifier(&ovs_dp_device_notifier); |
2141 | unregister_pernet_device(&ovs_net_ops); | 2154 | unregister_pernet_device(&ovs_net_ops); |
2142 | rcu_barrier(); | 2155 | rcu_barrier(); |
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c index 106a9d80b663..70c9765011f4 100644 --- a/net/openvswitch/vport-geneve.c +++ b/net/openvswitch/vport-geneve.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/rculist.h> | 17 | #include <linux/rculist.h> |
18 | #include <linux/udp.h> | 18 | #include <linux/udp.h> |
19 | #include <linux/if_vlan.h> | 19 | #include <linux/if_vlan.h> |
20 | #include <linux/module.h> | ||
20 | 21 | ||
21 | #include <net/geneve.h> | 22 | #include <net/geneve.h> |
22 | #include <net/icmp.h> | 23 | #include <net/icmp.h> |
@@ -28,6 +29,8 @@ | |||
28 | #include "datapath.h" | 29 | #include "datapath.h" |
29 | #include "vport.h" | 30 | #include "vport.h" |
30 | 31 | ||
32 | static struct vport_ops ovs_geneve_vport_ops; | ||
33 | |||
31 | /** | 34 | /** |
32 | * struct geneve_port - Keeps track of open UDP ports | 35 | * struct geneve_port - Keeps track of open UDP ports |
33 | * @gs: The socket created for this port number. | 36 | * @gs: The socket created for this port number. |
@@ -225,11 +228,29 @@ static const char *geneve_get_name(const struct vport *vport) | |||
225 | return geneve_port->name; | 228 | return geneve_port->name; |
226 | } | 229 | } |
227 | 230 | ||
228 | const struct vport_ops ovs_geneve_vport_ops = { | 231 | static struct vport_ops ovs_geneve_vport_ops = { |
229 | .type = OVS_VPORT_TYPE_GENEVE, | 232 | .type = OVS_VPORT_TYPE_GENEVE, |
230 | .create = geneve_tnl_create, | 233 | .create = geneve_tnl_create, |
231 | .destroy = geneve_tnl_destroy, | 234 | .destroy = geneve_tnl_destroy, |
232 | .get_name = geneve_get_name, | 235 | .get_name = geneve_get_name, |
233 | .get_options = geneve_get_options, | 236 | .get_options = geneve_get_options, |
234 | .send = geneve_tnl_send, | 237 | .send = geneve_tnl_send, |
238 | .owner = THIS_MODULE, | ||
235 | }; | 239 | }; |
240 | |||
241 | static int __init ovs_geneve_tnl_init(void) | ||
242 | { | ||
243 | return ovs_vport_ops_register(&ovs_geneve_vport_ops); | ||
244 | } | ||
245 | |||
246 | static void __exit ovs_geneve_tnl_exit(void) | ||
247 | { | ||
248 | ovs_vport_ops_unregister(&ovs_geneve_vport_ops); | ||
249 | } | ||
250 | |||
251 | module_init(ovs_geneve_tnl_init); | ||
252 | module_exit(ovs_geneve_tnl_exit); | ||
253 | |||
254 | MODULE_DESCRIPTION("OVS: Geneve swiching port"); | ||
255 | MODULE_LICENSE("GPL"); | ||
256 | MODULE_ALIAS("vport-type-5"); | ||
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index 108b82da2fd9..00270b608844 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/jhash.h> | 29 | #include <linux/jhash.h> |
30 | #include <linux/list.h> | 30 | #include <linux/list.h> |
31 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
32 | #include <linux/module.h> | ||
32 | #include <linux/workqueue.h> | 33 | #include <linux/workqueue.h> |
33 | #include <linux/rculist.h> | 34 | #include <linux/rculist.h> |
34 | #include <net/route.h> | 35 | #include <net/route.h> |
@@ -45,6 +46,8 @@ | |||
45 | #include "datapath.h" | 46 | #include "datapath.h" |
46 | #include "vport.h" | 47 | #include "vport.h" |
47 | 48 | ||
49 | static struct vport_ops ovs_gre_vport_ops; | ||
50 | |||
48 | /* Returns the least-significant 32 bits of a __be64. */ | 51 | /* Returns the least-significant 32 bits of a __be64. */ |
49 | static __be32 be64_get_low32(__be64 x) | 52 | static __be32 be64_get_low32(__be64 x) |
50 | { | 53 | { |
@@ -281,10 +284,28 @@ static void gre_tnl_destroy(struct vport *vport) | |||
281 | gre_exit(); | 284 | gre_exit(); |
282 | } | 285 | } |
283 | 286 | ||
284 | const struct vport_ops ovs_gre_vport_ops = { | 287 | static struct vport_ops ovs_gre_vport_ops = { |
285 | .type = OVS_VPORT_TYPE_GRE, | 288 | .type = OVS_VPORT_TYPE_GRE, |
286 | .create = gre_create, | 289 | .create = gre_create, |
287 | .destroy = gre_tnl_destroy, | 290 | .destroy = gre_tnl_destroy, |
288 | .get_name = gre_get_name, | 291 | .get_name = gre_get_name, |
289 | .send = gre_tnl_send, | 292 | .send = gre_tnl_send, |
293 | .owner = THIS_MODULE, | ||
290 | }; | 294 | }; |
295 | |||
296 | static int __init ovs_gre_tnl_init(void) | ||
297 | { | ||
298 | return ovs_vport_ops_register(&ovs_gre_vport_ops); | ||
299 | } | ||
300 | |||
301 | static void __exit ovs_gre_tnl_exit(void) | ||
302 | { | ||
303 | ovs_vport_ops_unregister(&ovs_gre_vport_ops); | ||
304 | } | ||
305 | |||
306 | module_init(ovs_gre_tnl_init); | ||
307 | module_exit(ovs_gre_tnl_exit); | ||
308 | |||
309 | MODULE_DESCRIPTION("OVS: GRE switching port"); | ||
310 | MODULE_LICENSE("GPL"); | ||
311 | MODULE_ALIAS("vport-type-3"); | ||
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 84516126e5f3..10dc07e1678b 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c | |||
@@ -36,6 +36,8 @@ struct internal_dev { | |||
36 | struct vport *vport; | 36 | struct vport *vport; |
37 | }; | 37 | }; |
38 | 38 | ||
39 | static struct vport_ops ovs_internal_vport_ops; | ||
40 | |||
39 | static struct internal_dev *internal_dev_priv(struct net_device *netdev) | 41 | static struct internal_dev *internal_dev_priv(struct net_device *netdev) |
40 | { | 42 | { |
41 | return netdev_priv(netdev); | 43 | return netdev_priv(netdev); |
@@ -238,7 +240,7 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb) | |||
238 | return len; | 240 | return len; |
239 | } | 241 | } |
240 | 242 | ||
241 | const struct vport_ops ovs_internal_vport_ops = { | 243 | static struct vport_ops ovs_internal_vport_ops = { |
242 | .type = OVS_VPORT_TYPE_INTERNAL, | 244 | .type = OVS_VPORT_TYPE_INTERNAL, |
243 | .create = internal_dev_create, | 245 | .create = internal_dev_create, |
244 | .destroy = internal_dev_destroy, | 246 | .destroy = internal_dev_destroy, |
@@ -261,10 +263,21 @@ struct vport *ovs_internal_dev_get_vport(struct net_device *netdev) | |||
261 | 263 | ||
262 | int ovs_internal_dev_rtnl_link_register(void) | 264 | int ovs_internal_dev_rtnl_link_register(void) |
263 | { | 265 | { |
264 | return rtnl_link_register(&internal_dev_link_ops); | 266 | int err; |
267 | |||
268 | err = rtnl_link_register(&internal_dev_link_ops); | ||
269 | if (err < 0) | ||
270 | return err; | ||
271 | |||
272 | err = ovs_vport_ops_register(&ovs_internal_vport_ops); | ||
273 | if (err < 0) | ||
274 | rtnl_link_unregister(&internal_dev_link_ops); | ||
275 | |||
276 | return err; | ||
265 | } | 277 | } |
266 | 278 | ||
267 | void ovs_internal_dev_rtnl_link_unregister(void) | 279 | void ovs_internal_dev_rtnl_link_unregister(void) |
268 | { | 280 | { |
281 | ovs_vport_ops_unregister(&ovs_internal_vport_ops); | ||
269 | rtnl_link_unregister(&internal_dev_link_ops); | 282 | rtnl_link_unregister(&internal_dev_link_ops); |
270 | } | 283 | } |
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index d21f77d875ba..877ee74b4f08 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include "vport-internal_dev.h" | 33 | #include "vport-internal_dev.h" |
34 | #include "vport-netdev.h" | 34 | #include "vport-netdev.h" |
35 | 35 | ||
36 | static struct vport_ops ovs_netdev_vport_ops; | ||
37 | |||
36 | /* Must be called with rcu_read_lock. */ | 38 | /* Must be called with rcu_read_lock. */ |
37 | static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) | 39 | static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) |
38 | { | 40 | { |
@@ -224,10 +226,20 @@ struct vport *ovs_netdev_get_vport(struct net_device *dev) | |||
224 | return NULL; | 226 | return NULL; |
225 | } | 227 | } |
226 | 228 | ||
227 | const struct vport_ops ovs_netdev_vport_ops = { | 229 | static struct vport_ops ovs_netdev_vport_ops = { |
228 | .type = OVS_VPORT_TYPE_NETDEV, | 230 | .type = OVS_VPORT_TYPE_NETDEV, |
229 | .create = netdev_create, | 231 | .create = netdev_create, |
230 | .destroy = netdev_destroy, | 232 | .destroy = netdev_destroy, |
231 | .get_name = ovs_netdev_get_name, | 233 | .get_name = ovs_netdev_get_name, |
232 | .send = netdev_send, | 234 | .send = netdev_send, |
233 | }; | 235 | }; |
236 | |||
237 | int __init ovs_netdev_init(void) | ||
238 | { | ||
239 | return ovs_vport_ops_register(&ovs_netdev_vport_ops); | ||
240 | } | ||
241 | |||
242 | void ovs_netdev_exit(void) | ||
243 | { | ||
244 | ovs_vport_ops_unregister(&ovs_netdev_vport_ops); | ||
245 | } | ||
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h index 8df01c1127e5..6f7038e79c52 100644 --- a/net/openvswitch/vport-netdev.h +++ b/net/openvswitch/vport-netdev.h | |||
@@ -41,4 +41,7 @@ netdev_vport_priv(const struct vport *vport) | |||
41 | const char *ovs_netdev_get_name(const struct vport *); | 41 | const char *ovs_netdev_get_name(const struct vport *); |
42 | void ovs_netdev_detach_dev(struct vport *); | 42 | void ovs_netdev_detach_dev(struct vport *); |
43 | 43 | ||
44 | int __init ovs_netdev_init(void); | ||
45 | void ovs_netdev_exit(void); | ||
46 | |||
44 | #endif /* vport_netdev.h */ | 47 | #endif /* vport_netdev.h */ |
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index 2735e01dca73..965e7500c5a6 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/net.h> | 24 | #include <linux/net.h> |
25 | #include <linux/rculist.h> | 25 | #include <linux/rculist.h> |
26 | #include <linux/udp.h> | 26 | #include <linux/udp.h> |
27 | #include <linux/module.h> | ||
27 | 28 | ||
28 | #include <net/icmp.h> | 29 | #include <net/icmp.h> |
29 | #include <net/ip.h> | 30 | #include <net/ip.h> |
@@ -50,6 +51,8 @@ struct vxlan_port { | |||
50 | char name[IFNAMSIZ]; | 51 | char name[IFNAMSIZ]; |
51 | }; | 52 | }; |
52 | 53 | ||
54 | static struct vport_ops ovs_vxlan_vport_ops; | ||
55 | |||
53 | static inline struct vxlan_port *vxlan_vport(const struct vport *vport) | 56 | static inline struct vxlan_port *vxlan_vport(const struct vport *vport) |
54 | { | 57 | { |
55 | return vport_priv(vport); | 58 | return vport_priv(vport); |
@@ -192,11 +195,29 @@ static const char *vxlan_get_name(const struct vport *vport) | |||
192 | return vxlan_port->name; | 195 | return vxlan_port->name; |
193 | } | 196 | } |
194 | 197 | ||
195 | const struct vport_ops ovs_vxlan_vport_ops = { | 198 | static struct vport_ops ovs_vxlan_vport_ops = { |
196 | .type = OVS_VPORT_TYPE_VXLAN, | 199 | .type = OVS_VPORT_TYPE_VXLAN, |
197 | .create = vxlan_tnl_create, | 200 | .create = vxlan_tnl_create, |
198 | .destroy = vxlan_tnl_destroy, | 201 | .destroy = vxlan_tnl_destroy, |
199 | .get_name = vxlan_get_name, | 202 | .get_name = vxlan_get_name, |
200 | .get_options = vxlan_get_options, | 203 | .get_options = vxlan_get_options, |
201 | .send = vxlan_tnl_send, | 204 | .send = vxlan_tnl_send, |
205 | .owner = THIS_MODULE, | ||
202 | }; | 206 | }; |
207 | |||
208 | static int __init ovs_vxlan_tnl_init(void) | ||
209 | { | ||
210 | return ovs_vport_ops_register(&ovs_vxlan_vport_ops); | ||
211 | } | ||
212 | |||
213 | static void __exit ovs_vxlan_tnl_exit(void) | ||
214 | { | ||
215 | ovs_vport_ops_unregister(&ovs_vxlan_vport_ops); | ||
216 | } | ||
217 | |||
218 | module_init(ovs_vxlan_tnl_init); | ||
219 | module_exit(ovs_vxlan_tnl_exit); | ||
220 | |||
221 | MODULE_DESCRIPTION("OVS: VXLAN switching port"); | ||
222 | MODULE_LICENSE("GPL"); | ||
223 | MODULE_ALIAS("vport-type-4"); | ||
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 6015802ebe6f..8168ef021337 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/rtnetlink.h> | 28 | #include <linux/rtnetlink.h> |
29 | #include <linux/compat.h> | 29 | #include <linux/compat.h> |
30 | #include <net/net_namespace.h> | 30 | #include <net/net_namespace.h> |
31 | #include <linux/module.h> | ||
31 | 32 | ||
32 | #include "datapath.h" | 33 | #include "datapath.h" |
33 | #include "vport.h" | 34 | #include "vport.h" |
@@ -36,22 +37,7 @@ | |||
36 | static void ovs_vport_record_error(struct vport *, | 37 | static void ovs_vport_record_error(struct vport *, |
37 | enum vport_err_type err_type); | 38 | enum vport_err_type err_type); |
38 | 39 | ||
39 | /* List of statically compiled vport implementations. Don't forget to also | 40 | static LIST_HEAD(vport_ops_list); |
40 | * add yours to the list at the bottom of vport.h. */ | ||
41 | static const struct vport_ops *vport_ops_list[] = { | ||
42 | &ovs_netdev_vport_ops, | ||
43 | &ovs_internal_vport_ops, | ||
44 | |||
45 | #ifdef CONFIG_OPENVSWITCH_GRE | ||
46 | &ovs_gre_vport_ops, | ||
47 | #endif | ||
48 | #ifdef CONFIG_OPENVSWITCH_VXLAN | ||
49 | &ovs_vxlan_vport_ops, | ||
50 | #endif | ||
51 | #ifdef CONFIG_OPENVSWITCH_GENEVE | ||
52 | &ovs_geneve_vport_ops, | ||
53 | #endif | ||
54 | }; | ||
55 | 41 | ||
56 | /* Protected by RCU read lock for reading, ovs_mutex for writing. */ | 42 | /* Protected by RCU read lock for reading, ovs_mutex for writing. */ |
57 | static struct hlist_head *dev_table; | 43 | static struct hlist_head *dev_table; |
@@ -88,6 +74,32 @@ static struct hlist_head *hash_bucket(struct net *net, const char *name) | |||
88 | return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)]; | 74 | return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)]; |
89 | } | 75 | } |
90 | 76 | ||
77 | int ovs_vport_ops_register(struct vport_ops *ops) | ||
78 | { | ||
79 | int err = -EEXIST; | ||
80 | struct vport_ops *o; | ||
81 | |||
82 | ovs_lock(); | ||
83 | list_for_each_entry(o, &vport_ops_list, list) | ||
84 | if (ops->type == o->type) | ||
85 | goto errout; | ||
86 | |||
87 | list_add_tail(&ops->list, &vport_ops_list); | ||
88 | err = 0; | ||
89 | errout: | ||
90 | ovs_unlock(); | ||
91 | return err; | ||
92 | } | ||
93 | EXPORT_SYMBOL(ovs_vport_ops_register); | ||
94 | |||
95 | void ovs_vport_ops_unregister(struct vport_ops *ops) | ||
96 | { | ||
97 | ovs_lock(); | ||
98 | list_del(&ops->list); | ||
99 | ovs_unlock(); | ||
100 | } | ||
101 | EXPORT_SYMBOL(ovs_vport_ops_unregister); | ||
102 | |||
91 | /** | 103 | /** |
92 | * ovs_vport_locate - find a port that has already been created | 104 | * ovs_vport_locate - find a port that has already been created |
93 | * | 105 | * |
@@ -153,6 +165,7 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops, | |||
153 | 165 | ||
154 | return vport; | 166 | return vport; |
155 | } | 167 | } |
168 | EXPORT_SYMBOL(ovs_vport_alloc); | ||
156 | 169 | ||
157 | /** | 170 | /** |
158 | * ovs_vport_free - uninitialize and free vport | 171 | * ovs_vport_free - uninitialize and free vport |
@@ -173,6 +186,18 @@ void ovs_vport_free(struct vport *vport) | |||
173 | free_percpu(vport->percpu_stats); | 186 | free_percpu(vport->percpu_stats); |
174 | kfree(vport); | 187 | kfree(vport); |
175 | } | 188 | } |
189 | EXPORT_SYMBOL(ovs_vport_free); | ||
190 | |||
191 | static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms) | ||
192 | { | ||
193 | struct vport_ops *ops; | ||
194 | |||
195 | list_for_each_entry(ops, &vport_ops_list, list) | ||
196 | if (ops->type == parms->type) | ||
197 | return ops; | ||
198 | |||
199 | return NULL; | ||
200 | } | ||
176 | 201 | ||
177 | /** | 202 | /** |
178 | * ovs_vport_add - add vport device (for kernel callers) | 203 | * ovs_vport_add - add vport device (for kernel callers) |
@@ -184,31 +209,40 @@ void ovs_vport_free(struct vport *vport) | |||
184 | */ | 209 | */ |
185 | struct vport *ovs_vport_add(const struct vport_parms *parms) | 210 | struct vport *ovs_vport_add(const struct vport_parms *parms) |
186 | { | 211 | { |
212 | struct vport_ops *ops; | ||
187 | struct vport *vport; | 213 | struct vport *vport; |
188 | int err = 0; | ||
189 | int i; | ||
190 | 214 | ||
191 | for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) { | 215 | ops = ovs_vport_lookup(parms); |
192 | if (vport_ops_list[i]->type == parms->type) { | 216 | if (ops) { |
193 | struct hlist_head *bucket; | 217 | struct hlist_head *bucket; |
194 | 218 | ||
195 | vport = vport_ops_list[i]->create(parms); | 219 | if (!try_module_get(ops->owner)) |
196 | if (IS_ERR(vport)) { | 220 | return ERR_PTR(-EAFNOSUPPORT); |
197 | err = PTR_ERR(vport); | ||
198 | goto out; | ||
199 | } | ||
200 | 221 | ||
201 | bucket = hash_bucket(ovs_dp_get_net(vport->dp), | 222 | vport = ops->create(parms); |
202 | vport->ops->get_name(vport)); | 223 | if (IS_ERR(vport)) { |
203 | hlist_add_head_rcu(&vport->hash_node, bucket); | 224 | module_put(ops->owner); |
204 | return vport; | 225 | return vport; |
205 | } | 226 | } |
227 | |||
228 | bucket = hash_bucket(ovs_dp_get_net(vport->dp), | ||
229 | vport->ops->get_name(vport)); | ||
230 | hlist_add_head_rcu(&vport->hash_node, bucket); | ||
231 | return vport; | ||
206 | } | 232 | } |
207 | 233 | ||
208 | err = -EAFNOSUPPORT; | 234 | /* Unlock to attempt module load and return -EAGAIN if load |
235 | * was successful as we need to restart the port addition | ||
236 | * workflow. | ||
237 | */ | ||
238 | ovs_unlock(); | ||
239 | request_module("vport-type-%d", parms->type); | ||
240 | ovs_lock(); | ||
209 | 241 | ||
210 | out: | 242 | if (!ovs_vport_lookup(parms)) |
211 | return ERR_PTR(err); | 243 | return ERR_PTR(-EAFNOSUPPORT); |
244 | else | ||
245 | return ERR_PTR(-EAGAIN); | ||
212 | } | 246 | } |
213 | 247 | ||
214 | /** | 248 | /** |
@@ -242,6 +276,8 @@ void ovs_vport_del(struct vport *vport) | |||
242 | hlist_del_rcu(&vport->hash_node); | 276 | hlist_del_rcu(&vport->hash_node); |
243 | 277 | ||
244 | vport->ops->destroy(vport); | 278 | vport->ops->destroy(vport); |
279 | |||
280 | module_put(vport->ops->owner); | ||
245 | } | 281 | } |
246 | 282 | ||
247 | /** | 283 | /** |
@@ -457,6 +493,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, | |||
457 | } | 493 | } |
458 | ovs_dp_process_packet(skb, &key); | 494 | ovs_dp_process_packet(skb, &key); |
459 | } | 495 | } |
496 | EXPORT_SYMBOL(ovs_vport_receive); | ||
460 | 497 | ||
461 | /** | 498 | /** |
462 | * ovs_vport_send - send a packet on a device | 499 | * ovs_vport_send - send a packet on a device |
@@ -535,3 +572,4 @@ void ovs_vport_deferred_free(struct vport *vport) | |||
535 | 572 | ||
536 | call_rcu(&vport->rcu, free_vport_rcu); | 573 | call_rcu(&vport->rcu, free_vport_rcu); |
537 | } | 574 | } |
575 | EXPORT_SYMBOL(ovs_vport_deferred_free); | ||
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index 8942125de3a6..e41c3facf799 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h | |||
@@ -161,6 +161,9 @@ struct vport_ops { | |||
161 | const char *(*get_name)(const struct vport *); | 161 | const char *(*get_name)(const struct vport *); |
162 | 162 | ||
163 | int (*send)(struct vport *, struct sk_buff *); | 163 | int (*send)(struct vport *, struct sk_buff *); |
164 | |||
165 | struct module *owner; | ||
166 | struct list_head list; | ||
164 | }; | 167 | }; |
165 | 168 | ||
166 | enum vport_err_type { | 169 | enum vport_err_type { |
@@ -209,14 +212,6 @@ static inline struct vport *vport_from_priv(void *priv) | |||
209 | void ovs_vport_receive(struct vport *, struct sk_buff *, | 212 | void ovs_vport_receive(struct vport *, struct sk_buff *, |
210 | struct ovs_tunnel_info *); | 213 | struct ovs_tunnel_info *); |
211 | 214 | ||
212 | /* List of statically compiled vport implementations. Don't forget to also | ||
213 | * add yours to the list at the top of vport.c. */ | ||
214 | extern const struct vport_ops ovs_netdev_vport_ops; | ||
215 | extern const struct vport_ops ovs_internal_vport_ops; | ||
216 | extern const struct vport_ops ovs_gre_vport_ops; | ||
217 | extern const struct vport_ops ovs_vxlan_vport_ops; | ||
218 | extern const struct vport_ops ovs_geneve_vport_ops; | ||
219 | |||
220 | static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb, | 215 | static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb, |
221 | const void *start, unsigned int len) | 216 | const void *start, unsigned int len) |
222 | { | 217 | { |
@@ -224,4 +219,7 @@ static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb, | |||
224 | skb->csum = csum_add(skb->csum, csum_partial(start, len, 0)); | 219 | skb->csum = csum_add(skb->csum, csum_partial(start, len, 0)); |
225 | } | 220 | } |
226 | 221 | ||
222 | int ovs_vport_ops_register(struct vport_ops *ops); | ||
223 | void ovs_vport_ops_unregister(struct vport_ops *ops); | ||
224 | |||
227 | #endif /* vport.h */ | 225 | #endif /* vport.h */ |
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 34229ee7f379..0697eda5aed8 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -417,7 +417,7 @@ static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos) | |||
417 | 417 | ||
418 | if (*pos == 0) | 418 | if (*pos == 0) |
419 | seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX " | 419 | seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX " |
420 | "REM_ADDR_RTX START\n"); | 420 | "REM_ADDR_RTX START STATE\n"); |
421 | 421 | ||
422 | return (void *)pos; | 422 | return (void *)pos; |
423 | } | 423 | } |
@@ -490,14 +490,20 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) | |||
490 | * Note: We don't have a way to tally this at the moment | 490 | * Note: We don't have a way to tally this at the moment |
491 | * so lets just leave it as zero for the moment | 491 | * so lets just leave it as zero for the moment |
492 | */ | 492 | */ |
493 | seq_printf(seq, "0 "); | 493 | seq_puts(seq, "0 "); |
494 | 494 | ||
495 | /* | 495 | /* |
496 | * remote address start time (START). This is also not | 496 | * remote address start time (START). This is also not |
497 | * currently implemented, but we can record it with a | 497 | * currently implemented, but we can record it with a |
498 | * jiffies marker in a subsequent patch | 498 | * jiffies marker in a subsequent patch |
499 | */ | 499 | */ |
500 | seq_printf(seq, "0"); | 500 | seq_puts(seq, "0 "); |
501 | |||
502 | /* | ||
503 | * The current state of this destination. I.e. | ||
504 | * SCTP_ACTIVE, SCTP_INACTIVE, ... | ||
505 | */ | ||
506 | seq_printf(seq, "%d", tsp->state); | ||
501 | 507 | ||
502 | seq_printf(seq, "\n"); | 508 | seq_printf(seq, "\n"); |
503 | } | 509 | } |
diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 74745a47d72a..ec18076e81ec 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c | |||
@@ -91,7 +91,7 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, | |||
91 | * @*headbuf: in: NULL for first frag, otherwise value returned from prev call | 91 | * @*headbuf: in: NULL for first frag, otherwise value returned from prev call |
92 | * out: set when successful non-complete reassembly, otherwise NULL | 92 | * out: set when successful non-complete reassembly, otherwise NULL |
93 | * @*buf: in: the buffer to append. Always defined | 93 | * @*buf: in: the buffer to append. Always defined |
94 | * out: head buf after sucessful complete reassembly, otherwise NULL | 94 | * out: head buf after successful complete reassembly, otherwise NULL |
95 | * Returns 1 when reassembly complete, otherwise 0 | 95 | * Returns 1 when reassembly complete, otherwise 0 |
96 | */ | 96 | */ |
97 | int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) | 97 | int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) |
@@ -311,7 +311,7 @@ bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu) | |||
311 | * @mtu: max allowable size for the bundle buffer, inclusive header | 311 | * @mtu: max allowable size for the bundle buffer, inclusive header |
312 | * @dnode: destination node for message. (Not always present in header) | 312 | * @dnode: destination node for message. (Not always present in header) |
313 | * Replaces buffer if successful | 313 | * Replaces buffer if successful |
314 | * Returns true if sucess, otherwise false | 314 | * Returns true if success, otherwise false |
315 | */ | 315 | */ |
316 | bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode) | 316 | bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode) |
317 | { | 317 | { |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 51bddc236a15..ad8a1a1e2275 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1556,7 +1556,7 @@ static void tipc_data_ready(struct sock *sk) | |||
1556 | * @tsk: TIPC socket | 1556 | * @tsk: TIPC socket |
1557 | * @msg: message | 1557 | * @msg: message |
1558 | * | 1558 | * |
1559 | * Returns 0 (TIPC_OK) if everyting ok, -TIPC_ERR_NO_PORT otherwise | 1559 | * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise |
1560 | */ | 1560 | */ |
1561 | static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) | 1561 | static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) |
1562 | { | 1562 | { |
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c index eb4bec0ad8af..63402742345e 100644 --- a/samples/bpf/test_verifier.c +++ b/samples/bpf/test_verifier.c | |||
@@ -602,6 +602,45 @@ static struct bpf_test tests[] = { | |||
602 | }, | 602 | }, |
603 | .result = ACCEPT, | 603 | .result = ACCEPT, |
604 | }, | 604 | }, |
605 | { | ||
606 | "jump test 5", | ||
607 | .insns = { | ||
608 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
609 | BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), | ||
610 | BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), | ||
611 | BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), | ||
612 | BPF_JMP_IMM(BPF_JA, 0, 0, 2), | ||
613 | BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), | ||
614 | BPF_JMP_IMM(BPF_JA, 0, 0, 0), | ||
615 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
616 | BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), | ||
617 | BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), | ||
618 | BPF_JMP_IMM(BPF_JA, 0, 0, 2), | ||
619 | BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), | ||
620 | BPF_JMP_IMM(BPF_JA, 0, 0, 0), | ||
621 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
622 | BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), | ||
623 | BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), | ||
624 | BPF_JMP_IMM(BPF_JA, 0, 0, 2), | ||
625 | BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), | ||
626 | BPF_JMP_IMM(BPF_JA, 0, 0, 0), | ||
627 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
628 | BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), | ||
629 | BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), | ||
630 | BPF_JMP_IMM(BPF_JA, 0, 0, 2), | ||
631 | BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), | ||
632 | BPF_JMP_IMM(BPF_JA, 0, 0, 0), | ||
633 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
634 | BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), | ||
635 | BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), | ||
636 | BPF_JMP_IMM(BPF_JA, 0, 0, 2), | ||
637 | BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), | ||
638 | BPF_JMP_IMM(BPF_JA, 0, 0, 0), | ||
639 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
640 | BPF_EXIT_INSN(), | ||
641 | }, | ||
642 | .result = ACCEPT, | ||
643 | }, | ||
605 | }; | 644 | }; |
606 | 645 | ||
607 | static int probe_filter_length(struct bpf_insn *fp) | 646 | static int probe_filter_length(struct bpf_insn *fp) |
@@ -630,7 +669,7 @@ static int create_map(void) | |||
630 | 669 | ||
631 | static int test(void) | 670 | static int test(void) |
632 | { | 671 | { |
633 | int prog_fd, i; | 672 | int prog_fd, i, pass_cnt = 0, err_cnt = 0; |
634 | 673 | ||
635 | for (i = 0; i < ARRAY_SIZE(tests); i++) { | 674 | for (i = 0; i < ARRAY_SIZE(tests); i++) { |
636 | struct bpf_insn *prog = tests[i].insns; | 675 | struct bpf_insn *prog = tests[i].insns; |
@@ -657,21 +696,25 @@ static int test(void) | |||
657 | printf("FAIL\nfailed to load prog '%s'\n", | 696 | printf("FAIL\nfailed to load prog '%s'\n", |
658 | strerror(errno)); | 697 | strerror(errno)); |
659 | printf("%s", bpf_log_buf); | 698 | printf("%s", bpf_log_buf); |
699 | err_cnt++; | ||
660 | goto fail; | 700 | goto fail; |
661 | } | 701 | } |
662 | } else { | 702 | } else { |
663 | if (prog_fd >= 0) { | 703 | if (prog_fd >= 0) { |
664 | printf("FAIL\nunexpected success to load\n"); | 704 | printf("FAIL\nunexpected success to load\n"); |
665 | printf("%s", bpf_log_buf); | 705 | printf("%s", bpf_log_buf); |
706 | err_cnt++; | ||
666 | goto fail; | 707 | goto fail; |
667 | } | 708 | } |
668 | if (strstr(bpf_log_buf, tests[i].errstr) == 0) { | 709 | if (strstr(bpf_log_buf, tests[i].errstr) == 0) { |
669 | printf("FAIL\nunexpected error message: %s", | 710 | printf("FAIL\nunexpected error message: %s", |
670 | bpf_log_buf); | 711 | bpf_log_buf); |
712 | err_cnt++; | ||
671 | goto fail; | 713 | goto fail; |
672 | } | 714 | } |
673 | } | 715 | } |
674 | 716 | ||
717 | pass_cnt++; | ||
675 | printf("OK\n"); | 718 | printf("OK\n"); |
676 | fail: | 719 | fail: |
677 | if (map_fd >= 0) | 720 | if (map_fd >= 0) |
@@ -679,6 +722,7 @@ fail: | |||
679 | close(prog_fd); | 722 | close(prog_fd); |
680 | 723 | ||
681 | } | 724 | } |
725 | printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt); | ||
682 | 726 | ||
683 | return 0; | 727 | return 0; |
684 | } | 728 | } |