author     Linus Torvalds <torvalds@linux-foundation.org>    2014-10-11 21:19:00 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-10-11 21:19:00 -0400
commit     ca321885b0511a85e2d1cd40caafedbeb18f4af6
tree       0042e8674aff7ae5785db467836d8d4101906f70
parent     052db7ec86dff26f734031c3ef5c2c03a94af0af
parent     01d2d484e49e9bc0ed9b5fdaf345a0e2bf35ffed
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
"This set fixes a bunch of fallout from the changes that went in during
this merge window, particularly:
- Fix fsl_pq_mdio (Claudiu Manoil) and fm10k (Pranith Kumar) build
failures.
- Several networking drivers do atomic_set() on page counts where
that's not exactly legal. From Eric Dumazet.
- Make __skb_flow_get_ports() work cleanly with unaligned data, from
Alexander Duyck.
- Fix some kernel-doc buglets in rfkill and netlabel, from Fabian
Frederick.
- Unbalanced enable_irq_wake usage in bcmgenet and systemport
drivers, from Florian Fainelli.
- pxa168_eth needs to depend on HAS_DMA, from Geert Uytterhoeven.
- Multi-dequeue in the qdisc layer severely bypasses the fairness
  limits the previous code used to enforce; reintroduce them in a way
  that doesn't compromise bulk dequeue opportunities. From Jesper
  Dangaard Brouer.
- macvlan receive path unnecessarily hops through a softirq by using
netif_rx() instead of netif_receive_skb(). From Jason Baron"
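A note on the page->_count item above, since the same pattern repeats across the mlx4, ixgbe, igb and fm10k fixes in this pull: resetting a live page's reference count with atomic_set() can race with another CPU that is taking or dropping a reference at the same moment, so the count has to be adjusted with an atomic add instead. The sketch below only illustrates that race class; it is not code from any of these drivers, and the helper name and 'extra' bookkeeping are hypothetical.

#include <linux/mm.h>

/* Hypothetical recycle helper for a driver-owned RX page. */
static void rx_page_add_refs(struct page *page, int extra)
{
	/*
	 * Racy: atomic_set(&page->_count, n) silently discards a reference
	 * another CPU may have taken between our read and this store.
	 */
	/* atomic_set(&page->_count, page_count(page) + extra); */

	/* Safer: only publish the additional references we want. */
	atomic_add(extra, &page->_count);
}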
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (51 commits)
net: systemport: avoid unbalanced enable_irq_wake calls
net: bcmgenet: avoid unbalanced enable_irq_wake calls
net: bcmgenet: fix off-by-one in incrementing read pointer
net: fix races in page->_count manipulation
mlx4: fix race accessing page->_count
ixgbe: fix race accessing page->_count
igb: fix race accessing page->_count
fm10k: fix race accessing page->_count
net/phy: micrel: Add clock support for KSZ8021/KSZ8031
flow-dissector: Fix alignment issue in __skb_flow_get_ports
net: filter: fix the comments
Documentation: replace __sk_run_filter with __bpf_prog_run
macvlan: optimize the receive path
macvlan: pass 'bool' type to macvlan_count_rx()
drivers: net: xgene: Add 10GbE ethtool support
drivers: net: xgene: Add 10GbE support
drivers: net: xgene: Preparing for adding 10GbE support
dtb: Add 10GbE node to APM X-Gene SoC device tree
Documentation: dts: Update section header for APM X-Gene
MAINTAINERS: Update APM X-Gene section
...
62 files changed, 1017 insertions(+), 447 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
index ebcad25efd0a..cfcc52705ed8 100644
--- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
+++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@ -3,7 +3,7 @@ APM X-Gene SoC Ethernet nodes
 Ethernet nodes are defined to describe on-chip ethernet interfaces in
 APM X-Gene SoC.
 
-Required properties:
+Required properties for all the ethernet interfaces:
 - compatible: Should be "apm,xgene-enet"
 - reg: Address and length of the register set for the device. It contains the
   information of registers in the same order as described by reg-names
@@ -15,6 +15,8 @@ Required properties:
 - clocks: Reference to the clock entry.
 - local-mac-address: MAC address assigned to this device
 - phy-connection-type: Interface type between ethernet device and PHY device
+
+Required properties for ethernet interfaces that have external PHY:
 - phy-handle: Reference to a PHY node connected to this device
 
 - mdio: Device tree subnode with the following required properties:
diff --git a/Documentation/devicetree/bindings/net/micrel.txt b/Documentation/devicetree/bindings/net/micrel.txt
index 98a3e61f9ee8..e1d99b95c4ec 100644
--- a/Documentation/devicetree/bindings/net/micrel.txt
+++ b/Documentation/devicetree/bindings/net/micrel.txt
@@ -16,3 +16,9 @@ Optional properties:
 	KSZ8051: register 0x1f, bits 5..4
 
 See the respective PHY datasheet for the mode values.
+
+ - clocks, clock-names: contains clocks according to the common clock bindings.
+
+	supported clocks:
+	- KSZ8021, KSZ8031: "rmii-ref": The RMII refence input clock. Used
+	  to determine the XI input clock.
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index b1935f9ce081..58d08f8d8d80 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -700,11 +700,11 @@ Some core changes of the new internal format:
     bpf_exit
 
 If f2 is JITed and the pointer stored to '_f2'. The calls f1 -> f2 -> f3 and
-returns will be seamless. Without JIT, __sk_run_filter() interpreter needs to
+returns will be seamless. Without JIT, __bpf_prog_run() interpreter needs to
 be used to call into f2.
 
 For practical reasons all eBPF programs have only one argument 'ctx' which is
-already placed into R1 (e.g. on __sk_run_filter() startup) and the programs
+already placed into R1 (e.g. on __bpf_prog_run() startup) and the programs
 can call kernel functions with up to 5 arguments. Calls with 6 or more arguments
 are currently not supported, but these restrictions can be lifted if necessary
 in the future.
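The filter.txt excerpt above describes the eBPF calling convention in prose; a tiny, purely illustrative program in the instruction-macro style that filter.txt itself uses makes it concrete. The macro and register names below are assumed from the include/linux/filter.h of this era and are not something this patch adds:

#include <linux/filter.h>

/*
 * On entry (whether via the __bpf_prog_run() interpreter or a JIT image)
 * R1 already holds 'ctx'; whatever is in R0 at BPF_EXIT is the return value.
 */
static const struct bpf_insn example_prog[] = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),	/* stash ctx in callee-saved R6 */
	BPF_MOV64_IMM(BPF_REG_0, 0),		/* R0 = 0 */
	BPF_EXIT_INSN(),			/* return R0 */
};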
diff --git a/MAINTAINERS b/MAINTAINERS
index 1e53b32fa07b..d6964389f028 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -734,7 +734,6 @@ F: net/appletalk/
 APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER
 M: Iyappan Subramanian <isubramanian@apm.com>
 M: Keyur Chudgar <kchudgar@apm.com>
-M: Ravi Patel <rapatel@apm.com>
 S: Supported
 F: drivers/net/ethernet/apm/xgene/
 F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt
diff --git a/arch/arm64/boot/dts/apm-mustang.dts b/arch/arm64/boot/dts/apm-mustang.dts
index f64900052f4e..8eb6d94c7851 100644
--- a/arch/arm64/boot/dts/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm-mustang.dts
@@ -40,3 +40,7 @@
 &menet {
 	status = "ok";
 };
+
+&xgenet {
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index 4f6d04d52cca..87d3205e98d5 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -176,6 +176,16 @@
 				clock-output-names = "menetclk";
 			};
 
+			xge0clk: xge0clk@1f61c000 {
+				compatible = "apm,xgene-device-clock";
+				#clock-cells = <1>;
+				clocks = <&socplldiv2 0>;
+				reg = <0x0 0x1f61c000 0x0 0x1000>;
+				reg-names = "csr-reg";
+				csr-mask = <0x3>;
+				clock-output-names = "xge0clk";
+			};
+
 			sataphy1clk: sataphy1clk@1f21c000 {
 				compatible = "apm,xgene-device-clock";
 				#clock-cells = <1>;
@@ -585,7 +595,8 @@
 			interrupts = <0x0 0x3c 0x4>;
 			dma-coherent;
 			clocks = <&menetclk 0>;
-			local-mac-address = [00 01 73 00 00 01];
+			/* mac address will be overwritten by the bootloader */
+			local-mac-address = [00 00 00 00 00 00];
 			phy-connection-type = "rgmii";
 			phy-handle = <&menetphy>;
 			mdio {
@@ -600,12 +611,26 @@
 			};
 		};
 
+		xgenet: ethernet@1f610000 {
+			compatible = "apm,xgene-enet";
+			status = "disabled";
+			reg = <0x0 0x1f610000 0x0 0xd100>,
+			      <0x0 0x1f600000 0x0 0X400>,
+			      <0x0 0x18000000 0x0 0X200>;
+			reg-names = "enet_csr", "ring_csr", "ring_cmd";
+			interrupts = <0x0 0x60 0x4>;
+			dma-coherent;
+			clocks = <&xge0clk 0>;
+			/* mac address will be overwritten by the bootloader */
+			local-mac-address = [00 00 00 00 00 00];
+			phy-connection-type = "xgmii";
+		};
+
 		rng: rng@10520000 {
 			compatible = "apm,xgene-rng";
 			reg = <0x0 0x10520000 0x0 0x100>;
 			interrupts = <0x0 0x41 0x4>;
 			clocks = <&rngpkaclk 0>;
 		};
-
 	};
 };
diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile
index c643e8a0a0dc..589b35247713 100644
--- a/drivers/net/ethernet/apm/xgene/Makefile
+++ b/drivers/net/ethernet/apm/xgene/Makefile
@@ -2,5 +2,6 @@
 # Makefile for APM X-Gene Ethernet Driver.
 #
 
-xgene-enet-objs := xgene_enet_hw.o xgene_enet_main.o xgene_enet_ethtool.o
+xgene-enet-objs := xgene_enet_hw.o xgene_enet_xgmac.o \
+		   xgene_enet_main.o xgene_enet_ethtool.o
 obj-$(CONFIG_NET_XGENE) += xgene-enet.o
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index 63f2aa54a594..c1c997b92342 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -59,10 +59,22 @@ static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct phy_device *phydev = pdata->phy_dev;
 
-	if (phydev == NULL)
-		return -ENODEV;
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
+		if (phydev == NULL)
+			return -ENODEV;
 
-	return phy_ethtool_gset(phydev, cmd);
+		return phy_ethtool_gset(phydev, cmd);
+	}
+
+	cmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
+	cmd->advertising = cmd->supported;
+	ethtool_cmd_speed_set(cmd, SPEED_10000);
+	cmd->duplex = DUPLEX_FULL;
+	cmd->port = PORT_FIBRE;
+	cmd->transceiver = XCVR_EXTERNAL;
+	cmd->autoneg = AUTONEG_DISABLE;
+
+	return 0;
 }
 
 static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
@@ -70,10 +82,14 @@ static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct phy_device *phydev = pdata->phy_dev;
 
-	if (phydev == NULL)
-		return -ENODEV;
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
+		if (phydev == NULL)
+			return -ENODEV;
+
+		return phy_ethtool_sset(phydev, cmd);
+	}
 
-	return phy_ethtool_sset(phydev, cmd);
+	return -EINVAL;
 }
 
 static void xgene_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 812d8d65159b..c8f3824f7606 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -402,7 +402,7 @@ static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata,
 	return data;
 }
 
-void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
 {
 	u32 addr0, addr1;
 	u8 *dev_addr = pdata->ndev->dev_addr;
@@ -436,13 +436,13 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
 	return 0;
 }
 
-void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
 {
 	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
 	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
 }
 
-void xgene_gmac_init(struct xgene_enet_pdata *pdata, int speed)
+static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
 {
 	u32 value, mc2;
 	u32 intf_ctl, rgmii;
@@ -456,7 +456,7 @@ void xgene_gmac_init(struct xgene_enet_pdata *pdata, int speed)
 	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
 	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);
 
-	switch (speed) {
+	switch (pdata->phy_speed) {
 	case SPEED_10:
 		ENET_INTERFACE_MODE2_SET(&mc2, 1);
 		CFG_MACMODE_SET(&icm0, 0);
@@ -525,8 +525,8 @@ static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
 }
 
-void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
-			   u32 dst_ring_num, u16 bufpool_id)
+static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
+				  u32 dst_ring_num, u16 bufpool_id)
 {
 	u32 cb;
 	u32 fpsel;
@@ -544,7 +544,7 @@ void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
 	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
 }
 
-void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
 {
 	u32 data;
 
@@ -552,7 +552,7 @@ void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
 }
 
-void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
 {
 	u32 data;
 
@@ -560,7 +560,7 @@ void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
 }
 
-void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
 {
 	u32 data;
 
@@ -568,7 +568,7 @@ void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
 }
 
-void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
 {
 	u32 data;
 
@@ -576,7 +576,7 @@ void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
 }
 
-void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
 {
 	u32 val;
 
@@ -593,7 +593,7 @@ void xgene_enet_reset(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
 }
 
-void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
+static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
 {
 	clk_disable_unprepare(pdata->clk);
 }
@@ -627,10 +627,10 @@ static void xgene_enet_adjust_link(struct net_device *ndev)
 
 	if (phydev->link) {
 		if (pdata->phy_speed != phydev->speed) {
-			xgene_gmac_init(pdata, phydev->speed);
+			pdata->phy_speed = phydev->speed;
+			xgene_gmac_init(pdata);
 			xgene_gmac_rx_enable(pdata);
 			xgene_gmac_tx_enable(pdata);
-			pdata->phy_speed = phydev->speed;
 			phy_print_status(phydev);
 		}
 	} else {
@@ -726,3 +726,19 @@ void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
 	mdiobus_free(pdata->mdio_bus);
 	pdata->mdio_bus = NULL;
 }
+
+struct xgene_mac_ops xgene_gmac_ops = {
+	.init = xgene_gmac_init,
+	.reset = xgene_gmac_reset,
+	.rx_enable = xgene_gmac_rx_enable,
+	.tx_enable = xgene_gmac_tx_enable,
+	.rx_disable = xgene_gmac_rx_disable,
+	.tx_disable = xgene_gmac_tx_disable,
+	.set_mac_addr = xgene_gmac_set_mac_addr,
+};
+
+struct xgene_port_ops xgene_gport_ops = {
+	.reset = xgene_enet_reset,
+	.cle_bypass = xgene_enet_cle_bypass,
+	.shutdown = xgene_gport_shutdown,
+};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 371e7a5b2507..15ec4267779c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -42,6 +42,11 @@ static inline u32 xgene_get_bits(u32 val, u32 start, u32 end)
 	return (val & GENMASK(end, start)) >> start;
 }
 
+enum xgene_enet_rm {
+	RM0,
+	RM3 = 3
+};
+
 #define CSR_RING_ID		0x0008
 #define OVERWRITE		BIT(31)
 #define IS_BUFFER_POOL		BIT(20)
@@ -52,7 +57,6 @@ static inline u32 xgene_get_bits(u32 val, u32 start, u32 end)
 #define CSR_RING_WR_BASE	0x0070
 #define NUM_RING_CONFIG		5
 #define BUFPOOL_MODE		3
-#define RM3			3
 #define INC_DEC_CMD_ADDR	0x002c
 #define UDP_HDR_SIZE		2
 #define BUF_LEN_CODE_2K		0x5000
@@ -94,11 +98,9 @@ static inline u32 xgene_get_bits(u32 val, u32 start, u32 end)
 
 #define BLOCK_ETH_CSR_OFFSET		0x2000
 #define BLOCK_ETH_RING_IF_OFFSET	0x9000
-#define BLOCK_ETH_CLKRST_CSR_OFFSET	0xC000
 #define BLOCK_ETH_DIAG_CSR_OFFSET	0xD000
 
 #define BLOCK_ETH_MAC_OFFSET		0x0000
-#define BLOCK_ETH_STATS_OFFSET		0x0014
 #define BLOCK_ETH_MAC_CSR_OFFSET	0x2800
 
 #define MAC_ADDR_REG_OFFSET		0x00
@@ -107,12 +109,6 @@ static inline u32 xgene_get_bits(u32 val, u32 start, u32 end)
 #define MAC_READ_REG_OFFSET		0x0c
 #define MAC_COMMAND_DONE_REG_OFFSET	0x10
 
-#define STAT_ADDR_REG_OFFSET		0x00
-#define STAT_COMMAND_REG_OFFSET	0x04
-#define STAT_WRITE_REG_OFFSET		0x08
-#define STAT_READ_REG_OFFSET		0x0c
-#define STAT_COMMAND_DONE_REG_OFFSET	0x10
-
 #define MII_MGMT_CONFIG_ADDR		0x20
 #define MII_MGMT_COMMAND_ADDR		0x24
 #define MII_MGMT_ADDRESS_ADDR		0x28
@@ -318,20 +314,10 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
 			    struct xgene_enet_pdata *pdata,
 			    enum xgene_enet_err_code status);
 
-void xgene_enet_reset(struct xgene_enet_pdata *priv);
-void xgene_gmac_reset(struct xgene_enet_pdata *priv);
-void xgene_gmac_init(struct xgene_enet_pdata *priv, int speed);
-void xgene_gmac_tx_enable(struct xgene_enet_pdata *priv);
-void xgene_gmac_rx_enable(struct xgene_enet_pdata *priv);
-void xgene_gmac_tx_disable(struct xgene_enet_pdata *priv);
-void xgene_gmac_rx_disable(struct xgene_enet_pdata *priv);
-void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata);
-void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
-			   u32 dst_ring_num, u16 bufpool_id);
-void xgene_gport_shutdown(struct xgene_enet_pdata *priv);
-void xgene_gmac_get_tx_stats(struct xgene_enet_pdata *pdata);
-
 int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
 void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
 
+extern struct xgene_mac_ops xgene_gmac_ops;
+extern struct xgene_port_ops xgene_gport_ops;
+
 #endif /* __XGENE_ENET_HW_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index e4222af2baa6..9b85239ceedf 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -21,6 +21,7 @@
 
 #include "xgene_enet_main.h"
 #include "xgene_enet_hw.h"
+#include "xgene_enet_xgmac.h"
 
 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
 {
@@ -390,7 +391,7 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
 		}
 	}
 
-	return budget;
+	return count;
 }
 
 static int xgene_enet_napi(struct napi_struct *napi, const int budget)
@@ -413,7 +414,7 @@ static void xgene_enet_timeout(struct net_device *ndev)
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 
-	xgene_gmac_reset(pdata);
+	pdata->mac_ops->reset(pdata);
 }
 
 static int xgene_enet_register_irq(struct net_device *ndev)
@@ -445,18 +446,21 @@ static void xgene_enet_free_irq(struct net_device *ndev)
 static int xgene_enet_open(struct net_device *ndev)
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct xgene_mac_ops *mac_ops = pdata->mac_ops;
 	int ret;
 
-	xgene_gmac_tx_enable(pdata);
-	xgene_gmac_rx_enable(pdata);
+	mac_ops->tx_enable(pdata);
+	mac_ops->rx_enable(pdata);
 
 	ret = xgene_enet_register_irq(ndev);
 	if (ret)
 		return ret;
 	napi_enable(&pdata->rx_ring->napi);
 
-	if (pdata->phy_dev)
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
 		phy_start(pdata->phy_dev);
+	else
+		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
 
 	netif_start_queue(ndev);
 
@@ -466,18 +470,21 @@ static int xgene_enet_open(struct net_device *ndev)
 static int xgene_enet_close(struct net_device *ndev)
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct xgene_mac_ops *mac_ops = pdata->mac_ops;
 
 	netif_stop_queue(ndev);
 
-	if (pdata->phy_dev)
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
 		phy_stop(pdata->phy_dev);
+	else
+		cancel_delayed_work_sync(&pdata->link_work);
 
 	napi_disable(&pdata->rx_ring->napi);
 	xgene_enet_free_irq(ndev);
 	xgene_enet_process_ring(pdata->rx_ring, -1);
 
-	xgene_gmac_tx_disable(pdata);
-	xgene_gmac_rx_disable(pdata);
+	mac_ops->tx_disable(pdata);
+	mac_ops->rx_disable(pdata);
 
 	return 0;
 }
@@ -613,7 +620,6 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
 
 	ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
 	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
-	pdata->rm = RM3;
 	ring = xgene_enet_setup_ring(ring);
 	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
 		   ring->num, ring->size, ring->id, ring->slots);
@@ -724,7 +730,7 @@ static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
 	ret = eth_mac_addr(ndev, addr);
 	if (ret)
 		return ret;
-	xgene_gmac_set_mac_addr(pdata);
+	pdata->mac_ops->set_mac_addr(pdata);
 
 	return ret;
 }
@@ -803,8 +809,13 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 
 	pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node);
 	if (pdata->phy_mode < 0) {
-		dev_err(dev, "Incorrect phy-connection-type in DTS\n");
-		return -EINVAL;
+		dev_err(dev, "Unable to get phy-connection-type\n");
+		return pdata->phy_mode;
+	}
+	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
+	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
+		dev_err(dev, "Incorrect phy-connection-type specified\n");
+		return -ENODEV;
 	}
 
 	pdata->clk = devm_clk_get(&pdev->dev, NULL);
@@ -819,12 +830,18 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
 	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
 	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
-	pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
-	pdata->mcx_stats_addr = base_addr + BLOCK_ETH_STATS_OFFSET;
-	pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
+		pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
+		pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
+		pdata->rm = RM3;
+	} else {
+		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
+		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
+		pdata->rm = RM0;
+	}
 	pdata->rx_buff_cnt = NUM_PKT_BUF;
 
-	return ret;
+	return 0;
 }
 
 static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
@@ -834,8 +851,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
 	u16 dst_ring_num;
 	int ret;
 
-	xgene_gmac_tx_disable(pdata);
-	xgene_gmac_rx_disable(pdata);
+	pdata->port_ops->reset(pdata);
 
 	ret = xgene_enet_create_desc_rings(ndev);
 	if (ret) {
@@ -853,11 +869,26 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
 	}
 
 	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
-	xgene_enet_cle_bypass(pdata, dst_ring_num, buf_pool->id);
+	pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
+	pdata->mac_ops->init(pdata);
 
 	return ret;
 }
 
+static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
+{
+	switch (pdata->phy_mode) {
+	case PHY_INTERFACE_MODE_RGMII:
+		pdata->mac_ops = &xgene_gmac_ops;
+		pdata->port_ops = &xgene_gport_ops;
+		break;
+	default:
+		pdata->mac_ops = &xgene_xgmac_ops;
+		pdata->port_ops = &xgene_xgport_ops;
+		break;
+	}
+}
+
 static int xgene_enet_probe(struct platform_device *pdev)
 {
 	struct net_device *ndev;
@@ -886,8 +917,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
 	if (ret)
 		goto err;
 
-	xgene_enet_reset(pdata);
-	xgene_gmac_init(pdata, SPEED_1000);
+	xgene_enet_setup_ops(pdata);
 
 	ret = register_netdev(ndev);
 	if (ret) {
@@ -907,7 +937,10 @@ static int xgene_enet_probe(struct platform_device *pdev)
 
 	napi = &pdata->rx_ring->napi;
 	netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
-	ret = xgene_enet_mdio_config(pdata);
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+		ret = xgene_enet_mdio_config(pdata);
+	else
+		INIT_DELAYED_WORK(&pdata->link_work, xgene_enet_link_state);
 
 	return ret;
 err:
@@ -918,19 +951,21 @@ err:
 static int xgene_enet_remove(struct platform_device *pdev)
 {
 	struct xgene_enet_pdata *pdata;
+	struct xgene_mac_ops *mac_ops;
 	struct net_device *ndev;
 
 	pdata = platform_get_drvdata(pdev);
+	mac_ops = pdata->mac_ops;
 	ndev = pdata->ndev;
 
-	xgene_gmac_rx_disable(pdata);
-	xgene_gmac_tx_disable(pdata);
+	mac_ops->rx_disable(pdata);
+	mac_ops->tx_disable(pdata);
 
 	netif_napi_del(&pdata->rx_ring->napi);
 	xgene_enet_mdio_remove(pdata);
 	xgene_enet_delete_desc_rings(pdata);
 	unregister_netdev(ndev);
-	xgene_gport_shutdown(pdata);
+	pdata->port_ops->shutdown(pdata);
 	free_netdev(ndev);
 
 	return 0;
@@ -956,5 +991,6 @@ module_platform_driver(xgene_enet_driver);
 
 MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
 MODULE_VERSION(XGENE_DRV_VERSION);
+MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 0815866986b0..86cf68b65584 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -68,6 +68,23 @@ struct xgene_enet_desc_ring {
 	};
 };
 
+struct xgene_mac_ops {
+	void (*init)(struct xgene_enet_pdata *pdata);
+	void (*reset)(struct xgene_enet_pdata *pdata);
+	void (*tx_enable)(struct xgene_enet_pdata *pdata);
+	void (*rx_enable)(struct xgene_enet_pdata *pdata);
+	void (*tx_disable)(struct xgene_enet_pdata *pdata);
+	void (*rx_disable)(struct xgene_enet_pdata *pdata);
+	void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
+};
+
+struct xgene_port_ops {
+	void (*reset)(struct xgene_enet_pdata *pdata);
+	void (*cle_bypass)(struct xgene_enet_pdata *pdata,
+			   u32 dst_ring_num, u16 bufpool_id);
+	void (*shutdown)(struct xgene_enet_pdata *pdata);
+};
+
 /* ethernet private data */
 struct xgene_enet_pdata {
 	struct net_device *ndev;
@@ -88,16 +105,17 @@ struct xgene_enet_pdata {
 	void __iomem *eth_ring_if_addr;
 	void __iomem *eth_diag_csr_addr;
 	void __iomem *mcx_mac_addr;
-	void __iomem *mcx_stats_addr;
 	void __iomem *mcx_mac_csr_addr;
 	void __iomem *base_addr;
 	void __iomem *ring_csr_addr;
 	void __iomem *ring_cmd_addr;
 	u32 phy_addr;
 	int phy_mode;
-	u32 speed;
-	u16 rm;
+	enum xgene_enet_rm rm;
 	struct rtnl_link_stats64 stats;
+	struct xgene_mac_ops *mac_ops;
+	struct xgene_port_ops *port_ops;
+	struct delayed_work link_work;
 };
 
 /* Set the specified value into a bit-field defined by its starting position
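The two ops structures added above are a plain function-pointer dispatch table: probe selects the RGMII (gmac) or XGMII (xgmac) implementation once, stores the pointers in pdata, and the generic open/close/timeout paths never branch on the PHY mode again. Below is a stripped-down, self-contained sketch of that design choice, using made-up names rather than the driver's own:

#include <stdio.h>

struct mac_ops {
	void (*init)(void *priv);
	void (*tx_enable)(void *priv);
};

static void gmac_init(void *priv)       { puts("1G MAC init"); }
static void gmac_tx_enable(void *priv)  { puts("1G MAC TX on"); }
static void xgmac_init(void *priv)      { puts("10G MAC init"); }
static void xgmac_tx_enable(void *priv) { puts("10G MAC TX on"); }

static const struct mac_ops gmac_ops  = { gmac_init,  gmac_tx_enable };
static const struct mac_ops xgmac_ops = { xgmac_init, xgmac_tx_enable };

int main(void)
{
	int is_xgmii = 1;	/* stand-in for the phy-connection-type check */
	const struct mac_ops *ops = is_xgmii ? &xgmac_ops : &gmac_ops;

	ops->init(NULL);	/* callers stay MAC-agnostic from here on */
	ops->tx_enable(NULL);
	return 0;
}

The same indirection is what lets xgene_enet_main.c call pdata->mac_ops->reset() from the timeout handler without knowing which MAC block sits underneath.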
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
new file mode 100644
index 000000000000..cd64b9f18b58
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -0,0 +1,331 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+#include "xgene_enet_xgmac.h"
+
+static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
+			      u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->eth_csr_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
+				  u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->eth_ring_if_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
+				   u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->eth_diag_csr_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
+				   void __iomem *cmd, void __iomem *cmd_done,
+				   u32 wr_addr, u32 wr_data)
+{
+	u32 done;
+	u8 wait = 10;
+
+	iowrite32(wr_addr, addr);
+	iowrite32(wr_data, wr);
+	iowrite32(XGENE_ENET_WR_CMD, cmd);
+
+	/* wait for write command to complete */
+	while (!(done = ioread32(cmd_done)) && wait--)
+		udelay(1);
+
+	if (!done)
+		return false;
+
+	iowrite32(0, cmd);
+
+	return true;
+}
+
+static void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata,
+			      u32 wr_addr, u32 wr_data)
+{
+	void __iomem *addr, *wr, *cmd, *cmd_done;
+
+	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
+	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
+	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
+	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+
+	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
+		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
+			   wr_addr);
+}
+
+static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
+			      u32 offset, u32 *val)
+{
+	void __iomem *addr = pdata->eth_csr_addr + offset;
+
+	*val = ioread32(addr);
+}
+
+static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
+				   u32 offset, u32 *val)
+{
+	void __iomem *addr = pdata->eth_diag_csr_addr + offset;
+
+	*val = ioread32(addr);
+}
+
+static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
+				   void __iomem *cmd, void __iomem *cmd_done,
+				   u32 rd_addr, u32 *rd_data)
+{
+	u32 done;
+	u8 wait = 10;
+
+	iowrite32(rd_addr, addr);
+	iowrite32(XGENE_ENET_RD_CMD, cmd);
+
+	/* wait for read command to complete */
+	while (!(done = ioread32(cmd_done)) && wait--)
+		udelay(1);
+
+	if (!done)
+		return false;
+
+	*rd_data = ioread32(rd);
+	iowrite32(0, cmd);
+
+	return true;
+}
+
+static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
+			      u32 rd_addr, u32 *rd_data)
+{
+	void __iomem *addr, *rd, *cmd, *cmd_done;
+
+	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
+	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
+	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
+	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+
+	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
+		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
+			   rd_addr);
+}
+
+static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
+{
+	struct net_device *ndev = pdata->ndev;
+	u32 data;
+	u8 wait = 10;
+
+	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
+	do {
+		usleep_range(100, 110);
+		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
+	} while ((data != 0xffffffff) && wait--);
+
+	if (data != 0xffffffff) {
+		netdev_err(ndev, "Failed to release memory from shutdown\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
+{
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, 0);
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, 0);
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, 0);
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, 0);
+}
+
+static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata)
+{
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, HSTMACRST);
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0);
+}
+
+static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
+{
+	u32 addr0, addr1;
+	u8 *dev_addr = pdata->ndev->dev_addr;
+
+	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
+		(dev_addr[1] << 8) | dev_addr[0];
+	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
+
+	xgene_enet_wr_mac(pdata, HSTMACADR_LSW_ADDR, addr0);
+	xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
+}
+
+static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_enet_rd_csr(pdata, XG_LINK_STATUS_ADDR, &data);
+
+	return data;
+}
+
+static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_xgmac_reset(pdata);
+
+	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+	data |= HSTPPEN;
+	data &= ~HSTLENCHK;
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
+
+	xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR, 0x06000600);
+	xgene_xgmac_set_mac_addr(pdata);
+
+	xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
+	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
+	xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data);
+
+	xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);
+	xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
+	xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data);
+	data |= BIT(12);
+	xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data);
+	xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82);
+}
+
+static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTRFEN);
+}
+
+static void xgene_xgmac_tx_enable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTTFEN);
+}
+
+static void xgene_xgmac_rx_disable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTRFEN);
+}
+
+static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
+}
+
+static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+{
+	clk_prepare_enable(pdata->clk);
+	clk_disable_unprepare(pdata->clk);
+	clk_prepare_enable(pdata->clk);
+
+	xgene_enet_ecc_init(pdata);
+	xgene_enet_config_ring_if_assoc(pdata);
+}
+
+static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
+				    u32 dst_ring_num, u16 bufpool_id)
+{
+	u32 cb, fpsel;
+
+	xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb);
+	cb |= CFG_CLE_BYPASS_EN0;
+	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
+	xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb);
+
+	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
+	xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb);
+	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
+	CFG_CLE_FPSEL0_SET(&cb, fpsel);
+	xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb);
+}
+
+static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
+{
+	clk_disable_unprepare(pdata->clk);
+}
+
+void xgene_enet_link_state(struct work_struct *work)
+{
+	struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work),
+					 struct xgene_enet_pdata, link_work);
+	struct net_device *ndev = pdata->ndev;
+	u32 link_status, poll_interval;
+
+	link_status = xgene_enet_link_status(pdata);
+	if (link_status) {
+		if (!netif_carrier_ok(ndev)) {
+			netif_carrier_on(ndev);
+			xgene_xgmac_init(pdata);
+			xgene_xgmac_rx_enable(pdata);
+			xgene_xgmac_tx_enable(pdata);
+			netdev_info(ndev, "Link is Up - 10Gbps\n");
+		}
+		poll_interval = PHY_POLL_LINK_ON;
+	} else {
+		if (netif_carrier_ok(ndev)) {
+			xgene_xgmac_rx_disable(pdata);
+			xgene_xgmac_tx_disable(pdata);
+			netif_carrier_off(ndev);
+			netdev_info(ndev, "Link is Down\n");
+		}
+		poll_interval = PHY_POLL_LINK_OFF;
+	}
+
+	schedule_delayed_work(&pdata->link_work, poll_interval);
+}
+
+struct xgene_mac_ops xgene_xgmac_ops = {
+	.init = xgene_xgmac_init,
+	.reset = xgene_xgmac_reset,
+	.rx_enable = xgene_xgmac_rx_enable,
+	.tx_enable = xgene_xgmac_tx_enable,
+	.rx_disable = xgene_xgmac_rx_disable,
+	.tx_disable = xgene_xgmac_tx_disable,
+	.set_mac_addr = xgene_xgmac_set_mac_addr,
+};
+
+struct xgene_port_ops xgene_xgport_ops = {
+	.reset = xgene_enet_reset,
+	.cle_bypass = xgene_enet_xgcle_bypass,
+	.shutdown = xgene_enet_shutdown,
+};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
new file mode 100644
index 000000000000..d2d59e7ed9ab
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -0,0 +1,57 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_XGMAC_H__
+#define __XGENE_ENET_XGMAC_H__
+
+#define BLOCK_AXG_MAC_OFFSET		0x0800
+#define BLOCK_AXG_MAC_CSR_OFFSET	0x2000
+
+#define AXGMAC_CONFIG_0			0x0000
+#define AXGMAC_CONFIG_1			0x0004
+#define HSTMACRST			BIT(31)
+#define HSTTCTLEN			BIT(31)
+#define HSTTFEN				BIT(30)
+#define HSTRCTLEN			BIT(29)
+#define HSTRFEN				BIT(28)
+#define HSTPPEN				BIT(7)
+#define HSTDRPLT64			BIT(5)
+#define HSTLENCHK			BIT(3)
+#define HSTMACADR_LSW_ADDR		0x0010
+#define HSTMACADR_MSW_ADDR		0x0014
+#define HSTMAXFRAME_LENGTH_ADDR		0x0020
+
+#define XG_RSIF_CONFIG_REG_ADDR		0x00a0
+#define XCLE_BYPASS_REG0_ADDR		0x0160
+#define XCLE_BYPASS_REG1_ADDR		0x0164
+#define XG_CFG_BYPASS_ADDR		0x0204
+#define XG_LINK_STATUS_ADDR		0x0228
+#define XG_ENET_SPARE_CFG_REG_ADDR	0x040c
+#define XG_ENET_SPARE_CFG_REG_1_ADDR	0x0410
+#define XGENET_RX_DV_GATE_REG_0_ADDR	0x0804
+
+#define PHY_POLL_LINK_ON	(10 * HZ)
+#define PHY_POLL_LINK_OFF	(PHY_POLL_LINK_ON / 5)
+
+void xgene_enet_link_state(struct work_struct *work);
+extern struct xgene_mac_ops xgene_xgmac_ops;
+extern struct xgene_port_ops xgene_xgport_ops;
+
+#endif /* __XGENE_ENET_XGMAC_H__ */
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 075688188644..9ae36979bdee 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -436,7 +436,8 @@ static int bcm_sysport_set_wol(struct net_device *dev,
 	/* Flag the device and relevant IRQ as wakeup capable */
 	if (wol->wolopts) {
 		device_set_wakeup_enable(kdev, 1);
-		enable_irq_wake(priv->wol_irq);
+		if (priv->wol_irq_disabled)
+			enable_irq_wake(priv->wol_irq);
 		priv->wol_irq_disabled = 0;
 	} else {
 		device_set_wakeup_enable(kdev, 0);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index fff2634b6f34..fdc9ec09e453 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c | |||
@@ -1285,11 +1285,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |||
1285 | cb = &priv->rx_cbs[priv->rx_read_ptr]; | 1285 | cb = &priv->rx_cbs[priv->rx_read_ptr]; |
1286 | skb = cb->skb; | 1286 | skb = cb->skb; |
1287 | 1287 | ||
1288 | rxpktprocessed++; | ||
1289 | |||
1290 | priv->rx_read_ptr++; | ||
1291 | priv->rx_read_ptr &= (priv->num_rx_bds - 1); | ||
1292 | |||
1293 | /* We do not have a backing SKB, so we do not have a | 1288 | /* We do not have a backing SKB, so we do not have a |
1294 | * corresponding DMA mapping for this incoming packet since | 1289 | * corresponding DMA mapping for this incoming packet since |
1295 | * bcmgenet_rx_refill always either has both skb and mapping or | 1290 | * bcmgenet_rx_refill always either has both skb and mapping or |
@@ -1404,6 +1399,10 @@ refill: | |||
1404 | err = bcmgenet_rx_refill(priv, cb); | 1399 | err = bcmgenet_rx_refill(priv, cb); |
1405 | if (err) | 1400 | if (err) |
1406 | netif_err(priv, rx_err, dev, "Rx refill failed\n"); | 1401 | netif_err(priv, rx_err, dev, "Rx refill failed\n"); |
1402 | |||
1403 | rxpktprocessed++; | ||
1404 | priv->rx_read_ptr++; | ||
1405 | priv->rx_read_ptr &= (priv->num_rx_bds - 1); | ||
1407 | } | 1406 | } |
1408 | 1407 | ||
1409 | return rxpktprocessed; | 1408 | return rxpktprocessed; |
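The hunk above moves rxpktprocessed++ and the read-pointer advance from the top of the per-packet loop to just after the refill, so the ring index only moves once a slot has actually been handled. A minimal standalone sketch of the masked-increment idiom the driver relies on (num_rx_bds must be a power of two for the AND-mask wrap to equal a modulo); names here are illustrative, not taken from the driver:

#include <assert.h>
#include <stdio.h>

#define NUM_RX_BDS 256              /* must be a power of two */

static unsigned int rx_read_ptr;    /* current ring index */

/* Advance the ring index only after the slot has been fully processed. */
static void advance_read_ptr(void)
{
        rx_read_ptr++;
        rx_read_ptr &= (NUM_RX_BDS - 1);   /* cheap wrap, same as % NUM_RX_BDS */
}

int main(void)
{
        assert((NUM_RX_BDS & (NUM_RX_BDS - 1)) == 0);
        rx_read_ptr = NUM_RX_BDS - 1;
        advance_read_ptr();
        printf("wrapped to %u\n", rx_read_ptr);   /* prints 0 */
        return 0;
}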
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index b82b7e4e06b2..149a0d70c108 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | |||
@@ -86,7 +86,9 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
86 | /* Flag the device and relevant IRQ as wakeup capable */ | 86 | /* Flag the device and relevant IRQ as wakeup capable */ |
87 | if (wol->wolopts) { | 87 | if (wol->wolopts) { |
88 | device_set_wakeup_enable(kdev, 1); | 88 | device_set_wakeup_enable(kdev, 1); |
89 | enable_irq_wake(priv->wol_irq); | 89 | /* Avoid unbalanced enable_irq_wake calls */ |
90 | if (priv->wol_irq_disabled) | ||
91 | enable_irq_wake(priv->wol_irq); | ||
90 | priv->wol_irq_disabled = false; | 92 | priv->wol_irq_disabled = false; |
91 | } else { | 93 | } else { |
92 | device_set_wakeup_enable(kdev, 0); | 94 | device_set_wakeup_enable(kdev, 0); |
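Both WoL paths above (bcmsysport and bcmgenet) guard enable_irq_wake() with a wol_irq_disabled flag so that repeated WoL enables cannot push the IRQ wake reference count up more than once. A standalone sketch of the same balancing idea; the enable/disable stubs and the symmetric disable branch are stand-ins of mine, not taken from the patch:

#include <stdbool.h>
#include <stdio.h>

static int wake_refcount;                 /* models the IRQ core's wake depth */
static bool wol_irq_disabled = true;      /* true until wake has been enabled */

static void enable_irq_wake_stub(void)  { wake_refcount++; }
static void disable_irq_wake_stub(void) { wake_refcount--; }

static void set_wol(bool enable)
{
        if (enable) {
                /* take the wake reference once, however often WoL is set */
                if (wol_irq_disabled)
                        enable_irq_wake_stub();
                wol_irq_disabled = false;
        } else {
                if (!wol_irq_disabled)
                        disable_irq_wake_stub();
                wol_irq_disabled = true;
        }
}

int main(void)
{
        set_wol(true);
        set_wol(true);    /* second call must not bump the count again */
        set_wol(false);
        printf("wake refcount = %d\n", wake_refcount);   /* balanced: 0 */
        return 0;
}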
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 9b2c669b6522..410ed5805a9a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
@@ -968,7 +968,7 @@ void t4_intr_enable(struct adapter *adapter); | |||
968 | void t4_intr_disable(struct adapter *adapter); | 968 | void t4_intr_disable(struct adapter *adapter); |
969 | int t4_slow_intr_handler(struct adapter *adapter); | 969 | int t4_slow_intr_handler(struct adapter *adapter); |
970 | 970 | ||
971 | int t4_wait_dev_ready(struct adapter *adap); | 971 | int t4_wait_dev_ready(void __iomem *regs); |
972 | int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, | 972 | int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, |
973 | struct link_config *lc); | 973 | struct link_config *lc); |
974 | int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); | 974 | int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 321f3d9385c9..5b38e955af6e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -6137,7 +6137,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) | |||
6137 | pci_save_state(pdev); | 6137 | pci_save_state(pdev); |
6138 | pci_cleanup_aer_uncorrect_error_status(pdev); | 6138 | pci_cleanup_aer_uncorrect_error_status(pdev); |
6139 | 6139 | ||
6140 | if (t4_wait_dev_ready(adap) < 0) | 6140 | if (t4_wait_dev_ready(adap->regs) < 0) |
6141 | return PCI_ERS_RESULT_DISCONNECT; | 6141 | return PCI_ERS_RESULT_DISCONNECT; |
6142 | if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0) | 6142 | if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0) |
6143 | return PCI_ERS_RESULT_DISCONNECT; | 6143 | return PCI_ERS_RESULT_DISCONNECT; |
@@ -6530,6 +6530,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6530 | goto out_disable_device; | 6530 | goto out_disable_device; |
6531 | } | 6531 | } |
6532 | 6532 | ||
6533 | err = t4_wait_dev_ready(regs); | ||
6534 | if (err < 0) | ||
6535 | goto out_unmap_bar0; | ||
6536 | |||
6533 | /* We control everything through one PF */ | 6537 | /* We control everything through one PF */ |
6534 | func = SOURCEPF_GET(readl(regs + PL_WHOAMI)); | 6538 | func = SOURCEPF_GET(readl(regs + PL_WHOAMI)); |
6535 | if (func != ent->driver_data) { | 6539 | if (func != ent->driver_data) { |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index fab4c84a1da4..5e1b314e11af 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
@@ -1123,7 +1123,10 @@ out_free: dev_kfree_skb_any(skb); | |||
1123 | lso->c.ipid_ofst = htons(0); | 1123 | lso->c.ipid_ofst = htons(0); |
1124 | lso->c.mss = htons(ssi->gso_size); | 1124 | lso->c.mss = htons(ssi->gso_size); |
1125 | lso->c.seqno_offset = htonl(0); | 1125 | lso->c.seqno_offset = htonl(0); |
1126 | lso->c.len = htonl(skb->len); | 1126 | if (is_t4(adap->params.chip)) |
1127 | lso->c.len = htonl(skb->len); | ||
1128 | else | ||
1129 | lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len)); | ||
1127 | cpl = (void *)(lso + 1); | 1130 | cpl = (void *)(lso + 1); |
1128 | cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | | 1131 | cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | |
1129 | TXPKT_IPHDR_LEN(l3hdr_len) | | 1132 | TXPKT_IPHDR_LEN(l3hdr_len) | |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 22d7581341a9..1fff1495fe31 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -3845,12 +3845,19 @@ static void init_link_config(struct link_config *lc, unsigned int caps) | |||
3845 | } | 3845 | } |
3846 | } | 3846 | } |
3847 | 3847 | ||
3848 | int t4_wait_dev_ready(struct adapter *adap) | 3848 | #define CIM_PF_NOACCESS 0xeeeeeeee |
3849 | |||
3850 | int t4_wait_dev_ready(void __iomem *regs) | ||
3849 | { | 3851 | { |
3850 | if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff) | 3852 | u32 whoami; |
3853 | |||
3854 | whoami = readl(regs + PL_WHOAMI); | ||
3855 | if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS) | ||
3851 | return 0; | 3856 | return 0; |
3857 | |||
3852 | msleep(500); | 3858 | msleep(500); |
3853 | return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO; | 3859 | whoami = readl(regs + PL_WHOAMI); |
3860 | return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO); | ||
3854 | } | 3861 | } |
3855 | 3862 | ||
3856 | struct flash_desc { | 3863 | struct flash_desc { |
@@ -3919,10 +3926,6 @@ int t4_prep_adapter(struct adapter *adapter) | |||
3919 | uint16_t device_id; | 3926 | uint16_t device_id; |
3920 | u32 pl_rev; | 3927 | u32 pl_rev; |
3921 | 3928 | ||
3922 | ret = t4_wait_dev_ready(adapter); | ||
3923 | if (ret < 0) | ||
3924 | return ret; | ||
3925 | |||
3926 | get_pci_mode(adapter, &adapter->params.pci); | 3929 | get_pci_mode(adapter, &adapter->params.pci); |
3927 | pl_rev = G_REV(t4_read_reg(adapter, PL_REV)); | 3930 | pl_rev = G_REV(t4_read_reg(adapter, PL_REV)); |
3928 | 3931 | ||
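t4_wait_dev_ready() now works from a bare __iomem pointer and treats both all-ones (the usual "device not responding" read value) and the CIM_PF_NOACCESS pattern 0xeeeeeeee as "not ready", which is what lets init_one() call it before the adapter structure exists. A rough userspace model of that read, sleep, retry-once shape; read_whoami() is a fake stand-in for readl(regs + PL_WHOAMI):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define NOT_READY_ALL_ONES 0xffffffffu
#define CIM_PF_NOACCESS    0xeeeeeeeeu

static int fake_reads;

/* Stand-in for the register read: not ready on the first poll only. */
static uint32_t read_whoami(void)
{
        return fake_reads++ ? 0x00000004u : NOT_READY_ALL_ONES;
}

static int wait_dev_ready(void)
{
        uint32_t whoami = read_whoami();

        if (whoami != NOT_READY_ALL_ONES && whoami != CIM_PF_NOACCESS)
                return 0;

        usleep(500 * 1000);              /* mirrors the msleep(500) above */
        whoami = read_whoami();
        return (whoami != NOT_READY_ALL_ONES &&
                whoami != CIM_PF_NOACCESS) ? 0 : -1;
}

int main(void)
{
        printf("ready: %s\n", wait_dev_ready() == 0 ? "yes" : "no");
        return 0;
}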
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 52e08103f221..5f4db2398c71 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | |||
@@ -527,6 +527,7 @@ struct cpl_tx_pkt_lso_core { | |||
527 | #define LSO_LAST_SLICE (1 << 22) | 527 | #define LSO_LAST_SLICE (1 << 22) |
528 | #define LSO_FIRST_SLICE (1 << 23) | 528 | #define LSO_FIRST_SLICE (1 << 23) |
529 | #define LSO_OPCODE(x) ((x) << 24) | 529 | #define LSO_OPCODE(x) ((x) << 24) |
530 | #define LSO_T5_XFER_SIZE(x) ((x) << 0) | ||
530 | __be16 ipid_ofst; | 531 | __be16 ipid_ofst; |
531 | __be16 mss; | 532 | __be16 mss; |
532 | __be32 seqno_offset; | 533 | __be32 seqno_offset; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index eee272883027..a1024db5dc13 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | |||
@@ -72,9 +72,8 @@ | |||
72 | #define PIDX_MASK 0x00003fffU | 72 | #define PIDX_MASK 0x00003fffU |
73 | #define PIDX_SHIFT 0 | 73 | #define PIDX_SHIFT 0 |
74 | #define PIDX(x) ((x) << PIDX_SHIFT) | 74 | #define PIDX(x) ((x) << PIDX_SHIFT) |
75 | #define S_PIDX_T5 0 | 75 | #define PIDX_SHIFT_T5 0 |
76 | #define M_PIDX_T5 0x1fffU | 76 | #define PIDX_T5(x) ((x) << PIDX_SHIFT_T5) |
77 | #define PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5) | ||
78 | 77 | ||
79 | 78 | ||
80 | #define SGE_TIMERREGS 6 | 79 | #define SGE_TIMERREGS 6 |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 8498a641b2e3..bfa398d91826 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | |||
@@ -163,15 +163,19 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) | |||
163 | netif_carrier_on(dev); | 163 | netif_carrier_on(dev); |
164 | 164 | ||
165 | switch (pi->link_cfg.speed) { | 165 | switch (pi->link_cfg.speed) { |
166 | case SPEED_10000: | 166 | case 40000: |
167 | s = "40Gbps"; | ||
168 | break; | ||
169 | |||
170 | case 10000: | ||
167 | s = "10Gbps"; | 171 | s = "10Gbps"; |
168 | break; | 172 | break; |
169 | 173 | ||
170 | case SPEED_1000: | 174 | case 1000: |
171 | s = "1000Mbps"; | 175 | s = "1000Mbps"; |
172 | break; | 176 | break; |
173 | 177 | ||
174 | case SPEED_100: | 178 | case 100: |
175 | s = "100Mbps"; | 179 | s = "100Mbps"; |
176 | break; | 180 | break; |
177 | 181 | ||
@@ -2351,7 +2355,7 @@ static void cfg_queues(struct adapter *adapter) | |||
2351 | struct port_info *pi = adap2pinfo(adapter, pidx); | 2355 | struct port_info *pi = adap2pinfo(adapter, pidx); |
2352 | 2356 | ||
2353 | pi->first_qset = qidx; | 2357 | pi->first_qset = qidx; |
2354 | pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1; | 2358 | pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1; |
2355 | qidx += pi->nqsets; | 2359 | qidx += pi->nqsets; |
2356 | } | 2360 | } |
2357 | s->ethqsets = qidx; | 2361 | s->ethqsets = qidx; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index a5fb9493dee8..85036e6b42c4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c | |||
@@ -1208,7 +1208,10 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1208 | lso->ipid_ofst = cpu_to_be16(0); | 1208 | lso->ipid_ofst = cpu_to_be16(0); |
1209 | lso->mss = cpu_to_be16(ssi->gso_size); | 1209 | lso->mss = cpu_to_be16(ssi->gso_size); |
1210 | lso->seqno_offset = cpu_to_be32(0); | 1210 | lso->seqno_offset = cpu_to_be32(0); |
1211 | lso->len = cpu_to_be32(skb->len); | 1211 | if (is_t4(adapter->params.chip)) |
1212 | lso->len = cpu_to_be32(skb->len); | ||
1213 | else | ||
1214 | lso->len = cpu_to_be32(LSO_T5_XFER_SIZE(skb->len)); | ||
1212 | 1215 | ||
1213 | /* | 1216 | /* |
1214 | * Set up TX Packet CPL pointer, control word and perform | 1217 | * Set up TX Packet CPL pointer, control word and perform |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index f412d0fa0850..95df61dcb4ce 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | |||
@@ -228,6 +228,12 @@ static inline bool is_10g_port(const struct link_config *lc) | |||
228 | return (lc->supported & SUPPORTED_10000baseT_Full) != 0; | 228 | return (lc->supported & SUPPORTED_10000baseT_Full) != 0; |
229 | } | 229 | } |
230 | 230 | ||
231 | static inline bool is_x_10g_port(const struct link_config *lc) | ||
232 | { | ||
233 | return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || | ||
234 | (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; | ||
235 | } | ||
236 | |||
231 | static inline unsigned int core_ticks_per_usec(const struct adapter *adapter) | 237 | static inline unsigned int core_ticks_per_usec(const struct adapter *adapter) |
232 | { | 238 | { |
233 | return adapter->params.vpd.cclk / 1000; | 239 | return adapter->params.vpd.cclk / 1000; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 25dfeb8f28ed..e984fdc48ba2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | |||
@@ -327,6 +327,8 @@ int t4vf_port_init(struct adapter *adapter, int pidx) | |||
327 | v |= SUPPORTED_1000baseT_Full; | 327 | v |= SUPPORTED_1000baseT_Full; |
328 | if (word & FW_PORT_CAP_SPEED_10G) | 328 | if (word & FW_PORT_CAP_SPEED_10G) |
329 | v |= SUPPORTED_10000baseT_Full; | 329 | v |= SUPPORTED_10000baseT_Full; |
330 | if (word & FW_PORT_CAP_SPEED_40G) | ||
331 | v |= SUPPORTED_40000baseSR4_Full; | ||
330 | if (word & FW_PORT_CAP_ANEG) | 332 | if (word & FW_PORT_CAP_ANEG) |
331 | v |= SUPPORTED_Autoneg; | 333 | v |= SUPPORTED_Autoneg; |
332 | init_link_config(&pi->link_cfg, v); | 334 | init_link_config(&pi->link_cfg, v); |
@@ -1352,11 +1354,13 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) | |||
1352 | if (word & FW_PORT_CMD_TXPAUSE) | 1354 | if (word & FW_PORT_CMD_TXPAUSE) |
1353 | fc |= PAUSE_TX; | 1355 | fc |= PAUSE_TX; |
1354 | if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) | 1356 | if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) |
1355 | speed = SPEED_100; | 1357 | speed = 100; |
1356 | else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) | 1358 | else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) |
1357 | speed = SPEED_1000; | 1359 | speed = 1000; |
1358 | else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) | 1360 | else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) |
1359 | speed = SPEED_10000; | 1361 | speed = 10000; |
1362 | else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) | ||
1363 | speed = 40000; | ||
1360 | 1364 | ||
1361 | /* | 1365 | /* |
1362 | * Scan all of our "ports" (Virtual Interfaces) looking for | 1366 | * Scan all of our "ports" (Virtual Interfaces) looking for |
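The VF link handling above now reports speeds as plain Mbps integers (100/1000/10000/40000) instead of the ethtool SPEED_* constants, which makes the added 40G case a one-line branch. A toy version of that capability-bit-to-Mbps mapping, with invented bit names standing in for the FW_PORT_CAP_* values:

#include <stdio.h>

#define CAP_SPEED_100M (1u << 0)   /* invented stand-ins for FW_PORT_CAP_* */
#define CAP_SPEED_1G   (1u << 1)
#define CAP_SPEED_10G  (1u << 2)
#define CAP_SPEED_40G  (1u << 3)

static int cap_to_mbps(unsigned int word)
{
        if (word & CAP_SPEED_100M)
                return 100;
        else if (word & CAP_SPEED_1G)
                return 1000;
        else if (word & CAP_SPEED_10G)
                return 10000;
        else if (word & CAP_SPEED_40G)
                return 40000;
        return 0;
}

int main(void)
{
        printf("%d Mbps\n", cap_to_mbps(CAP_SPEED_40G));
        return 0;
}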
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index 2c578db401e8..08f5b911d96b 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | |||
@@ -125,7 +125,7 @@ out: | |||
125 | } | 125 | } |
126 | 126 | ||
127 | #define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) | 127 | #define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) |
128 | #define FCC_NAPI_TX_EVENT_MSK (FCC_ENET_TXF | FCC_ENET_TXB) | 128 | #define FCC_NAPI_TX_EVENT_MSK (FCC_ENET_TXB) |
129 | #define FCC_RX_EVENT (FCC_ENET_RXF) | 129 | #define FCC_RX_EVENT (FCC_ENET_RXF) |
130 | #define FCC_TX_EVENT (FCC_ENET_TXB) | 130 | #define FCC_TX_EVENT (FCC_ENET_TXB) |
131 | #define FCC_ERR_EVENT_MSK (FCC_ENET_TXE) | 131 | #define FCC_ERR_EVENT_MSK (FCC_ENET_TXE) |
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c index 41aa0b475ca0..f30411f0701f 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c | |||
@@ -116,7 +116,7 @@ static int do_pd_setup(struct fs_enet_private *fep) | |||
116 | } | 116 | } |
117 | 117 | ||
118 | #define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB) | 118 | #define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB) |
119 | #define SCC_NAPI_TX_EVENT_MSK (SCCE_ENET_TXF | SCCE_ENET_TXB) | 119 | #define SCC_NAPI_TX_EVENT_MSK (SCCE_ENET_TXB) |
120 | #define SCC_RX_EVENT (SCCE_ENET_RXF) | 120 | #define SCC_RX_EVENT (SCCE_ENET_RXF) |
121 | #define SCC_TX_EVENT (SCCE_ENET_TXB) | 121 | #define SCC_TX_EVENT (SCCE_ENET_TXB) |
122 | #define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY) | 122 | #define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY) |
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 583e71ab7f51..964c6bf37710 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c | |||
@@ -28,7 +28,9 @@ | |||
28 | #include <linux/of_device.h> | 28 | #include <linux/of_device.h> |
29 | 29 | ||
30 | #include <asm/io.h> | 30 | #include <asm/io.h> |
31 | #if IS_ENABLED(CONFIG_UCC_GETH) | ||
31 | #include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */ | 32 | #include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */ |
33 | #endif | ||
32 | 34 | ||
33 | #include "gianfar.h" | 35 | #include "gianfar.h" |
34 | 36 | ||
@@ -102,19 +104,22 @@ static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, | |||
102 | { | 104 | { |
103 | struct fsl_pq_mdio_priv *priv = bus->priv; | 105 | struct fsl_pq_mdio_priv *priv = bus->priv; |
104 | struct fsl_pq_mii __iomem *regs = priv->regs; | 106 | struct fsl_pq_mii __iomem *regs = priv->regs; |
105 | u32 status; | 107 | unsigned int timeout; |
106 | 108 | ||
107 | /* Set the PHY address and the register address we want to write */ | 109 | /* Set the PHY address and the register address we want to write */ |
108 | out_be32(®s->miimadd, (mii_id << 8) | regnum); | 110 | iowrite32be((mii_id << 8) | regnum, ®s->miimadd); |
109 | 111 | ||
110 | /* Write out the value we want */ | 112 | /* Write out the value we want */ |
111 | out_be32(®s->miimcon, value); | 113 | iowrite32be(value, ®s->miimcon); |
112 | 114 | ||
113 | /* Wait for the transaction to finish */ | 115 | /* Wait for the transaction to finish */ |
114 | status = spin_event_timeout(!(in_be32(®s->miimind) & MIIMIND_BUSY), | 116 | timeout = MII_TIMEOUT; |
115 | MII_TIMEOUT, 0); | 117 | while ((ioread32be(®s->miimind) & MIIMIND_BUSY) && timeout) { |
118 | cpu_relax(); | ||
119 | timeout--; | ||
120 | } | ||
116 | 121 | ||
117 | return status ? 0 : -ETIMEDOUT; | 122 | return timeout ? 0 : -ETIMEDOUT; |
118 | } | 123 | } |
119 | 124 | ||
120 | /* | 125 | /* |
@@ -131,25 +136,29 @@ static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | |||
131 | { | 136 | { |
132 | struct fsl_pq_mdio_priv *priv = bus->priv; | 137 | struct fsl_pq_mdio_priv *priv = bus->priv; |
133 | struct fsl_pq_mii __iomem *regs = priv->regs; | 138 | struct fsl_pq_mii __iomem *regs = priv->regs; |
134 | u32 status; | 139 | unsigned int timeout; |
135 | u16 value; | 140 | u16 value; |
136 | 141 | ||
137 | /* Set the PHY address and the register address we want to read */ | 142 | /* Set the PHY address and the register address we want to read */ |
138 | out_be32(®s->miimadd, (mii_id << 8) | regnum); | 143 | iowrite32be((mii_id << 8) | regnum, ®s->miimadd); |
139 | 144 | ||
140 | /* Clear miimcom, and then initiate a read */ | 145 | /* Clear miimcom, and then initiate a read */ |
141 | out_be32(®s->miimcom, 0); | 146 | iowrite32be(0, ®s->miimcom); |
142 | out_be32(®s->miimcom, MII_READ_COMMAND); | 147 | iowrite32be(MII_READ_COMMAND, ®s->miimcom); |
143 | 148 | ||
144 | /* Wait for the transaction to finish, normally less than 100us */ | 149 | /* Wait for the transaction to finish, normally less than 100us */ |
145 | status = spin_event_timeout(!(in_be32(®s->miimind) & | 150 | timeout = MII_TIMEOUT; |
146 | (MIIMIND_NOTVALID | MIIMIND_BUSY)), | 151 | while ((ioread32be(®s->miimind) & |
147 | MII_TIMEOUT, 0); | 152 | (MIIMIND_NOTVALID | MIIMIND_BUSY)) && timeout) { |
148 | if (!status) | 153 | cpu_relax(); |
154 | timeout--; | ||
155 | } | ||
156 | |||
157 | if (!timeout) | ||
149 | return -ETIMEDOUT; | 158 | return -ETIMEDOUT; |
150 | 159 | ||
151 | /* Grab the value of the register from miimstat */ | 160 | /* Grab the value of the register from miimstat */ |
152 | value = in_be32(®s->miimstat); | 161 | value = ioread32be(®s->miimstat); |
153 | 162 | ||
154 | dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum); | 163 | dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum); |
155 | return value; | 164 | return value; |
@@ -160,23 +169,26 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus) | |||
160 | { | 169 | { |
161 | struct fsl_pq_mdio_priv *priv = bus->priv; | 170 | struct fsl_pq_mdio_priv *priv = bus->priv; |
162 | struct fsl_pq_mii __iomem *regs = priv->regs; | 171 | struct fsl_pq_mii __iomem *regs = priv->regs; |
163 | u32 status; | 172 | unsigned int timeout; |
164 | 173 | ||
165 | mutex_lock(&bus->mdio_lock); | 174 | mutex_lock(&bus->mdio_lock); |
166 | 175 | ||
167 | /* Reset the management interface */ | 176 | /* Reset the management interface */ |
168 | out_be32(®s->miimcfg, MIIMCFG_RESET); | 177 | iowrite32be(MIIMCFG_RESET, ®s->miimcfg); |
169 | 178 | ||
170 | /* Setup the MII Mgmt clock speed */ | 179 | /* Setup the MII Mgmt clock speed */ |
171 | out_be32(®s->miimcfg, MIIMCFG_INIT_VALUE); | 180 | iowrite32be(MIIMCFG_INIT_VALUE, ®s->miimcfg); |
172 | 181 | ||
173 | /* Wait until the bus is free */ | 182 | /* Wait until the bus is free */ |
174 | status = spin_event_timeout(!(in_be32(®s->miimind) & MIIMIND_BUSY), | 183 | timeout = MII_TIMEOUT; |
175 | MII_TIMEOUT, 0); | 184 | while ((ioread32be(®s->miimind) & MIIMIND_BUSY) && timeout) { |
185 | cpu_relax(); | ||
186 | timeout--; | ||
187 | } | ||
176 | 188 | ||
177 | mutex_unlock(&bus->mdio_lock); | 189 | mutex_unlock(&bus->mdio_lock); |
178 | 190 | ||
179 | if (!status) { | 191 | if (!timeout) { |
180 | dev_err(&bus->dev, "timeout waiting for MII bus\n"); | 192 | dev_err(&bus->dev, "timeout waiting for MII bus\n"); |
181 | return -EBUSY; | 193 | return -EBUSY; |
182 | } | 194 | } |
@@ -433,7 +445,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) | |||
433 | 445 | ||
434 | tbipa = data->get_tbipa(priv->map); | 446 | tbipa = data->get_tbipa(priv->map); |
435 | 447 | ||
436 | out_be32(tbipa, be32_to_cpup(prop)); | 448 | iowrite32be(be32_to_cpup(prop), tbipa); |
437 | } | 449 | } |
438 | } | 450 | } |
439 | 451 | ||
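The fsl_pq_mdio conversion above swaps the powerpc-only out_be32()/in_be32()/spin_event_timeout() helpers for generic iowrite32be()/ioread32be() accessors and an open-coded bounded poll, which is what lets the file build on non-powerpc configurations. A self-contained sketch of that bounded busy-wait pattern; read_busy_bit() is a fabricated stand-in for ioread32be(&regs->miimind) & MIIMIND_BUSY:

#include <stdbool.h>
#include <stdio.h>

#define MII_TIMEOUT 1000         /* loop bound, same role as the driver's */
#define ETIMEDOUT   110

static int polls;

/* Stand-in for the hardware busy flag; clears after a few polls. */
static bool read_busy_bit(void)
{
        return ++polls < 5;
}

static int wait_not_busy(void)
{
        unsigned int timeout = MII_TIMEOUT;

        while (read_busy_bit() && timeout) {
                /* cpu_relax() in the kernel; nothing needed here */
                timeout--;
        }
        return timeout ? 0 : -ETIMEDOUT;
}

int main(void)
{
        printf("wait_not_busy() = %d after %d polls\n", wait_not_busy(), polls);
        return 0;
}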
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index fb29d049f4e1..379b1a578d3d 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -88,8 +88,10 @@ | |||
88 | #include <linux/net_tstamp.h> | 88 | #include <linux/net_tstamp.h> |
89 | 89 | ||
90 | #include <asm/io.h> | 90 | #include <asm/io.h> |
91 | #ifdef CONFIG_PPC | ||
91 | #include <asm/reg.h> | 92 | #include <asm/reg.h> |
92 | #include <asm/mpc85xx.h> | 93 | #include <asm/mpc85xx.h> |
94 | #endif | ||
93 | #include <asm/irq.h> | 95 | #include <asm/irq.h> |
94 | #include <asm/uaccess.h> | 96 | #include <asm/uaccess.h> |
95 | #include <linux/module.h> | 97 | #include <linux/module.h> |
@@ -100,6 +102,8 @@ | |||
100 | #include <linux/phy_fixed.h> | 102 | #include <linux/phy_fixed.h> |
101 | #include <linux/of.h> | 103 | #include <linux/of.h> |
102 | #include <linux/of_net.h> | 104 | #include <linux/of_net.h> |
105 | #include <linux/of_address.h> | ||
106 | #include <linux/of_irq.h> | ||
103 | 107 | ||
104 | #include "gianfar.h" | 108 | #include "gianfar.h" |
105 | 109 | ||
@@ -161,7 +165,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, | |||
161 | if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) | 165 | if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) |
162 | lstatus |= BD_LFLAG(RXBD_WRAP); | 166 | lstatus |= BD_LFLAG(RXBD_WRAP); |
163 | 167 | ||
164 | eieio(); | 168 | gfar_wmb(); |
165 | 169 | ||
166 | bdp->lstatus = lstatus; | 170 | bdp->lstatus = lstatus; |
167 | } | 171 | } |
@@ -1061,6 +1065,7 @@ static void gfar_init_filer_table(struct gfar_private *priv) | |||
1061 | } | 1065 | } |
1062 | } | 1066 | } |
1063 | 1067 | ||
1068 | #ifdef CONFIG_PPC | ||
1064 | static void __gfar_detect_errata_83xx(struct gfar_private *priv) | 1069 | static void __gfar_detect_errata_83xx(struct gfar_private *priv) |
1065 | { | 1070 | { |
1066 | unsigned int pvr = mfspr(SPRN_PVR); | 1071 | unsigned int pvr = mfspr(SPRN_PVR); |
@@ -1093,6 +1098,7 @@ static void __gfar_detect_errata_85xx(struct gfar_private *priv) | |||
1093 | ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20))) | 1098 | ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20))) |
1094 | priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ | 1099 | priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ |
1095 | } | 1100 | } |
1101 | #endif | ||
1096 | 1102 | ||
1097 | static void gfar_detect_errata(struct gfar_private *priv) | 1103 | static void gfar_detect_errata(struct gfar_private *priv) |
1098 | { | 1104 | { |
@@ -1101,10 +1107,12 @@ static void gfar_detect_errata(struct gfar_private *priv) | |||
1101 | /* no plans to fix */ | 1107 | /* no plans to fix */ |
1102 | priv->errata |= GFAR_ERRATA_A002; | 1108 | priv->errata |= GFAR_ERRATA_A002; |
1103 | 1109 | ||
1110 | #ifdef CONFIG_PPC | ||
1104 | if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2)) | 1111 | if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2)) |
1105 | __gfar_detect_errata_85xx(priv); | 1112 | __gfar_detect_errata_85xx(priv); |
1106 | else /* non-mpc85xx parts, i.e. e300 core based */ | 1113 | else /* non-mpc85xx parts, i.e. e300 core based */ |
1107 | __gfar_detect_errata_83xx(priv); | 1114 | __gfar_detect_errata_83xx(priv); |
1115 | #endif | ||
1108 | 1116 | ||
1109 | if (priv->errata) | 1117 | if (priv->errata) |
1110 | dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", | 1118 | dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", |
@@ -1754,26 +1762,32 @@ static void gfar_halt_nodisable(struct gfar_private *priv) | |||
1754 | { | 1762 | { |
1755 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | 1763 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1756 | u32 tempval; | 1764 | u32 tempval; |
1765 | unsigned int timeout; | ||
1766 | int stopped; | ||
1757 | 1767 | ||
1758 | gfar_ints_disable(priv); | 1768 | gfar_ints_disable(priv); |
1759 | 1769 | ||
1770 | if (gfar_is_dma_stopped(priv)) | ||
1771 | return; | ||
1772 | |||
1760 | /* Stop the DMA, and wait for it to stop */ | 1773 | /* Stop the DMA, and wait for it to stop */ |
1761 | tempval = gfar_read(®s->dmactrl); | 1774 | tempval = gfar_read(®s->dmactrl); |
1762 | if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) != | 1775 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); |
1763 | (DMACTRL_GRS | DMACTRL_GTS)) { | 1776 | gfar_write(®s->dmactrl, tempval); |
1764 | int ret; | ||
1765 | |||
1766 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); | ||
1767 | gfar_write(®s->dmactrl, tempval); | ||
1768 | 1777 | ||
1769 | do { | 1778 | retry: |
1770 | ret = spin_event_timeout(((gfar_read(®s->ievent) & | 1779 | timeout = 1000; |
1771 | (IEVENT_GRSC | IEVENT_GTSC)) == | 1780 | while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) { |
1772 | (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0); | 1781 | cpu_relax(); |
1773 | if (!ret && !(gfar_read(®s->ievent) & IEVENT_GRSC)) | 1782 | timeout--; |
1774 | ret = __gfar_is_rx_idle(priv); | ||
1775 | } while (!ret); | ||
1776 | } | 1783 | } |
1784 | |||
1785 | if (!timeout) | ||
1786 | stopped = gfar_is_dma_stopped(priv); | ||
1787 | |||
1788 | if (!stopped && !gfar_is_rx_dma_stopped(priv) && | ||
1789 | !__gfar_is_rx_idle(priv)) | ||
1790 | goto retry; | ||
1777 | } | 1791 | } |
1778 | 1792 | ||
1779 | /* Halt the receive and transmit queues */ | 1793 | /* Halt the receive and transmit queues */ |
@@ -2357,18 +2371,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2357 | */ | 2371 | */ |
2358 | spin_lock_irqsave(&tx_queue->txlock, flags); | 2372 | spin_lock_irqsave(&tx_queue->txlock, flags); |
2359 | 2373 | ||
2360 | /* The powerpc-specific eieio() is used, as wmb() has too strong | 2374 | gfar_wmb(); |
2361 | * semantics (it requires synchronization between cacheable and | ||
2362 | * uncacheable mappings, which eieio doesn't provide and which we | ||
2363 | * don't need), thus requiring a more expensive sync instruction. At | ||
2364 | * some point, the set of architecture-independent barrier functions | ||
2365 | * should be expanded to include weaker barriers. | ||
2366 | */ | ||
2367 | eieio(); | ||
2368 | 2375 | ||
2369 | txbdp_start->lstatus = lstatus; | 2376 | txbdp_start->lstatus = lstatus; |
2370 | 2377 | ||
2371 | eieio(); /* force lstatus write before tx_skbuff */ | 2378 | gfar_wmb(); /* force lstatus write before tx_skbuff */ |
2372 | 2379 | ||
2373 | tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; | 2380 | tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; |
2374 | 2381 | ||
@@ -3240,22 +3247,21 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, | |||
3240 | { | 3247 | { |
3241 | struct gfar_private *priv = netdev_priv(dev); | 3248 | struct gfar_private *priv = netdev_priv(dev); |
3242 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | 3249 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
3243 | int idx; | ||
3244 | char tmpbuf[ETH_ALEN]; | ||
3245 | u32 tempval; | 3250 | u32 tempval; |
3246 | u32 __iomem *macptr = ®s->macstnaddr1; | 3251 | u32 __iomem *macptr = ®s->macstnaddr1; |
3247 | 3252 | ||
3248 | macptr += num*2; | 3253 | macptr += num*2; |
3249 | 3254 | ||
3250 | /* Now copy it into the mac registers backwards, cuz | 3255 | /* For a station address of 0x12345678ABCD in transmission |
3251 | * little endian is silly | 3256 | * order (BE), MACnADDR1 is set to 0xCDAB7856 and |
3257 | * MACnADDR2 is set to 0x34120000. | ||
3252 | */ | 3258 | */ |
3253 | for (idx = 0; idx < ETH_ALEN; idx++) | 3259 | tempval = (addr[5] << 24) | (addr[4] << 16) | |
3254 | tmpbuf[ETH_ALEN - 1 - idx] = addr[idx]; | 3260 | (addr[3] << 8) | addr[2]; |
3255 | 3261 | ||
3256 | gfar_write(macptr, *((u32 *) (tmpbuf))); | 3262 | gfar_write(macptr, tempval); |
3257 | 3263 | ||
3258 | tempval = *((u32 *) (tmpbuf + 4)); | 3264 | tempval = (addr[1] << 24) | (addr[0] << 16); |
3259 | 3265 | ||
3260 | gfar_write(macptr+1, tempval); | 3266 | gfar_write(macptr+1, tempval); |
3261 | } | 3267 | } |
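The rewritten gfar_set_mac_for_addr() documents the register layout by example: for a station address 0x12345678ABCD in transmission order, MACnADDR1 ends up as 0xCDAB7856 and MACnADDR2 as 0x34120000. A quick standalone check of that byte packing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* station address in transmission order: 12:34:56:78:AB:CD */
        const uint8_t addr[6] = { 0x12, 0x34, 0x56, 0x78, 0xAB, 0xCD };

        uint32_t macaddr1 = ((uint32_t)addr[5] << 24) | ((uint32_t)addr[4] << 16) |
                            ((uint32_t)addr[3] << 8)  |  addr[2];
        uint32_t macaddr2 = ((uint32_t)addr[1] << 24) | ((uint32_t)addr[0] << 16);

        printf("MACnADDR1 = 0x%08X\n", macaddr1);   /* 0xCDAB7856 */
        printf("MACnADDR2 = 0x%08X\n", macaddr2);   /* 0x34120000 */
        return 0;
}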
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 84632c569f2c..2805cfbf1765 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h | |||
@@ -1226,6 +1226,37 @@ static inline void gfar_write_isrg(struct gfar_private *priv) | |||
1226 | } | 1226 | } |
1227 | } | 1227 | } |
1228 | 1228 | ||
1229 | static inline int gfar_is_dma_stopped(struct gfar_private *priv) | ||
1230 | { | ||
1231 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1232 | |||
1233 | return ((gfar_read(®s->ievent) & (IEVENT_GRSC | IEVENT_GTSC)) == | ||
1234 | (IEVENT_GRSC | IEVENT_GTSC)); | ||
1235 | } | ||
1236 | |||
1237 | static inline int gfar_is_rx_dma_stopped(struct gfar_private *priv) | ||
1238 | { | ||
1239 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1240 | |||
1241 | return gfar_read(®s->ievent) & IEVENT_GRSC; | ||
1242 | } | ||
1243 | |||
1244 | static inline void gfar_wmb(void) | ||
1245 | { | ||
1246 | #if defined(CONFIG_PPC) | ||
1247 | /* The powerpc-specific eieio() is used, as wmb() has too strong | ||
1248 | * semantics (it requires synchronization between cacheable and | ||
1249 | * uncacheable mappings, which eieio() doesn't provide and which we | ||
1250 | * don't need), thus requiring a more expensive sync instruction. At | ||
1251 | * some point, the set of architecture-independent barrier functions | ||
1252 | * should be expanded to include weaker barriers. | ||
1253 | */ | ||
1254 | eieio(); | ||
1255 | #else | ||
1256 | wmb(); /* order write accesses for BD (or FCB) fields */ | ||
1257 | #endif | ||
1258 | } | ||
1259 | |||
1229 | irqreturn_t gfar_receive(int irq, void *dev_id); | 1260 | irqreturn_t gfar_receive(int irq, void *dev_id); |
1230 | int startup_gfar(struct net_device *dev); | 1261 | int startup_gfar(struct net_device *dev); |
1231 | void stop_gfar(struct net_device *dev); | 1262 | void stop_gfar(struct net_device *dev); |
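gfar_wmb() keeps the cheap eieio() ordering on powerpc and falls back to a plain wmb() everywhere else, so the transmit path's "fill the descriptor fields, barrier, then publish lstatus" sequence still holds on other architectures. A hypothetical sketch of that publish pattern using C11 release semantics in place of the kernel barriers, purely to illustrate the ordering requirement, not the driver's actual descriptor format:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct txbd {
        uint32_t buf_addr;
        uint32_t length;
        _Atomic uint32_t lstatus;   /* ownership/status word the hardware polls */
};

#define BD_READY 0x80000000u

static void publish_bd(struct txbd *bd, uint32_t addr, uint32_t len)
{
        bd->buf_addr = addr;
        bd->length   = len;
        /* release ordering plays the role of gfar_wmb(): all descriptor
         * fields are visible before the READY bit is. */
        atomic_store_explicit(&bd->lstatus, BD_READY | len,
                              memory_order_release);
}

int main(void)
{
        struct txbd bd = { 0 };
        publish_bd(&bd, 0x1000, 64);
        printf("lstatus = 0x%08x\n", (unsigned int)atomic_load(&bd.lstatus));
        return 0;
}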
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 6a6d5ee51e6a..6919adb66f53 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig | |||
@@ -304,6 +304,7 @@ config FM10K | |||
304 | tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support" | 304 | tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support" |
305 | default n | 305 | default n |
306 | depends on PCI_MSI | 306 | depends on PCI_MSI |
307 | select PTP_1588_CLOCK | ||
307 | ---help--- | 308 | ---help--- |
308 | This driver supports Intel(R) FM10000 Ethernet Switch Host | 309 | This driver supports Intel(R) FM10000 Ethernet Switch Host |
309 | Interface. For more information on how to identify your adapter, | 310 | Interface. For more information on how to identify your adapter, |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 6c800a330d66..9d7118a0d67a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c | |||
@@ -219,11 +219,10 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, | |||
219 | /* flip page offset to other buffer */ | 219 | /* flip page offset to other buffer */ |
220 | rx_buffer->page_offset ^= FM10K_RX_BUFSZ; | 220 | rx_buffer->page_offset ^= FM10K_RX_BUFSZ; |
221 | 221 | ||
222 | /* since we are the only owner of the page and we need to | 222 | /* Even if we own the page, we are not allowed to use atomic_set() |
223 | * increment it, just set the value to 2 in order to avoid | 223 | * This would break get_page_unless_zero() users. |
224 | * an unnecessary locked operation | ||
225 | */ | 224 | */ |
226 | atomic_set(&page->_count, 2); | 225 | atomic_inc(&page->_count); |
227 | #else | 226 | #else |
228 | /* move offset up to the next cache line */ | 227 | /* move offset up to the next cache line */ |
229 | rx_buffer->page_offset += truesize; | 228 | rx_buffer->page_offset += truesize; |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ae59c0b108c5..a21b14495ebd 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -6545,11 +6545,10 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, | |||
6545 | /* flip page offset to other buffer */ | 6545 | /* flip page offset to other buffer */ |
6546 | rx_buffer->page_offset ^= IGB_RX_BUFSZ; | 6546 | rx_buffer->page_offset ^= IGB_RX_BUFSZ; |
6547 | 6547 | ||
6548 | /* since we are the only owner of the page and we need to | 6548 | /* Even if we own the page, we are not allowed to use atomic_set() |
6549 | * increment it, just set the value to 2 in order to avoid | 6549 | * This would break get_page_unless_zero() users. |
6550 | * an unnecessary locked operation | ||
6551 | */ | 6550 | */ |
6552 | atomic_set(&page->_count, 2); | 6551 | atomic_inc(&page->_count); |
6553 | #else | 6552 | #else |
6554 | /* move offset up to the next cache line */ | 6553 | /* move offset up to the next cache line */ |
6555 | rx_buffer->page_offset += truesize; | 6554 | rx_buffer->page_offset += truesize; |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d677b5a23b58..fec5212d4337 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -1865,12 +1865,10 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, | |||
1865 | /* flip page offset to other buffer */ | 1865 | /* flip page offset to other buffer */ |
1866 | rx_buffer->page_offset ^= truesize; | 1866 | rx_buffer->page_offset ^= truesize; |
1867 | 1867 | ||
1868 | /* | 1868 | /* Even if we own the page, we are not allowed to use atomic_set() |
1869 | * since we are the only owner of the page and we need to | 1869 | * This would break get_page_unless_zero() users. |
1870 | * increment it, just set the value to 2 in order to avoid | ||
1871 | * an unecessary locked operation | ||
1872 | */ | 1870 | */ |
1873 | atomic_set(&page->_count, 2); | 1871 | atomic_inc(&page->_count); |
1874 | #else | 1872 | #else |
1875 | /* move offset up to the next cache line */ | 1873 | /* move offset up to the next cache line */ |
1876 | rx_buffer->page_offset += truesize; | 1874 | rx_buffer->page_offset += truesize; |
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index b3b72ad92d4a..d323a695dfbc 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig | |||
@@ -64,7 +64,8 @@ config MVPP2 | |||
64 | 64 | ||
65 | config PXA168_ETH | 65 | config PXA168_ETH |
66 | tristate "Marvell pxa168 ethernet support" | 66 | tristate "Marvell pxa168 ethernet support" |
67 | depends on (CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST) && HAS_IOMEM | 67 | depends on HAS_IOMEM && HAS_DMA |
68 | depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST | ||
68 | select PHYLIB | 69 | select PHYLIB |
69 | ---help--- | 70 | ---help--- |
70 | This driver supports the pxa168 Ethernet ports. | 71 | This driver supports the pxa168 Ethernet ports. |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index a33048ee9621..01660c595f5c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c | |||
@@ -76,10 +76,10 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv, | |||
76 | page_alloc->dma = dma; | 76 | page_alloc->dma = dma; |
77 | page_alloc->page_offset = frag_info->frag_align; | 77 | page_alloc->page_offset = frag_info->frag_align; |
78 | /* Not doing get_page() for each frag is a big win | 78 | /* Not doing get_page() for each frag is a big win |
79 | * on asymmetric workloads. | 79 | * on asymmetric workloads. Note we cannot use atomic_set().
80 | */ | 80 | */ |
81 | atomic_set(&page->_count, | 81 | atomic_add(page_alloc->page_size / frag_info->frag_stride - 1, |
82 | page_alloc->page_size / frag_info->frag_stride); | 82 | &page->_count); |
83 | return 0; | 83 | return 0; |
84 | } | 84 | } |
85 | 85 | ||
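All four page-recycling fixes above (fm10k, igb, ixgbe, mlx4) replace atomic_set() on page->_count with atomic_inc()/atomic_add(): a blind store can race with a concurrent get_page_unless_zero() and silently discard the reference that caller just took. A toy userspace reproduction of the race shape, with hypothetical names; the kernel primitives differ, but the hazard is the same:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic int refcount = 1;          /* driver owns one reference */

/* Roughly what get_page_unless_zero() does: take a ref only if count > 0. */
static bool get_ref_unless_zero(void)
{
        int old = atomic_load(&refcount);
        while (old > 0) {
                if (atomic_compare_exchange_weak(&refcount, &old, old + 1))
                        return true;
        }
        return false;
}

int main(void)
{
        bool other = get_ref_unless_zero();   /* someone else now holds a ref too */

        /* BAD: like atomic_set(&page->_count, 2) — forgets the reference above. */
        atomic_store(&refcount, 2);
        /* GOOD: like atomic_inc(&page->_count) — preserves it:
         * atomic_fetch_add(&refcount, 1);
         */

        printf("other holds ref: %d, count now %d (should be 3, not 2)\n",
               other, atomic_load(&refcount));
        return 0;
}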
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 5efe60ea6526..0adcf73cf722 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | |||
@@ -134,7 +134,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw, | |||
134 | void __iomem *ioaddr = (void __iomem *)dev->base_addr; | 134 | void __iomem *ioaddr = (void __iomem *)dev->base_addr; |
135 | unsigned int value = 0; | 135 | unsigned int value = 0; |
136 | unsigned int perfect_addr_number = hw->unicast_filter_entries; | 136 | unsigned int perfect_addr_number = hw->unicast_filter_entries; |
137 | u32 mc_filter[2]; | 137 | u32 mc_filter[8]; |
138 | int mcbitslog2 = hw->mcast_bits_log2; | 138 | int mcbitslog2 = hw->mcast_bits_log2; |
139 | 139 | ||
140 | pr_debug("%s: # mcasts %d, # unicast %d\n", __func__, | 140 | pr_debug("%s: # mcasts %d, # unicast %d\n", __func__, |
@@ -182,7 +182,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw, | |||
182 | struct netdev_hw_addr *ha; | 182 | struct netdev_hw_addr *ha; |
183 | 183 | ||
184 | netdev_for_each_uc_addr(ha, dev) { | 184 | netdev_for_each_uc_addr(ha, dev) { |
185 | stmmac_get_mac_addr(ioaddr, ha->addr, | 185 | stmmac_set_mac_addr(ioaddr, ha->addr, |
186 | GMAC_ADDR_HIGH(reg), | 186 | GMAC_ADDR_HIGH(reg), |
187 | GMAC_ADDR_LOW(reg)); | 187 | GMAC_ADDR_LOW(reg)); |
188 | reg++; | 188 | reg++; |
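Growing mc_filter[] from 2 to 8 words matches a 256-bin multicast hash table (mcast_bits_log2 = 8): 256 bins need 256/32 = 8 32-bit registers, which the old 2-word array could not hold. A small, hypothetical illustration of that sizing and of setting one bin; the real driver derives the bin from a CRC of the multicast address, which is omitted here:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        int mcbitslog2 = 8;                           /* 2^8 = 256 hash bins */
        unsigned int nbins = 1u << mcbitslog2;
        unsigned int nwords = nbins / 32;             /* 8 registers of 32 bits */
        uint32_t mc_filter[8];

        memset(mc_filter, 0, sizeof(mc_filter));

        unsigned int bin = 200;                       /* pretend the CRC picked bin 200 */
        mc_filter[bin >> 5] |= 1u << (bin & 31);

        printf("%u bins -> %u filter words; bin %u lives in word %u, bit %u\n",
               nbins, nwords, bin, bin >> 5, bin & 31);
        return 0;
}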
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 38b4fae61f04..29b3bb410781 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -260,7 +260,7 @@ static void macvlan_broadcast(struct sk_buff *skb, | |||
260 | mode == MACVLAN_MODE_BRIDGE) ?: | 260 | mode == MACVLAN_MODE_BRIDGE) ?: |
261 | netif_rx_ni(nskb); | 261 | netif_rx_ni(nskb); |
262 | macvlan_count_rx(vlan, skb->len + ETH_HLEN, | 262 | macvlan_count_rx(vlan, skb->len + ETH_HLEN, |
263 | err == NET_RX_SUCCESS, 1); | 263 | err == NET_RX_SUCCESS, true); |
264 | } | 264 | } |
265 | } | 265 | } |
266 | } | 266 | } |
@@ -379,7 +379,7 @@ static void macvlan_forward_source_one(struct sk_buff *skb, | |||
379 | nskb->pkt_type = PACKET_HOST; | 379 | nskb->pkt_type = PACKET_HOST; |
380 | 380 | ||
381 | ret = netif_rx(nskb); | 381 | ret = netif_rx(nskb); |
382 | macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0); | 382 | macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false); |
383 | } | 383 | } |
384 | 384 | ||
385 | static void macvlan_forward_source(struct sk_buff *skb, | 385 | static void macvlan_forward_source(struct sk_buff *skb, |
@@ -407,7 +407,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) | |||
407 | const struct macvlan_dev *src; | 407 | const struct macvlan_dev *src; |
408 | struct net_device *dev; | 408 | struct net_device *dev; |
409 | unsigned int len = 0; | 409 | unsigned int len = 0; |
410 | int ret = NET_RX_DROP; | 410 | int ret; |
411 | rx_handler_result_t handle_res; | ||
411 | 412 | ||
412 | port = macvlan_port_get_rcu(skb->dev); | 413 | port = macvlan_port_get_rcu(skb->dev); |
413 | if (is_multicast_ether_addr(eth->h_dest)) { | 414 | if (is_multicast_ether_addr(eth->h_dest)) { |
@@ -423,6 +424,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) | |||
423 | vlan = src; | 424 | vlan = src; |
424 | ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?: | 425 | ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?: |
425 | netif_rx(skb); | 426 | netif_rx(skb); |
427 | handle_res = RX_HANDLER_CONSUMED; | ||
426 | goto out; | 428 | goto out; |
427 | } | 429 | } |
428 | 430 | ||
@@ -448,17 +450,20 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) | |||
448 | } | 450 | } |
449 | len = skb->len + ETH_HLEN; | 451 | len = skb->len + ETH_HLEN; |
450 | skb = skb_share_check(skb, GFP_ATOMIC); | 452 | skb = skb_share_check(skb, GFP_ATOMIC); |
451 | if (!skb) | 453 | if (!skb) { |
454 | ret = NET_RX_DROP; | ||
455 | handle_res = RX_HANDLER_CONSUMED; | ||
452 | goto out; | 456 | goto out; |
457 | } | ||
453 | 458 | ||
454 | skb->dev = dev; | 459 | skb->dev = dev; |
455 | skb->pkt_type = PACKET_HOST; | 460 | skb->pkt_type = PACKET_HOST; |
456 | 461 | ||
457 | ret = netif_rx(skb); | 462 | ret = NET_RX_SUCCESS; |
458 | 463 | handle_res = RX_HANDLER_ANOTHER; | |
459 | out: | 464 | out: |
460 | macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0); | 465 | macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false); |
461 | return RX_HANDLER_CONSUMED; | 466 | return handle_res; |
462 | } | 467 | } |
463 | 468 | ||
464 | static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) | 469 | static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) |
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 011dbda2b2f1..492435fce1d4 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/phy.h> | 26 | #include <linux/phy.h> |
27 | #include <linux/micrel_phy.h> | 27 | #include <linux/micrel_phy.h> |
28 | #include <linux/of.h> | 28 | #include <linux/of.h> |
29 | #include <linux/clk.h> | ||
29 | 30 | ||
30 | /* Operation Mode Strap Override */ | 31 | /* Operation Mode Strap Override */ |
31 | #define MII_KSZPHY_OMSO 0x16 | 32 | #define MII_KSZPHY_OMSO 0x16 |
@@ -72,9 +73,12 @@ static int ksz_config_flags(struct phy_device *phydev) | |||
72 | { | 73 | { |
73 | int regval; | 74 | int regval; |
74 | 75 | ||
75 | if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) { | 76 | if (phydev->dev_flags & (MICREL_PHY_50MHZ_CLK | MICREL_PHY_25MHZ_CLK)) { |
76 | regval = phy_read(phydev, MII_KSZPHY_CTRL); | 77 | regval = phy_read(phydev, MII_KSZPHY_CTRL); |
77 | regval |= KSZ8051_RMII_50MHZ_CLK; | 78 | if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) |
79 | regval |= KSZ8051_RMII_50MHZ_CLK; | ||
80 | else | ||
81 | regval &= ~KSZ8051_RMII_50MHZ_CLK; | ||
78 | return phy_write(phydev, MII_KSZPHY_CTRL, regval); | 82 | return phy_write(phydev, MII_KSZPHY_CTRL, regval); |
79 | } | 83 | } |
80 | return 0; | 84 | return 0; |
@@ -440,6 +444,27 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum, | |||
440 | { | 444 | { |
441 | } | 445 | } |
442 | 446 | ||
447 | static int ksz8021_probe(struct phy_device *phydev) | ||
448 | { | ||
449 | struct clk *clk; | ||
450 | |||
451 | clk = devm_clk_get(&phydev->dev, "rmii-ref"); | ||
452 | if (!IS_ERR(clk)) { | ||
453 | unsigned long rate = clk_get_rate(clk); | ||
454 | |||
455 | if (rate > 24500000 && rate < 25500000) { | ||
456 | phydev->dev_flags |= MICREL_PHY_25MHZ_CLK; | ||
457 | } else if (rate > 49500000 && rate < 50500000) { | ||
458 | phydev->dev_flags |= MICREL_PHY_50MHZ_CLK; | ||
459 | } else { | ||
460 | dev_err(&phydev->dev, "Clock rate out of range: %ld\n", rate); | ||
461 | return -EINVAL; | ||
462 | } | ||
463 | } | ||
464 | |||
465 | return 0; | ||
466 | } | ||
467 | |||
443 | static struct phy_driver ksphy_driver[] = { | 468 | static struct phy_driver ksphy_driver[] = { |
444 | { | 469 | { |
445 | .phy_id = PHY_ID_KS8737, | 470 | .phy_id = PHY_ID_KS8737, |
@@ -462,6 +487,7 @@ static struct phy_driver ksphy_driver[] = { | |||
462 | .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | | 487 | .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | |
463 | SUPPORTED_Asym_Pause), | 488 | SUPPORTED_Asym_Pause), |
464 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, | 489 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, |
490 | .probe = ksz8021_probe, | ||
465 | .config_init = ksz8021_config_init, | 491 | .config_init = ksz8021_config_init, |
466 | .config_aneg = genphy_config_aneg, | 492 | .config_aneg = genphy_config_aneg, |
467 | .read_status = genphy_read_status, | 493 | .read_status = genphy_read_status, |
@@ -477,6 +503,7 @@ static struct phy_driver ksphy_driver[] = { | |||
477 | .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | | 503 | .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | |
478 | SUPPORTED_Asym_Pause), | 504 | SUPPORTED_Asym_Pause), |
479 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, | 505 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, |
506 | .probe = ksz8021_probe, | ||
480 | .config_init = ksz8021_config_init, | 507 | .config_init = ksz8021_config_init, |
481 | .config_aneg = genphy_config_aneg, | 508 | .config_aneg = genphy_config_aneg, |
482 | .read_status = genphy_read_status, | 509 | .read_status = genphy_read_status, |
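ksz8021_probe() above asks the clock framework for an optional "rmii-ref" clock and classifies its rate into the 25 MHz or 50 MHz strap within roughly half a megahertz, leaving ksz_config_flags() to set or clear the RMII 50 MHz bit accordingly. A standalone sketch of just the rate-window classification, with made-up flag values in place of the MICREL_PHY_* constants:

#include <stdio.h>

#define PHY_25MHZ_CLK  0x1   /* made-up stand-ins for the MICREL_PHY_* flags */
#define PHY_50MHZ_CLK  0x2

static int classify_rmii_ref(unsigned long rate, unsigned int *flags)
{
        if (rate > 24500000 && rate < 25500000)
                *flags |= PHY_25MHZ_CLK;
        else if (rate > 49500000 && rate < 50500000)
                *flags |= PHY_50MHZ_CLK;
        else
                return -1;   /* out of range, like the driver's -EINVAL */
        return 0;
}

int main(void)
{
        unsigned int flags = 0;

        if (classify_rmii_ref(25000000, &flags) == 0)
                printf("flags = 0x%x (25 MHz reference)\n", flags);
        return 0;
}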
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 5cfd414b9a3e..864159eb744e 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -26,7 +26,7 @@ | |||
26 | #include <linux/mdio.h> | 26 | #include <linux/mdio.h> |
27 | 27 | ||
28 | /* Version Information */ | 28 | /* Version Information */ |
29 | #define DRIVER_VERSION "v1.06.1 (2014/10/01)" | 29 | #define DRIVER_VERSION "v1.07.0 (2014/10/09)" |
30 | #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" | 30 | #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" |
31 | #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" | 31 | #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" |
32 | #define MODULENAME "r8152" | 32 | #define MODULENAME "r8152" |
@@ -566,6 +566,7 @@ struct r8152 { | |||
566 | spinlock_t rx_lock, tx_lock; | 566 | spinlock_t rx_lock, tx_lock; |
567 | struct delayed_work schedule; | 567 | struct delayed_work schedule; |
568 | struct mii_if_info mii; | 568 | struct mii_if_info mii; |
569 | struct mutex control; /* use for hw setting */ | ||
569 | 570 | ||
570 | struct rtl_ops { | 571 | struct rtl_ops { |
571 | void (*init)(struct r8152 *); | 572 | void (*init)(struct r8152 *); |
@@ -942,15 +943,8 @@ static int read_mii_word(struct net_device *netdev, int phy_id, int reg) | |||
942 | if (phy_id != R8152_PHY_ID) | 943 | if (phy_id != R8152_PHY_ID) |
943 | return -EINVAL; | 944 | return -EINVAL; |
944 | 945 | ||
945 | ret = usb_autopm_get_interface(tp->intf); | ||
946 | if (ret < 0) | ||
947 | goto out; | ||
948 | |||
949 | ret = r8152_mdio_read(tp, reg); | 946 | ret = r8152_mdio_read(tp, reg); |
950 | 947 | ||
951 | usb_autopm_put_interface(tp->intf); | ||
952 | |||
953 | out: | ||
954 | return ret; | 948 | return ret; |
955 | } | 949 | } |
956 | 950 | ||
@@ -965,12 +959,7 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val) | |||
965 | if (phy_id != R8152_PHY_ID) | 959 | if (phy_id != R8152_PHY_ID) |
966 | return; | 960 | return; |
967 | 961 | ||
968 | if (usb_autopm_get_interface(tp->intf) < 0) | ||
969 | return; | ||
970 | |||
971 | r8152_mdio_write(tp, reg, val); | 962 | r8152_mdio_write(tp, reg, val); |
972 | |||
973 | usb_autopm_put_interface(tp->intf); | ||
974 | } | 963 | } |
975 | 964 | ||
976 | static int | 965 | static int |
@@ -989,12 +978,16 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p) | |||
989 | if (ret < 0) | 978 | if (ret < 0) |
990 | goto out1; | 979 | goto out1; |
991 | 980 | ||
981 | mutex_lock(&tp->control); | ||
982 | |||
992 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 983 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
993 | 984 | ||
994 | ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); | 985 | ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); |
995 | pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data); | 986 | pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data); |
996 | ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); | 987 | ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); |
997 | 988 | ||
989 | mutex_unlock(&tp->control); | ||
990 | |||
998 | usb_autopm_put_interface(tp->intf); | 991 | usb_autopm_put_interface(tp->intf); |
999 | out1: | 992 | out1: |
1000 | return ret; | 993 | return ret; |
@@ -2145,6 +2138,13 @@ static int rtl8152_set_features(struct net_device *dev, | |||
2145 | { | 2138 | { |
2146 | netdev_features_t changed = features ^ dev->features; | 2139 | netdev_features_t changed = features ^ dev->features; |
2147 | struct r8152 *tp = netdev_priv(dev); | 2140 | struct r8152 *tp = netdev_priv(dev); |
2141 | int ret; | ||
2142 | |||
2143 | ret = usb_autopm_get_interface(tp->intf); | ||
2144 | if (ret < 0) | ||
2145 | goto out; | ||
2146 | |||
2147 | mutex_lock(&tp->control); | ||
2148 | 2148 | ||
2149 | if (changed & NETIF_F_HW_VLAN_CTAG_RX) { | 2149 | if (changed & NETIF_F_HW_VLAN_CTAG_RX) { |
2150 | if (features & NETIF_F_HW_VLAN_CTAG_RX) | 2150 | if (features & NETIF_F_HW_VLAN_CTAG_RX) |
@@ -2153,7 +2153,12 @@ static int rtl8152_set_features(struct net_device *dev, | |||
2153 | rtl_rx_vlan_en(tp, false); | 2153 | rtl_rx_vlan_en(tp, false); |
2154 | } | 2154 | } |
2155 | 2155 | ||
2156 | return 0; | 2156 | mutex_unlock(&tp->control); |
2157 | |||
2158 | usb_autopm_put_interface(tp->intf); | ||
2159 | |||
2160 | out: | ||
2161 | return ret; | ||
2157 | } | 2162 | } |
2158 | 2163 | ||
2159 | #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) | 2164 | #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) |
@@ -2851,6 +2856,11 @@ static void rtl_work_func_t(struct work_struct *work) | |||
2851 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | 2856 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) |
2852 | goto out1; | 2857 | goto out1; |
2853 | 2858 | ||
2859 | if (!mutex_trylock(&tp->control)) { | ||
2860 | schedule_delayed_work(&tp->schedule, 0); | ||
2861 | goto out1; | ||
2862 | } | ||
2863 | |||
2854 | if (test_bit(RTL8152_LINK_CHG, &tp->flags)) | 2864 | if (test_bit(RTL8152_LINK_CHG, &tp->flags)) |
2855 | set_carrier(tp); | 2865 | set_carrier(tp); |
2856 | 2866 | ||
@@ -2866,6 +2876,8 @@ static void rtl_work_func_t(struct work_struct *work) | |||
2866 | if (test_bit(PHY_RESET, &tp->flags)) | 2876 | if (test_bit(PHY_RESET, &tp->flags)) |
2867 | rtl_phy_reset(tp); | 2877 | rtl_phy_reset(tp); |
2868 | 2878 | ||
2879 | mutex_unlock(&tp->control); | ||
2880 | |||
2869 | out1: | 2881 | out1: |
2870 | usb_autopm_put_interface(tp->intf); | 2882 | usb_autopm_put_interface(tp->intf); |
2871 | } | 2883 | } |
@@ -2885,6 +2897,8 @@ static int rtl8152_open(struct net_device *netdev) | |||
2885 | goto out; | 2897 | goto out; |
2886 | } | 2898 | } |
2887 | 2899 | ||
2900 | mutex_lock(&tp->control); | ||
2901 | |||
2888 | /* The WORK_ENABLE may be set when autoresume occurs */ | 2902 | /* The WORK_ENABLE may be set when autoresume occurs */ |
2889 | if (test_bit(WORK_ENABLE, &tp->flags)) { | 2903 | if (test_bit(WORK_ENABLE, &tp->flags)) { |
2890 | clear_bit(WORK_ENABLE, &tp->flags); | 2904 | clear_bit(WORK_ENABLE, &tp->flags); |
@@ -2913,6 +2927,8 @@ static int rtl8152_open(struct net_device *netdev) | |||
2913 | free_all_mem(tp); | 2927 | free_all_mem(tp); |
2914 | } | 2928 | } |
2915 | 2929 | ||
2930 | mutex_unlock(&tp->control); | ||
2931 | |||
2916 | usb_autopm_put_interface(tp->intf); | 2932 | usb_autopm_put_interface(tp->intf); |
2917 | 2933 | ||
2918 | out: | 2934 | out: |
@@ -2933,6 +2949,8 @@ static int rtl8152_close(struct net_device *netdev) | |||
2933 | if (res < 0) { | 2949 | if (res < 0) { |
2934 | rtl_drop_queued_tx(tp); | 2950 | rtl_drop_queued_tx(tp); |
2935 | } else { | 2951 | } else { |
2952 | mutex_lock(&tp->control); | ||
2953 | |||
2936 | /* The autosuspend may have been enabled and wouldn't | 2954 | /* The autosuspend may have been enabled and wouldn't |
2937 | * be disabled when autoresume occurs, because the | 2955 | * be disabled when autoresume occurs, because the
2938 | * netif_running() would be false. | 2956 | * netif_running() would be false. |
@@ -2945,6 +2963,9 @@ static int rtl8152_close(struct net_device *netdev) | |||
2945 | tasklet_disable(&tp->tl); | 2963 | tasklet_disable(&tp->tl); |
2946 | tp->rtl_ops.down(tp); | 2964 | tp->rtl_ops.down(tp); |
2947 | tasklet_enable(&tp->tl); | 2965 | tasklet_enable(&tp->tl); |
2966 | |||
2967 | mutex_unlock(&tp->control); | ||
2968 | |||
2948 | usb_autopm_put_interface(tp->intf); | 2969 | usb_autopm_put_interface(tp->intf); |
2949 | } | 2970 | } |
2950 | 2971 | ||
@@ -3169,6 +3190,8 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) | |||
3169 | { | 3190 | { |
3170 | struct r8152 *tp = usb_get_intfdata(intf); | 3191 | struct r8152 *tp = usb_get_intfdata(intf); |
3171 | 3192 | ||
3193 | mutex_lock(&tp->control); | ||
3194 | |||
3172 | if (PMSG_IS_AUTO(message)) | 3195 | if (PMSG_IS_AUTO(message)) |
3173 | set_bit(SELECTIVE_SUSPEND, &tp->flags); | 3196 | set_bit(SELECTIVE_SUSPEND, &tp->flags); |
3174 | else | 3197 | else |
@@ -3188,6 +3211,8 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) | |||
3188 | tasklet_enable(&tp->tl); | 3211 | tasklet_enable(&tp->tl); |
3189 | } | 3212 | } |
3190 | 3213 | ||
3214 | mutex_unlock(&tp->control); | ||
3215 | |||
3191 | return 0; | 3216 | return 0; |
3192 | } | 3217 | } |
3193 | 3218 | ||
@@ -3195,6 +3220,8 @@ static int rtl8152_resume(struct usb_interface *intf) | |||
3195 | { | 3220 | { |
3196 | struct r8152 *tp = usb_get_intfdata(intf); | 3221 | struct r8152 *tp = usb_get_intfdata(intf); |
3197 | 3222 | ||
3223 | mutex_lock(&tp->control); | ||
3224 | |||
3198 | if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) { | 3225 | if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) { |
3199 | tp->rtl_ops.init(tp); | 3226 | tp->rtl_ops.init(tp); |
3200 | netif_device_attach(tp->netdev); | 3227 | netif_device_attach(tp->netdev); |
@@ -3220,6 +3247,8 @@ static int rtl8152_resume(struct usb_interface *intf) | |||
3220 | usb_submit_urb(tp->intr_urb, GFP_KERNEL); | 3247 | usb_submit_urb(tp->intr_urb, GFP_KERNEL); |
3221 | } | 3248 | } |
3222 | 3249 | ||
3250 | mutex_unlock(&tp->control); | ||
3251 | |||
3223 | return 0; | 3252 | return 0; |
3224 | } | 3253 | } |
3225 | 3254 | ||
@@ -3230,9 +3259,13 @@ static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
3230 | if (usb_autopm_get_interface(tp->intf) < 0) | 3259 | if (usb_autopm_get_interface(tp->intf) < 0) |
3231 | return; | 3260 | return; |
3232 | 3261 | ||
3262 | mutex_lock(&tp->control); | ||
3263 | |||
3233 | wol->supported = WAKE_ANY; | 3264 | wol->supported = WAKE_ANY; |
3234 | wol->wolopts = __rtl_get_wol(tp); | 3265 | wol->wolopts = __rtl_get_wol(tp); |
3235 | 3266 | ||
3267 | mutex_unlock(&tp->control); | ||
3268 | |||
3236 | usb_autopm_put_interface(tp->intf); | 3269 | usb_autopm_put_interface(tp->intf); |
3237 | } | 3270 | } |
3238 | 3271 | ||
@@ -3245,9 +3278,13 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
3245 | if (ret < 0) | 3278 | if (ret < 0) |
3246 | goto out_set_wol; | 3279 | goto out_set_wol; |
3247 | 3280 | ||
3281 | mutex_lock(&tp->control); | ||
3282 | |||
3248 | __rtl_set_wol(tp, wol->wolopts); | 3283 | __rtl_set_wol(tp, wol->wolopts); |
3249 | tp->saved_wolopts = wol->wolopts & WAKE_ANY; | 3284 | tp->saved_wolopts = wol->wolopts & WAKE_ANY; |
3250 | 3285 | ||
3286 | mutex_unlock(&tp->control); | ||
3287 | |||
3251 | usb_autopm_put_interface(tp->intf); | 3288 | usb_autopm_put_interface(tp->intf); |
3252 | 3289 | ||
3253 | out_set_wol: | 3290 | out_set_wol: |
@@ -3282,11 +3319,25 @@ static | |||
3282 | int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) | 3319 | int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) |
3283 | { | 3320 | { |
3284 | struct r8152 *tp = netdev_priv(netdev); | 3321 | struct r8152 *tp = netdev_priv(netdev); |
3322 | int ret; | ||
3285 | 3323 | ||
3286 | if (!tp->mii.mdio_read) | 3324 | if (!tp->mii.mdio_read) |
3287 | return -EOPNOTSUPP; | 3325 | return -EOPNOTSUPP; |
3288 | 3326 | ||
3289 | return mii_ethtool_gset(&tp->mii, cmd); | 3327 | ret = usb_autopm_get_interface(tp->intf); |
3328 | if (ret < 0) | ||
3329 | goto out; | ||
3330 | |||
3331 | mutex_lock(&tp->control); | ||
3332 | |||
3333 | ret = mii_ethtool_gset(&tp->mii, cmd); | ||
3334 | |||
3335 | mutex_unlock(&tp->control); | ||
3336 | |||
3337 | usb_autopm_put_interface(tp->intf); | ||
3338 | |||
3339 | out: | ||
3340 | return ret; | ||
3290 | } | 3341 | } |
3291 | 3342 | ||
3292 | static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 3343 | static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
@@ -3298,8 +3349,12 @@ static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
3298 | if (ret < 0) | 3349 | if (ret < 0) |
3299 | goto out; | 3350 | goto out; |
3300 | 3351 | ||
3352 | mutex_lock(&tp->control); | ||
3353 | |||
3301 | ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex); | 3354 | ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex); |
3302 | 3355 | ||
3356 | mutex_unlock(&tp->control); | ||
3357 | |||
3303 | usb_autopm_put_interface(tp->intf); | 3358 | usb_autopm_put_interface(tp->intf); |
3304 | 3359 | ||
3305 | out: | 3360 | out: |
@@ -3459,8 +3514,12 @@ rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata) | |||
3459 | if (ret < 0) | 3514 | if (ret < 0) |
3460 | goto out; | 3515 | goto out; |
3461 | 3516 | ||
3517 | mutex_lock(&tp->control); | ||
3518 | |||
3462 | ret = tp->rtl_ops.eee_get(tp, edata); | 3519 | ret = tp->rtl_ops.eee_get(tp, edata); |
3463 | 3520 | ||
3521 | mutex_unlock(&tp->control); | ||
3522 | |||
3464 | usb_autopm_put_interface(tp->intf); | 3523 | usb_autopm_put_interface(tp->intf); |
3465 | 3524 | ||
3466 | out: | 3525 | out: |
@@ -3477,10 +3536,14 @@ rtl_ethtool_set_eee(struct net_device *net, struct ethtool_eee *edata) | |||
3477 | if (ret < 0) | 3536 | if (ret < 0) |
3478 | goto out; | 3537 | goto out; |
3479 | 3538 | ||
3539 | mutex_lock(&tp->control); | ||
3540 | |||
3480 | ret = tp->rtl_ops.eee_set(tp, edata); | 3541 | ret = tp->rtl_ops.eee_set(tp, edata); |
3481 | if (!ret) | 3542 | if (!ret) |
3482 | ret = mii_nway_restart(&tp->mii); | 3543 | ret = mii_nway_restart(&tp->mii); |
3483 | 3544 | ||
3545 | mutex_unlock(&tp->control); | ||
3546 | |||
3484 | usb_autopm_put_interface(tp->intf); | 3547 | usb_autopm_put_interface(tp->intf); |
3485 | 3548 | ||
3486 | out: | 3549 | out: |
@@ -3522,7 +3585,9 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) | |||
3522 | break; | 3585 | break; |
3523 | 3586 | ||
3524 | case SIOCGMIIREG: | 3587 | case SIOCGMIIREG: |
3588 | mutex_lock(&tp->control); | ||
3525 | data->val_out = r8152_mdio_read(tp, data->reg_num); | 3589 | data->val_out = r8152_mdio_read(tp, data->reg_num); |
3590 | mutex_unlock(&tp->control); | ||
3526 | break; | 3591 | break; |
3527 | 3592 | ||
3528 | case SIOCSMIIREG: | 3593 | case SIOCSMIIREG: |
@@ -3530,7 +3595,9 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) | |||
3530 | res = -EPERM; | 3595 | res = -EPERM; |
3531 | break; | 3596 | break; |
3532 | } | 3597 | } |
3598 | mutex_lock(&tp->control); | ||
3533 | r8152_mdio_write(tp, data->reg_num, data->val_in); | 3599 | r8152_mdio_write(tp, data->reg_num, data->val_in); |
3600 | mutex_unlock(&tp->control); | ||
3534 | break; | 3601 | break; |
3535 | 3602 | ||
3536 | default: | 3603 | default: |
@@ -3723,6 +3790,7 @@ static int rtl8152_probe(struct usb_interface *intf, | |||
3723 | goto out; | 3790 | goto out; |
3724 | 3791 | ||
3725 | tasklet_init(&tp->tl, bottom_half, (unsigned long)tp); | 3792 | tasklet_init(&tp->tl, bottom_half, (unsigned long)tp); |
3793 | mutex_init(&tp->control); | ||
3726 | INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); | 3794 | INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); |
3727 | 3795 | ||
3728 | netdev->netdev_ops = &rtl8152_netdev_ops; | 3796 | netdev->netdev_ops = &rtl8152_netdev_ops; |
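The r8152 hunks above all follow one pattern: take a runtime-PM reference with usb_autopm_get_interface(), then take the new tp->control mutex (initialised in rtl8152_probe()) around the register or PHY access, and release both in reverse order. Below is a minimal userspace sketch of that ordering only; the helper names are invented stand-ins, not the driver's API.

/* Illustrative model of the locking order the r8152 changes establish:
 * wake the device via runtime PM first, then serialize the hardware
 * access with the driver's control mutex, and release in reverse order.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t control = PTHREAD_MUTEX_INITIALIZER; /* stands in for tp->control */

static int autopm_get(void)  { printf("device resumed\n"); return 0; }
static void autopm_put(void) { printf("device may autosuspend again\n"); }

static int get_settings(void)
{
    int ret = autopm_get();          /* like usb_autopm_get_interface() */
    if (ret < 0)
        return ret;

    pthread_mutex_lock(&control);    /* serialize against suspend/resume/ioctl */
    printf("touch PHY/MAC registers here\n");
    pthread_mutex_unlock(&control);

    autopm_put();                    /* like usb_autopm_put_interface() */
    return 0;
}

int main(void)
{
    return get_settings();
}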
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index bfa0b1518da1..01a7db061c6a 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h | |||
@@ -294,7 +294,6 @@ struct ath_tx_control { | |||
294 | * (axq_qnum). | 294 | * (axq_qnum). |
295 | */ | 295 | */ |
296 | struct ath_tx { | 296 | struct ath_tx { |
297 | u16 seq_no; | ||
298 | u32 txqsetup; | 297 | u32 txqsetup; |
299 | spinlock_t txbuflock; | 298 | spinlock_t txbuflock; |
300 | struct list_head txbuf; | 299 | struct list_head txbuf; |
@@ -563,6 +562,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs); | |||
563 | int ath_txq_update(struct ath_softc *sc, int qnum, | 562 | int ath_txq_update(struct ath_softc *sc, int qnum, |
564 | struct ath9k_tx_queue_info *q); | 563 | struct ath9k_tx_queue_info *q); |
565 | void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop); | 564 | void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop); |
565 | void ath_assign_seq(struct ath_common *common, struct sk_buff *skb); | ||
566 | int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, | 566 | int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, |
567 | struct ath_tx_control *txctl); | 567 | struct ath_tx_control *txctl); |
568 | void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | 568 | void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
@@ -592,6 +592,8 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, | |||
592 | struct ath_vif { | 592 | struct ath_vif { |
593 | struct list_head list; | 593 | struct list_head list; |
594 | 594 | ||
595 | u16 seq_no; | ||
596 | |||
595 | /* BSS info */ | 597 | /* BSS info */ |
596 | u8 bssid[ETH_ALEN]; | 598 | u8 bssid[ETH_ALEN]; |
597 | u16 aid; | 599 | u16 aid; |
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index a6af855ef6ed..ecb783beeec2 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c | |||
@@ -144,16 +144,8 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw, | |||
144 | mgmt_hdr->u.beacon.timestamp = avp->tsf_adjust; | 144 | mgmt_hdr->u.beacon.timestamp = avp->tsf_adjust; |
145 | 145 | ||
146 | info = IEEE80211_SKB_CB(skb); | 146 | info = IEEE80211_SKB_CB(skb); |
147 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { | 147 | |
148 | /* | 148 | ath_assign_seq(common, skb); |
149 | * TODO: make sure the seq# gets assigned properly (vs. other | ||
150 | * TX frames) | ||
151 | */ | ||
152 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
153 | sc->tx.seq_no += 0x10; | ||
154 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); | ||
155 | hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); | ||
156 | } | ||
157 | 149 | ||
158 | if (vif->p2p) | 150 | if (vif->p2p) |
159 | ath9k_beacon_add_noa(sc, avp, skb); | 151 | ath9k_beacon_add_noa(sc, avp, skb); |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index d779f4fa50e3..4014c4be6e79 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c | |||
@@ -464,6 +464,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv, | |||
464 | return -ENOMEM; | 464 | return -ENOMEM; |
465 | 465 | ||
466 | ah->dev = priv->dev; | 466 | ah->dev = priv->dev; |
467 | ah->hw = priv->hw; | ||
467 | ah->hw_version.devid = devid; | 468 | ah->hw_version.devid = devid; |
468 | ah->hw_version.usbdev = drv_info; | 469 | ah->hw_version.usbdev = drv_info; |
469 | ah->ah_flags |= AH_USE_EEPROM; | 470 | ah->ah_flags |= AH_USE_EEPROM; |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 205162449b72..6f6a974f7fdb 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -2332,7 +2332,7 @@ static void ath9k_remove_chanctx(struct ieee80211_hw *hw, | |||
2332 | conf->def.chan->center_freq); | 2332 | conf->def.chan->center_freq); |
2333 | 2333 | ||
2334 | ctx->assigned = false; | 2334 | ctx->assigned = false; |
2335 | ctx->hw_queue_base = -1; | 2335 | ctx->hw_queue_base = 0; |
2336 | ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_UNASSIGN); | 2336 | ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_UNASSIGN); |
2337 | 2337 | ||
2338 | mutex_unlock(&sc->mutex); | 2338 | mutex_unlock(&sc->mutex); |
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c index 8a69d08ec55c..40ab65e6882f 100644 --- a/drivers/net/wireless/ath/ath9k/tx99.c +++ b/drivers/net/wireless/ath/ath9k/tx99.c | |||
@@ -54,6 +54,12 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc) | |||
54 | struct ieee80211_hdr *hdr; | 54 | struct ieee80211_hdr *hdr; |
55 | struct ieee80211_tx_info *tx_info; | 55 | struct ieee80211_tx_info *tx_info; |
56 | struct sk_buff *skb; | 56 | struct sk_buff *skb; |
57 | struct ath_vif *avp; | ||
58 | |||
59 | if (!sc->tx99_vif) | ||
60 | return NULL; | ||
61 | |||
62 | avp = (struct ath_vif *)sc->tx99_vif->drv_priv; | ||
57 | 63 | ||
58 | skb = alloc_skb(len, GFP_KERNEL); | 64 | skb = alloc_skb(len, GFP_KERNEL); |
59 | if (!skb) | 65 | if (!skb) |
@@ -71,7 +77,7 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc) | |||
71 | memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN); | 77 | memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN); |
72 | memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN); | 78 | memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN); |
73 | 79 | ||
74 | hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); | 80 | hdr->seq_ctrl |= cpu_to_le16(avp->seq_no); |
75 | 81 | ||
76 | tx_info = IEEE80211_SKB_CB(skb); | 82 | tx_info = IEEE80211_SKB_CB(skb); |
77 | memset(tx_info, 0, sizeof(*tx_info)); | 83 | memset(tx_info, 0, sizeof(*tx_info)); |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 151ae49fa57e..493a183d0aaf 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -2139,6 +2139,28 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, | |||
2139 | return bf; | 2139 | return bf; |
2140 | } | 2140 | } |
2141 | 2141 | ||
2142 | void ath_assign_seq(struct ath_common *common, struct sk_buff *skb) | ||
2143 | { | ||
2144 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
2145 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
2146 | struct ieee80211_vif *vif = info->control.vif; | ||
2147 | struct ath_vif *avp; | ||
2148 | |||
2149 | if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) | ||
2150 | return; | ||
2151 | |||
2152 | if (!vif) | ||
2153 | return; | ||
2154 | |||
2155 | avp = (struct ath_vif *)vif->drv_priv; | ||
2156 | |||
2157 | if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) | ||
2158 | avp->seq_no += 0x10; | ||
2159 | |||
2160 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); | ||
2161 | hdr->seq_ctrl |= cpu_to_le16(avp->seq_no); | ||
2162 | } | ||
2163 | |||
2142 | static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb, | 2164 | static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb, |
2143 | struct ath_tx_control *txctl) | 2165 | struct ath_tx_control *txctl) |
2144 | { | 2166 | { |
@@ -2162,17 +2184,7 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
2162 | if (info->control.hw_key) | 2184 | if (info->control.hw_key) |
2163 | frmlen += info->control.hw_key->icv_len; | 2185 | frmlen += info->control.hw_key->icv_len; |
2164 | 2186 | ||
2165 | /* | 2187 | ath_assign_seq(ath9k_hw_common(sc->sc_ah), skb); |
2166 | * As a temporary workaround, assign seq# here; this will likely need | ||
2167 | * to be cleaned up to work better with Beacon transmission and virtual | ||
2168 | * BSSes. | ||
2169 | */ | ||
2170 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { | ||
2171 | if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) | ||
2172 | sc->tx.seq_no += 0x10; | ||
2173 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); | ||
2174 | hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); | ||
2175 | } | ||
2176 | 2188 | ||
2177 | if ((vif && vif->type != NL80211_IFTYPE_AP && | 2189 | if ((vif && vif->type != NL80211_IFTYPE_AP && |
2178 | vif->type != NL80211_IFTYPE_AP_VLAN) || | 2190 | vif->type != NL80211_IFTYPE_AP_VLAN) || |
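The xmit.c hunk replaces the per-device sc->tx.seq_no with a per-interface counter in struct ath_vif, shared by ath_tx_prepare(), the beacon path and tx99. The sketch below models only the sequence-control arithmetic under simplified types; fake_vif and assign_seq are invented stand-ins, not ath9k symbols.

/* Minimal userspace sketch of the update ath_assign_seq() performs:
 * seq_ctrl packs a 4-bit fragment number (bits 0-3) and a 12-bit sequence
 * number (bits 4-15), so adding 0x10 advances the sequence by one while
 * leaving the fragment bits alone.  IEEE80211_SCTL_FRAG mirrors the
 * kernel's 0x000F mask.
 */
#include <stdint.h>
#include <stdio.h>

#define IEEE80211_SCTL_FRAG 0x000F

struct fake_vif { uint16_t seq_no; };   /* per-interface counter, like ath_vif */

static uint16_t assign_seq(struct fake_vif *vif, uint16_t seq_ctrl, int first_frag)
{
    if (first_frag)
        vif->seq_no += 0x10;            /* next 12-bit sequence number */

    seq_ctrl &= IEEE80211_SCTL_FRAG;    /* keep the fragment number */
    seq_ctrl |= vif->seq_no;            /* install this vif's sequence */
    return seq_ctrl;
}

int main(void)
{
    struct fake_vif vif = { 0 };

    printf("%#06x\n", assign_seq(&vif, 0, 1));  /* 0x0010: first fragment */
    printf("%#06x\n", assign_seq(&vif, 1, 0));  /* 0x0011: fragment 1, same sequence */
    return 0;
}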
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c index 83f47af19280..338d72337604 100644 --- a/drivers/net/wireless/ath/main.c +++ b/drivers/net/wireless/ath/main.c | |||
@@ -79,13 +79,13 @@ void ath_printk(const char *level, const struct ath_common* common, | |||
79 | vaf.fmt = fmt; | 79 | vaf.fmt = fmt; |
80 | vaf.va = &args; | 80 | vaf.va = &args; |
81 | 81 | ||
82 | if (common && common->hw && common->hw->wiphy) | 82 | if (common && common->hw && common->hw->wiphy) { |
83 | printk("%sath: %s: %pV", | 83 | printk("%sath: %s: %pV", |
84 | level, wiphy_name(common->hw->wiphy), &vaf); | 84 | level, wiphy_name(common->hw->wiphy), &vaf); |
85 | else | 85 | trace_ath_log(common->hw->wiphy, &vaf); |
86 | } else { | ||
86 | printk("%sath: %pV", level, &vaf); | 87 | printk("%sath: %pV", level, &vaf); |
87 | 88 | } | |
88 | trace_ath_log(common->hw->wiphy, &vaf); | ||
89 | 89 | ||
90 | va_end(args); | 90 | va_end(args); |
91 | } | 91 | } |
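The ath_printk() change above moves the tracepoint inside the branch that has already validated common->hw->wiphy, so the no-wiphy fallback path can no longer dereference a NULL pointer. A small userspace model of that guard follows, with placeholder types rather than the real mac80211 structures.

/* Sketch of the NULL-guard pattern: only call helpers that dereference the
 * wiphy once the whole pointer chain has been checked; otherwise fall back
 * to the plain message and skip the tracepoint.
 */
#include <stdio.h>

struct wiphy  { const char *name; };
struct hw     { struct wiphy *wiphy; };
struct common { struct hw *hw; };

static void trace_log(struct wiphy *w, const char *msg)
{
    printf("trace[%s]: %s\n", w->name, msg);    /* would crash if w were NULL */
}

static void log_msg(struct common *common, const char *msg)
{
    if (common && common->hw && common->hw->wiphy) {
        printf("ath: %s: %s\n", common->hw->wiphy->name, msg);
        trace_log(common->hw->wiphy, msg);      /* safe: wiphy checked above */
    } else {
        printf("ath: %s\n", msg);               /* no wiphy, no tracepoint */
    }
}

int main(void)
{
    struct wiphy w = { "phy0" };
    struct hw hw = { &w };
    struct common c = { &hw };

    log_msg(&c, "device up");
    log_msg(NULL, "early init message");
    return 0;
}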
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c index ded967aa6ecb..706b844bce00 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c | |||
@@ -742,35 +742,49 @@ static void rtl8180_int_disable(struct ieee80211_hw *dev) | |||
742 | } | 742 | } |
743 | 743 | ||
744 | static void rtl8180_conf_basic_rates(struct ieee80211_hw *dev, | 744 | static void rtl8180_conf_basic_rates(struct ieee80211_hw *dev, |
745 | u32 rates_mask) | 745 | u32 basic_mask) |
746 | { | 746 | { |
747 | struct rtl8180_priv *priv = dev->priv; | 747 | struct rtl8180_priv *priv = dev->priv; |
748 | |||
749 | u8 max, min; | ||
750 | u16 reg; | 748 | u16 reg; |
751 | 749 | u32 resp_mask; | |
752 | max = fls(rates_mask) - 1; | 750 | u8 basic_max; |
753 | min = ffs(rates_mask) - 1; | 751 | u8 resp_max, resp_min; |
752 | |||
753 | resp_mask = basic_mask; | ||
754 | /* IEEE80211 says the response rate should be equal to the highest basic | ||
755 | * rate that is not faster than received frame. But it says also that if | ||
756 | * the basic rate set does not contains any rate for the current | ||
757 | * modulation class then mandatory rate set must be used for that | ||
758 | * modulation class. Eventually add OFDM mandatory rates.. | ||
759 | */ | ||
760 | if ((resp_mask & 0xf) == resp_mask) | ||
761 | resp_mask |= 0x150; /* 6, 12, 24Mbps */ | ||
754 | 762 | ||
755 | switch (priv->chip_family) { | 763 | switch (priv->chip_family) { |
756 | 764 | ||
757 | case RTL818X_CHIP_FAMILY_RTL8180: | 765 | case RTL818X_CHIP_FAMILY_RTL8180: |
758 | /* in 8180 this is NOT a BITMAP */ | 766 | /* in 8180 this is NOT a BITMAP */ |
767 | basic_max = fls(basic_mask) - 1; | ||
759 | reg = rtl818x_ioread16(priv, &priv->map->BRSR); | 768 | reg = rtl818x_ioread16(priv, &priv->map->BRSR); |
760 | reg &= ~3; | 769 | reg &= ~3; |
761 | reg |= max; | 770 | reg |= basic_max; |
762 | rtl818x_iowrite16(priv, &priv->map->BRSR, reg); | 771 | rtl818x_iowrite16(priv, &priv->map->BRSR, reg); |
763 | break; | 772 | break; |
764 | 773 | ||
765 | case RTL818X_CHIP_FAMILY_RTL8185: | 774 | case RTL818X_CHIP_FAMILY_RTL8185: |
775 | resp_max = fls(resp_mask) - 1; | ||
776 | resp_min = ffs(resp_mask) - 1; | ||
766 | /* in 8185 this is a BITMAP */ | 777 | /* in 8185 this is a BITMAP */ |
767 | rtl818x_iowrite16(priv, &priv->map->BRSR, rates_mask); | 778 | rtl818x_iowrite16(priv, &priv->map->BRSR, basic_mask); |
768 | rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (max << 4) | min); | 779 | rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (resp_max << 4) | |
780 | resp_min); | ||
769 | break; | 781 | break; |
770 | 782 | ||
771 | case RTL818X_CHIP_FAMILY_RTL8187SE: | 783 | case RTL818X_CHIP_FAMILY_RTL8187SE: |
772 | /* in 8187se this is a BITMAP */ | 784 | /* in 8187se this is a BITMAP. BRSR reg actually sets |
773 | rtl818x_iowrite16(priv, &priv->map->BRSR_8187SE, rates_mask); | 785 | * response rates. |
786 | */ | ||
787 | rtl818x_iowrite16(priv, &priv->map->BRSR_8187SE, resp_mask); | ||
774 | break; | 788 | break; |
775 | } | 789 | } |
776 | } | 790 | } |
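The rtl8180 hunk derives a separate response-rate mask from the basic-rate mask: if the basic set holds only CCK rates (bits 0-3), the OFDM mandatory rates 6/12/24 Mbps are added so OFDM frames can still be answered. A hedged userspace sketch of just that mask computation, assuming the usual 2.4 GHz legacy rate bit positions:

/* Sketch of the response-mask derivation in rtl8180_conf_basic_rates():
 * bits 0-3 are the CCK rates, so if nothing outside them is set, the OFDM
 * mandatory rates 6/12/24 Mbps (bits 4, 6 and 8 -> mask 0x150) are added.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t response_mask(uint32_t basic_mask)
{
    uint32_t resp_mask = basic_mask;

    if ((resp_mask & 0xf) == resp_mask)     /* only CCK rates present */
        resp_mask |= 0x150;                 /* add 6, 12, 24 Mbps */

    return resp_mask;
}

int main(void)
{
    printf("%#x\n", response_mask(0x003));  /* CCK only -> 0x153 */
    printf("%#x\n", response_mask(0x1f3));  /* already has OFDM -> unchanged 0x1f3 */
    return 0;
}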
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index 976667ae8549..6866dcf24340 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h | |||
@@ -1370,7 +1370,7 @@ struct rtl_mac { | |||
1370 | bool rdg_en; | 1370 | bool rdg_en; |
1371 | 1371 | ||
1372 | /*AP*/ | 1372 | /*AP*/ |
1373 | u8 bssid[6]; | 1373 | u8 bssid[ETH_ALEN] __aligned(2); |
1374 | u32 vendor; | 1374 | u32 vendor; |
1375 | u8 mcs[16]; /* 16 bytes mcs for HT rates. */ | 1375 | u8 mcs[16]; /* 16 bytes mcs for HT rates. */ |
1376 | u32 basic_rates; /* b/g rates */ | 1376 | u32 basic_rates; /* b/g rates */ |
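The wifi.h change turns bssid into an ETH_ALEN array with an explicit 2-byte alignment, which the kernel's word-wise MAC helpers (ether_addr_copy()/ether_addr_equal()) require. The illustration below only shows how the attribute affects the member's offset; the struct names are invented.

/* Without the attribute, a u8 MAC array that follows an odd-sized member
 * can land on an odd offset, which word-wise MAC helpers do not allow.
 */
#include <stddef.h>
#include <stdio.h>

struct unaligned_mac {
    unsigned char flag;
    unsigned char bssid[6];                              /* may start at offset 1 */
};

struct aligned_mac {
    unsigned char flag;
    unsigned char bssid[6] __attribute__((aligned(2)));  /* always an even offset */
};

int main(void)
{
    printf("unaligned bssid offset: %zu\n", offsetof(struct unaligned_mac, bssid));
    printf("aligned   bssid offset: %zu\n", offsetof(struct aligned_mac, bssid));
    return 0;
}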
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 2e5b194b9b19..53d33dee70e1 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h | |||
@@ -37,6 +37,7 @@ | |||
37 | 37 | ||
38 | /* struct phy_device dev_flags definitions */ | 38 | /* struct phy_device dev_flags definitions */ |
39 | #define MICREL_PHY_50MHZ_CLK 0x00000001 | 39 | #define MICREL_PHY_50MHZ_CLK 0x00000001 |
40 | #define MICREL_PHY_25MHZ_CLK 0x00000002 | ||
40 | 41 | ||
41 | #define MICREL_KSZ9021_EXTREG_CTRL 0xB | 42 | #define MICREL_KSZ9021_EXTREG_CTRL 0xB |
42 | #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC | 43 | #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC |
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h index 7a10cfcd8e33..48e18810a9be 100644 --- a/include/net/netfilter/ipv6/nf_reject.h +++ b/include/net/netfilter/ipv6/nf_reject.h | |||
@@ -1,11 +1,7 @@ | |||
1 | #ifndef _IPV6_NF_REJECT_H | 1 | #ifndef _IPV6_NF_REJECT_H |
2 | #define _IPV6_NF_REJECT_H | 2 | #define _IPV6_NF_REJECT_H |
3 | 3 | ||
4 | #include <net/ipv6.h> | 4 | #include <linux/icmpv6.h> |
5 | #include <net/ip6_route.h> | ||
6 | #include <net/ip6_fib.h> | ||
7 | #include <net/ip6_checksum.h> | ||
8 | #include <linux/netfilter_ipv6.h> | ||
9 | 5 | ||
10 | static inline void | 6 | static inline void |
11 | nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, | 7 | nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, |
@@ -17,155 +13,6 @@ nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, | |||
17 | icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); | 13 | icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); |
18 | } | 14 | } |
19 | 15 | ||
20 | /* Send RST reply */ | 16 | void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook); |
21 | static void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) | ||
22 | { | ||
23 | struct sk_buff *nskb; | ||
24 | struct tcphdr otcph, *tcph; | ||
25 | unsigned int otcplen, hh_len; | ||
26 | int tcphoff, needs_ack; | ||
27 | const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); | ||
28 | struct ipv6hdr *ip6h; | ||
29 | #define DEFAULT_TOS_VALUE 0x0U | ||
30 | const __u8 tclass = DEFAULT_TOS_VALUE; | ||
31 | struct dst_entry *dst = NULL; | ||
32 | u8 proto; | ||
33 | __be16 frag_off; | ||
34 | struct flowi6 fl6; | ||
35 | |||
36 | if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || | ||
37 | (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { | ||
38 | pr_debug("addr is not unicast.\n"); | ||
39 | return; | ||
40 | } | ||
41 | |||
42 | proto = oip6h->nexthdr; | ||
43 | tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off); | ||
44 | |||
45 | if ((tcphoff < 0) || (tcphoff > oldskb->len)) { | ||
46 | pr_debug("Cannot get TCP header.\n"); | ||
47 | return; | ||
48 | } | ||
49 | |||
50 | otcplen = oldskb->len - tcphoff; | ||
51 | |||
52 | /* IP header checks: fragment, too short. */ | ||
53 | if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) { | ||
54 | pr_debug("proto(%d) != IPPROTO_TCP, " | ||
55 | "or too short. otcplen = %d\n", | ||
56 | proto, otcplen); | ||
57 | return; | ||
58 | } | ||
59 | |||
60 | if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr))) | ||
61 | BUG(); | ||
62 | |||
63 | /* No RST for RST. */ | ||
64 | if (otcph.rst) { | ||
65 | pr_debug("RST is set\n"); | ||
66 | return; | ||
67 | } | ||
68 | |||
69 | /* Check checksum. */ | ||
70 | if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) { | ||
71 | pr_debug("TCP checksum is invalid\n"); | ||
72 | return; | ||
73 | } | ||
74 | |||
75 | memset(&fl6, 0, sizeof(fl6)); | ||
76 | fl6.flowi6_proto = IPPROTO_TCP; | ||
77 | fl6.saddr = oip6h->daddr; | ||
78 | fl6.daddr = oip6h->saddr; | ||
79 | fl6.fl6_sport = otcph.dest; | ||
80 | fl6.fl6_dport = otcph.source; | ||
81 | security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); | ||
82 | dst = ip6_route_output(net, NULL, &fl6); | ||
83 | if (dst == NULL || dst->error) { | ||
84 | dst_release(dst); | ||
85 | return; | ||
86 | } | ||
87 | dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); | ||
88 | if (IS_ERR(dst)) | ||
89 | return; | ||
90 | |||
91 | hh_len = (dst->dev->hard_header_len + 15)&~15; | ||
92 | nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) | ||
93 | + sizeof(struct tcphdr) + dst->trailer_len, | ||
94 | GFP_ATOMIC); | ||
95 | |||
96 | if (!nskb) { | ||
97 | net_dbg_ratelimited("cannot alloc skb\n"); | ||
98 | dst_release(dst); | ||
99 | return; | ||
100 | } | ||
101 | |||
102 | skb_dst_set(nskb, dst); | ||
103 | |||
104 | skb_reserve(nskb, hh_len + dst->header_len); | ||
105 | |||
106 | skb_put(nskb, sizeof(struct ipv6hdr)); | ||
107 | skb_reset_network_header(nskb); | ||
108 | ip6h = ipv6_hdr(nskb); | ||
109 | ip6_flow_hdr(ip6h, tclass, 0); | ||
110 | ip6h->hop_limit = ip6_dst_hoplimit(dst); | ||
111 | ip6h->nexthdr = IPPROTO_TCP; | ||
112 | ip6h->saddr = oip6h->daddr; | ||
113 | ip6h->daddr = oip6h->saddr; | ||
114 | |||
115 | skb_reset_transport_header(nskb); | ||
116 | tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); | ||
117 | /* Truncate to length (no data) */ | ||
118 | tcph->doff = sizeof(struct tcphdr)/4; | ||
119 | tcph->source = otcph.dest; | ||
120 | tcph->dest = otcph.source; | ||
121 | |||
122 | if (otcph.ack) { | ||
123 | needs_ack = 0; | ||
124 | tcph->seq = otcph.ack_seq; | ||
125 | tcph->ack_seq = 0; | ||
126 | } else { | ||
127 | needs_ack = 1; | ||
128 | tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin | ||
129 | + otcplen - (otcph.doff<<2)); | ||
130 | tcph->seq = 0; | ||
131 | } | ||
132 | |||
133 | /* Reset flags */ | ||
134 | ((u_int8_t *)tcph)[13] = 0; | ||
135 | tcph->rst = 1; | ||
136 | tcph->ack = needs_ack; | ||
137 | tcph->window = 0; | ||
138 | tcph->urg_ptr = 0; | ||
139 | tcph->check = 0; | ||
140 | |||
141 | /* Adjust TCP checksum */ | ||
142 | tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr, | ||
143 | &ipv6_hdr(nskb)->daddr, | ||
144 | sizeof(struct tcphdr), IPPROTO_TCP, | ||
145 | csum_partial(tcph, | ||
146 | sizeof(struct tcphdr), 0)); | ||
147 | |||
148 | nf_ct_attach(nskb, oldskb); | ||
149 | |||
150 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) | ||
151 | /* If we use ip6_local_out for bridged traffic, the MAC source on | ||
152 | * the RST will be ours, instead of the destination's. This confuses | ||
153 | * some routers/firewalls, and they drop the packet. So we need to | ||
154 | * build the eth header using the original destination's MAC as the | ||
155 | * source, and send the RST packet directly. | ||
156 | */ | ||
157 | if (oldskb->nf_bridge) { | ||
158 | struct ethhdr *oeth = eth_hdr(oldskb); | ||
159 | nskb->dev = oldskb->nf_bridge->physindev; | ||
160 | nskb->protocol = htons(ETH_P_IPV6); | ||
161 | ip6h->payload_len = htons(sizeof(struct tcphdr)); | ||
162 | if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol), | ||
163 | oeth->h_source, oeth->h_dest, nskb->len) < 0) | ||
164 | return; | ||
165 | dev_queue_xmit(nskb); | ||
166 | } else | ||
167 | #endif | ||
168 | ip6_local_out(nskb); | ||
169 | } | ||
170 | 17 | ||
171 | #endif /* _IPV6_NF_REJECT_H */ | 18 | #endif /* _IPV6_NF_REJECT_H */ |
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index c26df6787fb0..f31fe7b660a5 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h | |||
@@ -774,7 +774,7 @@ enum nft_reject_inet_code { | |||
774 | NFT_REJECT_ICMPX_ADMIN_PROHIBITED, | 774 | NFT_REJECT_ICMPX_ADMIN_PROHIBITED, |
775 | __NFT_REJECT_ICMPX_MAX | 775 | __NFT_REJECT_ICMPX_MAX |
776 | }; | 776 | }; |
777 | #define NFT_REJECT_ICMPX_MAX (__NFT_REJECT_ICMPX_MAX + 1) | 777 | #define NFT_REJECT_ICMPX_MAX (__NFT_REJECT_ICMPX_MAX - 1) |
778 | 778 | ||
779 | /** | 779 | /** |
780 | * enum nft_reject_attributes - nf_tables reject expression netlink attributes | 780 | * enum nft_reject_attributes - nf_tables reject expression netlink attributes |
diff --git a/net/Kconfig b/net/Kconfig index d6b138e2c263..6272420a721b 100644 --- a/net/Kconfig +++ b/net/Kconfig | |||
@@ -6,6 +6,7 @@ menuconfig NET | |||
6 | bool "Networking support" | 6 | bool "Networking support" |
7 | select NLATTR | 7 | select NLATTR |
8 | select GENERIC_NET_UTILS | 8 | select GENERIC_NET_UTILS |
9 | select ANON_INODES | ||
9 | ---help--- | 10 | ---help--- |
10 | Unless you really know what you are doing, you should say Y here. | 11 | Unless you really know what you are doing, you should say Y here. |
11 | The reason is that some programs need kernel networking support even | 12 | The reason is that some programs need kernel networking support even |
diff --git a/net/core/filter.c b/net/core/filter.c index fcd3f6742a6a..647b12265e18 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -51,9 +51,9 @@ | |||
51 | * @skb: buffer to filter | 51 | * @skb: buffer to filter |
52 | * | 52 | * |
53 | * Run the filter code and then cut skb->data to correct size returned by | 53 | * Run the filter code and then cut skb->data to correct size returned by |
54 | * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller | 54 | * SK_RUN_FILTER. If pkt_len is 0 we toss packet. If skb->len is smaller |
55 | * than pkt_len we keep whole skb->data. This is the socket level | 55 | * than pkt_len we keep whole skb->data. This is the socket level |
56 | * wrapper to sk_run_filter. It returns 0 if the packet should | 56 | * wrapper to SK_RUN_FILTER. It returns 0 if the packet should |
57 | * be accepted or -EPERM if the packet should be tossed. | 57 | * be accepted or -EPERM if the packet should be tossed. |
58 | * | 58 | * |
59 | */ | 59 | */ |
@@ -566,11 +566,8 @@ err: | |||
566 | 566 | ||
567 | /* Security: | 567 | /* Security: |
568 | * | 568 | * |
569 | * A BPF program is able to use 16 cells of memory to store intermediate | ||
570 | * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()). | ||
571 | * | ||
572 | * As we don't want to clear mem[] array for each packet going through | 569 | * As we don't want to clear mem[] array for each packet going through |
573 | * sk_run_filter(), we check that the filter loaded by user never tries to read | 570 | * __bpf_prog_run(), we check that the filter loaded by user never tries to read |
574 | * a cell if not previously written, and we check all branches to be sure | 571 | * a cell if not previously written, and we check all branches to be sure |
575 | * a malicious user doesn't try to abuse us. | 572 | * a malicious user doesn't try to abuse us. |
576 | */ | 573 | */ |
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 8560dea58803..45084938c403 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
@@ -100,6 +100,13 @@ ip: | |||
100 | if (ip_is_fragment(iph)) | 100 | if (ip_is_fragment(iph)) |
101 | ip_proto = 0; | 101 | ip_proto = 0; |
102 | 102 | ||
103 | /* skip the address processing if skb is NULL. The assumption | ||
104 | * here is that if there is no skb we are not looking for flow | ||
105 | * info but lengths and protocols. | ||
106 | */ | ||
107 | if (!skb) | ||
108 | break; | ||
109 | |||
103 | iph_to_flow_copy_addrs(flow, iph); | 110 | iph_to_flow_copy_addrs(flow, iph); |
104 | break; | 111 | break; |
105 | } | 112 | } |
@@ -114,17 +121,15 @@ ipv6: | |||
114 | return false; | 121 | return false; |
115 | 122 | ||
116 | ip_proto = iph->nexthdr; | 123 | ip_proto = iph->nexthdr; |
117 | flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr); | ||
118 | flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr); | ||
119 | nhoff += sizeof(struct ipv6hdr); | 124 | nhoff += sizeof(struct ipv6hdr); |
120 | 125 | ||
121 | /* skip the flow label processing if skb is NULL. The | 126 | /* see comment above in IPv4 section */ |
122 | * assumption here is that if there is no skb we are not | ||
123 | * looking for flow info as much as we are length. | ||
124 | */ | ||
125 | if (!skb) | 127 | if (!skb) |
126 | break; | 128 | break; |
127 | 129 | ||
130 | flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr); | ||
131 | flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr); | ||
132 | |||
128 | flow_label = ip6_flowlabel(iph); | 133 | flow_label = ip6_flowlabel(iph); |
129 | if (flow_label) { | 134 | if (flow_label) { |
130 | /* Awesome, IPv6 packet has a flow label so we can | 135 | /* Awesome, IPv6 packet has a flow label so we can |
@@ -231,9 +236,13 @@ ipv6: | |||
231 | 236 | ||
232 | flow->n_proto = proto; | 237 | flow->n_proto = proto; |
233 | flow->ip_proto = ip_proto; | 238 | flow->ip_proto = ip_proto; |
234 | flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto, data, hlen); | ||
235 | flow->thoff = (u16) nhoff; | 239 | flow->thoff = (u16) nhoff; |
236 | 240 | ||
241 | /* unless skb is set we don't need to record port info */ | ||
242 | if (skb) | ||
243 | flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto, | ||
244 | data, hlen); | ||
245 | |||
237 | return true; | 246 | return true; |
238 | } | 247 | } |
239 | EXPORT_SYMBOL(__skb_flow_dissect); | 248 | EXPORT_SYMBOL(__skb_flow_dissect); |
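The __skb_flow_dissect() hunks extend an existing convention: a NULL skb means the caller only wants lengths and protocols, so address (and, below, port) extraction is skipped. A simplified userspace model of that early exit for an IPv4 header; the types and helper are stand-ins, not the kernel API.

/* When only raw header data is available (no skb), report protocol and
 * transport-header offset but skip copying the addresses.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct flow_keys {
    uint32_t src, dst;
    uint16_t thoff;
    uint8_t  ip_proto;
};

/* 20-byte IPv4 header: protocol at offset 9, saddr at 12, daddr at 16 */
static bool dissect_ipv4(const uint8_t *data, bool have_skb, struct flow_keys *flow)
{
    flow->ip_proto = data[9];
    flow->thoff    = (data[0] & 0x0f) * 4;  /* IHL counts 32-bit words */

    if (!have_skb)                          /* lengths/protocols only */
        return true;

    memcpy(&flow->src, data + 12, 4);       /* flow info needs the addresses */
    memcpy(&flow->dst, data + 16, 4);
    return true;
}

int main(void)
{
    uint8_t hdr[20] = { 0x45, 0, 0, 40, 0, 0, 0, 0, 64, 6,
                        0, 0, 10, 0, 0, 1, 10, 0, 0, 2 };
    struct flow_keys k = { 0 };

    dissect_ipv4(hdr, false, &k);
    printf("proto %u, thoff %u, src copied: %s\n",
           k.ip_proto, k.thoff, k.src ? "yes" : "no");
    return 0;
}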
@@ -334,15 +343,16 @@ u32 __skb_get_poff(const struct sk_buff *skb, void *data, | |||
334 | 343 | ||
335 | switch (keys->ip_proto) { | 344 | switch (keys->ip_proto) { |
336 | case IPPROTO_TCP: { | 345 | case IPPROTO_TCP: { |
337 | const struct tcphdr *tcph; | 346 | /* access doff as u8 to avoid unaligned access */ |
338 | struct tcphdr _tcph; | 347 | const u8 *doff; |
348 | u8 _doff; | ||
339 | 349 | ||
340 | tcph = __skb_header_pointer(skb, poff, sizeof(_tcph), | 350 | doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff), |
341 | data, hlen, &_tcph); | 351 | data, hlen, &_doff); |
342 | if (!tcph) | 352 | if (!doff) |
343 | return poff; | 353 | return poff; |
344 | 354 | ||
345 | poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4); | 355 | poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2); |
346 | break; | 356 | break; |
347 | } | 357 | } |
348 | case IPPROTO_UDP: | 358 | case IPPROTO_UDP: |
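The __skb_get_poff() fix avoids mapping a whole struct tcphdr, which may sit at an unaligned offset, and instead reads the single byte at offset 12 that carries the 4-bit data offset. The shift below reproduces that arithmetic: (doff_byte & 0xF0) >> 2 is the header length in bytes, clamped to the 20-byte minimum the kernel's max_t() enforces.

/* Unaligned-safe TCP header-length computation from the doff byte. */
#include <stdint.h>
#include <stdio.h>

static unsigned int tcp_hdrlen_from_doff_byte(uint8_t doff_byte)
{
    unsigned int len = (doff_byte & 0xF0) >> 2;  /* doff words -> bytes */

    return len < 20 ? 20 : len;                  /* never below the base header */
}

int main(void)
{
    printf("%u\n", tcp_hdrlen_from_doff_byte(0x50));  /* doff 5  -> 20 bytes */
    printf("%u\n", tcp_hdrlen_from_doff_byte(0xA0));  /* doff 10 -> 40 bytes */
    return 0;
}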
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 7b3df0d518ab..829d013745ab 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -360,18 +360,29 @@ refill: | |||
360 | goto end; | 360 | goto end; |
361 | } | 361 | } |
362 | nc->frag.size = PAGE_SIZE << order; | 362 | nc->frag.size = PAGE_SIZE << order; |
363 | recycle: | 363 | /* Even if we own the page, we do not use atomic_set(). |
364 | atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS); | 364 | * This would break get_page_unless_zero() users. |
365 | */ | ||
366 | atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1, | ||
367 | &nc->frag.page->_count); | ||
365 | nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS; | 368 | nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS; |
366 | nc->frag.offset = 0; | 369 | nc->frag.offset = 0; |
367 | } | 370 | } |
368 | 371 | ||
369 | if (nc->frag.offset + fragsz > nc->frag.size) { | 372 | if (nc->frag.offset + fragsz > nc->frag.size) { |
370 | /* avoid unnecessary locked operations if possible */ | 373 | if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) { |
371 | if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) || | 374 | if (!atomic_sub_and_test(nc->pagecnt_bias, |
372 | atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count)) | 375 | &nc->frag.page->_count)) |
373 | goto recycle; | 376 | goto refill; |
374 | goto refill; | 377 | /* OK, page count is 0, we can safely set it */ |
378 | atomic_set(&nc->frag.page->_count, | ||
379 | NETDEV_PAGECNT_MAX_BIAS); | ||
380 | } else { | ||
381 | atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias, | ||
382 | &nc->frag.page->_count); | ||
383 | } | ||
384 | nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS; | ||
385 | nc->frag.offset = 0; | ||
375 | } | 386 | } |
376 | 387 | ||
377 | data = page_address(nc->frag.page) + nc->frag.offset; | 388 | data = page_address(nc->frag.page) + nc->frag.offset; |
@@ -4126,11 +4137,11 @@ EXPORT_SYMBOL(skb_vlan_untag); | |||
4126 | /** | 4137 | /** |
4127 | * alloc_skb_with_frags - allocate skb with page frags | 4138 | * alloc_skb_with_frags - allocate skb with page frags |
4128 | * | 4139 | * |
4129 | * header_len: size of linear part | 4140 | * @header_len: size of linear part |
4130 | * data_len: needed length in frags | 4141 | * @data_len: needed length in frags |
4131 | * max_page_order: max page order desired. | 4142 | * @max_page_order: max page order desired. |
4132 | * errcode: pointer to error code if any | 4143 | * @errcode: pointer to error code if any |
4133 | * gfp_mask: allocation mask | 4144 | * @gfp_mask: allocation mask |
4134 | * | 4145 | * |
4135 | * This can be used to allocate a paged skb, given a maximal order for frags. | 4146 | * This can be used to allocate a paged skb, given a maximal order for frags. |
4136 | */ | 4147 | */ |
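The skbuff change replaces atomic_set() on page->_count with additions, because a concurrent get_page_unless_zero() could otherwise be wiped out; atomic_set() is only used once sub_and_test() has shown the count really reached zero. A userspace sketch of the bias bookkeeping with C11 atomics, using a small made-up BIAS:

/* A freshly allocated page starts with _count == 1, so adding BIAS - 1
 * brings it to BIAS without a racy set.  On recycle, a plain set is only
 * allowed once sub_and_test() has proven the count hit zero.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BIAS 4                          /* stands in for NETDEV_PAGECNT_MAX_BIAS */

static atomic_int page_count = 1;       /* new page: refcount 1 */
static int pagecnt_bias;

static void refill(void)
{
    atomic_fetch_add(&page_count, BIAS - 1);   /* 1 + (BIAS - 1) == BIAS */
    pagecnt_bias = BIAS;
}

static bool try_recycle(void)
{
    if (atomic_load(&page_count) != pagecnt_bias) {
        /* outside references exist: drop only our bias */
        if (atomic_fetch_sub(&page_count, pagecnt_bias) != pagecnt_bias)
            return false;                       /* still in use, allocate a new page */
        atomic_store(&page_count, BIAS);        /* count hit 0, safe to set */
    } else {
        /* sole owner: top the count back up without a set */
        atomic_fetch_add(&page_count, BIAS - pagecnt_bias);
    }
    pagecnt_bias = BIAS;
    return true;
}

int main(void)
{
    refill();
    printf("after refill: count=%d bias=%d\n", atomic_load(&page_count), pagecnt_bias);

    /* pretend two fragments were handed out and already freed by their users */
    pagecnt_bias -= 2;
    atomic_fetch_sub(&page_count, 2);

    printf("recycle ok: %s, count=%d\n", try_recycle() ? "yes" : "no",
           atomic_load(&page_count));
    return 0;
}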
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c index ec8a456092a7..57d3e1af5630 100644 --- a/net/netfilter/nft_reject.c +++ b/net/netfilter/nft_reject.c | |||
@@ -72,7 +72,7 @@ nla_put_failure: | |||
72 | } | 72 | } |
73 | EXPORT_SYMBOL_GPL(nft_reject_dump); | 73 | EXPORT_SYMBOL_GPL(nft_reject_dump); |
74 | 74 | ||
75 | static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX] = { | 75 | static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX + 1] = { |
76 | [NFT_REJECT_ICMPX_NO_ROUTE] = ICMP_NET_UNREACH, | 76 | [NFT_REJECT_ICMPX_NO_ROUTE] = ICMP_NET_UNREACH, |
77 | [NFT_REJECT_ICMPX_PORT_UNREACH] = ICMP_PORT_UNREACH, | 77 | [NFT_REJECT_ICMPX_PORT_UNREACH] = ICMP_PORT_UNREACH, |
78 | [NFT_REJECT_ICMPX_HOST_UNREACH] = ICMP_HOST_UNREACH, | 78 | [NFT_REJECT_ICMPX_HOST_UNREACH] = ICMP_HOST_UNREACH, |
@@ -81,8 +81,7 @@ static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX] = { | |||
81 | 81 | ||
82 | int nft_reject_icmp_code(u8 code) | 82 | int nft_reject_icmp_code(u8 code) |
83 | { | 83 | { |
84 | if (code > NFT_REJECT_ICMPX_MAX) | 84 | BUG_ON(code > NFT_REJECT_ICMPX_MAX); |
85 | return -EINVAL; | ||
86 | 85 | ||
87 | return icmp_code_v4[code]; | 86 | return icmp_code_v4[code]; |
88 | } | 87 | } |
@@ -90,7 +89,7 @@ int nft_reject_icmp_code(u8 code) | |||
90 | EXPORT_SYMBOL_GPL(nft_reject_icmp_code); | 89 | EXPORT_SYMBOL_GPL(nft_reject_icmp_code); |
91 | 90 | ||
92 | 91 | ||
93 | static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX] = { | 92 | static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX + 1] = { |
94 | [NFT_REJECT_ICMPX_NO_ROUTE] = ICMPV6_NOROUTE, | 93 | [NFT_REJECT_ICMPX_NO_ROUTE] = ICMPV6_NOROUTE, |
95 | [NFT_REJECT_ICMPX_PORT_UNREACH] = ICMPV6_PORT_UNREACH, | 94 | [NFT_REJECT_ICMPX_PORT_UNREACH] = ICMPV6_PORT_UNREACH, |
96 | [NFT_REJECT_ICMPX_HOST_UNREACH] = ICMPV6_ADDR_UNREACH, | 95 | [NFT_REJECT_ICMPX_HOST_UNREACH] = ICMPV6_ADDR_UNREACH, |
@@ -99,8 +98,7 @@ static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX] = { | |||
99 | 98 | ||
100 | int nft_reject_icmpv6_code(u8 code) | 99 | int nft_reject_icmpv6_code(u8 code) |
101 | { | 100 | { |
102 | if (code > NFT_REJECT_ICMPX_MAX) | 101 | BUG_ON(code > NFT_REJECT_ICMPX_MAX); |
103 | return -EINVAL; | ||
104 | 102 | ||
105 | return icmp_code_v6[code]; | 103 | return icmp_code_v6[code]; |
106 | } | 104 | } |
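Together with the uapi fix above (NFT_REJECT_ICMPX_MAX is now __NFT_REJECT_ICMPX_MAX - 1), the tables here are sized MAX + 1 and out-of-range codes become a BUG_ON() instead of -EINVAL. A compact sketch of the sentinel-enum sizing idiom, using standard ICMP unreachable codes as illustrative values:

/* The __..._MAX enumerator is the element count, the public MAX is the
 * largest valid value (count - 1), and tables indexed by the enum are
 * declared with MAX + 1 entries so every valid code has a slot.
 */
#include <stdio.h>

enum reject_code {
    CODE_NO_ROUTE,
    CODE_PORT_UNREACH,
    CODE_HOST_UNREACH,
    CODE_ADMIN_PROHIBITED,
    __CODE_MAX                      /* sentinel: number of codes */
};
#define CODE_MAX (__CODE_MAX - 1)   /* largest valid code */

static const unsigned char icmp_code[CODE_MAX + 1] = {
    [CODE_NO_ROUTE]         = 0,    /* net unreachable */
    [CODE_PORT_UNREACH]     = 3,    /* port unreachable */
    [CODE_HOST_UNREACH]     = 1,    /* host unreachable */
    [CODE_ADMIN_PROHIBITED] = 13,   /* administratively prohibited */
};

int main(void)
{
    /* every valid code has a table entry; code > CODE_MAX is rejected earlier */
    for (int code = 0; code <= CODE_MAX; code++)
        printf("code %d -> ICMP %d\n", code, icmp_code[code]);
    return 0;
}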
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index 0b4692dd1c5e..a845cd4cf21e 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c | |||
@@ -246,7 +246,6 @@ int netlbl_cfg_unlbl_static_add(struct net *net, | |||
246 | * @addr: IP address in network byte order (struct in[6]_addr) | 246 | * @addr: IP address in network byte order (struct in[6]_addr) |
247 | * @mask: address mask in network byte order (struct in[6]_addr) | 247 | * @mask: address mask in network byte order (struct in[6]_addr) |
248 | * @family: address family | 248 | * @family: address family |
249 | * @secid: LSM secid value for the entry | ||
250 | * @audit_info: NetLabel audit information | 249 | * @audit_info: NetLabel audit information |
251 | * | 250 | * |
252 | * Description: | 251 | * Description: |
diff --git a/net/rfkill/core.c b/net/rfkill/core.c index b3b16c070a7f..fa7cd792791c 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c | |||
@@ -329,7 +329,7 @@ static atomic_t rfkill_input_disabled = ATOMIC_INIT(0); | |||
329 | /** | 329 | /** |
330 | * __rfkill_switch_all - Toggle state of all switches of given type | 330 | * __rfkill_switch_all - Toggle state of all switches of given type |
331 | * @type: type of interfaces to be affected | 331 | * @type: type of interfaces to be affected |
332 | * @state: the new state | 332 | * @blocked: the new state |
333 | * | 333 | * |
334 | * This function sets the state of all switches of given type, | 334 | * This function sets the state of all switches of given type, |
335 | * unless a specific switch is claimed by userspace (in which case, | 335 | * unless a specific switch is claimed by userspace (in which case, |
@@ -353,7 +353,7 @@ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked) | |||
353 | /** | 353 | /** |
354 | * rfkill_switch_all - Toggle state of all switches of given type | 354 | * rfkill_switch_all - Toggle state of all switches of given type |
355 | * @type: type of interfaces to be affected | 355 | * @type: type of interfaces to be affected |
356 | * @state: the new state | 356 | * @blocked: the new state |
357 | * | 357 | * |
358 | * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state). | 358 | * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state). |
359 | * Please refer to __rfkill_switch_all() for details. | 359 | * Please refer to __rfkill_switch_all() for details. |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 38d58e6cef07..6efca30894aa 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -57,7 +57,8 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) | |||
57 | 57 | ||
58 | static void try_bulk_dequeue_skb(struct Qdisc *q, | 58 | static void try_bulk_dequeue_skb(struct Qdisc *q, |
59 | struct sk_buff *skb, | 59 | struct sk_buff *skb, |
60 | const struct netdev_queue *txq) | 60 | const struct netdev_queue *txq, |
61 | int *packets) | ||
61 | { | 62 | { |
62 | int bytelimit = qdisc_avail_bulklimit(txq) - skb->len; | 63 | int bytelimit = qdisc_avail_bulklimit(txq) - skb->len; |
63 | 64 | ||
@@ -70,6 +71,7 @@ static void try_bulk_dequeue_skb(struct Qdisc *q, | |||
70 | bytelimit -= nskb->len; /* covers GSO len */ | 71 | bytelimit -= nskb->len; /* covers GSO len */ |
71 | skb->next = nskb; | 72 | skb->next = nskb; |
72 | skb = nskb; | 73 | skb = nskb; |
74 | (*packets)++; /* GSO counts as one pkt */ | ||
73 | } | 75 | } |
74 | skb->next = NULL; | 76 | skb->next = NULL; |
75 | } | 77 | } |
@@ -77,11 +79,13 @@ static void try_bulk_dequeue_skb(struct Qdisc *q, | |||
77 | /* Note that dequeue_skb can possibly return a SKB list (via skb->next). | 79 | /* Note that dequeue_skb can possibly return a SKB list (via skb->next). |
78 | * A requeued skb (via q->gso_skb) can also be a SKB list. | 80 | * A requeued skb (via q->gso_skb) can also be a SKB list. |
79 | */ | 81 | */ |
80 | static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate) | 82 | static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, |
83 | int *packets) | ||
81 | { | 84 | { |
82 | struct sk_buff *skb = q->gso_skb; | 85 | struct sk_buff *skb = q->gso_skb; |
83 | const struct netdev_queue *txq = q->dev_queue; | 86 | const struct netdev_queue *txq = q->dev_queue; |
84 | 87 | ||
88 | *packets = 1; | ||
85 | *validate = true; | 89 | *validate = true; |
86 | if (unlikely(skb)) { | 90 | if (unlikely(skb)) { |
87 | /* check the reason of requeuing without tx lock first */ | 91 | /* check the reason of requeuing without tx lock first */ |
@@ -98,7 +102,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate) | |||
98 | !netif_xmit_frozen_or_stopped(txq)) { | 102 | !netif_xmit_frozen_or_stopped(txq)) { |
99 | skb = q->dequeue(q); | 103 | skb = q->dequeue(q); |
100 | if (skb && qdisc_may_bulk(q)) | 104 | if (skb && qdisc_may_bulk(q)) |
101 | try_bulk_dequeue_skb(q, skb, txq); | 105 | try_bulk_dequeue_skb(q, skb, txq, packets); |
102 | } | 106 | } |
103 | } | 107 | } |
104 | return skb; | 108 | return skb; |
@@ -204,7 +208,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, | |||
204 | * >0 - queue is not empty. | 208 | * >0 - queue is not empty. |
205 | * | 209 | * |
206 | */ | 210 | */ |
207 | static inline int qdisc_restart(struct Qdisc *q) | 211 | static inline int qdisc_restart(struct Qdisc *q, int *packets) |
208 | { | 212 | { |
209 | struct netdev_queue *txq; | 213 | struct netdev_queue *txq; |
210 | struct net_device *dev; | 214 | struct net_device *dev; |
@@ -213,7 +217,7 @@ static inline int qdisc_restart(struct Qdisc *q) | |||
213 | bool validate; | 217 | bool validate; |
214 | 218 | ||
215 | /* Dequeue packet */ | 219 | /* Dequeue packet */ |
216 | skb = dequeue_skb(q, &validate); | 220 | skb = dequeue_skb(q, &validate, packets); |
217 | if (unlikely(!skb)) | 221 | if (unlikely(!skb)) |
218 | return 0; | 222 | return 0; |
219 | 223 | ||
@@ -227,14 +231,16 @@ static inline int qdisc_restart(struct Qdisc *q) | |||
227 | void __qdisc_run(struct Qdisc *q) | 231 | void __qdisc_run(struct Qdisc *q) |
228 | { | 232 | { |
229 | int quota = weight_p; | 233 | int quota = weight_p; |
234 | int packets; | ||
230 | 235 | ||
231 | while (qdisc_restart(q)) { | 236 | while (qdisc_restart(q, &packets)) { |
232 | /* | 237 | /* |
233 | * Ordered by possible occurrence: Postpone processing if | 238 | * Ordered by possible occurrence: Postpone processing if |
234 | * 1. we've exceeded packet quota | 239 | * 1. we've exceeded packet quota |
235 | * 2. another process needs the CPU; | 240 | * 2. another process needs the CPU; |
236 | */ | 241 | */ |
237 | if (--quota <= 0 || need_resched()) { | 242 | quota -= packets; |
243 | if (quota <= 0 || need_resched()) { | ||
238 | __netif_schedule(q); | 244 | __netif_schedule(q); |
239 | break; | 245 | break; |
240 | } | 246 | } |
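The sch_generic.c change makes bulk dequeue report how many packets each qdisc_restart() pulled, so __qdisc_run() can charge the whole bundle against the weight_p quota instead of counting one per loop iteration. A toy model of that accounting:

/* Each restart may dequeue a bundle; subtracting the bundle size from the
 * quota preserves the fairness limit that per-iteration counting used to
 * enforce.  The queue below is a made-up stand-in, not a real qdisc.
 */
#include <stdio.h>

#define WEIGHT_P 64                     /* stands in for the kernel's weight_p quota */

/* pretend each restart dequeues a bulk of up to 8 packets */
static int qdisc_restart_model(int *packets)
{
    static int remaining = 200;         /* packets sitting in the fake queue */

    if (!remaining)
        return 0;
    *packets = remaining < 8 ? remaining : 8;
    remaining -= *packets;
    return 1;                           /* queue was not empty before this dequeue */
}

int main(void)
{
    int quota = WEIGHT_P;
    int packets, rounds = 0;

    while (qdisc_restart_model(&packets)) {
        rounds++;
        quota -= packets;               /* charge the whole bundle, not just one */
        if (quota <= 0) {
            printf("yield after %d rounds, quota spent\n", rounds);
            break;                      /* the kernel would __netif_schedule(q) here */
        }
    }
    return 0;
}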