57 files changed, 1421 insertions(+), 1206 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index f09525772369..c010fafc66a8 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -24,7 +24,6 @@ Required properties:
 Optional properties:
 - interrupt-parent: Should be the phandle for the interrupt controller
   that services interrupts for this device
-- mediatek,hwlro: the capability if the hardware supports LRO functions
 
 * Ethernet MAC node
 
@@ -54,7 +53,6 @@ eth: ethernet@1b100000 {
 	reset-names = "eth";
 	mediatek,ethsys = <&ethsys>;
 	mediatek,pctl = <&syscfg_pctl_a>;
-	mediatek,hwlro;
 	#address-cells = <1>;
 	#size-cells = <0>;
 
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
index f9c32adab5c6..c35b5b428a7f 100644
--- a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
+++ b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
@@ -34,16 +34,17 @@ KSZ9031:
 
 All skew control options are specified in picoseconds. The minimum
 value is 0, and the maximum is property-dependent. The increment
-step is 60ps.
+step is 60ps. The default value is the neutral setting, so setting
+rxc-skew-ps=<0> actually results in -900 picoseconds adjustment.
 
 Optional properties:
 
-Maximum value of 1860:
+Maximum value of 1860, default value 900:
 
  - rxc-skew-ps : Skew control of RX clock pad
  - txc-skew-ps : Skew control of TX clock pad
 
-Maximum value of 900:
+Maximum value of 900, default value 420:
 
  - rxdv-skew-ps : Skew control of RX CTL pad
  - txen-skew-ps : Skew control of TX CTL pad
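Note: the skew figures above are absolute register settings, not deltas, and the
hardware's neutral point sits at the stated default. A minimal sketch of the
arithmetic (the helper name is hypothetical, not part of the binding or driver):

	/* Effective adjustment in ps for a KSZ9031 skew pad.
	 * setting_ps: devicetree value (0..max, in 60 ps steps)
	 * neutral_ps: 900 for clock pads, 420 for control pads
	 */
	static int ksz9031_skew_adjust_ps(int setting_ps, int neutral_ps)
	{
		return setting_ps - neutral_ps;
	}

So rxc-skew-ps = <0> yields 0 - 900 = -900 ps, and the clock-pad maximum of
1860 yields +960 ps around the neutral point.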
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index c8ac222eac67..b519503be51a 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -10,6 +10,7 @@ Required properties:
       "renesas,etheravb-r8a7793" if the device is a part of R8A7793 SoC.
       "renesas,etheravb-r8a7794" if the device is a part of R8A7794 SoC.
       "renesas,etheravb-r8a7795" if the device is a part of R8A7795 SoC.
+      "renesas,etheravb-r8a7796" if the device is a part of R8A7796 SoC.
       "renesas,etheravb-rcar-gen2" for generic R-Car Gen 2 compatible interface.
       "renesas,etheravb-rcar-gen3" for generic R-Car Gen 3 compatible interface.
 
@@ -33,7 +34,7 @@ Optional properties:
 - interrupt-parent: the phandle for the interrupt controller that services
 		    interrupts for this device.
 - interrupt-names: A list of interrupt names.
-		   For the R8A7795 SoC this property is mandatory;
+		   For the R8A779[56] SoCs this property is mandatory;
 		   it should include one entry per channel, named "ch%u",
 		   where %u is the channel number ranging from 0 to 24.
 		   For other SoCs this property is optional; if present
diff --git a/MAINTAINERS b/MAINTAINERS
index 4347bce8ada6..8b58a86988aa 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5033,6 +5033,13 @@ F:	drivers/net/ethernet/freescale/fec_ptp.c
 F:	drivers/net/ethernet/freescale/fec.h
 F:	Documentation/devicetree/bindings/net/fsl-fec.txt
 
+FREESCALE QORIQ DPAA FMAN DRIVER
+M:	Madalin Bucur <madalin.bucur@nxp.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/freescale/fman
+F:	Documentation/devicetree/bindings/powerpc/fsl/fman.txt
+
 FREESCALE QUICC ENGINE LIBRARY
 L:	linuxppc-dev@lists.ozlabs.org
 S:	Orphan
@@ -13128,6 +13135,7 @@ F:	arch/arm64/include/asm/xen/
 
 XEN NETWORK BACKEND DRIVER
 M:	Wei Liu <wei.liu2@citrix.com>
+M:	Paul Durrant <paul.durrant@citrix.com>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:	netdev@vger.kernel.org
 S:	Supported
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 6bd63b84abd0..2f633df9f4e6 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -314,6 +314,7 @@ static const struct usb_device_id blacklist_table[] = {
 	/* Marvell Bluetooth devices */
 	{ USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL },
 	{ USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
+	{ USB_DEVICE(0x1286, 0x204e), .driver_info = BTUSB_MARVELL },
 
 	/* Intel Bluetooth devices */
 	{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
@@ -1042,6 +1043,10 @@ static int btusb_open(struct hci_dev *hdev)
 
 	BT_DBG("%s", hdev->name);
 
+	err = usb_autopm_get_interface(data->intf);
+	if (err < 0)
+		return err;
+
 	/* Patching USB firmware files prior to starting any URBs of HCI path
 	 * It is more safe to use USB bulk channel for downloading USB patch
 	 */
@@ -1051,10 +1056,6 @@ static int btusb_open(struct hci_dev *hdev)
 		return err;
 	}
 
-	err = usb_autopm_get_interface(data->intf);
-	if (err < 0)
-		return err;
-
 	data->intf->needs_remote_wakeup = 1;
 
 	if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
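Note: the two btusb_open() hunks are one logical move. The autopm reference is
now taken before the firmware-patching step rather than after it, so the
interface is resumed before any USB traffic is issued. A condensed sketch of
the resulting ordering (error handling abbreviated; the setup_on_usb callback
is the driver's own firmware-download hook):

	static int btusb_open_order_sketch(struct btusb_data *data,
					   struct hci_dev *hdev)
	{
		int err;

		err = usb_autopm_get_interface(data->intf); /* resume first */
		if (err < 0)
			return err;

		if (data->setup_on_usb) {
			err = data->setup_on_usb(hdev); /* USB I/O now safe */
			if (err < 0)
				return err;
		}

		data->intf->needs_remote_wakeup = 1;
		return 0;
	}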
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 6ea0e5ff1e44..856379cbb402 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1046,7 +1046,7 @@ static void bgmac_enable(struct bgmac *bgmac)
 
 	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
 		BGMAC_DS_MM_SHIFT;
-	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
+	if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) || mode != 0)
 		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
 	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2)
 		bgmac_cco_ctl_maskset(bgmac, 1, ~0,
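Note: the bgmac change is a missing-negation fix worth spelling out. With the
old test, cores that do advertise BGMAC_FEAT_CLKCTLST always took the FORCEHT
branch and cores without the feature never did, which is the inverse of the
intent; the very next line only applies CLKCTLST-specific handling when the
feature is present. A standalone illustration:

	#include <stdbool.h>

	#define BGMAC_FEAT_CLKCTLST 0x1

	static bool force_ht_old(unsigned int flags, unsigned int mode)
	{
		return (flags & BGMAC_FEAT_CLKCTLST) || mode != 0;  /* pre-fix */
	}

	static bool force_ht_new(unsigned int flags, unsigned int mode)
	{
		return !(flags & BGMAC_FEAT_CLKCTLST) || mode != 0; /* post-fix */
	}

	/* flags = BGMAC_FEAT_CLKCTLST, mode = 0:
	 * old -> true  (forces HT although the core manages its own clocking)
	 * new -> false (leaves it to the core's clock-control-status logic)
	 */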
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 63144bb413d1..b32444a3ed79 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -3117,6 +3117,7 @@ static int macb_remove(struct platform_device *pdev)
 		if (dev->phydev)
 			phy_disconnect(dev->phydev);
 		mdiobus_unregister(bp->mii_bus);
+		dev->phydev = NULL;
 		mdiobus_free(bp->mii_bus);
 
 		/* Shutdown the PHY if there is a GPIO reset */
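Note: clearing dev->phydev after mdiobus_unregister() looks cosmetic but is
defensive. Once the MDIO bus is torn down, any later path that guards a phylib
call with "if (dev->phydev)" would act on a stale pointer. A minimal sketch of
the hazard this forecloses (an assumed call sequence, not a trace from the
driver):

	mdiobus_unregister(bp->mii_bus); /* PHY devices on the bus go away */
	mdiobus_free(bp->mii_bus);
	/* ... later, without the NULL-ing: */
	if (dev->phydev)                 /* still non-NULL, but dangling   */
		phy_stop(dev->phydev);   /* would touch freed memory       */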
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 9cffe48be156..1fb5d7239254 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -2728,6 +2728,26 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
 	return 0;
 }
 
+#define NCSI_UPDATE_LOG	"NCSI section update is not supported in FW ver %s\n"
+static bool be_fw_ncsi_supported(char *ver)
+{
+	int v1[4] = {3, 102, 148, 0}; /* Min ver that supports NCSI FW */
+	int v2[4];
+	int i;
+
+	if (sscanf(ver, "%d.%d.%d.%d", &v2[0], &v2[1], &v2[2], &v2[3]) != 4)
+		return false;
+
+	for (i = 0; i < 4; i++) {
+		if (v1[i] < v2[i])
+			return true;
+		else if (v1[i] > v2[i])
+			return false;
+	}
+
+	return true;
+}
+
 /* For BE2, BE3 and BE3-R */
 static int be_flash_BEx(struct be_adapter *adapter,
 			const struct firmware *fw,
@@ -2805,8 +2825,10 @@ static int be_flash_BEx(struct be_adapter *adapter,
 			continue;
 
 		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
-		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
+		    !be_fw_ncsi_supported(adapter->fw_ver)) {
+			dev_info(dev, NCSI_UPDATE_LOG, adapter->fw_ver);
 			continue;
+		}
 
 		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
 		    !phy_flashing_required(adapter))
@@ -3527,6 +3549,11 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 		for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
 			adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
 				(BIT_MASK(16) - 1);
+		/* For BEx, since GET_FUNC_CONFIG command is not
+		 * supported, we read funcnum here as a workaround.
+		 */
+		if (BEx_chip(adapter))
+			adapter->pf_num = attribs->hba_attribs.pci_funcnum;
 	}
 
 err:
@@ -4950,7 +4977,7 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
 {
 	int status;
 
-	if (BEx_chip(adapter))
+	if (BE2_chip(adapter))
 		return -EOPNOTSUPP;
 
 	status = __be_cmd_set_logical_link_config(adapter, link_state,
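Note: the new be_fw_ncsi_supported() replaces a byte-wise memcmp() against the
literal "3.102.148.0". Byte-wise comparison orders dotted version strings
lexically and misorders fields of different widths: "11.0.0.0" compares below
"3.102.148.0" because '1' < '3', even though 11 > 3 numerically. A standalone
demonstration of the failure mode (plain C, not driver code):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *fw = "11.0.0.0"; /* numerically newer than 3.102.148.0 */
		int v[4];

		/* lexical compare wrongly reports "too old": '1' < '3' */
		printf("lexical: %s\n",
		       strncmp(fw, "3.102.148.0", 11) < 0 ? "too old" : "ok");

		/* field-wise compare, the approach the new helper takes */
		if (sscanf(fw, "%d.%d.%d.%d", &v[0], &v[1], &v[2], &v[3]) == 4)
			printf("numeric: %s\n",
			       v[0] > 3 ? "ok" : "check remaining fields");
		return 0;
	}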
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 1bd82bcb3be5..09da2d82c2f0 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1720,7 +1720,11 @@ struct mgmt_hba_attribs {
 	u32 rsvd2[55];
 	u8 rsvd3[3];
 	u8 phy_port;
-	u32 rsvd4[13];
+	u32 rsvd4[15];
+	u8 rsvd5[2];
+	u8 pci_funcnum;
+	u8 rsvd6;
+	u32 rsvd7[6];
 } __packed;
 
 struct mgmt_controller_attrib {
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 92942c84d329..36e4232ed6b8 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005-2016 Broadcom.
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index dcb930a52613..cece8a08edca 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -724,14 +724,24 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
 	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
 }
 
+static int be_gso_hdr_len(struct sk_buff *skb)
+{
+	if (skb->encapsulation)
+		return skb_inner_transport_offset(skb) +
+		       inner_tcp_hdrlen(skb);
+	return skb_transport_offset(skb) + tcp_hdrlen(skb);
+}
+
 static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
 {
 	struct be_tx_stats *stats = tx_stats(txo);
-	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
+	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
+	/* Account for headers which get duplicated in TSO pkt */
+	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;
 
 	u64_stats_update_begin(&stats->sync);
 	stats->tx_reqs++;
-	stats->tx_bytes += skb->len;
+	stats->tx_bytes += skb->len + dup_hdr_len;
 	stats->tx_pkts += tx_pkts;
 	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
 		stats->tx_vxlan_offload_pkts += tx_pkts;
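Note: the dup_hdr_len term corrects the tx_bytes statistic for TSO. The stack
hands the driver one large skb, but the NIC emits gso_segs wire packets, each
carrying its own copy of the protocol headers, so skb->len undercounts the
bytes on the wire by hdr_len * (gso_segs - 1). Worked numbers under assumed
sizes (54-byte Ethernet/IP/TCP headers, 1448-byte MSS; both are illustrative):

	u32 gso_segs = 46;                                 /* ~64 KiB TSO skb */
	u32 hdr_len  = 54;
	u32 skb_len  = hdr_len + gso_segs * 1448;          /* 66662 bytes     */
	u32 wire_len = skb_len + hdr_len * (gso_segs - 1); /* 66662 + 2430    */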
diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile
index 51fd2e6c1b84..60491779e49f 100644
--- a/drivers/net/ethernet/freescale/fman/Makefile
+++ b/drivers/net/ethernet/freescale/fman/Makefile
@@ -1,7 +1,9 @@
 subdir-ccflags-y += -I$(srctree)/drivers/net/ethernet/freescale/fman
 
-obj-y		+= fsl_fman.o fsl_fman_mac.o fsl_mac.o
+obj-$(CONFIG_FSL_FMAN) += fsl_fman.o
+obj-$(CONFIG_FSL_FMAN) += fsl_fman_port.o
+obj-$(CONFIG_FSL_FMAN) += fsl_mac.o
 
-fsl_fman-objs	:= fman_muram.o fman.o fman_sp.o fman_port.o
-fsl_fman_mac-objs := fman_dtsec.o fman_memac.o fman_tgec.o
-fsl_mac-objs += mac.o
+fsl_fman-objs	:= fman_muram.o fman.o fman_sp.o
+fsl_fman_port-objs := fman_port.o
+fsl_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 1de2e1e51c2b..dafd9e1baba2 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -618,7 +618,7 @@ struct fman {
 	unsigned long cam_offset;
 	size_t cam_size;
 	/* Fifo in MURAM */
-	int fifo_offset;
+	unsigned long fifo_offset;
 	size_t fifo_size;
 
 	u32 liodn_base[64];
@@ -2036,7 +2036,7 @@ static int fman_init(struct fman *fman)
 	/* allocate MURAM for FIFO according to total size */
 	fman->fifo_offset = fman_muram_alloc(fman->muram,
 					     fman->state->total_fifo_size);
-	if (IS_ERR_VALUE(fman->cam_offset)) {
+	if (IS_ERR_VALUE(fman->fifo_offset)) {
 		free_init_resources(fman);
 		dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n",
 			__func__);
@@ -2115,6 +2115,7 @@ void fman_register_intr(struct fman *fman, enum fman_event_modules module,
 	fman->intr_mng[event].isr_cb = isr_cb;
 	fman->intr_mng[event].src_handle = src_arg;
 }
+EXPORT_SYMBOL(fman_register_intr);
 
 /**
  * fman_unregister_intr
@@ -2138,6 +2139,7 @@ void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
 	fman->intr_mng[event].isr_cb = NULL;
 	fman->intr_mng[event].src_handle = NULL;
 }
+EXPORT_SYMBOL(fman_unregister_intr);
 
 /**
  * fman_set_port_params
@@ -2241,6 +2243,7 @@ return_err:
 	spin_unlock_irqrestore(&fman->spinlock, flags);
 	return err;
 }
+EXPORT_SYMBOL(fman_set_port_params);
 
 /**
  * fman_reset_mac
@@ -2310,6 +2313,7 @@ int fman_reset_mac(struct fman *fman, u8 mac_id)
 
 	return 0;
 }
+EXPORT_SYMBOL(fman_reset_mac);
 
 /**
  * fman_set_mac_max_frame
@@ -2327,8 +2331,7 @@ int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
 	 * or equal to the port's max
 	 */
 	if ((!fman->state->port_mfl[mac_id]) ||
-	    (fman->state->port_mfl[mac_id] &&
-	    (mfl <= fman->state->port_mfl[mac_id]))) {
+	    (mfl <= fman->state->port_mfl[mac_id])) {
 		fman->state->mac_mfl[mac_id] = mfl;
 	} else {
 		dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n",
@@ -2337,6 +2340,7 @@ int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
 	}
 	return 0;
 }
+EXPORT_SYMBOL(fman_set_mac_max_frame);
 
 /**
  * fman_get_clock_freq
@@ -2363,6 +2367,7 @@ u32 fman_get_bmi_max_fifo_size(struct fman *fman)
 {
 	return fman->state->bmi_max_fifo_size;
 }
+EXPORT_SYMBOL(fman_get_bmi_max_fifo_size);
 
 /**
  * fman_get_revision
@@ -2384,6 +2389,7 @@ void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
 			FPM_REV1_MAJOR_SHIFT);
 	rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
 }
+EXPORT_SYMBOL(fman_get_revision);
 
 /**
  * fman_get_qman_channel_id
@@ -2419,6 +2425,7 @@ u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
 
 	return fman->state->qman_channel_base + i;
 }
+EXPORT_SYMBOL(fman_get_qman_channel_id);
 
 /**
  * fman_get_mem_region
@@ -2432,6 +2439,7 @@ struct resource *fman_get_mem_region(struct fman *fman)
 {
 	return fman->state->res;
 }
+EXPORT_SYMBOL(fman_get_mem_region);
 
 /* Bootargs defines */
 /* Extra headroom for RX buffers - Default, min and max */
@@ -2453,7 +2461,7 @@ struct resource *fman_get_mem_region(struct fman *fman)
  * particular forwarding scenarios that add extra headers to the
  * forwarded frame.
  */
-int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+static int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
 module_param(fsl_fm_rx_extra_headroom, int, 0);
 MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
 
@@ -2466,7 +2474,7 @@ MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
  * Could be overridden once, at boot-time, via the
  * fm_set_max_frm() callback.
  */
-int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
 module_param(fsl_fm_max_frm, int, 0);
 MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
 
@@ -2538,6 +2546,7 @@ struct fman *fman_bind(struct device *fm_dev)
 {
 	return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
 }
+EXPORT_SYMBOL(fman_bind);
 
 static irqreturn_t fman_err_irq(int irq, void *handle)
 {
@@ -2727,8 +2736,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
 	struct fman *fman;
 	struct device_node *fm_node, *muram_node;
 	struct resource *res;
-	const u32 *u32_prop;
-	int lenp, err, irq;
+	u32 val, range[2];
+	int err, irq;
 	struct clk *clk;
 	u32 clk_rate;
 	phys_addr_t phys_base_addr;
@@ -2740,16 +2749,13 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
 
 	fm_node = of_node_get(of_dev->dev.of_node);
 
-	u32_prop = (const u32 *)of_get_property(fm_node, "cell-index", &lenp);
-	if (!u32_prop) {
-		dev_err(&of_dev->dev, "%s: of_get_property(%s, cell-index) failed\n",
+	err = of_property_read_u32(fm_node, "cell-index", &val);
+	if (err) {
+		dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n",
 			__func__, fm_node->full_name);
 		goto fman_node_put;
 	}
-	if (WARN_ON(lenp != sizeof(u32)))
-		goto fman_node_put;
-
-	fman->dts_params.id = (u8)fdt32_to_cpu(u32_prop[0]);
+	fman->dts_params.id = (u8)val;
 
 	/* Get the FM interrupt */
 	res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
@@ -2796,18 +2802,15 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
 	/* Rounding to MHz */
 	fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000);
 
-	u32_prop = (const u32 *)of_get_property(fm_node,
-						"fsl,qman-channel-range",
-						&lenp);
-	if (!u32_prop) {
-		dev_err(&of_dev->dev, "%s: of_get_property(%s, fsl,qman-channel-range) failed\n",
+	err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range",
+					 &range[0], 2);
+	if (err) {
+		dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n",
 			__func__, fm_node->full_name);
 		goto fman_node_put;
 	}
-	if (WARN_ON(lenp != sizeof(u32) * 2))
-		goto fman_node_put;
-	fman->dts_params.qman_channel_base = fdt32_to_cpu(u32_prop[0]);
-	fman->dts_params.num_of_qman_channels = fdt32_to_cpu(u32_prop[1]);
+	fman->dts_params.qman_channel_base = range[0];
+	fman->dts_params.num_of_qman_channels = range[1];
 
 	/* Get the MURAM base address and size */
 	muram_node = of_find_matching_node(fm_node, fman_muram_match);
@@ -2858,7 +2861,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
 
 	fman->dts_params.base_addr =
 		devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
-	if (fman->dts_params.base_addr == 0) {
+	if (!fman->dts_params.base_addr) {
 		dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
 		goto fman_free;
 	}
@@ -2930,7 +2933,7 @@ static const struct of_device_id fman_match[] = {
 	{}
 };
 
-MODULE_DEVICE_TABLE(of, fm_match);
+MODULE_DEVICE_TABLE(of, fman_match);
 
 static struct platform_driver fman_driver = {
 	.driver = {
@@ -2940,4 +2943,25 @@ static struct platform_driver fman_driver = {
 	.probe = fman_probe,
 };
 
-builtin_platform_driver(fman_driver);
+static int __init fman_load(void)
+{
+	int err;
+
+	pr_debug("FSL DPAA FMan driver\n");
+
+	err = platform_driver_register(&fman_driver);
+	if (err < 0)
+		pr_err("Error, platform_driver_register() = %d\n", err);
+
+	return err;
+}
+module_init(fman_load);
+
+static void __exit fman_unload(void)
+{
+	platform_driver_unregister(&fman_driver);
+}
+module_exit(fman_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale DPAA Frame Manager driver");
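Note: the conversion that recurs throughout the FMan files, of_get_property()
plus a manual lenp check and fdt32_to_cpu() replaced by of_property_read_u32()
and of_property_read_u32_array(), deserves one generic illustration: the
helpers validate the property size and perform the endian conversion
internally, which is exactly the boilerplate the old code open-coded. A
minimal sketch of the pattern (np and the property names stand in for the
caller's node and bindings):

	u32 val, range[2];
	int err;

	err = of_property_read_u32(np, "cell-index", &val);
	if (err)
		return err;	/* property missing or not one u32 */

	err = of_property_read_u32_array(np, "fsl,qman-channel-range",
					 range, 2);
	if (err)
		return err;	/* missing or not exactly two u32s */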
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index ddf0260176c9..dd6d0526f6c1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -191,10 +191,6 @@ struct fman_mac_params {
 	u16 max_speed;
 	/* A handle to the FM object this port related to */
 	void *fm;
-	/* MDIO exceptions interrupt source - not valid for all
-	 * MACs; MUST be set to 0 for MACs that don't have
-	 * mdio-irq, or for polling
-	 */
 	void *dev_id; /* device cookie used by the exception cbs */
 	fman_mac_exception_cb *event_cb;    /* MDIO Events Callback Routine */
 	fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 45e98fd8b79e..53ef51e3bd9e 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -507,6 +507,9 @@ static void setup_sgmii_internal_phy(struct fman_mac *memac,
 {
 	u16 tmp_reg16;
 
+	if (WARN_ON(!memac->pcsphy))
+		return;
+
 	/* SGMII mode */
 	tmp_reg16 = IF_MODE_SGMII_EN;
 	if (!fixed_link)
@@ -1151,7 +1154,8 @@ struct fman_mac *memac_config(struct fman_mac_params *params)
 	/* Save FMan revision */
 	fman_get_revision(memac->fm, &memac->fm_rev_info);
 
-	if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
+	if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
+	    memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
 		if (!params->internal_phy_node) {
 			pr_err("PCS PHY node is not available\n");
 			memac_free(memac);
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 47394c45b6e8..5ec94d243da0 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -150,7 +150,8 @@ unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
  *
  * Free an allocated memory from FM-MURAM partition.
  */
-void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size)
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
+			 size_t size)
 {
 	unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
 
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index 889649ad8931..453bf849eee1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -46,6 +46,7 @@ unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
 
 unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
 
-void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size);
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
+			 size_t size);
 
 #endif /* __FM_MURAM_EXT */
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 70c198d072dc..9f3bb50a2365 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1477,7 +1477,8 @@ EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
  */
 int fman_port_disable(struct fman_port *port)
 {
-	u32 __iomem *bmi_cfg_reg, *bmi_status_reg, tmp;
+	u32 __iomem *bmi_cfg_reg, *bmi_status_reg;
+	u32 tmp;
 	bool rx_port, failure = false;
 	int count;
 
@@ -1553,7 +1554,8 @@ EXPORT_SYMBOL(fman_port_disable);
  */
 int fman_port_enable(struct fman_port *port)
 {
-	u32 __iomem *bmi_cfg_reg, tmp;
+	u32 __iomem *bmi_cfg_reg;
+	u32 tmp;
 	bool rx_port;
 
 	if (!is_init_done(port->cfg))
@@ -1623,7 +1625,7 @@ static int fman_port_probe(struct platform_device *of_dev)
 	struct device_node *fm_node, *port_node;
 	struct resource res;
 	struct resource *dev_res;
-	const u32 *u32_prop;
+	u32 val;
 	int err = 0, lenp;
 	enum fman_port_type port_type;
 	u16 port_speed;
@@ -1652,28 +1654,20 @@ static int fman_port_probe(struct platform_device *of_dev)
 		goto return_err;
 	}
 
-	u32_prop = (const u32 *)of_get_property(port_node, "cell-index", &lenp);
-	if (!u32_prop) {
-		dev_err(port->dev, "%s: of_get_property(%s, cell-index) failed\n",
+	err = of_property_read_u32(port_node, "cell-index", &val);
+	if (err) {
+		dev_err(port->dev, "%s: reading cell-index for %s failed\n",
 			__func__, port_node->full_name);
 		err = -EINVAL;
 		goto return_err;
 	}
-	if (WARN_ON(lenp != sizeof(u32))) {
-		err = -EINVAL;
-		goto return_err;
-	}
-	port_id = (u8)fdt32_to_cpu(u32_prop[0]);
-
+	port_id = (u8)val;
 	port->dts_params.id = port_id;
 
 	if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
 		port_type = FMAN_PORT_TYPE_TX;
 		port_speed = 1000;
-		u32_prop = (const u32 *)of_get_property(port_node,
-							"fsl,fman-10g-port",
-							&lenp);
-		if (u32_prop)
+		if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
 			port_speed = 10000;
 
 	} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
@@ -1686,9 +1680,7 @@ static int fman_port_probe(struct platform_device *of_dev)
 	} else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
 		port_type = FMAN_PORT_TYPE_RX;
 		port_speed = 1000;
-		u32_prop = (const u32 *)of_get_property(port_node,
-							"fsl,fman-10g-port", &lenp);
-		if (u32_prop)
+		if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
 			port_speed = 10000;
 
 	} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
@@ -1743,7 +1735,7 @@ static int fman_port_probe(struct platform_device *of_dev)
 
 	port->dts_params.base_addr = devm_ioremap(port->dev, res.start,
 						  resource_size(&res));
-	if (port->dts_params.base_addr == 0)
+	if (!port->dts_params.base_addr)
 		dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__);
 
 	dev_set_drvdata(&of_dev->dev, port);
@@ -1775,4 +1767,25 @@ static struct platform_driver fman_port_driver = {
 	.probe = fman_port_probe,
 };
 
-builtin_platform_driver(fman_port_driver);
+static int __init fman_port_load(void)
+{
+	int err;
+
+	pr_debug("FSL DPAA FMan driver\n");
+
+	err = platform_driver_register(&fman_port_driver);
+	if (err < 0)
+		pr_err("Error, platform_driver_register() = %d\n", err);
+
+	return err;
+}
+module_init(fman_port_load);
+
+static void __exit fman_port_unload(void)
+{
+	platform_driver_unregister(&fman_port_driver);
+}
+module_exit(fman_port_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale DPAA Frame Manager Port driver");
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c
index f9e7aa385cba..248f5bcca468 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.c
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -80,6 +80,7 @@ void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
 		}
 	}
 }
+EXPORT_SYMBOL(fman_sp_set_buf_pools_in_asc_order_of_buf_sizes);
 
 int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
 				int_context_data_copy,
@@ -164,3 +165,5 @@ int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
 
 	return 0;
 }
+EXPORT_SYMBOL(fman_sp_build_buffer_struct);
+
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index e33d9d24c1db..8fe6b3e253fa 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -469,9 +469,9 @@ static void adjust_link_memac(struct net_device *net_dev)
 /* Initializes driver's PHY state, and attaches to the PHY.
  * Returns 0 on success.
  */
-static int init_phy(struct net_device *net_dev,
-		    struct mac_device *mac_dev,
-		    void (*adj_lnk)(struct net_device *))
+static struct phy_device *init_phy(struct net_device *net_dev,
+				   struct mac_device *mac_dev,
+				   void (*adj_lnk)(struct net_device *))
 {
 	struct phy_device *phy_dev;
 	struct mac_priv_s *priv = mac_dev->priv;
@@ -480,7 +480,7 @@ static int init_phy(struct net_device *net_dev,
 			priv->phy_if);
 	if (!phy_dev) {
 		netdev_err(net_dev, "Could not connect to PHY\n");
-		return -ENODEV;
+		return NULL;
 	}
 
 	/* Remove any features not supported by the controller */
@@ -493,23 +493,23 @@ static int init_phy(struct net_device *net_dev,
 
 	mac_dev->phy_dev = phy_dev;
 
-	return 0;
+	return phy_dev;
 }
 
-static int dtsec_init_phy(struct net_device *net_dev,
-			  struct mac_device *mac_dev)
+static struct phy_device *dtsec_init_phy(struct net_device *net_dev,
+					 struct mac_device *mac_dev)
 {
 	return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
 }
 
-static int tgec_init_phy(struct net_device *net_dev,
-			 struct mac_device *mac_dev)
+static struct phy_device *tgec_init_phy(struct net_device *net_dev,
+					struct mac_device *mac_dev)
 {
 	return init_phy(net_dev, mac_dev, adjust_link_void);
 }
 
-static int memac_init_phy(struct net_device *net_dev,
-			  struct mac_device *mac_dev)
+static struct phy_device *memac_init_phy(struct net_device *net_dev,
+					 struct mac_device *mac_dev)
 {
 	return init_phy(net_dev, mac_dev, &adjust_link_memac);
 }
@@ -583,31 +583,6 @@ static void setup_memac(struct mac_device *mac_dev)
 
 static DEFINE_MUTEX(eth_lock);
 
-static const char phy_str[][11] = {
-	[PHY_INTERFACE_MODE_MII]	= "mii",
-	[PHY_INTERFACE_MODE_GMII]	= "gmii",
-	[PHY_INTERFACE_MODE_SGMII]	= "sgmii",
-	[PHY_INTERFACE_MODE_TBI]	= "tbi",
-	[PHY_INTERFACE_MODE_RMII]	= "rmii",
-	[PHY_INTERFACE_MODE_RGMII]	= "rgmii",
-	[PHY_INTERFACE_MODE_RGMII_ID]	= "rgmii-id",
-	[PHY_INTERFACE_MODE_RGMII_RXID]	= "rgmii-rxid",
-	[PHY_INTERFACE_MODE_RGMII_TXID]	= "rgmii-txid",
-	[PHY_INTERFACE_MODE_RTBI]	= "rtbi",
-	[PHY_INTERFACE_MODE_XGMII]	= "xgmii"
-};
-
-static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(phy_str); i++)
-		if (strcmp(str, phy_str[i]) == 0)
-			return (phy_interface_t)i;
-
-	return PHY_INTERFACE_MODE_MII;
-}
-
 static const u16 phy2speed[] = {
 	[PHY_INTERFACE_MODE_MII]	= SPEED_100,
 	[PHY_INTERFACE_MODE_GMII]	= SPEED_1000,
@@ -678,7 +653,7 @@ MODULE_DEVICE_TABLE(of, mac_match);
 
 static int mac_probe(struct platform_device *_of_dev)
 {
-	int err, i, lenp, nph;
+	int err, i, nph;
 	struct device *dev;
 	struct device_node *mac_node, *dev_node;
 	struct mac_device *mac_dev;
@@ -686,9 +661,9 @@ static int mac_probe(struct platform_device *_of_dev)
 	struct resource res;
 	struct mac_priv_s *priv;
 	const u8 *mac_addr;
-	const char *char_prop;
-	const u32 *u32_prop;
+	u32 val;
 	u8 fman_id;
+	int phy_if;
 
 	dev = &_of_dev->dev;
 	mac_node = dev->of_node;
@@ -749,16 +724,15 @@ static int mac_probe(struct platform_device *_of_dev)
 	}
 
 	/* Get the FMan cell-index */
-	u32_prop = of_get_property(dev_node, "cell-index", &lenp);
-	if (!u32_prop) {
-		dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+	err = of_property_read_u32(dev_node, "cell-index", &val);
+	if (err) {
+		dev_err(dev, "failed to read cell-index for %s\n",
 			dev_node->full_name);
 		err = -EINVAL;
 		goto _return_of_node_put;
 	}
-	WARN_ON(lenp != sizeof(u32));
 	/* cell-index 0 => FMan id 1 */
-	fman_id = (u8)(fdt32_to_cpu(u32_prop[0]) + 1);
+	fman_id = (u8)(val + 1);
 
 	priv->fman = fman_bind(&of_dev->dev);
 	if (!priv->fman) {
@@ -805,15 +779,14 @@ static int mac_probe(struct platform_device *_of_dev)
 	}
 
 	/* Get the cell-index */
-	u32_prop = of_get_property(mac_node, "cell-index", &lenp);
-	if (!u32_prop) {
-		dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+	err = of_property_read_u32(mac_node, "cell-index", &val);
+	if (err) {
+		dev_err(dev, "failed to read cell-index for %s\n",
 			mac_node->full_name);
 		err = -EINVAL;
 		goto _return_dev_set_drvdata;
 	}
-	WARN_ON(lenp != sizeof(u32));
-	priv->cell_index = (u8)fdt32_to_cpu(u32_prop[0]);
+	priv->cell_index = (u8)val;
 
 	/* Get the MAC address */
 	mac_addr = of_get_mac_address(mac_node);
@@ -870,16 +843,14 @@ static int mac_probe(struct platform_device *_of_dev)
 	}
 
 	/* Get the PHY connection type */
-	char_prop = (const char *)of_get_property(mac_node,
-						  "phy-connection-type", NULL);
-	if (!char_prop) {
+	phy_if = of_get_phy_mode(mac_node);
+	if (phy_if < 0) {
 		dev_warn(dev,
-			 "of_get_property(%s, phy-connection-type) failed. Defaulting to MII\n",
+			 "of_get_phy_mode() for %s failed. Defaulting to SGMII\n",
 			 mac_node->full_name);
-		priv->phy_if = PHY_INTERFACE_MODE_MII;
-	} else {
-		priv->phy_if = str2phy(char_prop);
+		phy_if = PHY_INTERFACE_MODE_SGMII;
 	}
+	priv->phy_if = phy_if;
 
 	priv->speed = phy2speed[priv->phy_if];
 	priv->max_speed = priv->speed;
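Note: replacing the driver-private phy_str[]/str2phy() table with
of_get_phy_mode() is more than cleanup. The OF helper parses the standard
phy-mode / phy-connection-type properties and returns a negative errno when no
valid mode is found, whereas str2phy() silently mapped every unknown string to
MII; the fallback also changes from MII to SGMII. A sketch of the resulting
probe-time pattern:

	int phy_if;

	phy_if = of_get_phy_mode(mac_node); /* phy_interface_t, or -errno */
	if (phy_if < 0)
		phy_if = PHY_INTERFACE_MODE_SGMII; /* driver's chosen default */
	priv->phy_if = phy_if;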
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index 0211cc9a46d6..d7313f0c5135 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -58,7 +58,8 @@ struct mac_device {
 	bool tx_pause_active;
 	bool promisc;
 
-	int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
+	struct phy_device *(*init_phy)(struct net_device *net_dev,
+				       struct mac_device *mac_dev);
 	int (*init)(struct mac_device *mac_dev);
 	int (*start)(struct mac_device *mac_dev);
 	int (*stop)(struct mac_device *mac_dev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index a68eef0ee65f..22e141005cd9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | |||
| @@ -126,7 +126,7 @@ void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) | |||
| 126 | (enum mac_speed)speed, duplex); | 126 | (enum mac_speed)speed, duplex); |
| 127 | if (ret) { | 127 | if (ret) { |
| 128 | dev_err(mac_cb->dev, | 128 | dev_err(mac_cb->dev, |
| 129 | "adjust_link failed,%s mac%d ret = %#x!\n", | 129 | "adjust_link failed, %s mac%d ret = %#x!\n", |
| 130 | mac_cb->dsaf_dev->ae_dev.name, | 130 | mac_cb->dsaf_dev->ae_dev.name, |
| 131 | mac_cb->mac_id, ret); | 131 | mac_cb->mac_id, ret); |
| 132 | return; | 132 | return; |
| @@ -149,7 +149,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, | |||
| 149 | if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) { | 149 | if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) { |
| 150 | if (mac_cb->mac_id != DSAF_MAX_PORT_NUM) { | 150 | if (mac_cb->mac_id != DSAF_MAX_PORT_NUM) { |
| 151 | dev_err(mac_cb->dev, | 151 | dev_err(mac_cb->dev, |
| 152 | "input invalid,%s mac%d vmid%d !\n", | 152 | "input invalid, %s mac%d vmid%d !\n", |
| 153 | mac_cb->dsaf_dev->ae_dev.name, | 153 | mac_cb->dsaf_dev->ae_dev.name, |
| 154 | mac_cb->mac_id, vmid); | 154 | mac_cb->mac_id, vmid); |
| 155 | return -EINVAL; | 155 | return -EINVAL; |
| @@ -157,19 +157,19 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, | |||
| 157 | } else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) { | 157 | } else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) { |
| 158 | if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM) { | 158 | if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM) { |
| 159 | dev_err(mac_cb->dev, | 159 | dev_err(mac_cb->dev, |
| 160 | "input invalid,%s mac%d vmid%d!\n", | 160 | "input invalid, %s mac%d vmid%d!\n", |
| 161 | mac_cb->dsaf_dev->ae_dev.name, | 161 | mac_cb->dsaf_dev->ae_dev.name, |
| 162 | mac_cb->mac_id, vmid); | 162 | mac_cb->mac_id, vmid); |
| 163 | return -EINVAL; | 163 | return -EINVAL; |
| 164 | } | 164 | } |
| 165 | } else { | 165 | } else { |
| 166 | dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n", | 166 | dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n", |
| 167 | mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id); | 167 | mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id); |
| 168 | return -EINVAL; | 168 | return -EINVAL; |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | if (vmid >= mac_cb->dsaf_dev->rcb_common[0]->max_vfn) { | 171 | if (vmid >= mac_cb->dsaf_dev->rcb_common[0]->max_vfn) { |
| 172 | dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n", | 172 | dev_err(mac_cb->dev, "input invalid, %s mac%d vmid%d !\n", |
| 173 | mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid); | 173 | mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid); |
| 174 | return -EINVAL; | 174 | return -EINVAL; |
| 175 | } | 175 | } |
| @@ -196,7 +196,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, | |||
| 196 | tmp_port = vmid; | 196 | tmp_port = vmid; |
| 197 | break; | 197 | break; |
| 198 | default: | 198 | default: |
| 199 | dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n", | 199 | dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n", |
| 200 | mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id); | 200 | mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id); |
| 201 | return -EINVAL; | 201 | return -EINVAL; |
| 202 | } | 202 | } |
| @@ -275,7 +275,7 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb, | |||
| 275 | ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry); | 275 | ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry); |
| 276 | if (ret) { | 276 | if (ret) { |
| 277 | dev_err(dsaf_dev->dev, | 277 | dev_err(dsaf_dev->dev, |
| 278 | "set mac mc port failed,%s mac%d ret = %#x!\n", | 278 | "set mac mc port failed, %s mac%d ret = %#x!\n", |
| 279 | mac_cb->dsaf_dev->ae_dev.name, | 279 | mac_cb->dsaf_dev->ae_dev.name, |
| 280 | mac_cb->mac_id, ret); | 280 | mac_cb->mac_id, ret); |
| 281 | return ret; | 281 | return ret; |
| @@ -305,7 +305,7 @@ int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac) | |||
| 305 | old_mac = &mac_cb->addr_entry_idx[vfn]; | 305 | old_mac = &mac_cb->addr_entry_idx[vfn]; |
| 306 | } else { | 306 | } else { |
| 307 | dev_err(mac_cb->dev, | 307 | dev_err(mac_cb->dev, |
| 308 | "vf queue is too large,%s mac%d queue = %#x!\n", | 308 | "vf queue is too large, %s mac%d queue = %#x!\n", |
| 309 | mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vfn); | 309 | mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vfn); |
| 310 | return -EINVAL; | 310 | return -EINVAL; |
| 311 | } | 311 | } |
| @@ -547,7 +547,7 @@ int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable) | |||
| 547 | struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); | 547 | struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); |
| 548 | 548 | ||
| 549 | if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII && enable) { | 549 | if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII && enable) { |
| 550 | dev_err(mac_cb->dev, "enable autoneg is not allowed!"); | 550 | dev_err(mac_cb->dev, "enabling autoneg is not allowed!\n"); |
| 551 | return -ENOTSUPP; | 551 | return -ENOTSUPP; |
| 552 | } | 552 | } |
| 553 | 553 | ||
| @@ -571,7 +571,7 @@ int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en) | |||
| 571 | 571 | ||
| 572 | if (mac_cb->mac_type == HNAE_PORT_DEBUG) { | 572 | if (mac_cb->mac_type == HNAE_PORT_DEBUG) { |
| 573 | if (is_ver1 && (tx_en || rx_en)) { | 573 | if (is_ver1 && (tx_en || rx_en)) { |
| 574 | dev_err(mac_cb->dev, "macv1 cann't enable tx/rx_pause!"); | 574 | dev_err(mac_cb->dev, "macv1 can't enable tx/rx_pause!\n"); |
| 575 | return -EINVAL; | 575 | return -EINVAL; |
| 576 | } | 576 | } |
| 577 | } | 577 | } |
| @@ -926,7 +926,7 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb) | |||
| 926 | ret = hns_mac_get_mode(mac_cb->phy_if); | 926 | ret = hns_mac_get_mode(mac_cb->phy_if); |
| 927 | if (ret < 0) { | 927 | if (ret < 0) { |
| 928 | dev_err(dsaf_dev->dev, | 928 | dev_err(dsaf_dev->dev, |
| 929 | "hns_mac_get_mode failed,mac%d ret = %#x!\n", | 929 | "hns_mac_get_mode failed, mac%d ret = %#x!\n", |
| 930 | mac_cb->mac_id, ret); | 930 | mac_cb->mac_id, ret); |
| 931 | return ret; | 931 | return ret; |
| 932 | } | 932 | } |
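The hns_dsaf hunks above are log-message hygiene: a space after each comma in the format string, and a terminating newline, which the printk family does not append by itself. A minimal illustration of the resulting convention (the message text here is arbitrary):

	dev_err(dev, "input invalid, %s mac%d vmid%d!\n", name, mac_id, vmid);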
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index ad4ab979507b..4a62ffd7729d 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | |||
| @@ -2323,6 +2323,41 @@ free_netdev: | |||
| 2323 | return err; | 2323 | return err; |
| 2324 | } | 2324 | } |
| 2325 | 2325 | ||
| 2326 | static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id) | ||
| 2327 | { | ||
| 2328 | u32 val[2], id[4]; | ||
| 2329 | |||
| 2330 | regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]); | ||
| 2331 | regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]); | ||
| 2332 | |||
| 2333 | id[3] = ((val[0] >> 16) & 0xff) - '0'; | ||
| 2334 | id[2] = ((val[0] >> 24) & 0xff) - '0'; | ||
| 2335 | id[1] = (val[1] & 0xff) - '0'; | ||
| 2336 | id[0] = ((val[1] >> 8) & 0xff) - '0'; | ||
| 2337 | |||
| 2338 | *chip_id = (id[3] * 1000) + (id[2] * 100) + | ||
| 2339 | (id[1] * 10) + id[0]; | ||
| 2340 | |||
| 2341 | if (!(*chip_id)) { | ||
| 2342 | dev_err(eth->dev, "failed to get chip id\n"); | ||
| 2343 | return -ENODEV; | ||
| 2344 | } | ||
| 2345 | |||
| 2346 | dev_info(eth->dev, "chip id = %d\n", *chip_id); | ||
| 2347 | |||
| 2348 | return 0; | ||
| 2349 | } | ||
| 2350 | |||
| 2351 | static bool mtk_is_hwlro_supported(struct mtk_eth *eth) | ||
| 2352 | { | ||
| 2353 | switch (eth->chip_id) { | ||
| 2354 | case MT7623_ETH: | ||
| 2355 | return true; | ||
| 2356 | } | ||
| 2357 | |||
| 2358 | return false; | ||
| 2359 | } | ||
| 2360 | |||
| 2326 | static int mtk_probe(struct platform_device *pdev) | 2361 | static int mtk_probe(struct platform_device *pdev) |
| 2327 | { | 2362 | { |
| 2328 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2363 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| @@ -2362,8 +2397,6 @@ static int mtk_probe(struct platform_device *pdev) | |||
| 2362 | return PTR_ERR(eth->pctl); | 2397 | return PTR_ERR(eth->pctl); |
| 2363 | } | 2398 | } |
| 2364 | 2399 | ||
| 2365 | eth->hwlro = of_property_read_bool(pdev->dev.of_node, "mediatek,hwlro"); | ||
| 2366 | |||
| 2367 | for (i = 0; i < 3; i++) { | 2400 | for (i = 0; i < 3; i++) { |
| 2368 | eth->irq[i] = platform_get_irq(pdev, i); | 2401 | eth->irq[i] = platform_get_irq(pdev, i); |
| 2369 | if (eth->irq[i] < 0) { | 2402 | if (eth->irq[i] < 0) { |
| @@ -2388,6 +2421,12 @@ static int mtk_probe(struct platform_device *pdev) | |||
| 2388 | if (err) | 2421 | if (err) |
| 2389 | return err; | 2422 | return err; |
| 2390 | 2423 | ||
| 2424 | err = mtk_get_chip_id(eth, ð->chip_id); | ||
| 2425 | if (err) | ||
| 2426 | return err; | ||
| 2427 | |||
| 2428 | eth->hwlro = mtk_is_hwlro_supported(eth); | ||
| 2429 | |||
| 2391 | for_each_child_of_node(pdev->dev.of_node, mac_np) { | 2430 | for_each_child_of_node(pdev->dev.of_node, mac_np) { |
| 2392 | if (!of_device_is_compatible(mac_np, | 2431 | if (!of_device_is_compatible(mac_np, |
| 2393 | "mediatek,eth-mac")) | 2432 | "mediatek,eth-mac")) |
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 30031959d6de..99b1c8e9f16f 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h | |||
| @@ -342,6 +342,11 @@ | |||
| 342 | #define GPIO_BIAS_CTRL 0xed0 | 342 | #define GPIO_BIAS_CTRL 0xed0 |
| 343 | #define GPIO_DRV_SEL10 0xf00 | 343 | #define GPIO_DRV_SEL10 0xf00 |
| 344 | 344 | ||
| 345 | /* ethernet subsystem chip id register */ | ||
| 346 | #define ETHSYS_CHIPID0_3 0x0 | ||
| 347 | #define ETHSYS_CHIPID4_7 0x4 | ||
| 348 | #define MT7623_ETH 7623 | ||
| 349 | |||
| 345 | /* ethernet subsystem config register */ | 350 | /* ethernet subsystem config register */ |
| 346 | #define ETHSYS_SYSCFG0 0x14 | 351 | #define ETHSYS_SYSCFG0 0x14 |
| 347 | #define SYSCFG0_GE_MASK 0x3 | 352 | #define SYSCFG0_GE_MASK 0x3 |
| @@ -534,6 +539,7 @@ struct mtk_eth { | |||
| 534 | unsigned long sysclk; | 539 | unsigned long sysclk; |
| 535 | struct regmap *ethsys; | 540 | struct regmap *ethsys; |
| 536 | struct regmap *pctl; | 541 | struct regmap *pctl; |
| 542 | u32 chip_id; | ||
| 537 | bool hwlro; | 543 | bool hwlro; |
| 538 | atomic_t dma_refcnt; | 544 | atomic_t dma_refcnt; |
| 539 | struct mtk_tx_ring tx_ring; | 545 | struct mtk_tx_ring tx_ring; |
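Taken together, the two MediaTek hunks replace the "mediatek,hwlro" device-tree property with runtime detection: mtk_get_chip_id() reads the chip name out of the ETHSYS CHIPID registers as ASCII digit characters and folds them into a decimal id (7623 on MT7623), which mtk_is_hwlro_supported() then matches. A hedged, standalone sketch of the decode follows; the register values are made-up placeholders that encode "7623" at the byte lanes the driver reads.

	#include <stdio.h>

	static unsigned int decode_chip_id(unsigned int val0, unsigned int val1)
	{
		unsigned int id[4];

		/* Each byte lane holds an ASCII digit; subtracting '0' yields 0..9. */
		id[3] = ((val0 >> 16) & 0xff) - '0';	/* thousands */
		id[2] = ((val0 >> 24) & 0xff) - '0';	/* hundreds */
		id[1] = (val1 & 0xff) - '0';		/* tens */
		id[0] = ((val1 >> 8) & 0xff) - '0';	/* units */

		return id[3] * 1000 + id[2] * 100 + id[1] * 10 + id[0];
	}

	int main(void)
	{
		/* Hypothetical register contents: '7' and '6' in CHIPID0_3,
		 * '2' and '3' in CHIPID4_7. Prints 7623. */
		printf("%u\n", decode_chip_id(0x36370000, 0x3332));
		return 0;
	}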
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig index 9ba568db576f..d7720bf92d49 100644 --- a/drivers/net/ethernet/qualcomm/Kconfig +++ b/drivers/net/ethernet/qualcomm/Kconfig | |||
| @@ -26,6 +26,7 @@ config QCA7000 | |||
| 26 | 26 | ||
| 27 | config QCOM_EMAC | 27 | config QCOM_EMAC |
| 28 | tristate "Qualcomm Technologies, Inc. EMAC Gigabit Ethernet support" | 28 | tristate "Qualcomm Technologies, Inc. EMAC Gigabit Ethernet support" |
| 29 | depends on HAS_DMA && HAS_IOMEM | ||
| 29 | select CRC32 | 30 | select CRC32 |
| 30 | select PHYLIB | 31 | select PHYLIB |
| 31 | ---help--- | 32 | ---help--- |
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index c3e85acfdc70..054a8dd23dae 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c | |||
| @@ -30,6 +30,8 @@ | |||
| 30 | 30 | ||
| 31 | #define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7) | 31 | #define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7) |
| 32 | #define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6) | 32 | #define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6) |
| 33 | #define AM33XX_GMII_SEL_RGMII2_IDMODE BIT(5) | ||
| 34 | #define AM33XX_GMII_SEL_RGMII1_IDMODE BIT(4) | ||
| 33 | 35 | ||
| 34 | #define GMII_SEL_MODE_MASK 0x3 | 36 | #define GMII_SEL_MODE_MASK 0x3 |
| 35 | 37 | ||
| @@ -48,6 +50,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv, | |||
| 48 | u32 reg; | 50 | u32 reg; |
| 49 | u32 mask; | 51 | u32 mask; |
| 50 | u32 mode = 0; | 52 | u32 mode = 0; |
| 53 | bool rgmii_id = false; | ||
| 51 | 54 | ||
| 52 | reg = readl(priv->gmii_sel); | 55 | reg = readl(priv->gmii_sel); |
| 53 | 56 | ||
| @@ -57,10 +60,14 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv, | |||
| 57 | break; | 60 | break; |
| 58 | 61 | ||
| 59 | case PHY_INTERFACE_MODE_RGMII: | 62 | case PHY_INTERFACE_MODE_RGMII: |
| 63 | mode = AM33XX_GMII_SEL_MODE_RGMII; | ||
| 64 | break; | ||
| 65 | |||
| 60 | case PHY_INTERFACE_MODE_RGMII_ID: | 66 | case PHY_INTERFACE_MODE_RGMII_ID: |
| 61 | case PHY_INTERFACE_MODE_RGMII_RXID: | 67 | case PHY_INTERFACE_MODE_RGMII_RXID: |
| 62 | case PHY_INTERFACE_MODE_RGMII_TXID: | 68 | case PHY_INTERFACE_MODE_RGMII_TXID: |
| 63 | mode = AM33XX_GMII_SEL_MODE_RGMII; | 69 | mode = AM33XX_GMII_SEL_MODE_RGMII; |
| 70 | rgmii_id = true; | ||
| 64 | break; | 71 | break; |
| 65 | 72 | ||
| 66 | default: | 73 | default: |
| @@ -83,6 +90,13 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv, | |||
| 83 | mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN; | 90 | mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN; |
| 84 | } | 91 | } |
| 85 | 92 | ||
| 93 | if (rgmii_id) { | ||
| 94 | if (slave == 0) | ||
| 95 | mode |= AM33XX_GMII_SEL_RGMII1_IDMODE; | ||
| 96 | else | ||
| 97 | mode |= AM33XX_GMII_SEL_RGMII2_IDMODE; | ||
| 98 | } | ||
| 99 | |||
| 86 | reg &= ~mask; | 100 | reg &= ~mask; |
| 87 | reg |= mode; | 101 | reg |= mode; |
| 88 | 102 | ||
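The cpsw-phy-sel hunk distinguishes plain RGMII from the internal-delay variants: all four interface modes select RGMII in GMII_SEL, but RGMII_ID/RGMII_RXID/RGMII_TXID additionally set a per-slave IDMODE bit. A hedged restatement of that selection as a standalone helper (the helper is illustrative, not part of the driver; the bit names are the ones defined in the hunk):

	static u32 am3352_rgmii_mode(phy_interface_t phy_mode, int slave)
	{
		u32 mode = AM33XX_GMII_SEL_MODE_RGMII;

		switch (phy_mode) {
		case PHY_INTERFACE_MODE_RGMII:
			return mode;			/* no IDMODE bit */
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
			return mode | (slave == 0 ?
				       AM33XX_GMII_SEL_RGMII1_IDMODE :
				       AM33XX_GMII_SEL_RGMII2_IDMODE);
		default:
			return 0;			/* not an RGMII mode */
		}
	}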
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index bc258d7e41df..272f2b1cb7ad 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c | |||
| @@ -1769,7 +1769,7 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev) | |||
| 1769 | gelic_ether_setup_netdev_ops(netdev, &card->napi); | 1769 | gelic_ether_setup_netdev_ops(netdev, &card->napi); |
| 1770 | result = gelic_net_setup_netdev(netdev, card); | 1770 | result = gelic_net_setup_netdev(netdev, card); |
| 1771 | if (result) { | 1771 | if (result) { |
| 1772 | dev_dbg(&dev->core, "%s: setup_netdev failed %d", | 1772 | dev_dbg(&dev->core, "%s: setup_netdev failed %d\n", |
| 1773 | __func__, result); | 1773 | __func__, result); |
| 1774 | goto fail_setup_netdev; | 1774 | goto fail_setup_netdev; |
| 1775 | } | 1775 | } |
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 69e2a833a84f..35f9f9742a48 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c | |||
| @@ -818,7 +818,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev) | |||
| 818 | goto out; | 818 | goto out; |
| 819 | } | 819 | } |
| 820 | if (!(status & XAXIDMA_IRQ_ALL_MASK)) | 820 | if (!(status & XAXIDMA_IRQ_ALL_MASK)) |
| 821 | dev_err(&ndev->dev, "No interrupts asserted in Tx path"); | 821 | dev_err(&ndev->dev, "No interrupts asserted in Tx path\n"); |
| 822 | if (status & XAXIDMA_IRQ_ERROR_MASK) { | 822 | if (status & XAXIDMA_IRQ_ERROR_MASK) { |
| 823 | dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status); | 823 | dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status); |
| 824 | dev_err(&ndev->dev, "Current BD is at: 0x%x\n", | 824 | dev_err(&ndev->dev, "Current BD is at: 0x%x\n", |
| @@ -867,7 +867,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev) | |||
| 867 | goto out; | 867 | goto out; |
| 868 | } | 868 | } |
| 869 | if (!(status & XAXIDMA_IRQ_ALL_MASK)) | 869 | if (!(status & XAXIDMA_IRQ_ALL_MASK)) |
| 870 | dev_err(&ndev->dev, "No interrupts asserted in Rx path"); | 870 | dev_err(&ndev->dev, "No interrupts asserted in Rx path\n"); |
| 871 | if (status & XAXIDMA_IRQ_ERROR_MASK) { | 871 | if (status & XAXIDMA_IRQ_ERROR_MASK) { |
| 872 | dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status); | 872 | dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status); |
| 873 | dev_err(&ndev->dev, "Current BD is at: 0x%x\n", | 873 | dev_err(&ndev->dev, "Current BD is at: 0x%x\n", |
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 5078a0d0db64..2651c8d8de2f 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
| @@ -142,6 +142,7 @@ config MDIO_THUNDER | |||
| 142 | 142 | ||
| 143 | config MDIO_XGENE | 143 | config MDIO_XGENE |
| 144 | tristate "APM X-Gene SoC MDIO bus controller" | 144 | tristate "APM X-Gene SoC MDIO bus controller" |
| 145 | depends on ARCH_XGENE || COMPILE_TEST | ||
| 145 | help | 146 | help |
| 146 | This module provides a driver for the MDIO busses found in the | 147 | This module provides a driver for the MDIO busses found in the |
| 147 | APM X-Gene SoC's. | 148 | APM X-Gene SoC's. |
| @@ -320,13 +321,6 @@ config XILINX_GMII2RGMII | |||
| 320 | the Reduced Gigabit Media Independent Interface(RGMII) between | 321 | the Reduced Gigabit Media Independent Interface(RGMII) between |
| 321 | Ethernet physical media devices and the Gigabit Ethernet controller. | 322 | Ethernet physical media devices and the Gigabit Ethernet controller. |
| 322 | 323 | ||
| 323 | config MDIO_XGENE | ||
| 324 | tristate "APM X-Gene SoC MDIO bus controller" | ||
| 325 | depends on ARCH_XGENE || COMPILE_TEST | ||
| 326 | help | ||
| 327 | This module provides a driver for the MDIO busses found in the | ||
| 328 | APM X-Gene SoC's. | ||
| 329 | |||
| 330 | endif # PHYLIB | 324 | endif # PHYLIB |
| 331 | 325 | ||
| 332 | config MICREL_KS8995MA | 326 | config MICREL_KS8995MA |
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index a17573e3bd8a..77a6671d572e 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/phy.h> | 13 | #include <linux/phy.h> |
| 14 | #include <linux/of.h> | 14 | #include <linux/of.h> |
| 15 | #include <dt-bindings/net/mscc-phy-vsc8531.h> | 15 | #include <dt-bindings/net/mscc-phy-vsc8531.h> |
| 16 | #include <linux/netdevice.h> | ||
| 16 | 17 | ||
| 17 | enum rgmii_rx_clock_delay { | 18 | enum rgmii_rx_clock_delay { |
| 18 | RGMII_RX_CLK_DELAY_0_2_NS = 0, | 19 | RGMII_RX_CLK_DELAY_0_2_NS = 0, |
| @@ -37,6 +38,7 @@ enum rgmii_rx_clock_delay { | |||
| 37 | 38 | ||
| 38 | #define MII_VSC85XX_INT_MASK 25 | 39 | #define MII_VSC85XX_INT_MASK 25 |
| 39 | #define MII_VSC85XX_INT_MASK_MASK 0xa000 | 40 | #define MII_VSC85XX_INT_MASK_MASK 0xa000 |
| 41 | #define MII_VSC85XX_INT_MASK_WOL 0x0040 | ||
| 40 | #define MII_VSC85XX_INT_STATUS 26 | 42 | #define MII_VSC85XX_INT_STATUS 26 |
| 41 | 43 | ||
| 42 | #define MSCC_PHY_WOL_MAC_CONTROL 27 | 44 | #define MSCC_PHY_WOL_MAC_CONTROL 27 |
| @@ -52,6 +54,17 @@ enum rgmii_rx_clock_delay { | |||
| 52 | #define RGMII_RX_CLK_DELAY_MASK 0x0070 | 54 | #define RGMII_RX_CLK_DELAY_MASK 0x0070 |
| 53 | #define RGMII_RX_CLK_DELAY_POS 4 | 55 | #define RGMII_RX_CLK_DELAY_POS 4 |
| 54 | 56 | ||
| 57 | #define MSCC_PHY_WOL_LOWER_MAC_ADDR 21 | ||
| 58 | #define MSCC_PHY_WOL_MID_MAC_ADDR 22 | ||
| 59 | #define MSCC_PHY_WOL_UPPER_MAC_ADDR 23 | ||
| 60 | #define MSCC_PHY_WOL_LOWER_PASSWD 24 | ||
| 61 | #define MSCC_PHY_WOL_MID_PASSWD 25 | ||
| 62 | #define MSCC_PHY_WOL_UPPER_PASSWD 26 | ||
| 63 | |||
| 64 | #define MSCC_PHY_WOL_MAC_CONTROL 27 | ||
| 65 | #define SECURE_ON_ENABLE 0x8000 | ||
| 66 | #define SECURE_ON_PASSWD_LEN_4 0x4000 | ||
| 67 | |||
| 55 | /* Microsemi PHY ID's */ | 68 | /* Microsemi PHY ID's */ |
| 56 | #define PHY_ID_VSC8531 0x00070570 | 69 | #define PHY_ID_VSC8531 0x00070570 |
| 57 | #define PHY_ID_VSC8541 0x00070770 | 70 | #define PHY_ID_VSC8541 0x00070770 |
| @@ -81,6 +94,117 @@ static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page) | |||
| 81 | return rc; | 94 | return rc; |
| 82 | } | 95 | } |
| 83 | 96 | ||
| 97 | static int vsc85xx_wol_set(struct phy_device *phydev, | ||
| 98 | struct ethtool_wolinfo *wol) | ||
| 99 | { | ||
| 100 | int rc; | ||
| 101 | u16 reg_val; | ||
| 102 | u8 i; | ||
| 103 | u16 pwd[3] = {0, 0, 0}; | ||
| 104 | struct ethtool_wolinfo *wol_conf = wol; | ||
| 105 | u8 *mac_addr = phydev->attached_dev->dev_addr; | ||
| 106 | |||
| 107 | mutex_lock(&phydev->lock); | ||
| 108 | rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2); | ||
| 109 | if (rc != 0) | ||
| 110 | goto out_unlock; | ||
| 111 | |||
| 112 | if (wol->wolopts & WAKE_MAGIC) { | ||
| 113 | /* Store the device address for the magic packet */ | ||
| 114 | for (i = 0; i < ARRAY_SIZE(pwd); i++) | ||
| 115 | pwd[i] = mac_addr[5 - (i * 2 + 1)] << 8 | | ||
| 116 | mac_addr[5 - i * 2]; | ||
| 117 | phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, pwd[0]); | ||
| 118 | phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, pwd[1]); | ||
| 119 | phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, pwd[2]); | ||
| 120 | } else { | ||
| 121 | phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, 0); | ||
| 122 | phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, 0); | ||
| 123 | phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, 0); | ||
| 124 | } | ||
| 125 | |||
| 126 | if (wol_conf->wolopts & WAKE_MAGICSECURE) { | ||
| 127 | for (i = 0; i < ARRAY_SIZE(pwd); i++) | ||
| 128 | pwd[i] = wol_conf->sopass[5 - (i * 2 + 1)] << 8 | | ||
| 129 | wol_conf->sopass[5 - i * 2]; | ||
| 130 | phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, pwd[0]); | ||
| 131 | phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, pwd[1]); | ||
| 132 | phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, pwd[2]); | ||
| 133 | } else { | ||
| 134 | phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, 0); | ||
| 135 | phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, 0); | ||
| 136 | phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, 0); | ||
| 137 | } | ||
| 138 | |||
| 139 | reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL); | ||
| 140 | if (wol_conf->wolopts & WAKE_MAGICSECURE) | ||
| 141 | reg_val |= SECURE_ON_ENABLE; | ||
| 142 | else | ||
| 143 | reg_val &= ~SECURE_ON_ENABLE; | ||
| 144 | phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val); | ||
| 145 | |||
| 146 | rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); | ||
| 147 | if (rc != 0) | ||
| 148 | goto out_unlock; | ||
| 149 | |||
| 150 | if (wol->wolopts & WAKE_MAGIC) { | ||
| 151 | /* Enable the WOL interrupt */ | ||
| 152 | reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK); | ||
| 153 | reg_val |= MII_VSC85XX_INT_MASK_WOL; | ||
| 154 | rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val); | ||
| 155 | if (rc != 0) | ||
| 156 | goto out_unlock; | ||
| 157 | } else { | ||
| 158 | /* Disable the WOL interrupt */ | ||
| 159 | reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK); | ||
| 160 | reg_val &= (~MII_VSC85XX_INT_MASK_WOL); | ||
| 161 | rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val); | ||
| 162 | if (rc != 0) | ||
| 163 | goto out_unlock; | ||
| 164 | } | ||
| 165 | /* Clear WOL interrupt status */ | ||
| 166 | reg_val = phy_read(phydev, MII_VSC85XX_INT_STATUS); | ||
| 167 | |||
| 168 | out_unlock: | ||
| 169 | mutex_unlock(&phydev->lock); | ||
| 170 | |||
| 171 | return rc; | ||
| 172 | } | ||
| 173 | |||
| 174 | static void vsc85xx_wol_get(struct phy_device *phydev, | ||
| 175 | struct ethtool_wolinfo *wol) | ||
| 176 | { | ||
| 177 | int rc; | ||
| 178 | u16 reg_val; | ||
| 179 | u8 i; | ||
| 180 | u16 pwd[3] = {0, 0, 0}; | ||
| 181 | struct ethtool_wolinfo *wol_conf = wol; | ||
| 182 | |||
| 183 | mutex_lock(&phydev->lock); | ||
| 184 | rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2); | ||
| 185 | if (rc != 0) | ||
| 186 | goto out_unlock; | ||
| 187 | |||
| 188 | reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL); | ||
| 189 | if (reg_val & SECURE_ON_ENABLE) | ||
| 190 | wol_conf->wolopts |= WAKE_MAGICSECURE; | ||
| 191 | if (wol_conf->wolopts & WAKE_MAGICSECURE) { | ||
| 192 | pwd[0] = phy_read(phydev, MSCC_PHY_WOL_LOWER_PASSWD); | ||
| 193 | pwd[1] = phy_read(phydev, MSCC_PHY_WOL_MID_PASSWD); | ||
| 194 | pwd[2] = phy_read(phydev, MSCC_PHY_WOL_UPPER_PASSWD); | ||
| 195 | for (i = 0; i < ARRAY_SIZE(pwd); i++) { | ||
| 196 | wol_conf->sopass[5 - i * 2] = pwd[i] & 0x00ff; | ||
| 197 | wol_conf->sopass[5 - (i * 2 + 1)] = (pwd[i] & 0xff00) | ||
| 198 | >> 8; | ||
| 199 | } | ||
| 200 | } | ||
| 201 | |||
| 202 | rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); | ||
| 203 | |||
| 204 | out_unlock: | ||
| 205 | mutex_unlock(&phydev->lock); | ||
| 206 | } | ||
| 207 | |||
| 84 | static u8 edge_rate_magic_get(u16 vddmac, | 208 | static u8 edge_rate_magic_get(u16 vddmac, |
| 85 | int slowdown) | 209 | int slowdown) |
| 86 | { | 210 | { |
| @@ -301,6 +425,8 @@ static struct phy_driver vsc85xx_driver[] = { | |||
| 301 | .suspend = &genphy_suspend, | 425 | .suspend = &genphy_suspend, |
| 302 | .resume = &genphy_resume, | 426 | .resume = &genphy_resume, |
| 303 | .probe = &vsc85xx_probe, | 427 | .probe = &vsc85xx_probe, |
| 428 | .set_wol = &vsc85xx_wol_set, | ||
| 429 | .get_wol = &vsc85xx_wol_get, | ||
| 304 | }, | 430 | }, |
| 305 | { | 431 | { |
| 306 | .phy_id = PHY_ID_VSC8541, | 432 | .phy_id = PHY_ID_VSC8541, |
| @@ -318,6 +444,8 @@ static struct phy_driver vsc85xx_driver[] = { | |||
| 318 | .suspend = &genphy_suspend, | 444 | .suspend = &genphy_suspend, |
| 319 | .resume = &genphy_resume, | 445 | .resume = &genphy_resume, |
| 320 | .probe = &vsc85xx_probe, | 446 | .probe = &vsc85xx_probe, |
| 447 | .set_wol = &vsc85xx_wol_set, | ||
| 448 | .get_wol = &vsc85xx_wol_get, | ||
| 321 | } | 449 | } |
| 322 | 450 | ||
| 323 | }; | 451 | }; |
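The byte order used by vsc85xx_wol_set() when folding the six-byte station address (and, analogously, the SecureOn password) into three 16-bit PHY registers is easy to misread, so here is a hedged, standalone worked example; the address is arbitrary.

	#include <stdio.h>

	int main(void)
	{
		unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		unsigned short pwd[3];
		int i;

		/* Same folding as the driver: pairs of bytes, last pair first. */
		for (i = 0; i < 3; i++)
			pwd[i] = mac[5 - (i * 2 + 1)] << 8 | mac[5 - i * 2];

		/* Prints 0x4455 0x2233 0x0011 - the LOWER, MID and UPPER
		 * register values for 00:11:22:33:44:55. */
		printf("0x%04x 0x%04x 0x%04x\n", pwd[0], pwd[1], pwd[2]);
		return 0;
	}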
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 5fbf83d5aa57..65647533b401 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c | |||
| @@ -295,11 +295,11 @@ free_ucc_pram: | |||
| 295 | qe_muram_free(priv->ucc_pram_offset); | 295 | qe_muram_free(priv->ucc_pram_offset); |
| 296 | free_tx_bd: | 296 | free_tx_bd: |
| 297 | dma_free_coherent(priv->dev, | 297 | dma_free_coherent(priv->dev, |
| 298 | TX_BD_RING_LEN * sizeof(struct qe_bd), | 298 | TX_BD_RING_LEN * sizeof(struct qe_bd *), |
| 299 | priv->tx_bd_base, priv->dma_tx_bd); | 299 | priv->tx_bd_base, priv->dma_tx_bd); |
| 300 | free_rx_bd: | 300 | free_rx_bd: |
| 301 | dma_free_coherent(priv->dev, | 301 | dma_free_coherent(priv->dev, |
| 302 | RX_BD_RING_LEN * sizeof(struct qe_bd), | 302 | RX_BD_RING_LEN * sizeof(struct qe_bd *), |
| 303 | priv->rx_bd_base, priv->dma_rx_bd); | 303 | priv->rx_bd_base, priv->dma_rx_bd); |
| 304 | free_uccf: | 304 | free_uccf: |
| 305 | ucc_fast_free(priv->uccf); | 305 | ucc_fast_free(priv->uccf); |
| @@ -688,7 +688,7 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv) | |||
| 688 | 688 | ||
| 689 | if (priv->rx_bd_base) { | 689 | if (priv->rx_bd_base) { |
| 690 | dma_free_coherent(priv->dev, | 690 | dma_free_coherent(priv->dev, |
| 691 | RX_BD_RING_LEN * sizeof(struct qe_bd), | 691 | RX_BD_RING_LEN * sizeof(struct qe_bd *), |
| 692 | priv->rx_bd_base, priv->dma_rx_bd); | 692 | priv->rx_bd_base, priv->dma_rx_bd); |
| 693 | 693 | ||
| 694 | priv->rx_bd_base = NULL; | 694 | priv->rx_bd_base = NULL; |
| @@ -697,7 +697,7 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv) | |||
| 697 | 697 | ||
| 698 | if (priv->tx_bd_base) { | 698 | if (priv->tx_bd_base) { |
| 699 | dma_free_coherent(priv->dev, | 699 | dma_free_coherent(priv->dev, |
| 700 | TX_BD_RING_LEN * sizeof(struct qe_bd), | 700 | TX_BD_RING_LEN * sizeof(struct qe_bd *), |
| 701 | priv->tx_bd_base, priv->dma_tx_bd); | 701 | priv->tx_bd_base, priv->dma_tx_bd); |
| 702 | 702 | ||
| 703 | priv->tx_bd_base = NULL; | 703 | priv->tx_bd_base = NULL; |
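The fsl_ucc_hdlc hunks restore the DMA API invariant that dma_free_coherent() must be passed exactly the size that was handed to dma_alloc_coherent(): the frees now use the same sizeof(struct qe_bd *) element size as the allocations earlier in the init path (whether that element size is itself the intended one is a separate question the patch does not touch). A hedged sketch of the matched pair:

	/* Allocation and free must agree on the size. */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd *),
					      &priv->dma_rx_bd, GFP_KERNEL);

	/* ... later, on teardown or an error path ... */
	dma_free_coherent(priv->dev, RX_BD_RING_LEN * sizeof(struct qe_bd *),
			  priv->rx_bd_base, priv->dma_rx_bd);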
diff --git a/drivers/net/xen-netback/Makefile b/drivers/net/xen-netback/Makefile index 11e02be9db1a..d49798a46b51 100644 --- a/drivers/net/xen-netback/Makefile +++ b/drivers/net/xen-netback/Makefile | |||
| @@ -1,3 +1,3 @@ | |||
| 1 | obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o | 1 | obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o |
| 2 | 2 | ||
| 3 | xen-netback-y := netback.o xenbus.o interface.o hash.o | 3 | xen-netback-y := netback.o xenbus.o interface.o hash.o rx.o |
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index b38fb2cf3364..cf68149cbb55 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h | |||
| @@ -91,13 +91,6 @@ struct xenvif_rx_meta { | |||
| 91 | */ | 91 | */ |
| 92 | #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1) | 92 | #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1) |
| 93 | 93 | ||
| 94 | /* It's possible for an skb to have a maximal number of frags | ||
| 95 | * but still be less than MAX_BUFFER_OFFSET in size. Thus the | ||
| 96 | * worst-case number of copy operations is MAX_XEN_SKB_FRAGS per | ||
| 97 | * ring slot. | ||
| 98 | */ | ||
| 99 | #define MAX_GRANT_COPY_OPS (MAX_XEN_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE) | ||
| 100 | |||
| 101 | #define NETBACK_INVALID_HANDLE -1 | 94 | #define NETBACK_INVALID_HANDLE -1 |
| 102 | 95 | ||
| 103 | /* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating | 96 | /* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating |
| @@ -133,6 +126,15 @@ struct xenvif_stats { | |||
| 133 | unsigned long tx_frag_overflow; | 126 | unsigned long tx_frag_overflow; |
| 134 | }; | 127 | }; |
| 135 | 128 | ||
| 129 | #define COPY_BATCH_SIZE 64 | ||
| 130 | |||
| 131 | struct xenvif_copy_state { | ||
| 132 | struct gnttab_copy op[COPY_BATCH_SIZE]; | ||
| 133 | RING_IDX idx[COPY_BATCH_SIZE]; | ||
| 134 | unsigned int num; | ||
| 135 | struct sk_buff_head *completed; | ||
| 136 | }; | ||
| 137 | |||
| 136 | struct xenvif_queue { /* Per-queue data for xenvif */ | 138 | struct xenvif_queue { /* Per-queue data for xenvif */ |
| 137 | unsigned int id; /* Queue ID, 0-based */ | 139 | unsigned int id; /* Queue ID, 0-based */ |
| 138 | char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ | 140 | char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ |
| @@ -189,12 +191,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */ | |||
| 189 | unsigned long last_rx_time; | 191 | unsigned long last_rx_time; |
| 190 | bool stalled; | 192 | bool stalled; |
| 191 | 193 | ||
| 192 | struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS]; | 194 | struct xenvif_copy_state rx_copy; |
| 193 | |||
| 194 | /* We create one meta structure per ring request we consume, so | ||
| 195 | * the maximum number is the same as the ring size. | ||
| 196 | */ | ||
| 197 | struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE]; | ||
| 198 | 195 | ||
| 199 | /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */ | 196 | /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */ |
| 200 | unsigned long credit_bytes; | 197 | unsigned long credit_bytes; |
| @@ -260,7 +257,6 @@ struct xenvif { | |||
| 260 | 257 | ||
| 261 | /* Frontend feature information. */ | 258 | /* Frontend feature information. */ |
| 262 | int gso_mask; | 259 | int gso_mask; |
| 263 | int gso_prefix_mask; | ||
| 264 | 260 | ||
| 265 | u8 can_sg:1; | 261 | u8 can_sg:1; |
| 266 | u8 ip_csum:1; | 262 | u8 ip_csum:1; |
| @@ -359,6 +355,7 @@ int xenvif_dealloc_kthread(void *data); | |||
| 359 | 355 | ||
| 360 | irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data); | 356 | irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data); |
| 361 | 357 | ||
| 358 | void xenvif_rx_action(struct xenvif_queue *queue); | ||
| 362 | void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); | 359 | void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); |
| 363 | 360 | ||
| 364 | void xenvif_carrier_on(struct xenvif *vif); | 361 | void xenvif_carrier_on(struct xenvif *vif); |
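The new xenvif_copy_state replaces the worst-case MAX_GRANT_COPY_OPS array with a small fixed batch: grant-copy operations are accumulated and flushed in groups of COPY_BATCH_SIZE. What follows is a hedged sketch of the batching pattern this enables; the helper names are placeholders, and the real implementation lives in the new rx.c.

	static void rx_copy_flush(struct xenvif_copy_state *co)
	{
		if (!co->num)
			return;
		gnttab_batch_copy(co->op, co->num);	/* one batched hypercall */
		co->num = 0;
	}

	static void rx_copy_add(struct xenvif_copy_state *co,
				struct gnttab_copy *op, RING_IDX idx)
	{
		if (co->num == COPY_BATCH_SIZE)		/* batch full: flush first */
			rx_copy_flush(co);
		co->op[co->num] = *op;
		co->idx[co->num] = idx;
		co->num++;
	}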
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index fb50c6d5f6c3..74dc2bf71428 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
| @@ -149,17 +149,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, | |||
| 149 | struct xenvif *vif = netdev_priv(dev); | 149 | struct xenvif *vif = netdev_priv(dev); |
| 150 | unsigned int size = vif->hash.size; | 150 | unsigned int size = vif->hash.size; |
| 151 | 151 | ||
| 152 | if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) { | 152 | if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) |
| 153 | u16 index = fallback(dev, skb) % dev->real_num_tx_queues; | 153 | return fallback(dev, skb) % dev->real_num_tx_queues; |
| 154 | |||
| 155 | /* Make sure there is no hash information in the socket | ||
| 156 | * buffer otherwise it would be incorrectly forwarded | ||
| 157 | * to the frontend. | ||
| 158 | */ | ||
| 159 | skb_clear_hash(skb); | ||
| 160 | |||
| 161 | return index; | ||
| 162 | } | ||
| 163 | 154 | ||
| 164 | xenvif_set_skb_hash(vif, skb); | 155 | xenvif_set_skb_hash(vif, skb); |
| 165 | 156 | ||
| @@ -208,6 +199,13 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 208 | cb = XENVIF_RX_CB(skb); | 199 | cb = XENVIF_RX_CB(skb); |
| 209 | cb->expires = jiffies + vif->drain_timeout; | 200 | cb->expires = jiffies + vif->drain_timeout; |
| 210 | 201 | ||
| 202 | /* If there is no hash algorithm configured then make sure there | ||
| 203 | * is no hash information in the socket buffer otherwise it | ||
| 204 | * would be incorrectly forwarded to the frontend. | ||
| 205 | */ | ||
| 206 | if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) | ||
| 207 | skb_clear_hash(skb); | ||
| 208 | |||
| 211 | xenvif_rx_queue_tail(queue, skb); | 209 | xenvif_rx_queue_tail(queue, skb); |
| 212 | xenvif_kick_thread(queue); | 210 | xenvif_kick_thread(queue); |
| 213 | 211 | ||
| @@ -319,9 +317,9 @@ static netdev_features_t xenvif_fix_features(struct net_device *dev, | |||
| 319 | 317 | ||
| 320 | if (!vif->can_sg) | 318 | if (!vif->can_sg) |
| 321 | features &= ~NETIF_F_SG; | 319 | features &= ~NETIF_F_SG; |
| 322 | if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4)) | 320 | if (~(vif->gso_mask) & GSO_BIT(TCPV4)) |
| 323 | features &= ~NETIF_F_TSO; | 321 | features &= ~NETIF_F_TSO; |
| 324 | if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6)) | 322 | if (~(vif->gso_mask) & GSO_BIT(TCPV6)) |
| 325 | features &= ~NETIF_F_TSO6; | 323 | features &= ~NETIF_F_TSO6; |
| 326 | if (!vif->ip_csum) | 324 | if (!vif->ip_csum) |
| 327 | features &= ~NETIF_F_IP_CSUM; | 325 | features &= ~NETIF_F_IP_CSUM; |
| @@ -467,7 +465,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, | |||
| 467 | dev->netdev_ops = &xenvif_netdev_ops; | 465 | dev->netdev_ops = &xenvif_netdev_ops; |
| 468 | dev->hw_features = NETIF_F_SG | | 466 | dev->hw_features = NETIF_F_SG | |
| 469 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 467 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
| 470 | NETIF_F_TSO | NETIF_F_TSO6; | 468 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST; |
| 471 | dev->features = dev->hw_features | NETIF_F_RXCSUM; | 469 | dev->features = dev->hw_features | NETIF_F_RXCSUM; |
| 472 | dev->ethtool_ops = &xenvif_ethtool_ops; | 470 | dev->ethtool_ops = &xenvif_ethtool_ops; |
| 473 | 471 | ||
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 3d0c989384b5..47b481095d77 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
| @@ -106,13 +106,6 @@ static void push_tx_responses(struct xenvif_queue *queue); | |||
| 106 | 106 | ||
| 107 | static inline int tx_work_todo(struct xenvif_queue *queue); | 107 | static inline int tx_work_todo(struct xenvif_queue *queue); |
| 108 | 108 | ||
| 109 | static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, | ||
| 110 | u16 id, | ||
| 111 | s8 st, | ||
| 112 | u16 offset, | ||
| 113 | u16 size, | ||
| 114 | u16 flags); | ||
| 115 | |||
| 116 | static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, | 109 | static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, |
| 117 | u16 idx) | 110 | u16 idx) |
| 118 | { | 111 | { |
| @@ -155,571 +148,11 @@ static inline pending_ring_idx_t pending_index(unsigned i) | |||
| 155 | return i & (MAX_PENDING_REQS-1); | 148 | return i & (MAX_PENDING_REQS-1); |
| 156 | } | 149 | } |
| 157 | 150 | ||
| 158 | static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) | ||
| 159 | { | ||
| 160 | RING_IDX prod, cons; | ||
| 161 | struct sk_buff *skb; | ||
| 162 | int needed; | ||
| 163 | |||
| 164 | skb = skb_peek(&queue->rx_queue); | ||
| 165 | if (!skb) | ||
| 166 | return false; | ||
| 167 | |||
| 168 | needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); | ||
| 169 | if (skb_is_gso(skb)) | ||
| 170 | needed++; | ||
| 171 | if (skb->sw_hash) | ||
| 172 | needed++; | ||
| 173 | |||
| 174 | do { | ||
| 175 | prod = queue->rx.sring->req_prod; | ||
| 176 | cons = queue->rx.req_cons; | ||
| 177 | |||
| 178 | if (prod - cons >= needed) | ||
| 179 | return true; | ||
| 180 | |||
| 181 | queue->rx.sring->req_event = prod + 1; | ||
| 182 | |||
| 183 | /* Make sure event is visible before we check prod | ||
| 184 | * again. | ||
| 185 | */ | ||
| 186 | mb(); | ||
| 187 | } while (queue->rx.sring->req_prod != prod); | ||
| 188 | |||
| 189 | return false; | ||
| 190 | } | ||
| 191 | |||
| 192 | void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) | ||
| 193 | { | ||
| 194 | unsigned long flags; | ||
| 195 | |||
| 196 | spin_lock_irqsave(&queue->rx_queue.lock, flags); | ||
| 197 | |||
| 198 | __skb_queue_tail(&queue->rx_queue, skb); | ||
| 199 | |||
| 200 | queue->rx_queue_len += skb->len; | ||
| 201 | if (queue->rx_queue_len > queue->rx_queue_max) | ||
| 202 | netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); | ||
| 203 | |||
| 204 | spin_unlock_irqrestore(&queue->rx_queue.lock, flags); | ||
| 205 | } | ||
| 206 | |||
| 207 | static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) | ||
| 208 | { | ||
| 209 | struct sk_buff *skb; | ||
| 210 | |||
| 211 | spin_lock_irq(&queue->rx_queue.lock); | ||
| 212 | |||
| 213 | skb = __skb_dequeue(&queue->rx_queue); | ||
| 214 | if (skb) | ||
| 215 | queue->rx_queue_len -= skb->len; | ||
| 216 | |||
| 217 | spin_unlock_irq(&queue->rx_queue.lock); | ||
| 218 | |||
| 219 | return skb; | ||
| 220 | } | ||
| 221 | |||
| 222 | static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue) | ||
| 223 | { | ||
| 224 | spin_lock_irq(&queue->rx_queue.lock); | ||
| 225 | |||
| 226 | if (queue->rx_queue_len < queue->rx_queue_max) | ||
| 227 | netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); | ||
| 228 | |||
| 229 | spin_unlock_irq(&queue->rx_queue.lock); | ||
| 230 | } | ||
| 231 | |||
| 232 | |||
| 233 | static void xenvif_rx_queue_purge(struct xenvif_queue *queue) | ||
| 234 | { | ||
| 235 | struct sk_buff *skb; | ||
| 236 | while ((skb = xenvif_rx_dequeue(queue)) != NULL) | ||
| 237 | kfree_skb(skb); | ||
| 238 | } | ||
| 239 | |||
| 240 | static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) | ||
| 241 | { | ||
| 242 | struct sk_buff *skb; | ||
| 243 | |||
| 244 | for(;;) { | ||
| 245 | skb = skb_peek(&queue->rx_queue); | ||
| 246 | if (!skb) | ||
| 247 | break; | ||
| 248 | if (time_before(jiffies, XENVIF_RX_CB(skb)->expires)) | ||
| 249 | break; | ||
| 250 | xenvif_rx_dequeue(queue); | ||
| 251 | kfree_skb(skb); | ||
| 252 | } | ||
| 253 | } | ||
| 254 | |||
| 255 | struct netrx_pending_operations { | ||
| 256 | unsigned copy_prod, copy_cons; | ||
| 257 | unsigned meta_prod, meta_cons; | ||
| 258 | struct gnttab_copy *copy; | ||
| 259 | struct xenvif_rx_meta *meta; | ||
| 260 | int copy_off; | ||
| 261 | grant_ref_t copy_gref; | ||
| 262 | }; | ||
| 263 | |||
| 264 | static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue, | ||
| 265 | struct netrx_pending_operations *npo) | ||
| 266 | { | ||
| 267 | struct xenvif_rx_meta *meta; | ||
| 268 | struct xen_netif_rx_request req; | ||
| 269 | |||
| 270 | RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); | ||
| 271 | |||
| 272 | meta = npo->meta + npo->meta_prod++; | ||
| 273 | meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; | ||
| 274 | meta->gso_size = 0; | ||
| 275 | meta->size = 0; | ||
| 276 | meta->id = req.id; | ||
| 277 | |||
| 278 | npo->copy_off = 0; | ||
| 279 | npo->copy_gref = req.gref; | ||
| 280 | |||
| 281 | return meta; | ||
| 282 | } | ||
| 283 | |||
| 284 | struct gop_frag_copy { | ||
| 285 | struct xenvif_queue *queue; | ||
| 286 | struct netrx_pending_operations *npo; | ||
| 287 | struct xenvif_rx_meta *meta; | ||
| 288 | int head; | ||
| 289 | int gso_type; | ||
| 290 | int protocol; | ||
| 291 | int hash_present; | ||
| 292 | |||
| 293 | struct page *page; | ||
| 294 | }; | ||
| 295 | |||
| 296 | static void xenvif_setup_copy_gop(unsigned long gfn, | ||
| 297 | unsigned int offset, | ||
| 298 | unsigned int *len, | ||
| 299 | struct gop_frag_copy *info) | ||
| 300 | { | ||
| 301 | struct gnttab_copy *copy_gop; | ||
| 302 | struct xen_page_foreign *foreign; | ||
| 303 | /* Convenient aliases */ | ||
| 304 | struct xenvif_queue *queue = info->queue; | ||
| 305 | struct netrx_pending_operations *npo = info->npo; | ||
| 306 | struct page *page = info->page; | ||
| 307 | |||
| 308 | BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); | ||
| 309 | |||
| 310 | if (npo->copy_off == MAX_BUFFER_OFFSET) | ||
| 311 | info->meta = get_next_rx_buffer(queue, npo); | ||
| 312 | |||
| 313 | if (npo->copy_off + *len > MAX_BUFFER_OFFSET) | ||
| 314 | *len = MAX_BUFFER_OFFSET - npo->copy_off; | ||
| 315 | |||
| 316 | copy_gop = npo->copy + npo->copy_prod++; | ||
| 317 | copy_gop->flags = GNTCOPY_dest_gref; | ||
| 318 | copy_gop->len = *len; | ||
| 319 | |||
| 320 | foreign = xen_page_foreign(page); | ||
| 321 | if (foreign) { | ||
| 322 | copy_gop->source.domid = foreign->domid; | ||
| 323 | copy_gop->source.u.ref = foreign->gref; | ||
| 324 | copy_gop->flags |= GNTCOPY_source_gref; | ||
| 325 | } else { | ||
| 326 | copy_gop->source.domid = DOMID_SELF; | ||
| 327 | copy_gop->source.u.gmfn = gfn; | ||
| 328 | } | ||
| 329 | copy_gop->source.offset = offset; | ||
| 330 | |||
| 331 | copy_gop->dest.domid = queue->vif->domid; | ||
| 332 | copy_gop->dest.offset = npo->copy_off; | ||
| 333 | copy_gop->dest.u.ref = npo->copy_gref; | ||
| 334 | |||
| 335 | npo->copy_off += *len; | ||
| 336 | info->meta->size += *len; | ||
| 337 | |||
| 338 | if (!info->head) | ||
| 339 | return; | ||
| 340 | |||
| 341 | /* Leave a gap for the GSO descriptor. */ | ||
| 342 | if ((1 << info->gso_type) & queue->vif->gso_mask) | ||
| 343 | queue->rx.req_cons++; | ||
| 344 | |||
| 345 | /* Leave a gap for the hash extra segment. */ | ||
| 346 | if (info->hash_present) | ||
| 347 | queue->rx.req_cons++; | ||
| 348 | |||
| 349 | info->head = 0; /* There must be something in this buffer now */ | ||
| 350 | } | ||
| 351 | |||
| 352 | static void xenvif_gop_frag_copy_grant(unsigned long gfn, | ||
| 353 | unsigned offset, | ||
| 354 | unsigned int len, | ||
| 355 | void *data) | ||
| 356 | { | ||
| 357 | unsigned int bytes; | ||
| 358 | |||
| 359 | while (len) { | ||
| 360 | bytes = len; | ||
| 361 | xenvif_setup_copy_gop(gfn, offset, &bytes, data); | ||
| 362 | offset += bytes; | ||
| 363 | len -= bytes; | ||
| 364 | } | ||
| 365 | } | ||
| 366 | |||
| 367 | /* | ||
| 368 | * Set up the grant operations for this fragment. If it's a flipping | ||
| 369 | * interface, we also set up the unmap request from here. | ||
| 370 | */ | ||
| 371 | static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb, | ||
| 372 | struct netrx_pending_operations *npo, | ||
| 373 | struct page *page, unsigned long size, | ||
| 374 | unsigned long offset, int *head) | ||
| 375 | { | ||
| 376 | struct gop_frag_copy info = { | ||
| 377 | .queue = queue, | ||
| 378 | .npo = npo, | ||
| 379 | .head = *head, | ||
| 380 | .gso_type = XEN_NETIF_GSO_TYPE_NONE, | ||
| 381 | /* xenvif_set_skb_hash() will have either set a s/w | ||
| 382 | * hash or cleared the hash depending on | ||
| 383 | * whether the the frontend wants a hash for this skb. | ||
| 384 | */ | ||
| 385 | .hash_present = skb->sw_hash, | ||
| 386 | }; | ||
| 387 | unsigned long bytes; | ||
| 388 | |||
| 389 | if (skb_is_gso(skb)) { | ||
| 390 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) | ||
| 391 | info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4; | ||
| 392 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | ||
| 393 | info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6; | ||
| 394 | } | ||
| 395 | |||
| 396 | /* Data must not cross a page boundary. */ | ||
| 397 | BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); | ||
| 398 | |||
| 399 | info.meta = npo->meta + npo->meta_prod - 1; | ||
| 400 | |||
| 401 | /* Skip unused frames from start of page */ | ||
| 402 | page += offset >> PAGE_SHIFT; | ||
| 403 | offset &= ~PAGE_MASK; | ||
| 404 | |||
| 405 | while (size > 0) { | ||
| 406 | BUG_ON(offset >= PAGE_SIZE); | ||
| 407 | |||
| 408 | bytes = PAGE_SIZE - offset; | ||
| 409 | if (bytes > size) | ||
| 410 | bytes = size; | ||
| 411 | |||
| 412 | info.page = page; | ||
| 413 | gnttab_foreach_grant_in_range(page, offset, bytes, | ||
| 414 | xenvif_gop_frag_copy_grant, | ||
| 415 | &info); | ||
| 416 | size -= bytes; | ||
| 417 | offset = 0; | ||
| 418 | |||
| 419 | /* Next page */ | ||
| 420 | if (size) { | ||
| 421 | BUG_ON(!PageCompound(page)); | ||
| 422 | page++; | ||
| 423 | } | ||
| 424 | } | ||
| 425 | |||
| 426 | *head = info.head; | ||
| 427 | } | ||
| 428 | |||
| 429 | /* | ||
| 430 | * Prepare an SKB to be transmitted to the frontend. | ||
| 431 | * | ||
| 432 | * This function is responsible for allocating grant operations, meta | ||
| 433 | * structures, etc. | ||
| 434 | * | ||
| 435 | * It returns the number of meta structures consumed. The number of | ||
| 436 | * ring slots used is always equal to the number of meta slots used | ||
| 437 | * plus the number of GSO descriptors used. Currently, we use either | ||
| 438 | * zero GSO descriptors (for non-GSO packets) or one descriptor (for | ||
| 439 | * frontend-side LRO). | ||
| 440 | */ | ||
| 441 | static int xenvif_gop_skb(struct sk_buff *skb, | ||
| 442 | struct netrx_pending_operations *npo, | ||
| 443 | struct xenvif_queue *queue) | ||
| 444 | { | ||
| 445 | struct xenvif *vif = netdev_priv(skb->dev); | ||
| 446 | int nr_frags = skb_shinfo(skb)->nr_frags; | ||
| 447 | int i; | ||
| 448 | struct xen_netif_rx_request req; | ||
| 449 | struct xenvif_rx_meta *meta; | ||
| 450 | unsigned char *data; | ||
| 451 | int head = 1; | ||
| 452 | int old_meta_prod; | ||
| 453 | int gso_type; | ||
| 454 | |||
| 455 | old_meta_prod = npo->meta_prod; | ||
| 456 | |||
| 457 | gso_type = XEN_NETIF_GSO_TYPE_NONE; | ||
| 458 | if (skb_is_gso(skb)) { | ||
| 459 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) | ||
| 460 | gso_type = XEN_NETIF_GSO_TYPE_TCPV4; | ||
| 461 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | ||
| 462 | gso_type = XEN_NETIF_GSO_TYPE_TCPV6; | ||
| 463 | } | ||
| 464 | |||
| 465 | /* Set up a GSO prefix descriptor, if necessary */ | ||
| 466 | if ((1 << gso_type) & vif->gso_prefix_mask) { | ||
| 467 | RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); | ||
| 468 | meta = npo->meta + npo->meta_prod++; | ||
| 469 | meta->gso_type = gso_type; | ||
| 470 | meta->gso_size = skb_shinfo(skb)->gso_size; | ||
| 471 | meta->size = 0; | ||
| 472 | meta->id = req.id; | ||
| 473 | } | ||
| 474 | |||
| 475 | RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); | ||
| 476 | meta = npo->meta + npo->meta_prod++; | ||
| 477 | |||
| 478 | if ((1 << gso_type) & vif->gso_mask) { | ||
| 479 | meta->gso_type = gso_type; | ||
| 480 | meta->gso_size = skb_shinfo(skb)->gso_size; | ||
| 481 | } else { | ||
| 482 | meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; | ||
| 483 | meta->gso_size = 0; | ||
| 484 | } | ||
| 485 | |||
| 486 | meta->size = 0; | ||
| 487 | meta->id = req.id; | ||
| 488 | npo->copy_off = 0; | ||
| 489 | npo->copy_gref = req.gref; | ||
| 490 | |||
| 491 | data = skb->data; | ||
| 492 | while (data < skb_tail_pointer(skb)) { | ||
| 493 | unsigned int offset = offset_in_page(data); | ||
| 494 | unsigned int len = PAGE_SIZE - offset; | ||
| 495 | |||
| 496 | if (data + len > skb_tail_pointer(skb)) | ||
| 497 | len = skb_tail_pointer(skb) - data; | ||
| 498 | |||
| 499 | xenvif_gop_frag_copy(queue, skb, npo, | ||
| 500 | virt_to_page(data), len, offset, &head); | ||
| 501 | data += len; | ||
| 502 | } | ||
| 503 | |||
| 504 | for (i = 0; i < nr_frags; i++) { | ||
| 505 | xenvif_gop_frag_copy(queue, skb, npo, | ||
| 506 | skb_frag_page(&skb_shinfo(skb)->frags[i]), | ||
| 507 | skb_frag_size(&skb_shinfo(skb)->frags[i]), | ||
| 508 | skb_shinfo(skb)->frags[i].page_offset, | ||
| 509 | &head); | ||
| 510 | } | ||
| 511 | |||
| 512 | return npo->meta_prod - old_meta_prod; | ||
| 513 | } | ||
| 514 | |||
| 515 | /* | ||
| 516 | * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was | ||
| 517 | * used to set up the operations on the top of | ||
| 518 | * netrx_pending_operations, which have since been done. Check that | ||
| 519 | * they didn't give any errors and advance over them. | ||
| 520 | */ | ||
| 521 | static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots, | ||
| 522 | struct netrx_pending_operations *npo) | ||
| 523 | { | ||
| 524 | struct gnttab_copy *copy_op; | ||
| 525 | int status = XEN_NETIF_RSP_OKAY; | ||
| 526 | int i; | ||
| 527 | |||
| 528 | for (i = 0; i < nr_meta_slots; i++) { | ||
| 529 | copy_op = npo->copy + npo->copy_cons++; | ||
| 530 | if (copy_op->status != GNTST_okay) { | ||
| 531 | netdev_dbg(vif->dev, | ||
| 532 | "Bad status %d from copy to DOM%d.\n", | ||
| 533 | copy_op->status, vif->domid); | ||
| 534 | status = XEN_NETIF_RSP_ERROR; | ||
| 535 | } | ||
| 536 | } | ||
| 537 | |||
| 538 | return status; | ||
| 539 | } | ||
| 540 | |||
| 541 | static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status, | ||
| 542 | struct xenvif_rx_meta *meta, | ||
| 543 | int nr_meta_slots) | ||
| 544 | { | ||
| 545 | int i; | ||
| 546 | unsigned long offset; | ||
| 547 | |||
| 548 | /* No fragments used */ | ||
| 549 | if (nr_meta_slots <= 1) | ||
| 550 | return; | ||
| 551 | |||
| 552 | nr_meta_slots--; | ||
| 553 | |||
| 554 | for (i = 0; i < nr_meta_slots; i++) { | ||
| 555 | int flags; | ||
| 556 | if (i == nr_meta_slots - 1) | ||
| 557 | flags = 0; | ||
| 558 | else | ||
| 559 | flags = XEN_NETRXF_more_data; | ||
| 560 | |||
| 561 | offset = 0; | ||
| 562 | make_rx_response(queue, meta[i].id, status, offset, | ||
| 563 | meta[i].size, flags); | ||
| 564 | } | ||
| 565 | } | ||
| 566 | |||
| 567 | void xenvif_kick_thread(struct xenvif_queue *queue) | 151 | void xenvif_kick_thread(struct xenvif_queue *queue) |
| 568 | { | 152 | { |
| 569 | wake_up(&queue->wq); | 153 | wake_up(&queue->wq); |
| 570 | } | 154 | } |
| 571 | 155 | ||
| 572 | static void xenvif_rx_action(struct xenvif_queue *queue) | ||
| 573 | { | ||
| 574 | struct xenvif *vif = queue->vif; | ||
| 575 | s8 status; | ||
| 576 | u16 flags; | ||
| 577 | struct xen_netif_rx_response *resp; | ||
| 578 | struct sk_buff_head rxq; | ||
| 579 | struct sk_buff *skb; | ||
| 580 | LIST_HEAD(notify); | ||
| 581 | int ret; | ||
| 582 | unsigned long offset; | ||
| 583 | bool need_to_notify = false; | ||
| 584 | |||
| 585 | struct netrx_pending_operations npo = { | ||
| 586 | .copy = queue->grant_copy_op, | ||
| 587 | .meta = queue->meta, | ||
| 588 | }; | ||
| 589 | |||
| 590 | skb_queue_head_init(&rxq); | ||
| 591 | |||
| 592 | while (xenvif_rx_ring_slots_available(queue) | ||
| 593 | && (skb = xenvif_rx_dequeue(queue)) != NULL) { | ||
| 594 | queue->last_rx_time = jiffies; | ||
| 595 | |||
| 596 | XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue); | ||
| 597 | |||
| 598 | __skb_queue_tail(&rxq, skb); | ||
| 599 | } | ||
| 600 | |||
| 601 | BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta)); | ||
| 602 | |||
| 603 | if (!npo.copy_prod) | ||
| 604 | goto done; | ||
| 605 | |||
| 606 | BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); | ||
| 607 | gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod); | ||
| 608 | |||
| 609 | while ((skb = __skb_dequeue(&rxq)) != NULL) { | ||
| 610 | struct xen_netif_extra_info *extra = NULL; | ||
| 611 | |||
| 612 | if ((1 << queue->meta[npo.meta_cons].gso_type) & | ||
| 613 | vif->gso_prefix_mask) { | ||
| 614 | resp = RING_GET_RESPONSE(&queue->rx, | ||
| 615 | queue->rx.rsp_prod_pvt++); | ||
| 616 | |||
| 617 | resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data; | ||
| 618 | |||
| 619 | resp->offset = queue->meta[npo.meta_cons].gso_size; | ||
| 620 | resp->id = queue->meta[npo.meta_cons].id; | ||
| 621 | resp->status = XENVIF_RX_CB(skb)->meta_slots_used; | ||
| 622 | |||
| 623 | npo.meta_cons++; | ||
| 624 | XENVIF_RX_CB(skb)->meta_slots_used--; | ||
| 625 | } | ||
| 626 | |||
| 627 | |||
| 628 | queue->stats.tx_bytes += skb->len; | ||
| 629 | queue->stats.tx_packets++; | ||
| 630 | |||
| 631 | status = xenvif_check_gop(vif, | ||
| 632 | XENVIF_RX_CB(skb)->meta_slots_used, | ||
| 633 | &npo); | ||
| 634 | |||
| 635 | if (XENVIF_RX_CB(skb)->meta_slots_used == 1) | ||
| 636 | flags = 0; | ||
| 637 | else | ||
| 638 | flags = XEN_NETRXF_more_data; | ||
| 639 | |||
| 640 | if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ | ||
| 641 | flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated; | ||
| 642 | else if (skb->ip_summed == CHECKSUM_UNNECESSARY) | ||
| 643 | /* remote but checksummed. */ | ||
| 644 | flags |= XEN_NETRXF_data_validated; | ||
| 645 | |||
| 646 | offset = 0; | ||
| 647 | resp = make_rx_response(queue, queue->meta[npo.meta_cons].id, | ||
| 648 | status, offset, | ||
| 649 | queue->meta[npo.meta_cons].size, | ||
| 650 | flags); | ||
| 651 | |||
| 652 | if ((1 << queue->meta[npo.meta_cons].gso_type) & | ||
| 653 | vif->gso_mask) { | ||
| 654 | extra = (struct xen_netif_extra_info *) | ||
| 655 | RING_GET_RESPONSE(&queue->rx, | ||
| 656 | queue->rx.rsp_prod_pvt++); | ||
| 657 | |||
| 658 | resp->flags |= XEN_NETRXF_extra_info; | ||
| 659 | |||
| 660 | extra->u.gso.type = queue->meta[npo.meta_cons].gso_type; | ||
| 661 | extra->u.gso.size = queue->meta[npo.meta_cons].gso_size; | ||
| 662 | extra->u.gso.pad = 0; | ||
| 663 | extra->u.gso.features = 0; | ||
| 664 | |||
| 665 | extra->type = XEN_NETIF_EXTRA_TYPE_GSO; | ||
| 666 | extra->flags = 0; | ||
| 667 | } | ||
| 668 | |||
| 669 | if (skb->sw_hash) { | ||
| 670 | /* Since the skb got here via xenvif_select_queue() | ||
| 671 | * we know that the hash has been re-calculated | ||
| 672 | * according to a configuration set by the frontend | ||
| 673 | * and therefore we know that it is legitimate to | ||
| 674 | * pass it to the frontend. | ||
| 675 | */ | ||
| 676 | if (resp->flags & XEN_NETRXF_extra_info) | ||
| 677 | extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; | ||
| 678 | else | ||
| 679 | resp->flags |= XEN_NETRXF_extra_info; | ||
| 680 | |||
| 681 | extra = (struct xen_netif_extra_info *) | ||
| 682 | RING_GET_RESPONSE(&queue->rx, | ||
| 683 | queue->rx.rsp_prod_pvt++); | ||
| 684 | |||
| 685 | extra->u.hash.algorithm = | ||
| 686 | XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ; | ||
| 687 | |||
| 688 | if (skb->l4_hash) | ||
| 689 | extra->u.hash.type = | ||
| 690 | skb->protocol == htons(ETH_P_IP) ? | ||
| 691 | _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP : | ||
| 692 | _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP; | ||
| 693 | else | ||
| 694 | extra->u.hash.type = | ||
| 695 | skb->protocol == htons(ETH_P_IP) ? | ||
| 696 | _XEN_NETIF_CTRL_HASH_TYPE_IPV4 : | ||
| 697 | _XEN_NETIF_CTRL_HASH_TYPE_IPV6; | ||
| 698 | |||
| 699 | *(uint32_t *)extra->u.hash.value = | ||
| 700 | skb_get_hash_raw(skb); | ||
| 701 | |||
| 702 | extra->type = XEN_NETIF_EXTRA_TYPE_HASH; | ||
| 703 | extra->flags = 0; | ||
| 704 | } | ||
| 705 | |||
| 706 | xenvif_add_frag_responses(queue, status, | ||
| 707 | queue->meta + npo.meta_cons + 1, | ||
| 708 | XENVIF_RX_CB(skb)->meta_slots_used); | ||
| 709 | |||
| 710 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret); | ||
| 711 | |||
| 712 | need_to_notify |= !!ret; | ||
| 713 | |||
| 714 | npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used; | ||
| 715 | dev_kfree_skb(skb); | ||
| 716 | } | ||
| 717 | |||
| 718 | done: | ||
| 719 | if (need_to_notify) | ||
| 720 | notify_remote_via_irq(queue->rx_irq); | ||
| 721 | } | ||
| 722 | |||
| 723 | void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue) | 156 | void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue) |
| 724 | { | 157 | { |
| 725 | int more_to_do; | 158 | int more_to_do; |
| @@ -1951,29 +1384,6 @@ static void push_tx_responses(struct xenvif_queue *queue) | |||
| 1951 | notify_remote_via_irq(queue->tx_irq); | 1384 | notify_remote_via_irq(queue->tx_irq); |
| 1952 | } | 1385 | } |
| 1953 | 1386 | ||
| 1954 | static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, | ||
| 1955 | u16 id, | ||
| 1956 | s8 st, | ||
| 1957 | u16 offset, | ||
| 1958 | u16 size, | ||
| 1959 | u16 flags) | ||
| 1960 | { | ||
| 1961 | RING_IDX i = queue->rx.rsp_prod_pvt; | ||
| 1962 | struct xen_netif_rx_response *resp; | ||
| 1963 | |||
| 1964 | resp = RING_GET_RESPONSE(&queue->rx, i); | ||
| 1965 | resp->offset = offset; | ||
| 1966 | resp->flags = flags; | ||
| 1967 | resp->id = id; | ||
| 1968 | resp->status = (s16)size; | ||
| 1969 | if (st < 0) | ||
| 1970 | resp->status = (s16)st; | ||
| 1971 | |||
| 1972 | queue->rx.rsp_prod_pvt = ++i; | ||
| 1973 | |||
| 1974 | return resp; | ||
| 1975 | } | ||
| 1976 | |||
| 1977 | void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) | 1387 | void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) |
| 1978 | { | 1388 | { |
| 1979 | int ret; | 1389 | int ret; |
| @@ -2055,170 +1465,6 @@ err: | |||
| 2055 | return err; | 1465 | return err; |
| 2056 | } | 1466 | } |
| 2057 | 1467 | ||
| 2058 | static void xenvif_queue_carrier_off(struct xenvif_queue *queue) | ||
| 2059 | { | ||
| 2060 | struct xenvif *vif = queue->vif; | ||
| 2061 | |||
| 2062 | queue->stalled = true; | ||
| 2063 | |||
| 2064 | /* At least one queue has stalled? Disable the carrier. */ | ||
| 2065 | spin_lock(&vif->lock); | ||
| 2066 | if (vif->stalled_queues++ == 0) { | ||
| 2067 | netdev_info(vif->dev, "Guest Rx stalled"); | ||
| 2068 | netif_carrier_off(vif->dev); | ||
| 2069 | } | ||
| 2070 | spin_unlock(&vif->lock); | ||
| 2071 | } | ||
| 2072 | |||
| 2073 | static void xenvif_queue_carrier_on(struct xenvif_queue *queue) | ||
| 2074 | { | ||
| 2075 | struct xenvif *vif = queue->vif; | ||
| 2076 | |||
| 2077 | queue->last_rx_time = jiffies; /* Reset Rx stall detection. */ | ||
| 2078 | queue->stalled = false; | ||
| 2079 | |||
| 2080 | /* All queues are ready? Enable the carrier. */ | ||
| 2081 | spin_lock(&vif->lock); | ||
| 2082 | if (--vif->stalled_queues == 0) { | ||
| 2083 | netdev_info(vif->dev, "Guest Rx ready"); | ||
| 2084 | netif_carrier_on(vif->dev); | ||
| 2085 | } | ||
| 2086 | spin_unlock(&vif->lock); | ||
| 2087 | } | ||
| 2088 | |||
| 2089 | static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue) | ||
| 2090 | { | ||
| 2091 | RING_IDX prod, cons; | ||
| 2092 | |||
| 2093 | prod = queue->rx.sring->req_prod; | ||
| 2094 | cons = queue->rx.req_cons; | ||
| 2095 | |||
| 2096 | return !queue->stalled && prod - cons < 1 | ||
| 2097 | && time_after(jiffies, | ||
| 2098 | queue->last_rx_time + queue->vif->stall_timeout); | ||
| 2099 | } | ||
| 2100 | |||
| 2101 | static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) | ||
| 2102 | { | ||
| 2103 | RING_IDX prod, cons; | ||
| 2104 | |||
| 2105 | prod = queue->rx.sring->req_prod; | ||
| 2106 | cons = queue->rx.req_cons; | ||
| 2107 | |||
| 2108 | return queue->stalled && prod - cons >= 1; | ||
| 2109 | } | ||
| 2110 | |||
| 2111 | static bool xenvif_have_rx_work(struct xenvif_queue *queue) | ||
| 2112 | { | ||
| 2113 | return xenvif_rx_ring_slots_available(queue) | ||
| 2114 | || (queue->vif->stall_timeout && | ||
| 2115 | (xenvif_rx_queue_stalled(queue) | ||
| 2116 | || xenvif_rx_queue_ready(queue))) | ||
| 2117 | || kthread_should_stop() | ||
| 2118 | || queue->vif->disabled; | ||
| 2119 | } | ||
| 2120 | |||
| 2121 | static long xenvif_rx_queue_timeout(struct xenvif_queue *queue) | ||
| 2122 | { | ||
| 2123 | struct sk_buff *skb; | ||
| 2124 | long timeout; | ||
| 2125 | |||
| 2126 | skb = skb_peek(&queue->rx_queue); | ||
| 2127 | if (!skb) | ||
| 2128 | return MAX_SCHEDULE_TIMEOUT; | ||
| 2129 | |||
| 2130 | timeout = XENVIF_RX_CB(skb)->expires - jiffies; | ||
| 2131 | return timeout < 0 ? 0 : timeout; | ||
| 2132 | } | ||
| 2133 | |||
| 2134 | /* Wait until the guest Rx thread has work. | ||
| 2135 | * | ||
| 2136 | * The timeout needs to be adjusted based on the current head of the | ||
| 2137 | * queue (and not just the head at the beginning). In particular, if | ||
| 2138 | * the queue is initially empty an infinite timeout is used and this | ||
| 2139 | * needs to be reduced when a skb is queued. | ||
| 2140 | * | ||
| 2141 | * This cannot be done with wait_event_timeout() because it only | ||
| 2142 | * calculates the timeout once. | ||
| 2143 | */ | ||
| 2144 | static void xenvif_wait_for_rx_work(struct xenvif_queue *queue) | ||
| 2145 | { | ||
| 2146 | DEFINE_WAIT(wait); | ||
| 2147 | |||
| 2148 | if (xenvif_have_rx_work(queue)) | ||
| 2149 | return; | ||
| 2150 | |||
| 2151 | for (;;) { | ||
| 2152 | long ret; | ||
| 2153 | |||
| 2154 | prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE); | ||
| 2155 | if (xenvif_have_rx_work(queue)) | ||
| 2156 | break; | ||
| 2157 | ret = schedule_timeout(xenvif_rx_queue_timeout(queue)); | ||
| 2158 | if (!ret) | ||
| 2159 | break; | ||
| 2160 | } | ||
| 2161 | finish_wait(&queue->wq, &wait); | ||
| 2162 | } | ||
| 2163 | |||
| 2164 | int xenvif_kthread_guest_rx(void *data) | ||
| 2165 | { | ||
| 2166 | struct xenvif_queue *queue = data; | ||
| 2167 | struct xenvif *vif = queue->vif; | ||
| 2168 | |||
| 2169 | if (!vif->stall_timeout) | ||
| 2170 | xenvif_queue_carrier_on(queue); | ||
| 2171 | |||
| 2172 | for (;;) { | ||
| 2173 | xenvif_wait_for_rx_work(queue); | ||
| 2174 | |||
| 2175 | if (kthread_should_stop()) | ||
| 2176 | break; | ||
| 2177 | |||
| 2178 | /* This frontend is found to be rogue, disable it in | ||
| 2179 | * kthread context. Currently this is only set when | ||
| 2180 | * netback finds out frontend sends malformed packet, | ||
| 2181 | * but we cannot disable the interface in softirq | ||
| 2182 | * context so we defer it here, if this thread is | ||
| 2183 | * associated with queue 0. | ||
| 2184 | */ | ||
| 2185 | if (unlikely(vif->disabled && queue->id == 0)) { | ||
| 2186 | xenvif_carrier_off(vif); | ||
| 2187 | break; | ||
| 2188 | } | ||
| 2189 | |||
| 2190 | if (!skb_queue_empty(&queue->rx_queue)) | ||
| 2191 | xenvif_rx_action(queue); | ||
| 2192 | |||
| 2193 | /* If the guest hasn't provided any Rx slots for a | ||
| 2194 | * while it's probably not responsive, drop the | ||
| 2195 | * carrier so packets are dropped earlier. | ||
| 2196 | */ | ||
| 2197 | if (vif->stall_timeout) { | ||
| 2198 | if (xenvif_rx_queue_stalled(queue)) | ||
| 2199 | xenvif_queue_carrier_off(queue); | ||
| 2200 | else if (xenvif_rx_queue_ready(queue)) | ||
| 2201 | xenvif_queue_carrier_on(queue); | ||
| 2202 | } | ||
| 2203 | |||
| 2204 | /* Queued packets may have foreign pages from other | ||
| 2205 | * domains. These cannot be queued indefinitely as | ||
| 2206 | * this would starve guests of grant refs and transmit | ||
| 2207 | * slots. | ||
| 2208 | */ | ||
| 2209 | xenvif_rx_queue_drop_expired(queue); | ||
| 2210 | |||
| 2211 | xenvif_rx_queue_maybe_wake(queue); | ||
| 2212 | |||
| 2213 | cond_resched(); | ||
| 2214 | } | ||
| 2215 | |||
| 2216 | /* Bin any remaining skbs */ | ||
| 2217 | xenvif_rx_queue_purge(queue); | ||
| 2218 | |||
| 2219 | return 0; | ||
| 2220 | } | ||
| 2221 | |||
| 2222 | static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue) | 1468 | static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue) |
| 2223 | { | 1469 | { |
| 2224 | /* Dealloc thread must remain running until all inflight | 1470 | /* Dealloc thread must remain running until all inflight |
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c new file mode 100644 index 000000000000..8e9ade6ccf18 --- /dev/null +++ b/drivers/net/xen-netback/rx.c | |||
| @@ -0,0 +1,629 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016 Citrix Systems Inc. | ||
| 3 | * Copyright (c) 2002-2005, K A Fraser | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or | ||
| 6 | * modify it under the terms of the GNU General Public License version 2 | ||
| 7 | * as published by the Free Software Foundation; or, when distributed | ||
| 8 | * separately from the Linux kernel or incorporated into other | ||
| 9 | * software packages, subject to the following license: | ||
| 10 | * | ||
| 11 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
| 12 | * of this source file (the "Software"), to deal in the Software without | ||
| 13 | * restriction, including without limitation the rights to use, copy, modify, | ||
| 14 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
| 15 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
| 16 | * the following conditions: | ||
| 17 | * | ||
| 18 | * The above copyright notice and this permission notice shall be included in | ||
| 19 | * all copies or substantial portions of the Software. | ||
| 20 | * | ||
| 21 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 22 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 23 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
| 24 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 25 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 26 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
| 27 | * IN THE SOFTWARE. | ||
| 28 | */ | ||
| 29 | #include "common.h" | ||
| 30 | |||
| 31 | #include <linux/kthread.h> | ||
| 32 | |||
| 33 | #include <xen/xen.h> | ||
| 34 | #include <xen/events.h> | ||
| 35 | |||
| 36 | static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) | ||
| 37 | { | ||
| 38 | RING_IDX prod, cons; | ||
| 39 | struct sk_buff *skb; | ||
| 40 | int needed; | ||
| 41 | |||
| 42 | skb = skb_peek(&queue->rx_queue); | ||
| 43 | if (!skb) | ||
| 44 | return false; | ||
| 45 | |||
| 46 | needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); | ||
| 47 | if (skb_is_gso(skb)) | ||
| 48 | needed++; | ||
| 49 | if (skb->sw_hash) | ||
| 50 | needed++; | ||
| 51 | |||
| 52 | do { | ||
| 53 | prod = queue->rx.sring->req_prod; | ||
| 54 | cons = queue->rx.req_cons; | ||
| 55 | |||
| 56 | if (prod - cons >= needed) | ||
| 57 | return true; | ||
| 58 | |||
| 59 | queue->rx.sring->req_event = prod + 1; | ||
| 60 | |||
| 61 | /* Make sure event is visible before we check prod | ||
| 62 | * again. | ||
| 63 | */ | ||
| 64 | mb(); | ||
| 65 | } while (queue->rx.sring->req_prod != prod); | ||
| 66 | |||
| 67 | return false; | ||
| 68 | } | ||
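The do/while loop above is the standard lost-wakeup guard for shared rings: when too few requests are queued, the backend re-arms the frontend's notification (req_event = prod + 1), issues a full barrier, and re-reads req_prod; if the producer advanced inside that window, it loops and tests again instead of sleeping. A minimal userspace sketch of the same idiom with C11 atomics; the struct and names are illustrative, not the real ring layout:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct ring {
        _Atomic unsigned int req_prod;  /* advanced by the producer */
        _Atomic unsigned int req_event; /* producer notifies at this index */
        unsigned int req_cons;          /* consumer-private cursor */
    };

    static bool slots_available(struct ring *r, unsigned int needed)
    {
        unsigned int prod;

        do {
            prod = atomic_load(&r->req_prod);
            if (prod - r->req_cons >= needed)
                return true;

            /* Ask to be notified once the producer moves past 'prod'. */
            atomic_store(&r->req_event, prod + 1);

            /* The event store must be visible before req_prod is
             * re-read; this fence mirrors the mb() above. */
            atomic_thread_fence(memory_order_seq_cst);
        } while (atomic_load(&r->req_prod) != prod);

        return false;
    }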
| 69 | |||
| 70 | void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) | ||
| 71 | { | ||
| 72 | unsigned long flags; | ||
| 73 | |||
| 74 | spin_lock_irqsave(&queue->rx_queue.lock, flags); | ||
| 75 | |||
| 76 | __skb_queue_tail(&queue->rx_queue, skb); | ||
| 77 | |||
| 78 | queue->rx_queue_len += skb->len; | ||
| 79 | if (queue->rx_queue_len > queue->rx_queue_max) { | ||
| 80 | struct net_device *dev = queue->vif->dev; | ||
| 81 | |||
| 82 | netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); | ||
| 83 | } | ||
| 84 | |||
| 85 | spin_unlock_irqrestore(&queue->rx_queue.lock, flags); | ||
| 86 | } | ||
| 87 | |||
| 88 | static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) | ||
| 89 | { | ||
| 90 | struct sk_buff *skb; | ||
| 91 | |||
| 92 | spin_lock_irq(&queue->rx_queue.lock); | ||
| 93 | |||
| 94 | skb = __skb_dequeue(&queue->rx_queue); | ||
| 95 | if (skb) { | ||
| 96 | queue->rx_queue_len -= skb->len; | ||
| 97 | if (queue->rx_queue_len < queue->rx_queue_max) { | ||
| 98 | struct netdev_queue *txq; | ||
| 99 | |||
| 100 | txq = netdev_get_tx_queue(queue->vif->dev, queue->id); | ||
| 101 | netif_tx_wake_queue(txq); | ||
| 102 | } | ||
| 103 | } | ||
| 104 | |||
| 105 | spin_unlock_irq(&queue->rx_queue.lock); | ||
| 106 | |||
| 107 | return skb; | ||
| 108 | } | ||
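Taken together, xenvif_rx_queue_tail() and xenvif_rx_dequeue() implement byte-counted backpressure: crossing rx_queue_max on enqueue stops the matching netdev transmit queue, and draining back below it on dequeue wakes the queue, so a slow guest throttles the sender instead of growing the backlog without bound. A stripped-down sketch of the accounting; the netif_tx_*() calls are stand-ins, and as in the original there is a single threshold rather than a high/low pair:

    #include <stdbool.h>
    #include <stddef.h>

    struct wm_queue {
        size_t len;     /* bytes currently queued */
        size_t max;     /* watermark */
        bool stopped;   /* mirrors the netdev tx queue state */
    };

    static void wmq_enqueued(struct wm_queue *q, size_t bytes)
    {
        q->len += bytes;
        if (q->len > q->max)
            q->stopped = true;   /* netif_tx_stop_queue() here */
    }

    static void wmq_dequeued(struct wm_queue *q, size_t bytes)
    {
        q->len -= bytes;
        if (q->len < q->max)
            q->stopped = false;  /* netif_tx_wake_queue() here */
    }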
| 109 | |||
| 110 | static void xenvif_rx_queue_purge(struct xenvif_queue *queue) | ||
| 111 | { | ||
| 112 | struct sk_buff *skb; | ||
| 113 | |||
| 114 | while ((skb = xenvif_rx_dequeue(queue)) != NULL) | ||
| 115 | kfree_skb(skb); | ||
| 116 | } | ||
| 117 | |||
| 118 | static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) | ||
| 119 | { | ||
| 120 | struct sk_buff *skb; | ||
| 121 | |||
| 122 | for (;;) { | ||
| 123 | skb = skb_peek(&queue->rx_queue); | ||
| 124 | if (!skb) | ||
| 125 | break; | ||
| 126 | if (time_before(jiffies, XENVIF_RX_CB(skb)->expires)) | ||
| 127 | break; | ||
| 128 | xenvif_rx_dequeue(queue); | ||
| 129 | kfree_skb(skb); | ||
| 130 | } | ||
| 131 | } | ||
| 132 | |||
| 133 | static void xenvif_rx_copy_flush(struct xenvif_queue *queue) | ||
| 134 | { | ||
| 135 | unsigned int i; | ||
| 136 | int notify; | ||
| 137 | |||
| 138 | gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num); | ||
| 139 | |||
| 140 | for (i = 0; i < queue->rx_copy.num; i++) { | ||
| 141 | struct gnttab_copy *op; | ||
| 142 | |||
| 143 | op = &queue->rx_copy.op[i]; | ||
| 144 | |||
| 145 | /* If the copy failed, overwrite the status field in | ||
| 146 | * the corresponding response. | ||
| 147 | */ | ||
| 148 | if (unlikely(op->status != GNTST_okay)) { | ||
| 149 | struct xen_netif_rx_response *rsp; | ||
| 150 | |||
| 151 | rsp = RING_GET_RESPONSE(&queue->rx, | ||
| 152 | queue->rx_copy.idx[i]); | ||
| 153 | rsp->status = op->status; | ||
| 154 | } | ||
| 155 | } | ||
| 156 | |||
| 157 | queue->rx_copy.num = 0; | ||
| 158 | |||
| 159 | /* Push responses for all completed packets. */ | ||
| 160 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify); | ||
| 161 | if (notify) | ||
| 162 | notify_remote_via_irq(queue->rx_irq); | ||
| 163 | |||
| 164 | __skb_queue_purge(queue->rx_copy.completed); | ||
| 165 | } | ||
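xenvif_rx_copy_flush() is the drain side of a batch-then-patch pattern: copies accumulate until COPY_BATCH_SIZE (or the end of an RX pass), go out as one gnttab_batch_copy() call, and only afterwards are the responses of failed copies patched, so the common all-success path never revisits them. A schematic version, with hypothetical types standing in for the grant-table structures:

    #include <stddef.h>

    struct copy_op  { int status; };  /* one queued copy */
    struct response { int status; };  /* the ring response it belongs to */

    void run_batch(struct copy_op *ops, size_t n);  /* e.g. one hypercall */

    static void flush_batch(struct copy_op *ops, struct response **rsps,
                            size_t *num)
    {
        run_batch(ops, *num);

        /* Patch only the failures; successful slots keep the status
         * written when the response was built. */
        for (size_t i = 0; i < *num; i++)
            if (ops[i].status != 0)
                rsps[i]->status = ops[i].status;

        *num = 0;  /* responses are then pushed, with one event at most */
    }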
| 166 | |||
| 167 | static void xenvif_rx_copy_add(struct xenvif_queue *queue, | ||
| 168 | struct xen_netif_rx_request *req, | ||
| 169 | unsigned int offset, void *data, size_t len) | ||
| 170 | { | ||
| 171 | struct gnttab_copy *op; | ||
| 172 | struct page *page; | ||
| 173 | struct xen_page_foreign *foreign; | ||
| 174 | |||
| 175 | if (queue->rx_copy.num == COPY_BATCH_SIZE) | ||
| 176 | xenvif_rx_copy_flush(queue); | ||
| 177 | |||
| 178 | op = &queue->rx_copy.op[queue->rx_copy.num]; | ||
| 179 | |||
| 180 | page = virt_to_page(data); | ||
| 181 | |||
| 182 | op->flags = GNTCOPY_dest_gref; | ||
| 183 | |||
| 184 | foreign = xen_page_foreign(page); | ||
| 185 | if (foreign) { | ||
| 186 | op->source.domid = foreign->domid; | ||
| 187 | op->source.u.ref = foreign->gref; | ||
| 188 | op->flags |= GNTCOPY_source_gref; | ||
| 189 | } else { | ||
| 190 | op->source.u.gmfn = virt_to_gfn(data); | ||
| 191 | op->source.domid = DOMID_SELF; | ||
| 192 | } | ||
| 193 | |||
| 194 | op->source.offset = xen_offset_in_page(data); | ||
| 195 | op->dest.u.ref = req->gref; | ||
| 196 | op->dest.domid = queue->vif->domid; | ||
| 197 | op->dest.offset = offset; | ||
| 198 | op->len = len; | ||
| 199 | |||
| 200 | queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons; | ||
| 201 | queue->rx_copy.num++; | ||
| 202 | } | ||
| 203 | |||
| 204 | static unsigned int xenvif_gso_type(struct sk_buff *skb) | ||
| 205 | { | ||
| 206 | if (skb_is_gso(skb)) { | ||
| 207 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) | ||
| 208 | return XEN_NETIF_GSO_TYPE_TCPV4; | ||
| 209 | else | ||
| 210 | return XEN_NETIF_GSO_TYPE_TCPV6; | ||
| 211 | } | ||
| 212 | return XEN_NETIF_GSO_TYPE_NONE; | ||
| 213 | } | ||
| 214 | |||
| 215 | struct xenvif_pkt_state { | ||
| 216 | struct sk_buff *skb; | ||
| 217 | size_t remaining_len; | ||
| 218 | struct sk_buff *frag_iter; | ||
| 219 | int frag; /* frag == -1 => frag_iter->head */ | ||
| 220 | unsigned int frag_offset; | ||
| 221 | struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; | ||
| 222 | unsigned int extra_count; | ||
| 223 | unsigned int slot; | ||
| 224 | }; | ||
| 225 | |||
| 226 | static void xenvif_rx_next_skb(struct xenvif_queue *queue, | ||
| 227 | struct xenvif_pkt_state *pkt) | ||
| 228 | { | ||
| 229 | struct sk_buff *skb; | ||
| 230 | unsigned int gso_type; | ||
| 231 | |||
| 232 | skb = xenvif_rx_dequeue(queue); | ||
| 233 | |||
| 234 | queue->stats.tx_bytes += skb->len; | ||
| 235 | queue->stats.tx_packets++; | ||
| 236 | |||
| 237 | /* Reset packet state. */ | ||
| 238 | memset(pkt, 0, sizeof(struct xenvif_pkt_state)); | ||
| 239 | |||
| 240 | pkt->skb = skb; | ||
| 241 | pkt->frag_iter = skb; | ||
| 242 | pkt->remaining_len = skb->len; | ||
| 243 | pkt->frag = -1; | ||
| 244 | |||
| 245 | gso_type = xenvif_gso_type(skb); | ||
| 246 | if ((1 << gso_type) & queue->vif->gso_mask) { | ||
| 247 | struct xen_netif_extra_info *extra; | ||
| 248 | |||
| 249 | extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; | ||
| 250 | |||
| 251 | extra->u.gso.type = gso_type; | ||
| 252 | extra->u.gso.size = skb_shinfo(skb)->gso_size; | ||
| 253 | extra->u.gso.pad = 0; | ||
| 254 | extra->u.gso.features = 0; | ||
| 255 | extra->type = XEN_NETIF_EXTRA_TYPE_GSO; | ||
| 256 | extra->flags = 0; | ||
| 257 | |||
| 258 | pkt->extra_count++; | ||
| 259 | } | ||
| 260 | |||
| 261 | if (skb->sw_hash) { | ||
| 262 | struct xen_netif_extra_info *extra; | ||
| 263 | |||
| 264 | extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1]; | ||
| 265 | |||
| 266 | extra->u.hash.algorithm = | ||
| 267 | XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ; | ||
| 268 | |||
| 269 | if (skb->l4_hash) | ||
| 270 | extra->u.hash.type = | ||
| 271 | skb->protocol == htons(ETH_P_IP) ? | ||
| 272 | _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP : | ||
| 273 | _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP; | ||
| 274 | else | ||
| 275 | extra->u.hash.type = | ||
| 276 | skb->protocol == htons(ETH_P_IP) ? | ||
| 277 | _XEN_NETIF_CTRL_HASH_TYPE_IPV4 : | ||
| 278 | _XEN_NETIF_CTRL_HASH_TYPE_IPV6; | ||
| 279 | |||
| 280 | *(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb); | ||
| 281 | |||
| 282 | extra->type = XEN_NETIF_EXTRA_TYPE_HASH; | ||
| 283 | extra->flags = 0; | ||
| 284 | |||
| 285 | pkt->extra_count++; | ||
| 286 | } | ||
| 287 | } | ||
| 288 | |||
| 289 | static void xenvif_rx_complete(struct xenvif_queue *queue, | ||
| 290 | struct xenvif_pkt_state *pkt) | ||
| 291 | { | ||
| 292 | /* All responses are ready to be pushed. */ | ||
| 293 | queue->rx.rsp_prod_pvt = queue->rx.req_cons; | ||
| 294 | |||
| 295 | __skb_queue_tail(queue->rx_copy.completed, pkt->skb); | ||
| 296 | } | ||
| 297 | |||
| 298 | static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt) | ||
| 299 | { | ||
| 300 | struct sk_buff *frag_iter = pkt->frag_iter; | ||
| 301 | unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags; | ||
| 302 | |||
| 303 | pkt->frag++; | ||
| 304 | pkt->frag_offset = 0; | ||
| 305 | |||
| 306 | if (pkt->frag >= nr_frags) { | ||
| 307 | if (frag_iter == pkt->skb) | ||
| 308 | pkt->frag_iter = skb_shinfo(frag_iter)->frag_list; | ||
| 309 | else | ||
| 310 | pkt->frag_iter = frag_iter->next; | ||
| 311 | |||
| 312 | pkt->frag = -1; | ||
| 313 | } | ||
| 314 | } | ||
| 315 | |||
| 316 | static void xenvif_rx_next_chunk(struct xenvif_queue *queue, | ||
| 317 | struct xenvif_pkt_state *pkt, | ||
| 318 | unsigned int offset, void **data, | ||
| 319 | size_t *len) | ||
| 320 | { | ||
| 321 | struct sk_buff *frag_iter = pkt->frag_iter; | ||
| 322 | void *frag_data; | ||
| 323 | size_t frag_len, chunk_len; | ||
| 324 | |||
| 325 | BUG_ON(!frag_iter); | ||
| 326 | |||
| 327 | if (pkt->frag == -1) { | ||
| 328 | frag_data = frag_iter->data; | ||
| 329 | frag_len = skb_headlen(frag_iter); | ||
| 330 | } else { | ||
| 331 | skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag]; | ||
| 332 | |||
| 333 | frag_data = skb_frag_address(frag); | ||
| 334 | frag_len = skb_frag_size(frag); | ||
| 335 | } | ||
| 336 | |||
| 337 | frag_data += pkt->frag_offset; | ||
| 338 | frag_len -= pkt->frag_offset; | ||
| 339 | |||
| 340 | chunk_len = min(frag_len, XEN_PAGE_SIZE - offset); | ||
| 341 | chunk_len = min(chunk_len, | ||
| 342 | XEN_PAGE_SIZE - xen_offset_in_page(frag_data)); | ||
| 343 | |||
| 344 | pkt->frag_offset += chunk_len; | ||
| 345 | |||
| 346 | /* Advance to next frag? */ | ||
| 347 | if (frag_len == chunk_len) | ||
| 348 | xenvif_rx_next_frag(pkt); | ||
| 349 | |||
| 350 | *data = frag_data; | ||
| 351 | *len = chunk_len; | ||
| 352 | } | ||
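xenvif_rx_next_chunk() clamps every copy to three limits at once: the bytes left in the current fragment, the space left in the destination grant page at 'offset', and the distance to the next page boundary in the source, since a fragment may straddle pages. With 4 KiB pages, frag_len = 3000, a destination offset of 2048 and a source address 512 bytes short of a boundary, the chunk is min(3000, 2048, 512) = 512. The clamp in isolation:

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u

    static size_t offset_in_page(const void *p)
    {
        return (uintptr_t)p & (PAGE_SIZE - 1);
    }

    /* Largest copy that stays inside the fragment, the destination
     * page and the source page. */
    static size_t chunk_len(const void *src, size_t frag_left,
                            size_t dst_offset)
    {
        size_t n = frag_left;

        if (n > PAGE_SIZE - dst_offset)
            n = PAGE_SIZE - dst_offset;
        if (n > PAGE_SIZE - offset_in_page(src))
            n = PAGE_SIZE - offset_in_page(src);
        return n;
    }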
| 353 | |||
| 354 | static void xenvif_rx_data_slot(struct xenvif_queue *queue, | ||
| 355 | struct xenvif_pkt_state *pkt, | ||
| 356 | struct xen_netif_rx_request *req, | ||
| 357 | struct xen_netif_rx_response *rsp) | ||
| 358 | { | ||
| 359 | unsigned int offset = 0; | ||
| 360 | unsigned int flags; | ||
| 361 | |||
| 362 | do { | ||
| 363 | size_t len; | ||
| 364 | void *data; | ||
| 365 | |||
| 366 | xenvif_rx_next_chunk(queue, pkt, offset, &data, &len); | ||
| 367 | xenvif_rx_copy_add(queue, req, offset, data, len); | ||
| 368 | |||
| 369 | offset += len; | ||
| 370 | pkt->remaining_len -= len; | ||
| 371 | |||
| 372 | } while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0); | ||
| 373 | |||
| 374 | if (pkt->remaining_len > 0) | ||
| 375 | flags = XEN_NETRXF_more_data; | ||
| 376 | else | ||
| 377 | flags = 0; | ||
| 378 | |||
| 379 | if (pkt->slot == 0) { | ||
| 380 | struct sk_buff *skb = pkt->skb; | ||
| 381 | |||
| 382 | if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
| 383 | flags |= XEN_NETRXF_csum_blank | | ||
| 384 | XEN_NETRXF_data_validated; | ||
| 385 | else if (skb->ip_summed == CHECKSUM_UNNECESSARY) | ||
| 386 | flags |= XEN_NETRXF_data_validated; | ||
| 387 | |||
| 388 | if (pkt->extra_count != 0) | ||
| 389 | flags |= XEN_NETRXF_extra_info; | ||
| 390 | } | ||
| 391 | |||
| 392 | rsp->offset = 0; | ||
| 393 | rsp->flags = flags; | ||
| 394 | rsp->id = req->id; | ||
| 395 | rsp->status = (s16)offset; | ||
| 396 | } | ||
| 397 | |||
| 398 | static void xenvif_rx_extra_slot(struct xenvif_queue *queue, | ||
| 399 | struct xenvif_pkt_state *pkt, | ||
| 400 | struct xen_netif_rx_request *req, | ||
| 401 | struct xen_netif_rx_response *rsp) | ||
| 402 | { | ||
| 403 | struct xen_netif_extra_info *extra = (void *)rsp; | ||
| 404 | unsigned int i; | ||
| 405 | |||
| 406 | pkt->extra_count--; | ||
| 407 | |||
| 408 | for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) { | ||
| 409 | if (pkt->extras[i].type) { | ||
| 410 | *extra = pkt->extras[i]; | ||
| 411 | |||
| 412 | if (pkt->extra_count != 0) | ||
| 413 | extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; | ||
| 414 | |||
| 415 | pkt->extras[i].type = 0; | ||
| 416 | return; | ||
| 417 | } | ||
| 418 | } | ||
| 419 | BUG(); | ||
| 420 | } | ||
| 421 | |||
| 422 | void xenvif_rx_skb(struct xenvif_queue *queue) | ||
| 423 | { | ||
| 424 | struct xenvif_pkt_state pkt; | ||
| 425 | |||
| 426 | xenvif_rx_next_skb(queue, &pkt); | ||
| 427 | |||
| 428 | do { | ||
| 429 | struct xen_netif_rx_request *req; | ||
| 430 | struct xen_netif_rx_response *rsp; | ||
| 431 | |||
| 432 | req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons); | ||
| 433 | rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons); | ||
| 434 | |||
| 435 | /* Extras must go after the first data slot */ | ||
| 436 | if (pkt.slot != 0 && pkt.extra_count != 0) | ||
| 437 | xenvif_rx_extra_slot(queue, &pkt, req, rsp); | ||
| 438 | else | ||
| 439 | xenvif_rx_data_slot(queue, &pkt, req, rsp); | ||
| 440 | |||
| 441 | queue->rx.req_cons++; | ||
| 442 | pkt.slot++; | ||
| 443 | } while (pkt.remaining_len > 0 || pkt.extra_count != 0); | ||
| 444 | |||
| 445 | xenvif_rx_complete(queue, &pkt); | ||
| 446 | } | ||
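The loop in xenvif_rx_skb() consumes ring slots strictly in order: slot 0 is always data (it carries the checksum flags and, if needed, XEN_NETRXF_extra_info), any extras follow immediately, and further data slots finish the payload. As a worked example, a 5000-byte GSO skb with an L4 hash on 4 KiB pages uses exactly the four slots that xenvif_rx_ring_slots_available() counted for it:

    /* slot 0: data  - 4096 bytes; more_data | extra_info
     *                 (plus csum_blank/data_validated as appropriate)
     * slot 1: extra - GSO type and size, EXTRA_FLAG_MORE set
     * slot 2: extra - hash algorithm, type and value
     * slot 3: data  - remaining 904 bytes; flags = 0
     */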
| 447 | |||
| 448 | #define RX_BATCH_SIZE 64 | ||
| 449 | |||
| 450 | void xenvif_rx_action(struct xenvif_queue *queue) | ||
| 451 | { | ||
| 452 | struct sk_buff_head completed_skbs; | ||
| 453 | unsigned int work_done = 0; | ||
| 454 | |||
| 455 | __skb_queue_head_init(&completed_skbs); | ||
| 456 | queue->rx_copy.completed = &completed_skbs; | ||
| 457 | |||
| 458 | while (xenvif_rx_ring_slots_available(queue) && | ||
| 459 | work_done < RX_BATCH_SIZE) { | ||
| 460 | xenvif_rx_skb(queue); | ||
| 461 | work_done++; | ||
| 462 | } | ||
| 463 | |||
| 464 | /* Flush any pending copies and complete all skbs. */ | ||
| 465 | xenvif_rx_copy_flush(queue); | ||
| 466 | } | ||
| 467 | |||
| 468 | static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue) | ||
| 469 | { | ||
| 470 | RING_IDX prod, cons; | ||
| 471 | |||
| 472 | prod = queue->rx.sring->req_prod; | ||
| 473 | cons = queue->rx.req_cons; | ||
| 474 | |||
| 475 | return !queue->stalled && | ||
| 476 | prod - cons < 1 && | ||
| 477 | time_after(jiffies, | ||
| 478 | queue->last_rx_time + queue->vif->stall_timeout); | ||
| 479 | } | ||
| 480 | |||
| 481 | static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) | ||
| 482 | { | ||
| 483 | RING_IDX prod, cons; | ||
| 484 | |||
| 485 | prod = queue->rx.sring->req_prod; | ||
| 486 | cons = queue->rx.req_cons; | ||
| 487 | |||
| 488 | return queue->stalled && prod - cons >= 1; | ||
| 489 | } | ||
| 490 | |||
| 491 | static bool xenvif_have_rx_work(struct xenvif_queue *queue) | ||
| 492 | { | ||
| 493 | return xenvif_rx_ring_slots_available(queue) || | ||
| 494 | (queue->vif->stall_timeout && | ||
| 495 | (xenvif_rx_queue_stalled(queue) || | ||
| 496 | xenvif_rx_queue_ready(queue))) || | ||
| 497 | kthread_should_stop() || | ||
| 498 | queue->vif->disabled; | ||
| 499 | } | ||
| 500 | |||
| 501 | static long xenvif_rx_queue_timeout(struct xenvif_queue *queue) | ||
| 502 | { | ||
| 503 | struct sk_buff *skb; | ||
| 504 | long timeout; | ||
| 505 | |||
| 506 | skb = skb_peek(&queue->rx_queue); | ||
| 507 | if (!skb) | ||
| 508 | return MAX_SCHEDULE_TIMEOUT; | ||
| 509 | |||
| 510 | timeout = XENVIF_RX_CB(skb)->expires - jiffies; | ||
| 511 | return timeout < 0 ? 0 : timeout; | ||
| 512 | } | ||
| 513 | |||
| 514 | /* Wait until the guest Rx thread has work. | ||
| 515 | * | ||
| 516 | * The timeout needs to be adjusted based on the current head of the | ||
| 517 | * queue (and not just the head at the beginning). In particular, if | ||
| 518 | * the queue is initially empty an infinite timeout is used and this | ||
| 519 | * needs to be reduced when a skb is queued. | ||
| 520 | * | ||
| 521 | * This cannot be done with wait_event_timeout() because it only | ||
| 522 | * calculates the timeout once. | ||
| 523 | */ | ||
| 524 | static void xenvif_wait_for_rx_work(struct xenvif_queue *queue) | ||
| 525 | { | ||
| 526 | DEFINE_WAIT(wait); | ||
| 527 | |||
| 528 | if (xenvif_have_rx_work(queue)) | ||
| 529 | return; | ||
| 530 | |||
| 531 | for (;;) { | ||
| 532 | long ret; | ||
| 533 | |||
| 534 | prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE); | ||
| 535 | if (xenvif_have_rx_work(queue)) | ||
| 536 | break; | ||
| 537 | ret = schedule_timeout(xenvif_rx_queue_timeout(queue)); | ||
| 538 | if (!ret) | ||
| 539 | break; | ||
| 540 | } | ||
| 541 | finish_wait(&queue->wq, &wait); | ||
| 542 | } | ||
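The comment above is the crux: the deadline depends on whichever skb currently heads rx_queue, so it has to be recomputed on every wakeup rather than captured once, which is exactly what wait_event_timeout() cannot do. The same shape in portable userspace terms, with a condition variable in place of the wait queue and hypothetical helper callbacks:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    static void wait_for_work(pthread_mutex_t *lock, pthread_cond_t *cond,
                              bool (*have_work)(void),
                              struct timespec (*head_deadline)(void))
    {
        pthread_mutex_lock(lock);
        while (!have_work()) {
            /* Re-derive the deadline from the current queue head;
             * it may have changed while we slept. */
            struct timespec dl = head_deadline();

            if (pthread_cond_timedwait(cond, lock, &dl) == ETIMEDOUT)
                break;
        }
        pthread_mutex_unlock(lock);
    }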
| 543 | |||
| 544 | static void xenvif_queue_carrier_off(struct xenvif_queue *queue) | ||
| 545 | { | ||
| 546 | struct xenvif *vif = queue->vif; | ||
| 547 | |||
| 548 | queue->stalled = true; | ||
| 549 | |||
| 550 | /* At least one queue has stalled? Disable the carrier. */ | ||
| 551 | spin_lock(&vif->lock); | ||
| 552 | if (vif->stalled_queues++ == 0) { | ||
| 553 | netdev_info(vif->dev, "Guest Rx stalled"); | ||
| 554 | netif_carrier_off(vif->dev); | ||
| 555 | } | ||
| 556 | spin_unlock(&vif->lock); | ||
| 557 | } | ||
| 558 | |||
| 559 | static void xenvif_queue_carrier_on(struct xenvif_queue *queue) | ||
| 560 | { | ||
| 561 | struct xenvif *vif = queue->vif; | ||
| 562 | |||
| 563 | queue->last_rx_time = jiffies; /* Reset Rx stall detection. */ | ||
| 564 | queue->stalled = false; | ||
| 565 | |||
| 566 | /* All queues are ready? Enable the carrier. */ | ||
| 567 | spin_lock(&vif->lock); | ||
| 568 | if (--vif->stalled_queues == 0) { | ||
| 569 | netdev_info(vif->dev, "Guest Rx ready"); | ||
| 570 | netif_carrier_on(vif->dev); | ||
| 571 | } | ||
| 572 | spin_unlock(&vif->lock); | ||
| 573 | } | ||
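Carrier state is per-device while stalls are detected per-queue, so the pair above keeps a count under vif->lock and acts only on the edges: the first queue to stall (0 to 1) turns the carrier off, the last to recover (1 to 0) turns it back on. The counting idiom on its own, with a mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdbool.h>

    struct dev {
        pthread_mutex_t lock;
        int stalled;    /* number of stalled queues */
        bool carrier;
    };

    static void queue_stalled(struct dev *d)
    {
        pthread_mutex_lock(&d->lock);
        if (d->stalled++ == 0)
            d->carrier = false;  /* first stall: carrier off */
        pthread_mutex_unlock(&d->lock);
    }

    static void queue_ready(struct dev *d)
    {
        pthread_mutex_lock(&d->lock);
        if (--d->stalled == 0)
            d->carrier = true;   /* last recovery: carrier on */
        pthread_mutex_unlock(&d->lock);
    }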
| 574 | |||
| 575 | int xenvif_kthread_guest_rx(void *data) | ||
| 576 | { | ||
| 577 | struct xenvif_queue *queue = data; | ||
| 578 | struct xenvif *vif = queue->vif; | ||
| 579 | |||
| 580 | if (!vif->stall_timeout) | ||
| 581 | xenvif_queue_carrier_on(queue); | ||
| 582 | |||
| 583 | for (;;) { | ||
| 584 | xenvif_wait_for_rx_work(queue); | ||
| 585 | |||
| 586 | if (kthread_should_stop()) | ||
| 587 | break; | ||
| 588 | |||
| 589 | /* This frontend has been found to be rogue; disable it | ||
| 590 | * in kthread context. Currently this is only set when | ||
| 591 | * netback finds that the frontend has sent a malformed | ||
| 592 | * packet, but we cannot disable the interface in softirq | ||
| 593 | * context, so we defer it here if this thread is | ||
| 594 | * associated with queue 0. | ||
| 595 | */ | ||
| 596 | if (unlikely(vif->disabled && queue->id == 0)) { | ||
| 597 | xenvif_carrier_off(vif); | ||
| 598 | break; | ||
| 599 | } | ||
| 600 | |||
| 601 | if (!skb_queue_empty(&queue->rx_queue)) | ||
| 602 | xenvif_rx_action(queue); | ||
| 603 | |||
| 604 | /* If the guest hasn't provided any Rx slots for a | ||
| 605 | * while, it's probably not responsive; drop the | ||
| 606 | * carrier so packets are dropped earlier. | ||
| 607 | */ | ||
| 608 | if (vif->stall_timeout) { | ||
| 609 | if (xenvif_rx_queue_stalled(queue)) | ||
| 610 | xenvif_queue_carrier_off(queue); | ||
| 611 | else if (xenvif_rx_queue_ready(queue)) | ||
| 612 | xenvif_queue_carrier_on(queue); | ||
| 613 | } | ||
| 614 | |||
| 615 | /* Queued packets may have foreign pages from other | ||
| 616 | * domains. These cannot be queued indefinitely as | ||
| 617 | * this would starve guests of grant refs and transmit | ||
| 618 | * slots. | ||
| 619 | */ | ||
| 620 | xenvif_rx_queue_drop_expired(queue); | ||
| 621 | |||
| 622 | cond_resched(); | ||
| 623 | } | ||
| 624 | |||
| 625 | /* Bin any remaining skbs */ | ||
| 626 | xenvif_rx_queue_purge(queue); | ||
| 627 | |||
| 628 | return 0; | ||
| 629 | } | ||
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index daf4c7867102..7056404e3cb8 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c | |||
| @@ -1135,7 +1135,6 @@ static int read_xenbus_vif_flags(struct backend_info *be) | |||
| 1135 | vif->can_sg = !!val; | 1135 | vif->can_sg = !!val; |
| 1136 | 1136 | ||
| 1137 | vif->gso_mask = 0; | 1137 | vif->gso_mask = 0; |
| 1138 | vif->gso_prefix_mask = 0; | ||
| 1139 | 1138 | ||
| 1140 | if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", | 1139 | if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", |
| 1141 | "%d", &val) < 0) | 1140 | "%d", &val) < 0) |
| @@ -1143,32 +1142,12 @@ static int read_xenbus_vif_flags(struct backend_info *be) | |||
| 1143 | if (val) | 1142 | if (val) |
| 1144 | vif->gso_mask |= GSO_BIT(TCPV4); | 1143 | vif->gso_mask |= GSO_BIT(TCPV4); |
| 1145 | 1144 | ||
| 1146 | if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix", | ||
| 1147 | "%d", &val) < 0) | ||
| 1148 | val = 0; | ||
| 1149 | if (val) | ||
| 1150 | vif->gso_prefix_mask |= GSO_BIT(TCPV4); | ||
| 1151 | |||
| 1152 | if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6", | 1145 | if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6", |
| 1153 | "%d", &val) < 0) | 1146 | "%d", &val) < 0) |
| 1154 | val = 0; | 1147 | val = 0; |
| 1155 | if (val) | 1148 | if (val) |
| 1156 | vif->gso_mask |= GSO_BIT(TCPV6); | 1149 | vif->gso_mask |= GSO_BIT(TCPV6); |
| 1157 | 1150 | ||
| 1158 | if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix", | ||
| 1159 | "%d", &val) < 0) | ||
| 1160 | val = 0; | ||
| 1161 | if (val) | ||
| 1162 | vif->gso_prefix_mask |= GSO_BIT(TCPV6); | ||
| 1163 | |||
| 1164 | if (vif->gso_mask & vif->gso_prefix_mask) { | ||
| 1165 | xenbus_dev_fatal(dev, err, | ||
| 1166 | "%s: gso and gso prefix flags are not " | ||
| 1167 | "mutually exclusive", | ||
| 1168 | dev->otherend); | ||
| 1169 | return -EOPNOTSUPP; | ||
| 1170 | } | ||
| 1171 | |||
| 1172 | if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload", | 1151 | if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload", |
| 1173 | "%d", &val) < 0) | 1152 | "%d", &val) < 0) |
| 1174 | val = 0; | 1153 | val = 0; |
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 59bdaa7527b6..477928b25940 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c | |||
| @@ -418,7 +418,7 @@ static void afs_deliver_to_call(struct afs_call *call) | |||
| 418 | &call->abort_code); | 418 | &call->abort_code); |
| 419 | if (ret == -EINPROGRESS || ret == -EAGAIN) | 419 | if (ret == -EINPROGRESS || ret == -EAGAIN) |
| 420 | return; | 420 | return; |
| 421 | if (ret == 1) { | 421 | if (ret == 1 || ret < 0) { |
| 422 | call->state = AFS_CALL_COMPLETE; | 422 | call->state = AFS_CALL_COMPLETE; |
| 423 | goto done; | 423 | goto done; |
| 424 | } | 424 | } |
diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 174f43f43aff..c05216a8fbac 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h | |||
| @@ -245,7 +245,7 @@ static inline struct team_port *team_get_port_by_index(struct team *team, | |||
| 245 | return NULL; | 245 | return NULL; |
| 246 | } | 246 | } |
| 247 | 247 | ||
| 248 | static inline int team_num_to_port_index(struct team *team, int num) | 248 | static inline int team_num_to_port_index(struct team *team, unsigned int num) |
| 249 | { | 249 | { |
| 250 | int en_port_count = ACCESS_ONCE(team->en_port_count); | 250 | int en_port_count = ACCESS_ONCE(team->en_port_count); |
| 251 | 251 | ||
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index c8135680c43e..e2288421fe6b 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c | |||
| @@ -21,8 +21,6 @@ | |||
| 21 | SOFTWARE IS DISCLAIMED. | 21 | SOFTWARE IS DISCLAIMED. |
| 22 | */ | 22 | */ |
| 23 | 23 | ||
| 24 | #include <asm/unaligned.h> | ||
| 25 | |||
| 26 | #include <net/bluetooth/bluetooth.h> | 24 | #include <net/bluetooth/bluetooth.h> |
| 27 | #include <net/bluetooth/hci_core.h> | 25 | #include <net/bluetooth/hci_core.h> |
| 28 | #include <net/bluetooth/mgmt.h> | 26 | #include <net/bluetooth/mgmt.h> |
| @@ -973,33 +971,58 @@ void __hci_req_enable_advertising(struct hci_request *req) | |||
| 973 | 971 | ||
| 974 | static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) | 972 | static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) |
| 975 | { | 973 | { |
| 976 | size_t name_len; | 974 | size_t complete_len; |
| 975 | size_t short_len; | ||
| 977 | int max_len; | 976 | int max_len; |
| 978 | 977 | ||
| 979 | max_len = HCI_MAX_AD_LENGTH - ad_len - 2; | 978 | max_len = HCI_MAX_AD_LENGTH - ad_len - 2; |
| 980 | name_len = strlen(hdev->dev_name); | 979 | complete_len = strlen(hdev->dev_name); |
| 981 | if (name_len > 0 && max_len > 0) { | 980 | short_len = strlen(hdev->short_name); |
| 982 | 981 | ||
| 983 | if (name_len > max_len) { | 982 | /* no space left for name */ |
| 984 | name_len = max_len; | 983 | if (max_len < 1) |
| 985 | ptr[1] = EIR_NAME_SHORT; | 984 | return ad_len; |
| 986 | } else | 985 | |
| 987 | ptr[1] = EIR_NAME_COMPLETE; | 986 | /* no name set */ |
| 988 | 987 | if (!complete_len) | |
| 989 | ptr[0] = name_len + 1; | 988 | return ad_len; |
| 989 | |||
| 990 | /* complete name fits and is equal to or shorter than max short name len */ | ||
| 991 | if (complete_len <= max_len && | ||
| 992 | complete_len <= HCI_MAX_SHORT_NAME_LENGTH) { | ||
| 993 | return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE, | ||
| 994 | hdev->dev_name, complete_len); | ||
| 995 | } | ||
| 990 | 996 | ||
| 991 | memcpy(ptr + 2, hdev->dev_name, name_len); | 997 | /* short name set and fits */ |
| 998 | if (short_len && short_len <= max_len) { | ||
| 999 | return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, | ||
| 1000 | hdev->short_name, short_len); | ||
| 1001 | } | ||
| 992 | 1002 | ||
| 993 | ad_len += (name_len + 2); | 1003 | /* no short name set so shorten complete name */ |
| 994 | ptr += (name_len + 2); | 1004 | if (!short_len) { |
| 1005 | return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, | ||
| 1006 | hdev->dev_name, max_len); | ||
| 995 | } | 1007 | } |
| 996 | 1008 | ||
| 997 | return ad_len; | 1009 | return ad_len; |
| 998 | } | 1010 | } |
| 999 | 1011 | ||
| 1012 | static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len) | ||
| 1013 | { | ||
| 1014 | return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance); | ||
| 1015 | } | ||
| 1016 | |||
| 1000 | static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr) | 1017 | static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr) |
| 1001 | { | 1018 | { |
| 1002 | return append_local_name(hdev, ptr, 0); | 1019 | u8 scan_rsp_len = 0; |
| 1020 | |||
| 1021 | if (hdev->appearance) { | ||
| 1022 | scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len); | ||
| 1023 | } | ||
| 1024 | |||
| 1025 | return append_local_name(hdev, ptr, scan_rsp_len); | ||
| 1003 | } | 1026 | } |
| 1004 | 1027 | ||
| 1005 | static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance, | 1028 | static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance, |
| @@ -1016,18 +1039,13 @@ static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance, | |||
| 1016 | instance_flags = adv_instance->flags; | 1039 | instance_flags = adv_instance->flags; |
| 1017 | 1040 | ||
| 1018 | if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) { | 1041 | if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) { |
| 1019 | ptr[0] = 3; | 1042 | scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len); |
| 1020 | ptr[1] = EIR_APPEARANCE; | ||
| 1021 | put_unaligned_le16(hdev->appearance, ptr + 2); | ||
| 1022 | scan_rsp_len += 4; | ||
| 1023 | ptr += 4; | ||
| 1024 | } | 1043 | } |
| 1025 | 1044 | ||
| 1026 | memcpy(ptr, adv_instance->scan_rsp_data, | 1045 | memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data, |
| 1027 | adv_instance->scan_rsp_len); | 1046 | adv_instance->scan_rsp_len); |
| 1028 | 1047 | ||
| 1029 | scan_rsp_len += adv_instance->scan_rsp_len; | 1048 | scan_rsp_len += adv_instance->scan_rsp_len; |
| 1030 | ptr += adv_instance->scan_rsp_len; | ||
| 1031 | 1049 | ||
| 1032 | if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME) | 1050 | if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME) |
| 1033 | scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len); | 1051 | scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len); |
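The rewritten append_local_name() replaces the old truncate-in-place logic with an explicit preference order. Reading the new hunk as a decision ladder (the last case is easy to miss):

    /* 1. complete name fits in the AD space and is no longer than
     *    HCI_MAX_SHORT_NAME_LENGTH      -> EIR_NAME_COMPLETE
     * 2. a short name is set and fits   -> EIR_NAME_SHORT
     * 3. no short name set              -> complete name truncated to
     *    max_len, tagged EIR_NAME_SHORT
     * 4. short name set but too long    -> nothing appended
     */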
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index ac1e11006f38..6b06629245a8 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h | |||
| @@ -20,6 +20,8 @@ | |||
| 20 | SOFTWARE IS DISCLAIMED. | 20 | SOFTWARE IS DISCLAIMED. |
| 21 | */ | 21 | */ |
| 22 | 22 | ||
| 23 | #include <asm/unaligned.h> | ||
| 24 | |||
| 23 | #define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock) | 25 | #define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock) |
| 24 | #define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock) | 26 | #define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock) |
| 25 | 27 | ||
| @@ -103,3 +105,24 @@ static inline void hci_update_background_scan(struct hci_dev *hdev) | |||
| 103 | 105 | ||
| 104 | void hci_request_setup(struct hci_dev *hdev); | 106 | void hci_request_setup(struct hci_dev *hdev); |
| 105 | void hci_request_cancel_all(struct hci_dev *hdev); | 107 | void hci_request_cancel_all(struct hci_dev *hdev); |
| 108 | |||
| 109 | static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, | ||
| 110 | u8 *data, u8 data_len) | ||
| 111 | { | ||
| 112 | eir[eir_len++] = sizeof(type) + data_len; | ||
| 113 | eir[eir_len++] = type; | ||
| 114 | memcpy(&eir[eir_len], data, data_len); | ||
| 115 | eir_len += data_len; | ||
| 116 | |||
| 117 | return eir_len; | ||
| 118 | } | ||
| 119 | |||
| 120 | static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data) | ||
| 121 | { | ||
| 122 | eir[eir_len++] = sizeof(type) + sizeof(data); | ||
| 123 | eir[eir_len++] = type; | ||
| 124 | put_unaligned_le16(data, &eir[eir_len]); | ||
| 125 | eir_len += sizeof(data); | ||
| 126 | |||
| 127 | return eir_len; | ||
| 128 | } | ||
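Moving eir_append_data() and eir_append_le16() into hci_request.h gives mgmt.c and the advertising code one shared encoder for length/type/value elements. A typical call chain looks like the fragment below (illustrative only); note that neither helper bounds-checks the buffer, so callers must reserve room first, as append_local_name() does with max_len:

    u8 buf[HCI_MAX_AD_LENGTH];
    u16 len = 0;

    /* Each element is emitted as: length byte, type byte, payload. */
    len = eir_append_data(buf, len, EIR_NAME_COMPLETE,
                          hdev->dev_name, strlen(hdev->dev_name));
    len = eir_append_le16(buf, len, EIR_APPEARANCE, hdev->appearance);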
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 19b8a5e9420d..736038085feb 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c | |||
| @@ -867,27 +867,6 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev, | |||
| 867 | sizeof(rp)); | 867 | sizeof(rp)); |
| 868 | } | 868 | } |
| 869 | 869 | ||
| 870 | static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data, | ||
| 871 | u8 data_len) | ||
| 872 | { | ||
| 873 | eir[eir_len++] = sizeof(type) + data_len; | ||
| 874 | eir[eir_len++] = type; | ||
| 875 | memcpy(&eir[eir_len], data, data_len); | ||
| 876 | eir_len += data_len; | ||
| 877 | |||
| 878 | return eir_len; | ||
| 879 | } | ||
| 880 | |||
| 881 | static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data) | ||
| 882 | { | ||
| 883 | eir[eir_len++] = sizeof(type) + sizeof(data); | ||
| 884 | eir[eir_len++] = type; | ||
| 885 | put_unaligned_le16(data, &eir[eir_len]); | ||
| 886 | eir_len += sizeof(data); | ||
| 887 | |||
| 888 | return eir_len; | ||
| 889 | } | ||
| 890 | |||
| 891 | static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir) | 870 | static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir) |
| 892 | { | 871 | { |
| 893 | u16 eir_len = 0; | 872 | u16 eir_len = 0; |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index cbd9343751a2..d8983e15f859 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -5729,6 +5729,7 @@ int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl, | |||
| 5729 | return ret; | 5729 | return ret; |
| 5730 | } | 5730 | } |
| 5731 | 5731 | ||
| 5732 | static int minus_one = -1; | ||
| 5732 | static const int one = 1; | 5733 | static const int one = 1; |
| 5733 | static const int two_five_five = 255; | 5734 | static const int two_five_five = 255; |
| 5734 | 5735 | ||
| @@ -5789,7 +5790,8 @@ static const struct ctl_table addrconf_sysctl[] = { | |||
| 5789 | .data = &ipv6_devconf.rtr_solicits, | 5790 | .data = &ipv6_devconf.rtr_solicits, |
| 5790 | .maxlen = sizeof(int), | 5791 | .maxlen = sizeof(int), |
| 5791 | .mode = 0644, | 5792 | .mode = 0644, |
| 5792 | .proc_handler = proc_dointvec, | 5793 | .proc_handler = proc_dointvec_minmax, |
| 5794 | .extra1 = &minus_one, | ||
| 5793 | }, | 5795 | }, |
| 5794 | { | 5796 | { |
| 5795 | .procname = "router_solicitation_interval", | 5797 | .procname = "router_solicitation_interval", |
diff --git a/net/netfilter/core.c b/net/netfilter/core.c index c9d90eb64046..fcb5d1df11e9 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c | |||
| @@ -65,49 +65,24 @@ static DEFINE_MUTEX(nf_hook_mutex); | |||
| 65 | #define nf_entry_dereference(e) \ | 65 | #define nf_entry_dereference(e) \ |
| 66 | rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex)) | 66 | rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex)) |
| 67 | 67 | ||
| 68 | static struct nf_hook_entry *nf_hook_entry_head(struct net *net, | 68 | static struct nf_hook_entry __rcu **nf_hook_entry_head(struct net *net, const struct nf_hook_ops *reg) |
| 69 | const struct nf_hook_ops *reg) | ||
| 70 | { | 69 | { |
| 71 | struct nf_hook_entry *hook_head = NULL; | ||
| 72 | |||
| 73 | if (reg->pf != NFPROTO_NETDEV) | 70 | if (reg->pf != NFPROTO_NETDEV) |
| 74 | hook_head = nf_entry_dereference(net->nf.hooks[reg->pf] | 71 | return net->nf.hooks[reg->pf]+reg->hooknum; |
| 75 | [reg->hooknum]); | 72 | |
| 76 | else if (reg->hooknum == NF_NETDEV_INGRESS) { | ||
| 77 | #ifdef CONFIG_NETFILTER_INGRESS | 73 | #ifdef CONFIG_NETFILTER_INGRESS |
| 74 | if (reg->hooknum == NF_NETDEV_INGRESS) { | ||
| 78 | if (reg->dev && dev_net(reg->dev) == net) | 75 | if (reg->dev && dev_net(reg->dev) == net) |
| 79 | hook_head = | 76 | return ®->dev->nf_hooks_ingress; |
| 80 | nf_entry_dereference( | ||
| 81 | reg->dev->nf_hooks_ingress); | ||
| 82 | #endif | ||
| 83 | } | 77 | } |
| 84 | return hook_head; | ||
| 85 | } | ||
| 86 | |||
| 87 | /* must hold nf_hook_mutex */ | ||
| 88 | static void nf_set_hooks_head(struct net *net, const struct nf_hook_ops *reg, | ||
| 89 | struct nf_hook_entry *entry) | ||
| 90 | { | ||
| 91 | switch (reg->pf) { | ||
| 92 | case NFPROTO_NETDEV: | ||
| 93 | #ifdef CONFIG_NETFILTER_INGRESS | ||
| 94 | /* We already checked in nf_register_net_hook() that this is | ||
| 95 | * used from ingress. | ||
| 96 | */ | ||
| 97 | rcu_assign_pointer(reg->dev->nf_hooks_ingress, entry); | ||
| 98 | #endif | 78 | #endif |
| 99 | break; | 79 | return NULL; |
| 100 | default: | ||
| 101 | rcu_assign_pointer(net->nf.hooks[reg->pf][reg->hooknum], | ||
| 102 | entry); | ||
| 103 | break; | ||
| 104 | } | ||
| 105 | } | 80 | } |
| 106 | 81 | ||
| 107 | int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg) | 82 | int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg) |
| 108 | { | 83 | { |
| 109 | struct nf_hook_entry *hooks_entry; | 84 | struct nf_hook_entry __rcu **pp; |
| 110 | struct nf_hook_entry *entry; | 85 | struct nf_hook_entry *entry, *p; |
| 111 | 86 | ||
| 112 | if (reg->pf == NFPROTO_NETDEV) { | 87 | if (reg->pf == NFPROTO_NETDEV) { |
| 113 | #ifndef CONFIG_NETFILTER_INGRESS | 88 | #ifndef CONFIG_NETFILTER_INGRESS |
| @@ -119,6 +94,10 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg) | |||
| 119 | return -EINVAL; | 94 | return -EINVAL; |
| 120 | } | 95 | } |
| 121 | 96 | ||
| 97 | pp = nf_hook_entry_head(net, reg); | ||
| 98 | if (!pp) | ||
| 99 | return -EINVAL; | ||
| 100 | |||
| 122 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | 101 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); |
| 123 | if (!entry) | 102 | if (!entry) |
| 124 | return -ENOMEM; | 103 | return -ENOMEM; |
| @@ -128,26 +107,15 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg) | |||
| 128 | entry->next = NULL; | 107 | entry->next = NULL; |
| 129 | 108 | ||
| 130 | mutex_lock(&nf_hook_mutex); | 109 | mutex_lock(&nf_hook_mutex); |
| 131 | hooks_entry = nf_hook_entry_head(net, reg); | ||
| 132 | |||
| 133 | if (hooks_entry && hooks_entry->orig_ops->priority > reg->priority) { | ||
| 134 | /* This is the case where we need to insert at the head */ | ||
| 135 | entry->next = hooks_entry; | ||
| 136 | hooks_entry = NULL; | ||
| 137 | } | ||
| 138 | |||
| 139 | while (hooks_entry && | ||
| 140 | reg->priority >= hooks_entry->orig_ops->priority && | ||
| 141 | nf_entry_dereference(hooks_entry->next)) { | ||
| 142 | hooks_entry = nf_entry_dereference(hooks_entry->next); | ||
| 143 | } | ||
| 144 | 110 | ||
| 145 | if (hooks_entry) { | 111 | /* Find the spot in the list */ |
| 146 | entry->next = nf_entry_dereference(hooks_entry->next); | 112 | while ((p = nf_entry_dereference(*pp)) != NULL) { |
| 147 | rcu_assign_pointer(hooks_entry->next, entry); | 113 | if (reg->priority < p->orig_ops->priority) |
| 148 | } else { | 114 | break; |
| 149 | nf_set_hooks_head(net, reg, entry); | 115 | pp = &p->next; |
| 150 | } | 116 | } |
| 117 | rcu_assign_pointer(entry->next, p); | ||
| 118 | rcu_assign_pointer(*pp, entry); | ||
| 151 | 119 | ||
| 152 | mutex_unlock(&nf_hook_mutex); | 120 | mutex_unlock(&nf_hook_mutex); |
| 153 | #ifdef CONFIG_NETFILTER_INGRESS | 121 | #ifdef CONFIG_NETFILTER_INGRESS |
| @@ -163,33 +131,23 @@ EXPORT_SYMBOL(nf_register_net_hook); | |||
| 163 | 131 | ||
| 164 | void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) | 132 | void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) |
| 165 | { | 133 | { |
| 166 | struct nf_hook_entry *hooks_entry; | 134 | struct nf_hook_entry __rcu **pp; |
| 135 | struct nf_hook_entry *p; | ||
| 167 | 136 | ||
| 168 | mutex_lock(&nf_hook_mutex); | 137 | pp = nf_hook_entry_head(net, reg); |
| 169 | hooks_entry = nf_hook_entry_head(net, reg); | 138 | if (WARN_ON_ONCE(!pp)) |
| 170 | if (hooks_entry && hooks_entry->orig_ops == reg) { | 139 | return; |
| 171 | nf_set_hooks_head(net, reg, | ||
| 172 | nf_entry_dereference(hooks_entry->next)); | ||
| 173 | goto unlock; | ||
| 174 | } | ||
| 175 | while (hooks_entry && nf_entry_dereference(hooks_entry->next)) { | ||
| 176 | struct nf_hook_entry *next = | ||
| 177 | nf_entry_dereference(hooks_entry->next); | ||
| 178 | struct nf_hook_entry *nnext; | ||
| 179 | 140 | ||
| 180 | if (next->orig_ops != reg) { | 141 | mutex_lock(&nf_hook_mutex); |
| 181 | hooks_entry = next; | 142 | while ((p = nf_entry_dereference(*pp)) != NULL) { |
| 182 | continue; | 143 | if (p->orig_ops == reg) { |
| 144 | rcu_assign_pointer(*pp, p->next); | ||
| 145 | break; | ||
| 183 | } | 146 | } |
| 184 | nnext = nf_entry_dereference(next->next); | 147 | pp = &p->next; |
| 185 | rcu_assign_pointer(hooks_entry->next, nnext); | ||
| 186 | hooks_entry = next; | ||
| 187 | break; | ||
| 188 | } | 148 | } |
| 189 | |||
| 190 | unlock: | ||
| 191 | mutex_unlock(&nf_hook_mutex); | 149 | mutex_unlock(&nf_hook_mutex); |
| 192 | if (!hooks_entry) { | 150 | if (!p) { |
| 193 | WARN(1, "nf_unregister_net_hook: hook not found!\n"); | 151 | WARN(1, "nf_unregister_net_hook: hook not found!\n"); |
| 194 | return; | 152 | return; |
| 195 | } | 153 | } |
| @@ -201,10 +159,10 @@ unlock: | |||
| 201 | static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]); | 159 | static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]); |
| 202 | #endif | 160 | #endif |
| 203 | synchronize_net(); | 161 | synchronize_net(); |
| 204 | nf_queue_nf_hook_drop(net, hooks_entry); | 162 | nf_queue_nf_hook_drop(net, p); |
| 205 | /* other cpu might still process nfqueue verdict that used reg */ | 163 | /* other cpu might still process nfqueue verdict that used reg */ |
| 206 | synchronize_net(); | 164 | synchronize_net(); |
| 207 | kfree(hooks_entry); | 165 | kfree(p); |
| 208 | } | 166 | } |
| 209 | EXPORT_SYMBOL(nf_unregister_net_hook); | 167 | EXPORT_SYMBOL(nf_unregister_net_hook); |
| 210 | 168 | ||
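The netfilter rewrite is a textbook use of the indirect-pointer idiom: nf_hook_entry_head() now returns the address of the link to edit, so priority-ordered insertion and removal need no head special case and nf_set_hooks_head() disappears entirely. The same structure without the RCU annotations and locking:

    #include <stddef.h>

    struct hook {
        struct hook *next;
        int priority;
    };

    /* Insert keeping ascending priority; equal priorities go after. */
    static void hook_add(struct hook **head, struct hook *entry)
    {
        struct hook **pp = head, *p;

        while ((p = *pp) != NULL && p->priority <= entry->priority)
            pp = &p->next;

        entry->next = p;
        *pp = entry;   /* rcu_assign_pointer() in the real code */
    }

    /* Unlink by identity; returns NULL if not found. */
    static struct hook *hook_del(struct hook **head, struct hook *victim)
    {
        struct hook **pp = head, *p;

        while ((p = *pp) != NULL) {
            if (p == victim) {
                *pp = p->next;
                return p;
            }
            pp = &p->next;
        }
        return NULL;
    }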
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 627f898c05b9..62bea4591054 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -1832,7 +1832,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, | |||
| 1832 | /* Record the max length of recvmsg() calls for future allocations */ | 1832 | /* Record the max length of recvmsg() calls for future allocations */ |
| 1833 | nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len); | 1833 | nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len); |
| 1834 | nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len, | 1834 | nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len, |
| 1835 | 16384); | 1835 | SKB_WITH_OVERHEAD(32768)); |
| 1836 | 1836 | ||
| 1837 | copied = data_skb->len; | 1837 | copied = data_skb->len; |
| 1838 | if (len < copied) { | 1838 | if (len < copied) { |
| @@ -2083,8 +2083,9 @@ static int netlink_dump(struct sock *sk) | |||
| 2083 | 2083 | ||
| 2084 | if (alloc_min_size < nlk->max_recvmsg_len) { | 2084 | if (alloc_min_size < nlk->max_recvmsg_len) { |
| 2085 | alloc_size = nlk->max_recvmsg_len; | 2085 | alloc_size = nlk->max_recvmsg_len; |
| 2086 | skb = alloc_skb(alloc_size, GFP_KERNEL | | 2086 | skb = alloc_skb(alloc_size, |
| 2087 | __GFP_NOWARN | __GFP_NORETRY); | 2087 | (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) | |
| 2088 | __GFP_NOWARN | __GFP_NORETRY); | ||
| 2088 | } | 2089 | } |
| 2089 | if (!skb) { | 2090 | if (!skb) { |
| 2090 | alloc_size = alloc_min_size; | 2091 | alloc_size = alloc_min_size; |
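Two related changes here: max_recvmsg_len is now capped with SKB_WITH_OVERHEAD(32768), so the remembered size leaves room for struct skb_shared_info inside a 32 KiB allocation, and the oversized dump buffer is attempted without __GFP_DIRECT_RECLAIM, so on a fragmented heap the high-order allocation fails fast and the code falls back to alloc_min_size instead of stalling in reclaim. The try-cheap-then-fallback shape, with hypothetical allocator stand-ins:

    #include <stddef.h>

    void *try_alloc_noreclaim(size_t n);  /* may fail fast; never stalls */
    void *alloc_small(size_t n);          /* low order; allowed to reclaim */

    static void *alloc_dump_buf(size_t preferred, size_t minimum)
    {
        void *buf = NULL;

        if (preferred > minimum)
            buf = try_alloc_noreclaim(preferred);
        if (!buf)
            buf = alloc_small(minimum);
        return buf;
    }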
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 33a4697d5539..11db0d619c00 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -3952,6 +3952,7 @@ static int packet_notifier(struct notifier_block *this, | |||
| 3952 | } | 3952 | } |
| 3953 | if (msg == NETDEV_UNREGISTER) { | 3953 | if (msg == NETDEV_UNREGISTER) { |
| 3954 | packet_cached_dev_reset(po); | 3954 | packet_cached_dev_reset(po); |
| 3955 | fanout_release(sk); | ||
| 3955 | po->ifindex = -1; | 3956 | po->ifindex = -1; |
| 3956 | if (po->prot_hook.dev) | 3957 | if (po->prot_hook.dev) |
| 3957 | dev_put(po->prot_hook.dev); | 3958 | dev_put(po->prot_hook.dev); |
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 44c9c2b0b190..2d59c9be40e1 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c | |||
| @@ -678,9 +678,9 @@ static int rxrpc_release_sock(struct sock *sk) | |||
| 678 | sk->sk_state = RXRPC_CLOSE; | 678 | sk->sk_state = RXRPC_CLOSE; |
| 679 | spin_unlock_bh(&sk->sk_receive_queue.lock); | 679 | spin_unlock_bh(&sk->sk_receive_queue.lock); |
| 680 | 680 | ||
| 681 | if (rx->local && rx->local->service == rx) { | 681 | if (rx->local && rcu_access_pointer(rx->local->service) == rx) { |
| 682 | write_lock(&rx->local->services_lock); | 682 | write_lock(&rx->local->services_lock); |
| 683 | rx->local->service = NULL; | 683 | rcu_assign_pointer(rx->local->service, NULL); |
| 684 | write_unlock(&rx->local->services_lock); | 684 | write_unlock(&rx->local->services_lock); |
| 685 | } | 685 | } |
| 686 | 686 | ||
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index d38dffd78085..f60e35576526 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h | |||
| @@ -398,6 +398,7 @@ enum rxrpc_call_flag { | |||
| 398 | RXRPC_CALL_EXPOSED, /* The call was exposed to the world */ | 398 | RXRPC_CALL_EXPOSED, /* The call was exposed to the world */ |
| 399 | RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */ | 399 | RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */ |
| 400 | RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */ | 400 | RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */ |
| 401 | RXRPC_CALL_SEND_PING, /* A ping will need to be sent */ | ||
| 401 | RXRPC_CALL_PINGING, /* Ping in process */ | 402 | RXRPC_CALL_PINGING, /* Ping in process */ |
| 402 | RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ | 403 | RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ |
| 403 | }; | 404 | }; |
| @@ -410,6 +411,7 @@ enum rxrpc_call_event { | |||
| 410 | RXRPC_CALL_EV_ABORT, /* need to generate abort */ | 411 | RXRPC_CALL_EV_ABORT, /* need to generate abort */ |
| 411 | RXRPC_CALL_EV_TIMER, /* Timer expired */ | 412 | RXRPC_CALL_EV_TIMER, /* Timer expired */ |
| 412 | RXRPC_CALL_EV_RESEND, /* Tx resend required */ | 413 | RXRPC_CALL_EV_RESEND, /* Tx resend required */ |
| 414 | RXRPC_CALL_EV_PING, /* Ping send required */ | ||
| 413 | }; | 415 | }; |
| 414 | 416 | ||
| 415 | /* | 417 | /* |
| @@ -466,6 +468,7 @@ struct rxrpc_call { | |||
| 466 | struct rxrpc_sock __rcu *socket; /* socket responsible */ | 468 | struct rxrpc_sock __rcu *socket; /* socket responsible */ |
| 467 | ktime_t ack_at; /* When deferred ACK needs to happen */ | 469 | ktime_t ack_at; /* When deferred ACK needs to happen */ |
| 468 | ktime_t resend_at; /* When next resend needs to happen */ | 470 | ktime_t resend_at; /* When next resend needs to happen */ |
| 471 | ktime_t ping_at; /* When next to send a ping */ | ||
| 469 | ktime_t expire_at; /* When the call times out */ | 472 | ktime_t expire_at; /* When the call times out */ |
| 470 | struct timer_list timer; /* Combined event timer */ | 473 | struct timer_list timer; /* Combined event timer */ |
| 471 | struct work_struct processor; /* Event processor */ | 474 | struct work_struct processor; /* Event processor */ |
| @@ -558,8 +561,10 @@ struct rxrpc_call { | |||
| 558 | rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */ | 561 | rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */ |
| 559 | rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */ | 562 | rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */ |
| 560 | rxrpc_seq_t ackr_seen; /* Highest packet shown seen */ | 563 | rxrpc_seq_t ackr_seen; /* Highest packet shown seen */ |
| 561 | rxrpc_serial_t ackr_ping; /* Last ping sent */ | 564 | |
| 562 | ktime_t ackr_ping_time; /* Time last ping sent */ | 565 | /* ping management */ |
| 566 | rxrpc_serial_t ping_serial; /* Last ping sent */ | ||
| 567 | ktime_t ping_time; /* Time last ping sent */ | ||
| 563 | 568 | ||
| 564 | /* transmission-phase ACK management */ | 569 | /* transmission-phase ACK management */ |
| 565 | ktime_t acks_latest_ts; /* Timestamp of latest ACK received */ | 570 | ktime_t acks_latest_ts; /* Timestamp of latest ACK received */ |
| @@ -728,8 +733,10 @@ extern const char rxrpc_rtt_rx_traces[rxrpc_rtt_rx__nr_trace][5]; | |||
| 728 | enum rxrpc_timer_trace { | 733 | enum rxrpc_timer_trace { |
| 729 | rxrpc_timer_begin, | 734 | rxrpc_timer_begin, |
| 730 | rxrpc_timer_init_for_reply, | 735 | rxrpc_timer_init_for_reply, |
| 736 | rxrpc_timer_init_for_send_reply, | ||
| 731 | rxrpc_timer_expired, | 737 | rxrpc_timer_expired, |
| 732 | rxrpc_timer_set_for_ack, | 738 | rxrpc_timer_set_for_ack, |
| 739 | rxrpc_timer_set_for_ping, | ||
| 733 | rxrpc_timer_set_for_resend, | 740 | rxrpc_timer_set_for_resend, |
| 734 | rxrpc_timer_set_for_send, | 741 | rxrpc_timer_set_for_send, |
| 735 | rxrpc_timer__nr_trace | 742 | rxrpc_timer__nr_trace |
| @@ -743,6 +750,7 @@ enum rxrpc_propose_ack_trace { | |||
| 743 | rxrpc_propose_ack_ping_for_lost_ack, | 750 | rxrpc_propose_ack_ping_for_lost_ack, |
| 744 | rxrpc_propose_ack_ping_for_lost_reply, | 751 | rxrpc_propose_ack_ping_for_lost_reply, |
| 745 | rxrpc_propose_ack_ping_for_params, | 752 | rxrpc_propose_ack_ping_for_params, |
| 753 | rxrpc_propose_ack_processing_op, | ||
| 746 | rxrpc_propose_ack_respond_to_ack, | 754 | rxrpc_propose_ack_respond_to_ack, |
| 747 | rxrpc_propose_ack_respond_to_ping, | 755 | rxrpc_propose_ack_respond_to_ping, |
| 748 | rxrpc_propose_ack_retry_tx, | 756 | rxrpc_propose_ack_retry_tx, |
| @@ -777,7 +785,7 @@ extern const char rxrpc_congest_modes[NR__RXRPC_CONGEST_MODES][10]; | |||
| 777 | extern const char rxrpc_congest_changes[rxrpc_congest__nr_change][9]; | 785 | extern const char rxrpc_congest_changes[rxrpc_congest__nr_change][9]; |
| 778 | 786 | ||
| 779 | extern const char *const rxrpc_pkts[]; | 787 | extern const char *const rxrpc_pkts[]; |
| 780 | extern const char const rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4]; | 788 | extern const char rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4]; |
| 781 | 789 | ||
| 782 | #include <trace/events/rxrpc.h> | 790 | #include <trace/events/rxrpc.h> |
| 783 | 791 | ||
| @@ -805,6 +813,7 @@ int rxrpc_reject_call(struct rxrpc_sock *); | |||
| 805 | /* | 813 | /* |
| 806 | * call_event.c | 814 | * call_event.c |
| 807 | */ | 815 | */ |
| 816 | void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t); | ||
| 808 | void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t); | 817 | void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t); |
| 809 | void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool, | 818 | void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool, |
| 810 | enum rxrpc_propose_ack_trace); | 819 | enum rxrpc_propose_ack_trace); |
| @@ -1068,7 +1077,8 @@ extern const s8 rxrpc_ack_priority[]; | |||
| 1068 | /* | 1077 | /* |
| 1069 | * output.c | 1078 | * output.c |
| 1070 | */ | 1079 | */ |
| 1071 | int rxrpc_send_call_packet(struct rxrpc_call *, u8); | 1080 | int rxrpc_send_ack_packet(struct rxrpc_call *, bool); |
| 1081 | int rxrpc_send_abort_packet(struct rxrpc_call *); | ||
| 1072 | int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool); | 1082 | int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool); |
| 1073 | void rxrpc_reject_packets(struct rxrpc_local *); | 1083 | void rxrpc_reject_packets(struct rxrpc_local *); |
| 1074 | 1084 | ||
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 3cac231d8405..832d854c2d5c 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c | |||
| @@ -337,7 +337,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, | |||
| 337 | 337 | ||
| 338 | /* Get the socket providing the service */ | 338 | /* Get the socket providing the service */ |
| 339 | rx = rcu_dereference(local->service); | 339 | rx = rcu_dereference(local->service); |
| 340 | if (service_id == rx->srx.srx_service) | 340 | if (rx && service_id == rx->srx.srx_service) |
| 341 | goto found_service; | 341 | goto found_service; |
| 342 | 342 | ||
| 343 | trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, | 343 | trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, |
| @@ -565,7 +565,7 @@ out_discard: | |||
| 565 | write_unlock_bh(&call->state_lock); | 565 | write_unlock_bh(&call->state_lock); |
| 566 | write_unlock(&rx->call_lock); | 566 | write_unlock(&rx->call_lock); |
| 567 | if (abort) { | 567 | if (abort) { |
| 568 | rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT); | 568 | rxrpc_send_abort_packet(call); |
| 569 | rxrpc_release_call(rx, call); | 569 | rxrpc_release_call(rx, call); |
| 570 | rxrpc_put_call(call, rxrpc_call_put); | 570 | rxrpc_put_call(call, rxrpc_call_put); |
| 571 | } | 571 | } |
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index 4f00476630b9..97a17ada4431 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c | |||
| @@ -24,19 +24,20 @@ | |||
| 24 | /* | 24 | /* |
| 25 | * Set the timer | 25 | * Set the timer |
| 26 | */ | 26 | */ |
| 27 | void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why, | 27 | void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why, |
| 28 | ktime_t now) | 28 | ktime_t now) |
| 29 | { | 29 | { |
| 30 | unsigned long t_j, now_j = jiffies; | 30 | unsigned long t_j, now_j = jiffies; |
| 31 | ktime_t t; | 31 | ktime_t t; |
| 32 | bool queue = false; | 32 | bool queue = false; |
| 33 | 33 | ||
| 34 | read_lock_bh(&call->state_lock); | ||
| 35 | |||
| 36 | if (call->state < RXRPC_CALL_COMPLETE) { | 34 | if (call->state < RXRPC_CALL_COMPLETE) { |
| 37 | t = call->expire_at; | 35 | t = call->expire_at; |
| 38 | if (!ktime_after(t, now)) | 36 | if (!ktime_after(t, now)) { |
| 37 | trace_rxrpc_timer(call, why, now, now_j); | ||
| 38 | queue = true; | ||
| 39 | goto out; | 39 | goto out; |
| 40 | } | ||
| 40 | 41 | ||
| 41 | if (!ktime_after(call->resend_at, now)) { | 42 | if (!ktime_after(call->resend_at, now)) { |
| 42 | call->resend_at = call->expire_at; | 43 | call->resend_at = call->expire_at; |
| @@ -54,6 +55,14 @@ void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why, | |||
| 54 | t = call->ack_at; | 55 | t = call->ack_at; |
| 55 | } | 56 | } |
| 56 | 57 | ||
| 58 | if (!ktime_after(call->ping_at, now)) { | ||
| 59 | call->ping_at = call->expire_at; | ||
| 60 | if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events)) | ||
| 61 | queue = true; | ||
| 62 | } else if (ktime_before(call->ping_at, t)) { | ||
| 63 | t = call->ping_at; | ||
| 64 | } | ||
| 65 | |||
| 57 | t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now))); | 66 | t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now))); |
| 58 | t_j += jiffies; | 67 | t_j += jiffies; |
| 59 | 68 | ||
| @@ -68,16 +77,46 @@ void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why, | |||
| 68 | mod_timer(&call->timer, t_j); | 77 | mod_timer(&call->timer, t_j); |
| 69 | trace_rxrpc_timer(call, why, now, now_j); | 78 | trace_rxrpc_timer(call, why, now, now_j); |
| 70 | } | 79 | } |
| 71 | |||
| 72 | if (queue) | ||
| 73 | rxrpc_queue_call(call); | ||
| 74 | } | 80 | } |
| 75 | 81 | ||
| 76 | out: | 82 | out: |
| 83 | if (queue) | ||
| 84 | rxrpc_queue_call(call); | ||
| 85 | } | ||
| 86 | |||
| 87 | /* | ||
| 88 | * Set the timer | ||
| 89 | */ | ||
| 90 | void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why, | ||
| 91 | ktime_t now) | ||
| 92 | { | ||
| 93 | read_lock_bh(&call->state_lock); | ||
| 94 | __rxrpc_set_timer(call, why, now); | ||
| 77 | read_unlock_bh(&call->state_lock); | 95 | read_unlock_bh(&call->state_lock); |
| 78 | } | 96 | } |
| 79 | 97 | ||
| 80 | /* | 98 | /* |
| 99 | * Propose a PING ACK be sent. | ||
| 100 | */ | ||
| 101 | static void rxrpc_propose_ping(struct rxrpc_call *call, | ||
| 102 | bool immediate, bool background) | ||
| 103 | { | ||
| 104 | if (immediate) { | ||
| 105 | if (background && | ||
| 106 | !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events)) | ||
| 107 | rxrpc_queue_call(call); | ||
| 108 | } else { | ||
| 109 | ktime_t now = ktime_get_real(); | ||
| 110 | ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay); | ||
| 111 | |||
| 112 | if (ktime_before(ping_at, call->ping_at)) { | ||
| 113 | call->ping_at = ping_at; | ||
| 114 | rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now); | ||
| 115 | } | ||
| 116 | } | ||
| 117 | } | ||
| 118 | |||
| 119 | /* | ||
| 81 | * propose an ACK be sent | 120 | * propose an ACK be sent |
| 82 | */ | 121 | */ |
| 83 | static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, | 122 | static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, |
| @@ -90,6 +129,14 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, | |||
| 90 | ktime_t now, ack_at; | 129 | ktime_t now, ack_at; |
| 91 | s8 prior = rxrpc_ack_priority[ack_reason]; | 130 | s8 prior = rxrpc_ack_priority[ack_reason]; |
| 92 | 131 | ||
| 132 | /* Pings are handled specially because we don't want to accidentally | ||
| 133 | * lose a ping response by subsuming it into a ping. | ||
| 134 | */ | ||
| 135 | if (ack_reason == RXRPC_ACK_PING) { | ||
| 136 | rxrpc_propose_ping(call, immediate, background); | ||
| 137 | goto trace; | ||
| 138 | } | ||
| 139 | |||
| 93 | /* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial | 140 | /* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial |
| 94 | * numbers, but we don't alter the timeout. | 141 | * numbers, but we don't alter the timeout. |
| 95 | */ | 142 | */ |
| @@ -125,7 +172,6 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, | |||
| 125 | expiry = rxrpc_soft_ack_delay; | 172 | expiry = rxrpc_soft_ack_delay; |
| 126 | break; | 173 | break; |
| 127 | 174 | ||
| 128 | case RXRPC_ACK_PING: | ||
| 129 | case RXRPC_ACK_IDLE: | 175 | case RXRPC_ACK_IDLE: |
| 130 | if (rxrpc_idle_ack_delay < expiry) | 176 | if (rxrpc_idle_ack_delay < expiry) |
| 131 | expiry = rxrpc_idle_ack_delay; | 177 | expiry = rxrpc_idle_ack_delay; |
| @@ -253,7 +299,7 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now) | |||
| 253 | goto out; | 299 | goto out; |
| 254 | rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, | 300 | rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, |
| 255 | rxrpc_propose_ack_ping_for_lost_ack); | 301 | rxrpc_propose_ack_ping_for_lost_ack); |
| 256 | rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK); | 302 | rxrpc_send_ack_packet(call, true); |
| 257 | goto out; | 303 | goto out; |
| 258 | } | 304 | } |
| 259 | 305 | ||
| @@ -328,12 +374,13 @@ void rxrpc_process_call(struct work_struct *work) | |||
| 328 | 374 | ||
| 329 | recheck_state: | 375 | recheck_state: |
| 330 | if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) { | 376 | if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) { |
| 331 | rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT); | 377 | rxrpc_send_abort_packet(call); |
| 332 | goto recheck_state; | 378 | goto recheck_state; |
| 333 | } | 379 | } |
| 334 | 380 | ||
| 335 | if (call->state == RXRPC_CALL_COMPLETE) { | 381 | if (call->state == RXRPC_CALL_COMPLETE) { |
| 336 | del_timer_sync(&call->timer); | 382 | del_timer_sync(&call->timer); |
| 383 | rxrpc_notify_socket(call); | ||
| 337 | goto out_put; | 384 | goto out_put; |
| 338 | } | 385 | } |
| 339 | 386 | ||
| @@ -345,13 +392,17 @@ recheck_state: | |||
| 345 | } | 392 | } |
| 346 | 393 | ||
| 347 | if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) { | 394 | if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) { |
| 348 | call->ack_at = call->expire_at; | ||
| 349 | if (call->ackr_reason) { | 395 | if (call->ackr_reason) { |
| 350 | rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK); | 396 | rxrpc_send_ack_packet(call, false); |
| 351 | goto recheck_state; | 397 | goto recheck_state; |
| 352 | } | 398 | } |
| 353 | } | 399 | } |
| 354 | 400 | ||
| 401 | if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) { | ||
| 402 | rxrpc_send_ack_packet(call, true); | ||
| 403 | goto recheck_state; | ||
| 404 | } | ||
| 405 | |||
| 355 | if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) { | 406 | if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) { |
| 356 | rxrpc_resend(call, now); | 407 | rxrpc_resend(call, now); |
| 357 | goto recheck_state; | 408 | goto recheck_state; |
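
call_event.c now follows the usual kernel naming convention: __rxrpc_set_timer() expects the caller to already hold call->state_lock (as the sendmsg.c hunk below relies on), while rxrpc_set_timer() remains the self-locking wrapper; the hunk also moves the deferred rxrpc_queue_call() past the out label, so the already-expired path queues the call too. A minimal pthread sketch of the convention, with illustrative names; pthreads stand in for the kernel rwlock:

    #include <pthread.h>
    #include <stdio.h>

    struct call {
            pthread_mutex_t state_lock;
            long expire_at;
    };

    /* Caller must hold call->state_lock. */
    static void __set_timer(struct call *call, long now)
    {
            if (now >= call->expire_at)
                    printf("timer already expired\n");
            else
                    printf("timer armed for %ld\n", call->expire_at);
    }

    /* Self-locking wrapper for callers that do not hold the lock. */
    static void set_timer(struct call *call, long now)
    {
            pthread_mutex_lock(&call->state_lock);
            __set_timer(call, now);
            pthread_mutex_unlock(&call->state_lock);
    }

    int main(void)
    {
            struct call c = {
                    .state_lock = PTHREAD_MUTEX_INITIALIZER,
                    .expire_at  = 100,
            };

            set_timer(&c, 10);              /* unlocked caller */

            pthread_mutex_lock(&c.state_lock);
            __set_timer(&c, 10);            /* already-locked caller */
            pthread_mutex_unlock(&c.state_lock);
            return 0;
    }

The split lets a path that already holds the lock (here, the sendmsg state transition) update a deadline without a self-deadlocking second acquisition.
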
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 364b42dc3dce..4353a29f3b57 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c | |||
| @@ -205,6 +205,7 @@ static void rxrpc_start_call_timer(struct rxrpc_call *call) | |||
| 205 | expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime); | 205 | expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime); |
| 206 | call->expire_at = expire_at; | 206 | call->expire_at = expire_at; |
| 207 | call->ack_at = expire_at; | 207 | call->ack_at = expire_at; |
| 208 | call->ping_at = expire_at; | ||
| 208 | call->resend_at = expire_at; | 209 | call->resend_at = expire_at; |
| 209 | call->timer.expires = jiffies + LONG_MAX / 2; | 210 | call->timer.expires = jiffies + LONG_MAX / 2; |
| 210 | rxrpc_set_timer(call, rxrpc_timer_begin, now); | 211 | rxrpc_set_timer(call, rxrpc_timer_begin, now); |
| @@ -498,7 +499,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) | |||
| 498 | struct rxrpc_call, sock_link); | 499 | struct rxrpc_call, sock_link); |
| 499 | rxrpc_get_call(call, rxrpc_call_got); | 500 | rxrpc_get_call(call, rxrpc_call_got); |
| 500 | rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, ECONNRESET); | 501 | rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, ECONNRESET); |
| 501 | rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT); | 502 | rxrpc_send_abort_packet(call); |
| 502 | rxrpc_release_call(rx, call); | 503 | rxrpc_release_call(rx, call); |
| 503 | rxrpc_put_call(call, rxrpc_call_put); | 504 | rxrpc_put_call(call, rxrpc_call_put); |
| 504 | } | 505 | } |
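
The new ping_at deadline is initialised like the others: every per-event deadline starts out equal to the hard call expiry, so an event nobody has requested can never become the soonest deadline, and the timer is simply armed for the minimum. A small sketch of that scheme under illustrative names:

    #include <stdio.h>

    struct call {
            long expire_at, ack_at, ping_at, resend_at;
    };

    static void start_call_timer(struct call *call, long now, long lifetime)
    {
            call->expire_at = now + lifetime;
            call->ack_at    = call->expire_at;  /* "no ACK pending" */
            call->ping_at   = call->expire_at;  /* "no ping pending" */
            call->resend_at = call->expire_at;  /* "no resend pending" */
    }

    static long next_deadline(const struct call *call)
    {
            long t = call->expire_at;

            if (call->resend_at < t)
                    t = call->resend_at;
            if (call->ack_at < t)
                    t = call->ack_at;
            if (call->ping_at < t)
                    t = call->ping_at;
            return t;
    }

    int main(void)
    {
            struct call c;

            start_call_timer(&c, 0, 1000);
            c.ping_at = 250;                    /* a ping is proposed */
            printf("next timer fires at %ld\n", next_deadline(&c)); /* 250 */
            return 0;
    }
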
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 3ad9f75031e3..44fb8d893c7d 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c | |||
| @@ -625,9 +625,9 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call, | |||
| 625 | rxrpc_serial_t ping_serial; | 625 | rxrpc_serial_t ping_serial; |
| 626 | ktime_t ping_time; | 626 | ktime_t ping_time; |
| 627 | 627 | ||
| 628 | ping_time = call->ackr_ping_time; | 628 | ping_time = call->ping_time; |
| 629 | smp_rmb(); | 629 | smp_rmb(); |
| 630 | ping_serial = call->ackr_ping; | 630 | ping_serial = call->ping_serial; |
| 631 | 631 | ||
| 632 | if (!test_bit(RXRPC_CALL_PINGING, &call->flags) || | 632 | if (!test_bit(RXRPC_CALL_PINGING, &call->flags) || |
| 633 | before(orig_serial, ping_serial)) | 633 | before(orig_serial, ping_serial)) |
| @@ -847,7 +847,8 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, | |||
| 847 | 847 | ||
| 848 | if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] & | 848 | if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] & |
| 849 | RXRPC_TX_ANNO_LAST && | 849 | RXRPC_TX_ANNO_LAST && |
| 850 | summary.nr_acks == call->tx_top - hard_ack) | 850 | summary.nr_acks == call->tx_top - hard_ack && |
| 851 | rxrpc_is_client_call(call)) | ||
| 851 | rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial, | 852 | rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial, |
| 852 | false, true, | 853 | false, true, |
| 853 | rxrpc_propose_ack_ping_for_lost_reply); | 854 | rxrpc_propose_ack_ping_for_lost_reply); |
| @@ -938,6 +939,33 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, | |||
| 938 | } | 939 | } |
| 939 | 940 | ||
| 940 | /* | 941 | /* |
| 942 | * Handle a new call on a channel implicitly completing the preceding call on | ||
| 943 | * that channel. | ||
| 944 | * | ||
| 945 | * TODO: If callNumber > call_id + 1, renegotiate security. | ||
| 946 | */ | ||
| 947 | static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn, | ||
| 948 | struct rxrpc_call *call) | ||
| 949 | { | ||
| 950 | switch (call->state) { | ||
| 951 | case RXRPC_CALL_SERVER_AWAIT_ACK: | ||
| 952 | rxrpc_call_completed(call); | ||
| 953 | break; | ||
| 954 | case RXRPC_CALL_COMPLETE: | ||
| 955 | break; | ||
| 956 | default: | ||
| 957 | if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, ESHUTDOWN)) { | ||
| 958 | set_bit(RXRPC_CALL_EV_ABORT, &call->events); | ||
| 959 | rxrpc_queue_call(call); | ||
| 960 | } | ||
| 961 | break; | ||
| 962 | } | ||
| 963 | |||
| 964 | __rxrpc_disconnect_call(conn, call); | ||
| 965 | rxrpc_notify_socket(call); | ||
| 966 | } | ||
| 967 | |||
| 968 | /* | ||
| 941 | * post connection-level events to the connection | 969 | * post connection-level events to the connection |
| 942 | * - this includes challenges, responses, some aborts and call terminal packet | 970 | * - this includes challenges, responses, some aborts and call terminal packet |
| 943 | * retransmission. | 971 | * retransmission. |
| @@ -1145,6 +1173,16 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
| 1145 | } | 1173 | } |
| 1146 | 1174 | ||
| 1147 | call = rcu_dereference(chan->call); | 1175 | call = rcu_dereference(chan->call); |
| 1176 | |||
| 1177 | if (sp->hdr.callNumber > chan->call_id) { | ||
| 1178 | if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) { | ||
| 1179 | rcu_read_unlock(); | ||
| 1180 | goto reject_packet; | ||
| 1181 | } | ||
| 1182 | if (call) | ||
| 1183 | rxrpc_input_implicit_end_call(conn, call); | ||
| 1184 | call = NULL; | ||
| 1185 | } | ||
| 1148 | } else { | 1186 | } else { |
| 1149 | skew = 0; | 1187 | skew = 0; |
| 1150 | call = NULL; | 1188 | call = NULL; |
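
input.c reads the renamed ping fields under the barrier pairing that output.c writes them with: the transmitter stores ping_serial, issues smp_wmb(), then stores ping_time; the receiver loads ping_time, issues smp_rmb(), then loads ping_serial, so a timestamp can never be paired with a serial older than the one it belongs to. A user-space sketch with C11 fences standing in for the kernel barriers; names follow the patch, values are illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int ping_serial;
    static _Atomic long ping_time;

    static void tx_ping(unsigned int serial, long now)
    {
            atomic_store_explicit(&ping_serial, serial, memory_order_relaxed);
            atomic_thread_fence(memory_order_release);  /* smp_wmb() */
            atomic_store_explicit(&ping_time, now, memory_order_relaxed);
    }

    static void rx_ping_response(unsigned int resp_serial, long now)
    {
            long t = atomic_load_explicit(&ping_time, memory_order_relaxed);

            atomic_thread_fence(memory_order_acquire);  /* smp_rmb() */
            if (atomic_load_explicit(&ping_serial,
                                     memory_order_relaxed) != resp_serial)
                    return;     /* response to some other ping */
            printf("RTT sample: %ld\n", now - t);
    }

    int main(void)
    {
            tx_ping(7, 100);
            rx_ping_response(7, 130);   /* RTT sample: 30 */
            return 0;
    }
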
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c index 9d1c721bc4e8..6dee55fad2d3 100644 --- a/net/rxrpc/misc.c +++ b/net/rxrpc/misc.c | |||
| @@ -93,10 +93,9 @@ const s8 rxrpc_ack_priority[] = { | |||
| 93 | [RXRPC_ACK_EXCEEDS_WINDOW] = 6, | 93 | [RXRPC_ACK_EXCEEDS_WINDOW] = 6, |
| 94 | [RXRPC_ACK_NOSPACE] = 7, | 94 | [RXRPC_ACK_NOSPACE] = 7, |
| 95 | [RXRPC_ACK_PING_RESPONSE] = 8, | 95 | [RXRPC_ACK_PING_RESPONSE] = 8, |
| 96 | [RXRPC_ACK_PING] = 9, | ||
| 97 | }; | 96 | }; |
| 98 | 97 | ||
| 99 | const char const rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4] = { | 98 | const char rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4] = { |
| 100 | "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", | 99 | "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", |
| 101 | "IDL", "-?-" | 100 | "IDL", "-?-" |
| 102 | }; | 101 | }; |
| @@ -196,7 +195,9 @@ const char rxrpc_timer_traces[rxrpc_timer__nr_trace][8] = { | |||
| 196 | [rxrpc_timer_begin] = "Begin ", | 195 | [rxrpc_timer_begin] = "Begin ", |
| 197 | [rxrpc_timer_expired] = "*EXPR*", | 196 | [rxrpc_timer_expired] = "*EXPR*", |
| 198 | [rxrpc_timer_init_for_reply] = "IniRpl", | 197 | [rxrpc_timer_init_for_reply] = "IniRpl", |
| 198 | [rxrpc_timer_init_for_send_reply] = "SndRpl", | ||
| 199 | [rxrpc_timer_set_for_ack] = "SetAck", | 199 | [rxrpc_timer_set_for_ack] = "SetAck", |
| 200 | [rxrpc_timer_set_for_ping] = "SetPng", | ||
| 200 | [rxrpc_timer_set_for_send] = "SetTx ", | 201 | [rxrpc_timer_set_for_send] = "SetTx ", |
| 201 | [rxrpc_timer_set_for_resend] = "SetRTx", | 202 | [rxrpc_timer_set_for_resend] = "SetRTx", |
| 202 | }; | 203 | }; |
| @@ -207,6 +208,7 @@ const char rxrpc_propose_ack_traces[rxrpc_propose_ack__nr_trace][8] = { | |||
| 207 | [rxrpc_propose_ack_ping_for_lost_ack] = "LostAck", | 208 | [rxrpc_propose_ack_ping_for_lost_ack] = "LostAck", |
| 208 | [rxrpc_propose_ack_ping_for_lost_reply] = "LostRpl", | 209 | [rxrpc_propose_ack_ping_for_lost_reply] = "LostRpl", |
| 209 | [rxrpc_propose_ack_ping_for_params] = "Params ", | 210 | [rxrpc_propose_ack_ping_for_params] = "Params ", |
| 211 | [rxrpc_propose_ack_processing_op] = "ProcOp ", | ||
| 210 | [rxrpc_propose_ack_respond_to_ack] = "Rsp2Ack", | 212 | [rxrpc_propose_ack_respond_to_ack] = "Rsp2Ack", |
| 211 | [rxrpc_propose_ack_respond_to_ping] = "Rsp2Png", | 213 | [rxrpc_propose_ack_respond_to_ping] = "Rsp2Png", |
| 212 | [rxrpc_propose_ack_retry_tx] = "RetryTx", | 214 | [rxrpc_propose_ack_retry_tx] = "RetryTx", |
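
misc.c keeps the enum-indexed trace-string tables in step with the new enum entries (rxrpc_timer_set_for_ping, rxrpc_timer_init_for_send_reply, rxrpc_propose_ack_processing_op): each enum gets a table of fixed-width names, and new entries land in the table in the same patch. A minimal sketch of that convention with illustrative names; designated initialisers keep the table correct even if the enum is reordered:

    #include <stdio.h>

    enum timer_trace {
            timer_begin,
            timer_set_for_ack,
            timer_set_for_ping,
            timer__nr_trace             /* keep last: table size */
    };

    /* Fixed width (7 chars + NUL) so trace output columns line up. */
    static const char timer_traces[timer__nr_trace][8] = {
            [timer_begin]        = "Begin ",
            [timer_set_for_ack]  = "SetAck",
            [timer_set_for_ping] = "SetPng",
    };

    int main(void)
    {
            printf("%s\n", timer_traces[timer_set_for_ping]);
            return 0;
    }
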
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 0d47db886f6e..5dab1ff3a6c2 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c | |||
| @@ -19,26 +19,27 @@ | |||
| 19 | #include <net/af_rxrpc.h> | 19 | #include <net/af_rxrpc.h> |
| 20 | #include "ar-internal.h" | 20 | #include "ar-internal.h" |
| 21 | 21 | ||
| 22 | struct rxrpc_pkt_buffer { | 22 | struct rxrpc_ack_buffer { |
| 23 | struct rxrpc_wire_header whdr; | 23 | struct rxrpc_wire_header whdr; |
| 24 | union { | 24 | struct rxrpc_ackpacket ack; |
| 25 | struct { | 25 | u8 acks[255]; |
| 26 | struct rxrpc_ackpacket ack; | 26 | u8 pad[3]; |
| 27 | u8 acks[255]; | ||
| 28 | u8 pad[3]; | ||
| 29 | }; | ||
| 30 | __be32 abort_code; | ||
| 31 | }; | ||
| 32 | struct rxrpc_ackinfo ackinfo; | 27 | struct rxrpc_ackinfo ackinfo; |
| 33 | }; | 28 | }; |
| 34 | 29 | ||
| 30 | struct rxrpc_abort_buffer { | ||
| 31 | struct rxrpc_wire_header whdr; | ||
| 32 | __be32 abort_code; | ||
| 33 | }; | ||
| 34 | |||
| 35 | /* | 35 | /* |
| 36 | * Fill out an ACK packet. | 36 | * Fill out an ACK packet. |
| 37 | */ | 37 | */ |
| 38 | static size_t rxrpc_fill_out_ack(struct rxrpc_call *call, | 38 | static size_t rxrpc_fill_out_ack(struct rxrpc_call *call, |
| 39 | struct rxrpc_pkt_buffer *pkt, | 39 | struct rxrpc_ack_buffer *pkt, |
| 40 | rxrpc_seq_t *_hard_ack, | 40 | rxrpc_seq_t *_hard_ack, |
| 41 | rxrpc_seq_t *_top) | 41 | rxrpc_seq_t *_top, |
| 42 | u8 reason) | ||
| 42 | { | 43 | { |
| 43 | rxrpc_serial_t serial; | 44 | rxrpc_serial_t serial; |
| 44 | rxrpc_seq_t hard_ack, top, seq; | 45 | rxrpc_seq_t hard_ack, top, seq; |
| @@ -58,10 +59,10 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_call *call, | |||
| 58 | pkt->ack.firstPacket = htonl(hard_ack + 1); | 59 | pkt->ack.firstPacket = htonl(hard_ack + 1); |
| 59 | pkt->ack.previousPacket = htonl(call->ackr_prev_seq); | 60 | pkt->ack.previousPacket = htonl(call->ackr_prev_seq); |
| 60 | pkt->ack.serial = htonl(serial); | 61 | pkt->ack.serial = htonl(serial); |
| 61 | pkt->ack.reason = call->ackr_reason; | 62 | pkt->ack.reason = reason; |
| 62 | pkt->ack.nAcks = top - hard_ack; | 63 | pkt->ack.nAcks = top - hard_ack; |
| 63 | 64 | ||
| 64 | if (pkt->ack.reason == RXRPC_ACK_PING) | 65 | if (reason == RXRPC_ACK_PING) |
| 65 | pkt->whdr.flags |= RXRPC_REQUEST_ACK; | 66 | pkt->whdr.flags |= RXRPC_REQUEST_ACK; |
| 66 | 67 | ||
| 67 | if (after(top, hard_ack)) { | 68 | if (after(top, hard_ack)) { |
| @@ -91,22 +92,19 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_call *call, | |||
| 91 | } | 92 | } |
| 92 | 93 | ||
| 93 | /* | 94 | /* |
| 94 | * Send an ACK or ABORT call packet. | 95 | * Send an ACK call packet. |
| 95 | */ | 96 | */ |
| 96 | int rxrpc_send_call_packet(struct rxrpc_call *call, u8 type) | 97 | int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping) |
| 97 | { | 98 | { |
| 98 | struct rxrpc_connection *conn = NULL; | 99 | struct rxrpc_connection *conn = NULL; |
| 99 | struct rxrpc_pkt_buffer *pkt; | 100 | struct rxrpc_ack_buffer *pkt; |
| 100 | struct msghdr msg; | 101 | struct msghdr msg; |
| 101 | struct kvec iov[2]; | 102 | struct kvec iov[2]; |
| 102 | rxrpc_serial_t serial; | 103 | rxrpc_serial_t serial; |
| 103 | rxrpc_seq_t hard_ack, top; | 104 | rxrpc_seq_t hard_ack, top; |
| 104 | size_t len, n; | 105 | size_t len, n; |
| 105 | bool ping = false; | 106 | int ret; |
| 106 | int ioc, ret; | 107 | u8 reason; |
| 107 | u32 abort_code; | ||
| 108 | |||
| 109 | _enter("%u,%s", call->debug_id, rxrpc_pkts[type]); | ||
| 110 | 108 | ||
| 111 | spin_lock_bh(&call->lock); | 109 | spin_lock_bh(&call->lock); |
| 112 | if (call->conn) | 110 | if (call->conn) |
| @@ -131,68 +129,44 @@ int rxrpc_send_call_packet(struct rxrpc_call *call, u8 type) | |||
| 131 | pkt->whdr.cid = htonl(call->cid); | 129 | pkt->whdr.cid = htonl(call->cid); |
| 132 | pkt->whdr.callNumber = htonl(call->call_id); | 130 | pkt->whdr.callNumber = htonl(call->call_id); |
| 133 | pkt->whdr.seq = 0; | 131 | pkt->whdr.seq = 0; |
| 134 | pkt->whdr.type = type; | 132 | pkt->whdr.type = RXRPC_PACKET_TYPE_ACK; |
| 135 | pkt->whdr.flags = conn->out_clientflag; | 133 | pkt->whdr.flags = RXRPC_SLOW_START_OK | conn->out_clientflag; |
| 136 | pkt->whdr.userStatus = 0; | 134 | pkt->whdr.userStatus = 0; |
| 137 | pkt->whdr.securityIndex = call->security_ix; | 135 | pkt->whdr.securityIndex = call->security_ix; |
| 138 | pkt->whdr._rsvd = 0; | 136 | pkt->whdr._rsvd = 0; |
| 139 | pkt->whdr.serviceId = htons(call->service_id); | 137 | pkt->whdr.serviceId = htons(call->service_id); |
| 140 | 138 | ||
| 141 | iov[0].iov_base = pkt; | 139 | spin_lock_bh(&call->lock); |
| 142 | iov[0].iov_len = sizeof(pkt->whdr); | 140 | if (ping) { |
| 143 | len = sizeof(pkt->whdr); | 141 | reason = RXRPC_ACK_PING; |
| 144 | 142 | } else { | |
| 145 | switch (type) { | 143 | reason = call->ackr_reason; |
| 146 | case RXRPC_PACKET_TYPE_ACK: | ||
| 147 | spin_lock_bh(&call->lock); | ||
| 148 | if (!call->ackr_reason) { | 144 | if (!call->ackr_reason) { |
| 149 | spin_unlock_bh(&call->lock); | 145 | spin_unlock_bh(&call->lock); |
| 150 | ret = 0; | 146 | ret = 0; |
| 151 | goto out; | 147 | goto out; |
| 152 | } | 148 | } |
| 153 | ping = (call->ackr_reason == RXRPC_ACK_PING); | ||
| 154 | n = rxrpc_fill_out_ack(call, pkt, &hard_ack, &top); | ||
| 155 | call->ackr_reason = 0; | 149 | call->ackr_reason = 0; |
| 150 | } | ||
| 151 | n = rxrpc_fill_out_ack(call, pkt, &hard_ack, &top, reason); | ||
| 156 | 152 | ||
| 157 | spin_unlock_bh(&call->lock); | 153 | spin_unlock_bh(&call->lock); |
| 158 | |||
| 159 | |||
| 160 | pkt->whdr.flags |= RXRPC_SLOW_START_OK; | ||
| 161 | |||
| 162 | iov[0].iov_len += sizeof(pkt->ack) + n; | ||
| 163 | iov[1].iov_base = &pkt->ackinfo; | ||
| 164 | iov[1].iov_len = sizeof(pkt->ackinfo); | ||
| 165 | len += sizeof(pkt->ack) + n + sizeof(pkt->ackinfo); | ||
| 166 | ioc = 2; | ||
| 167 | break; | ||
| 168 | |||
| 169 | case RXRPC_PACKET_TYPE_ABORT: | ||
| 170 | abort_code = call->abort_code; | ||
| 171 | pkt->abort_code = htonl(abort_code); | ||
| 172 | iov[0].iov_len += sizeof(pkt->abort_code); | ||
| 173 | len += sizeof(pkt->abort_code); | ||
| 174 | ioc = 1; | ||
| 175 | break; | ||
| 176 | 154 | ||
| 177 | default: | 155 | iov[0].iov_base = pkt; |
| 178 | BUG(); | 156 | iov[0].iov_len = sizeof(pkt->whdr) + sizeof(pkt->ack) + n; |
| 179 | ret = -ENOANO; | 157 | iov[1].iov_base = &pkt->ackinfo; |
| 180 | goto out; | 158 | iov[1].iov_len = sizeof(pkt->ackinfo); |
| 181 | } | 159 | len = iov[0].iov_len + iov[1].iov_len; |
| 182 | 160 | ||
| 183 | serial = atomic_inc_return(&conn->serial); | 161 | serial = atomic_inc_return(&conn->serial); |
| 184 | pkt->whdr.serial = htonl(serial); | 162 | pkt->whdr.serial = htonl(serial); |
| 185 | switch (type) { | 163 | trace_rxrpc_tx_ack(call, serial, |
| 186 | case RXRPC_PACKET_TYPE_ACK: | 164 | ntohl(pkt->ack.firstPacket), |
| 187 | trace_rxrpc_tx_ack(call, serial, | 165 | ntohl(pkt->ack.serial), |
| 188 | ntohl(pkt->ack.firstPacket), | 166 | pkt->ack.reason, pkt->ack.nAcks); |
| 189 | ntohl(pkt->ack.serial), | ||
| 190 | pkt->ack.reason, pkt->ack.nAcks); | ||
| 191 | break; | ||
| 192 | } | ||
| 193 | 167 | ||
| 194 | if (ping) { | 168 | if (ping) { |
| 195 | call->ackr_ping = serial; | 169 | call->ping_serial = serial; |
| 196 | smp_wmb(); | 170 | smp_wmb(); |
| 197 | /* We need to stick a time in before we send the packet in case | 171 | /* We need to stick a time in before we send the packet in case |
| 198 | * the reply gets back before kernel_sendmsg() completes - but | 172 | * the reply gets back before kernel_sendmsg() completes - but |
| @@ -201,19 +175,19 @@ int rxrpc_send_call_packet(struct rxrpc_call *call, u8 type) | |||
| 201 | * the packet transmission is more likely to happen towards the | 175 | * the packet transmission is more likely to happen towards the |
| 202 | * end of the kernel_sendmsg() call. | 176 | * end of the kernel_sendmsg() call. |
| 203 | */ | 177 | */ |
| 204 | call->ackr_ping_time = ktime_get_real(); | 178 | call->ping_time = ktime_get_real(); |
| 205 | set_bit(RXRPC_CALL_PINGING, &call->flags); | 179 | set_bit(RXRPC_CALL_PINGING, &call->flags); |
| 206 | trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial); | 180 | trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial); |
| 207 | } | 181 | } |
| 208 | ret = kernel_sendmsg(conn->params.local->socket, | 182 | |
| 209 | &msg, iov, ioc, len); | 183 | ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); |
| 210 | if (ping) | 184 | if (ping) |
| 211 | call->ackr_ping_time = ktime_get_real(); | 185 | call->ping_time = ktime_get_real(); |
| 212 | 186 | ||
| 213 | if (type == RXRPC_PACKET_TYPE_ACK && | 187 | if (call->state < RXRPC_CALL_COMPLETE) { |
| 214 | call->state < RXRPC_CALL_COMPLETE) { | ||
| 215 | if (ret < 0) { | 188 | if (ret < 0) { |
| 216 | clear_bit(RXRPC_CALL_PINGING, &call->flags); | 189 | if (ping) |
| 190 | clear_bit(RXRPC_CALL_PINGING, &call->flags); | ||
| 217 | rxrpc_propose_ACK(call, pkt->ack.reason, | 191 | rxrpc_propose_ACK(call, pkt->ack.reason, |
| 218 | ntohs(pkt->ack.maxSkew), | 192 | ntohs(pkt->ack.maxSkew), |
| 219 | ntohl(pkt->ack.serial), | 193 | ntohl(pkt->ack.serial), |
| @@ -236,6 +210,56 @@ out: | |||
| 236 | } | 210 | } |
| 237 | 211 | ||
| 238 | /* | 212 | /* |
| 213 | * Send an ABORT call packet. | ||
| 214 | */ | ||
| 215 | int rxrpc_send_abort_packet(struct rxrpc_call *call) | ||
| 216 | { | ||
| 217 | struct rxrpc_connection *conn = NULL; | ||
| 218 | struct rxrpc_abort_buffer pkt; | ||
| 219 | struct msghdr msg; | ||
| 220 | struct kvec iov[1]; | ||
| 221 | rxrpc_serial_t serial; | ||
| 222 | int ret; | ||
| 223 | |||
| 224 | spin_lock_bh(&call->lock); | ||
| 225 | if (call->conn) | ||
| 226 | conn = rxrpc_get_connection_maybe(call->conn); | ||
| 227 | spin_unlock_bh(&call->lock); | ||
| 228 | if (!conn) | ||
| 229 | return -ECONNRESET; | ||
| 230 | |||
| 231 | msg.msg_name = &call->peer->srx.transport; | ||
| 232 | msg.msg_namelen = call->peer->srx.transport_len; | ||
| 233 | msg.msg_control = NULL; | ||
| 234 | msg.msg_controllen = 0; | ||
| 235 | msg.msg_flags = 0; | ||
| 236 | |||
| 237 | pkt.whdr.epoch = htonl(conn->proto.epoch); | ||
| 238 | pkt.whdr.cid = htonl(call->cid); | ||
| 239 | pkt.whdr.callNumber = htonl(call->call_id); | ||
| 240 | pkt.whdr.seq = 0; | ||
| 241 | pkt.whdr.type = RXRPC_PACKET_TYPE_ABORT; | ||
| 242 | pkt.whdr.flags = conn->out_clientflag; | ||
| 243 | pkt.whdr.userStatus = 0; | ||
| 244 | pkt.whdr.securityIndex = call->security_ix; | ||
| 245 | pkt.whdr._rsvd = 0; | ||
| 246 | pkt.whdr.serviceId = htons(call->service_id); | ||
| 247 | pkt.abort_code = htonl(call->abort_code); | ||
| 248 | |||
| 249 | iov[0].iov_base = &pkt; | ||
| 250 | iov[0].iov_len = sizeof(pkt); | ||
| 251 | |||
| 252 | serial = atomic_inc_return(&conn->serial); | ||
| 253 | pkt.whdr.serial = htonl(serial); | ||
| 254 | |||
| 255 | ret = kernel_sendmsg(conn->params.local->socket, | ||
| 256 | &msg, iov, 1, sizeof(pkt)); | ||
| 257 | |||
| 258 | rxrpc_put_connection(conn); | ||
| 259 | return ret; | ||
| 260 | } | ||
| 261 | |||
| 262 | /* | ||
| 239 | * send a packet through the transport endpoint | 263 | * send a packet through the transport endpoint |
| 240 | */ | 264 | */ |
| 241 | int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, | 265 | int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, |
| @@ -283,11 +307,12 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, | |||
| 283 | /* If our RTT cache needs working on, request an ACK. Also request | 307 | /* If our RTT cache needs working on, request an ACK. Also request |
| 284 | * ACKs if a DATA packet appears to have been lost. | 308 | * ACKs if a DATA packet appears to have been lost. |
| 285 | */ | 309 | */ |
| 286 | if (retrans || | 310 | if (!(sp->hdr.flags & RXRPC_LAST_PACKET) && |
| 287 | call->cong_mode == RXRPC_CALL_SLOW_START || | 311 | (retrans || |
| 288 | (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) || | 312 | call->cong_mode == RXRPC_CALL_SLOW_START || |
| 289 | ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), | 313 | (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) || |
| 290 | ktime_get_real())) | 314 | ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), |
| 315 | ktime_get_real()))) | ||
| 291 | whdr.flags |= RXRPC_REQUEST_ACK; | 316 | whdr.flags |= RXRPC_REQUEST_ACK; |
| 292 | 317 | ||
| 293 | if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) { | 318 | if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) { |
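
Besides splitting the ACK and ABORT transmitters, output.c tightens the REQUEST_ACK heuristic: an ACK may be solicited for RTT probing, slow start or suspected loss, but never on the final DATA packet. A sketch of that policy pulled out as a standalone predicate, using the diff's condition with illustrative types:

    #include <stdbool.h>
    #include <stdio.h>

    struct tx_state {
            bool last_packet;       /* RXRPC_LAST_PACKET set? */
            bool retrans;           /* is this a retransmission? */
            bool slow_start;        /* congestion mode == slow start */
            int  rtt_usage;         /* RTT samples collected so far */
            unsigned int seq;       /* packet sequence number */
            long rtt_last_req, now; /* ms timestamps */
    };

    static bool want_request_ack(const struct tx_state *s)
    {
            if (s->last_packet)
                    return false;   /* never solicit an ACK for the tail */
            return s->retrans ||
                   s->slow_start ||
                   (s->rtt_usage < 3 && (s->seq & 1)) ||
                   s->rtt_last_req + 1000 < s->now;
    }

    int main(void)
    {
            struct tx_state s = {
                    .last_packet = true, .retrans = true,
                    .rtt_last_req = 0, .now = 5000,
            };

            printf("%d\n", want_request_ack(&s));   /* 0: last packet wins */
            s.last_packet = false;
            printf("%d\n", want_request_ack(&s));   /* 1 */
            return 0;
    }
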
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index f05ea0a88076..c29362d50a92 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c | |||
| @@ -143,7 +143,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial) | |||
| 143 | if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) { | 143 | if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) { |
| 144 | rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false, | 144 | rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false, |
| 145 | rxrpc_propose_ack_terminal_ack); | 145 | rxrpc_propose_ack_terminal_ack); |
| 146 | rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK); | 146 | rxrpc_send_ack_packet(call, false); |
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | write_lock_bh(&call->state_lock); | 149 | write_lock_bh(&call->state_lock); |
| @@ -151,17 +151,21 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial) | |||
| 151 | switch (call->state) { | 151 | switch (call->state) { |
| 152 | case RXRPC_CALL_CLIENT_RECV_REPLY: | 152 | case RXRPC_CALL_CLIENT_RECV_REPLY: |
| 153 | __rxrpc_call_completed(call); | 153 | __rxrpc_call_completed(call); |
| 154 | write_unlock_bh(&call->state_lock); | ||
| 154 | break; | 155 | break; |
| 155 | 156 | ||
| 156 | case RXRPC_CALL_SERVER_RECV_REQUEST: | 157 | case RXRPC_CALL_SERVER_RECV_REQUEST: |
| 157 | call->tx_phase = true; | 158 | call->tx_phase = true; |
| 158 | call->state = RXRPC_CALL_SERVER_ACK_REQUEST; | 159 | call->state = RXRPC_CALL_SERVER_ACK_REQUEST; |
| 160 | call->ack_at = call->expire_at; | ||
| 161 | write_unlock_bh(&call->state_lock); | ||
| 162 | rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true, | ||
| 163 | rxrpc_propose_ack_processing_op); | ||
| 159 | break; | 164 | break; |
| 160 | default: | 165 | default: |
| 166 | write_unlock_bh(&call->state_lock); | ||
| 161 | break; | 167 | break; |
| 162 | } | 168 | } |
| 163 | |||
| 164 | write_unlock_bh(&call->state_lock); | ||
| 165 | } | 169 | } |
| 166 | 170 | ||
| 167 | /* | 171 | /* |
| @@ -212,7 +216,7 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) | |||
| 212 | true, false, | 216 | true, false, |
| 213 | rxrpc_propose_ack_rotate_rx); | 217 | rxrpc_propose_ack_rotate_rx); |
| 214 | if (call->ackr_reason) | 218 | if (call->ackr_reason) |
| 215 | rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK); | 219 | rxrpc_send_ack_packet(call, false); |
| 216 | } | 220 | } |
| 217 | } | 221 | } |
| 218 | 222 | ||
| @@ -652,7 +656,7 @@ excess_data: | |||
| 652 | goto out; | 656 | goto out; |
| 653 | call_complete: | 657 | call_complete: |
| 654 | *_abort = call->abort_code; | 658 | *_abort = call->abort_code; |
| 655 | ret = call->error; | 659 | ret = -call->error; |
| 656 | if (call->completion == RXRPC_CALL_SUCCEEDED) { | 660 | if (call->completion == RXRPC_CALL_SUCCEEDED) { |
| 657 | ret = 1; | 661 | ret = 1; |
| 658 | if (size > 0) | 662 | if (size > 0) |
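
The recvmsg.c sign fix reflects that call->error appears to hold a positive errno value (elsewhere in this series rxrpc_abort_call() is passed ECONNRESET, ESHUTDOWN and friends unnegated), while the function's callers expect the usual negative-errno return convention. A minimal sketch of the convention; the struct is illustrative:

    #include <errno.h>
    #include <stdio.h>

    struct call { int error; };     /* positive errno, 0 if none */

    static int call_result(const struct call *call)
    {
            return -call->error;    /* 0 stays 0; errors go negative */
    }

    int main(void)
    {
            struct call ok   = { .error = 0 };
            struct call dead = { .error = ECONNRESET };

            printf("%d %d\n", call_result(&ok), call_result(&dead));
            return 0;
    }
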
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 627abed5f999..4374e7b9c7bf 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c | |||
| @@ -381,7 +381,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, | |||
| 381 | return 0; | 381 | return 0; |
| 382 | 382 | ||
| 383 | protocol_error: | 383 | protocol_error: |
| 384 | rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT); | 384 | rxrpc_send_abort_packet(call); |
| 385 | _leave(" = -EPROTO"); | 385 | _leave(" = -EPROTO"); |
| 386 | return -EPROTO; | 386 | return -EPROTO; |
| 387 | 387 | ||
| @@ -471,7 +471,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, | |||
| 471 | return 0; | 471 | return 0; |
| 472 | 472 | ||
| 473 | protocol_error: | 473 | protocol_error: |
| 474 | rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT); | 474 | rxrpc_send_abort_packet(call); |
| 475 | _leave(" = -EPROTO"); | 475 | _leave(" = -EPROTO"); |
| 476 | return -EPROTO; | 476 | return -EPROTO; |
| 477 | 477 | ||
| @@ -523,7 +523,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb, | |||
| 523 | 523 | ||
| 524 | if (cksum != expected_cksum) { | 524 | if (cksum != expected_cksum) { |
| 525 | rxrpc_abort_call("VCK", call, seq, RXKADSEALEDINCON, EPROTO); | 525 | rxrpc_abort_call("VCK", call, seq, RXKADSEALEDINCON, EPROTO); |
| 526 | rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT); | 526 | rxrpc_send_abort_packet(call); |
| 527 | _leave(" = -EPROTO [csum failed]"); | 527 | _leave(" = -EPROTO [csum failed]"); |
| 528 | return -EPROTO; | 528 | return -EPROTO; |
| 529 | } | 529 | } |
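
The rxkad.c hunks are mechanical: every protocol_error path now calls the dedicated rxrpc_send_abort_packet(). The idiom itself is worth spelling out: on a failed integrity check, record the wire abort code, transmit an ABORT, and return -EPROTO locally. A rough user-space sketch with stand-in names:

    #include <errno.h>
    #include <stdio.h>

    struct call { int abort_code; };

    static void send_abort_packet(struct call *call)
    {
            printf("tx ABORT, code %d\n", call->abort_code);
    }

    static int verify_packet(struct call *call, unsigned int cksum,
                             unsigned int expected)
    {
            if (cksum != expected) {
                    call->abort_code = 1;   /* stand-in for RXKADSEALEDINCON */
                    send_abort_packet(call);
                    return -EPROTO;         /* local caller sees -EPROTO */
            }
            return 0;
    }

    int main(void)
    {
            struct call c = { 0 };

            printf("%d\n", verify_packet(&c, 1, 2));    /* negative errno */
            return 0;
    }
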
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 3322543d460a..b214a4d4a641 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c | |||
| @@ -130,6 +130,11 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb, | |||
| 130 | break; | 130 | break; |
| 131 | case RXRPC_CALL_SERVER_ACK_REQUEST: | 131 | case RXRPC_CALL_SERVER_ACK_REQUEST: |
| 132 | call->state = RXRPC_CALL_SERVER_SEND_REPLY; | 132 | call->state = RXRPC_CALL_SERVER_SEND_REPLY; |
| 133 | call->ack_at = call->expire_at; | ||
| 134 | if (call->ackr_reason == RXRPC_ACK_DELAY) | ||
| 135 | call->ackr_reason = 0; | ||
| 136 | __rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply, | ||
| 137 | ktime_get_real()); | ||
| 133 | if (!last) | 138 | if (!last) |
| 134 | break; | 139 | break; |
| 135 | case RXRPC_CALL_SERVER_SEND_REPLY: | 140 | case RXRPC_CALL_SERVER_SEND_REPLY: |
| @@ -197,7 +202,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, | |||
| 197 | do { | 202 | do { |
| 198 | /* Check to see if there's a ping ACK to reply to. */ | 203 | /* Check to see if there's a ping ACK to reply to. */ |
| 199 | if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE) | 204 | if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE) |
| 200 | rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK); | 205 | rxrpc_send_ack_packet(call, false); |
| 201 | 206 | ||
| 202 | if (!skb) { | 207 | if (!skb) { |
| 203 | size_t size, chunk, max, space; | 208 | size_t size, chunk, max, space; |
| @@ -514,8 +519,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) | |||
| 514 | } else if (cmd == RXRPC_CMD_SEND_ABORT) { | 519 | } else if (cmd == RXRPC_CMD_SEND_ABORT) { |
| 515 | ret = 0; | 520 | ret = 0; |
| 516 | if (rxrpc_abort_call("CMD", call, 0, abort_code, ECONNABORTED)) | 521 | if (rxrpc_abort_call("CMD", call, 0, abort_code, ECONNABORTED)) |
| 517 | ret = rxrpc_send_call_packet(call, | 522 | ret = rxrpc_send_abort_packet(call); |
| 518 | RXRPC_PACKET_TYPE_ABORT); | ||
| 519 | } else if (cmd != RXRPC_CMD_SEND_DATA) { | 523 | } else if (cmd != RXRPC_CMD_SEND_DATA) { |
| 520 | ret = -EINVAL; | 524 | ret = -EINVAL; |
| 521 | } else if (rxrpc_is_client_call(call) && | 525 | } else if (rxrpc_is_client_call(call) && |
| @@ -597,7 +601,7 @@ void rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call, | |||
| 597 | lock_sock(sock->sk); | 601 | lock_sock(sock->sk); |
| 598 | 602 | ||
| 599 | if (rxrpc_abort_call(why, call, 0, abort_code, error)) | 603 | if (rxrpc_abort_call(why, call, 0, abort_code, error)) |
| 600 | rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT); | 604 | rxrpc_send_abort_packet(call); |
| 601 | 605 | ||
| 602 | release_sock(sock->sk); | 606 | release_sock(sock->sk); |
| 603 | _leave(""); | 607 | _leave(""); |
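
sendmsg.c also shows the calling convention for the new abort transmitter: rxrpc_abort_call() returns true only for the caller that actually completes the call, and only that caller sends the ABORT packet, so racing aborters cannot double-transmit. A sketch of that win-the-transition pattern with C11 atomics; the kernel serialises with its state lock rather than a bare CAS, so this is only the shape of the idea:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum call_state { CALL_ACTIVE, CALL_COMPLETE };

    struct call {
            _Atomic int state;
            int abort_code;
    };

    static bool abort_call(struct call *call, int abort_code)
    {
            int expected = CALL_ACTIVE;

            /* Only one caller wins the ACTIVE -> COMPLETE transition. */
            if (!atomic_compare_exchange_strong(&call->state, &expected,
                                                CALL_COMPLETE))
                    return false;
            call->abort_code = abort_code;
            return true;
    }

    int main(void)
    {
            struct call c = { .state = CALL_ACTIVE };

            if (abort_call(&c, 1))
                    printf("winner sends ABORT %d\n", c.abort_code);
            if (!abort_call(&c, 2))
                    printf("second abort is a no-op\n");
            return 0;
    }
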
