107 files changed, 1301 insertions, 845 deletions
diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
index 2c6be0377f55..d2ea4605d078 100644
--- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
@@ -86,6 +86,7 @@ General Properties: | |||
86 | 86 | ||
87 | Clock Properties: | 87 | Clock Properties: |
88 | 88 | ||
89 | - fsl,cksel Timer reference clock source. | ||
89 | - fsl,tclk-period Timer reference clock period in nanoseconds. | 90 | - fsl,tclk-period Timer reference clock period in nanoseconds. |
90 | - fsl,tmr-prsc Prescaler, divides the output clock. | 91 | - fsl,tmr-prsc Prescaler, divides the output clock. |
91 | - fsl,tmr-add Frequency compensation value. | 92 | - fsl,tmr-add Frequency compensation value. |
@@ -97,7 +98,7 @@ Clock Properties: | |||
97 | clock. You must choose these carefully for the clock to work right. | 98 | clock. You must choose these carefully for the clock to work right. |
98 | Here is how to figure good values: | 99 | Here is how to figure good values: |
99 | 100 | ||
100 | TimerOsc = system clock MHz | 101 | TimerOsc = selected reference clock MHz |
101 | tclk_period = desired clock period nanoseconds | 102 | tclk_period = desired clock period nanoseconds |
102 | NominalFreq = 1000 / tclk_period MHz | 103 | NominalFreq = 1000 / tclk_period MHz |
103 | FreqDivRatio = TimerOsc / NominalFreq (must be greater than 1.0) | 104 | FreqDivRatio = TimerOsc / NominalFreq (must be greater than 1.0) |
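To make the relations above concrete, here is a small user-space sketch that evaluates them for one assumed reference clock; the 166.67 MHz figure and 10 ns period are illustrative values only, not taken from this patch:

    /* Illustrative only: evaluates the quoted relations for an assumed
     * selected reference clock of 166.67 MHz and a desired 10 ns period. */
    #include <stdio.h>

    int main(void)
    {
            double timer_osc = 166.67;        /* selected reference clock, MHz (assumed) */
            double tclk_period = 10.0;        /* desired clock period, ns */
            double nominal_freq = 1000.0 / tclk_period;      /* MHz */
            double freq_div_ratio = timer_osc / nominal_freq;

            printf("NominalFreq  = %.2f MHz\n", nominal_freq);
            printf("FreqDivRatio = %.4f (must be greater than 1.0)\n", freq_div_ratio);
            return 0;
    }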
@@ -114,6 +115,20 @@ Clock Properties: | |||
114 | Pulse Per Second (PPS) signal, since this will be offered to the PPS | 115 | Pulse Per Second (PPS) signal, since this will be offered to the PPS |
115 | subsystem to synchronize the Linux clock. | 116 | subsystem to synchronize the Linux clock. |
116 | 117 | ||
118 | The reference clock source is determined by the value held in the | ||
119 | CKSEL bits of the TMR_CTRL register. The "fsl,cksel" property holds | ||
120 | the value that is written directly into those bits; therefore, | ||
121 | according to the reference manual, the following clock sources can be used: | ||
122 | |||
123 | <0> - external high precision timer reference clock (TSEC_TMR_CLK | ||
124 | input is used for this purpose); | ||
125 | <1> - eTSEC system clock; | ||
126 | <2> - eTSEC1 transmit clock; | ||
127 | <3> - RTC clock input. | ||
128 | |||
129 | When this property is not used, the eTSEC system clock serves as | ||
130 | the IEEE 1588 timer reference clock. | ||
131 | |||
117 | Example: | 132 | Example: |
118 | 133 | ||
119 | ptp_clock@24E00 { | 134 | ptp_clock@24E00 { |
@@ -121,6 +136,7 @@ Example: | |||
121 | reg = <0x24E00 0xB0>; | 136 | reg = <0x24E00 0xB0>; |
122 | interrupts = <12 0x8 13 0x8>; | 137 | interrupts = <12 0x8 13 0x8>; |
123 | interrupt-parent = < &ipic >; | 138 | interrupt-parent = < &ipic >; |
139 | fsl,cksel = <1>; | ||
124 | fsl,tclk-period = <10>; | 140 | fsl,tclk-period = <10>; |
125 | fsl,tmr-prsc = <100>; | 141 | fsl,tmr-prsc = <100>; |
126 | fsl,tmr-add = <0x999999A4>; | 142 | fsl,tmr-add = <0x999999A4>; |
diff --git a/MAINTAINERS b/MAINTAINERS
index d2d2f457dcd0..744a23954a34 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9378,6 +9378,7 @@ F: arch/arm64/include/asm/xen/ | |||
9378 | 9378 | ||
9379 | XEN NETWORK BACKEND DRIVER | 9379 | XEN NETWORK BACKEND DRIVER |
9380 | M: Ian Campbell <ian.campbell@citrix.com> | 9380 | M: Ian Campbell <ian.campbell@citrix.com> |
9381 | M: Wei Liu <wei.liu2@citrix.com> | ||
9381 | L: xen-devel@lists.xenproject.org (moderated for non-subscribers) | 9382 | L: xen-devel@lists.xenproject.org (moderated for non-subscribers) |
9382 | L: netdev@vger.kernel.org | 9383 | L: netdev@vger.kernel.org |
9383 | S: Supported | 9384 | S: Supported |
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index c9fd6943ce45..50329d1057ed 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -210,25 +210,6 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc) | |||
210 | } | 210 | } |
211 | } | 211 | } |
212 | 212 | ||
213 | static void bcma_core_pci_power_save(struct bcma_drv_pci *pc, bool up) | ||
214 | { | ||
215 | u16 data; | ||
216 | |||
217 | if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) { | ||
218 | data = up ? 0x74 : 0x7C; | ||
219 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
220 | BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64); | ||
221 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
222 | BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data); | ||
223 | } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) { | ||
224 | data = up ? 0x75 : 0x7D; | ||
225 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
226 | BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65); | ||
227 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
228 | BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data); | ||
229 | } | ||
230 | } | ||
231 | |||
232 | /************************************************** | 213 | /************************************************** |
233 | * Init. | 214 | * Init. |
234 | **************************************************/ | 215 | **************************************************/ |
@@ -255,6 +236,32 @@ void bcma_core_pci_init(struct bcma_drv_pci *pc) | |||
255 | bcma_core_pci_clientmode_init(pc); | 236 | bcma_core_pci_clientmode_init(pc); |
256 | } | 237 | } |
257 | 238 | ||
239 | void bcma_core_pci_power_save(struct bcma_bus *bus, bool up) | ||
240 | { | ||
241 | struct bcma_drv_pci *pc; | ||
242 | u16 data; | ||
243 | |||
244 | if (bus->hosttype != BCMA_HOSTTYPE_PCI) | ||
245 | return; | ||
246 | |||
247 | pc = &bus->drv_pci[0]; | ||
248 | |||
249 | if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) { | ||
250 | data = up ? 0x74 : 0x7C; | ||
251 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
252 | BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64); | ||
253 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
254 | BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data); | ||
255 | } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) { | ||
256 | data = up ? 0x75 : 0x7D; | ||
257 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
258 | BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65); | ||
259 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
260 | BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data); | ||
261 | } | ||
262 | } | ||
263 | EXPORT_SYMBOL_GPL(bcma_core_pci_power_save); | ||
264 | |||
258 | int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, | 265 | int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, |
259 | bool enable) | 266 | bool enable) |
260 | { | 267 | { |
@@ -310,8 +317,6 @@ void bcma_core_pci_up(struct bcma_bus *bus) | |||
310 | 317 | ||
311 | pc = &bus->drv_pci[0]; | 318 | pc = &bus->drv_pci[0]; |
312 | 319 | ||
313 | bcma_core_pci_power_save(pc, true); | ||
314 | |||
315 | bcma_core_pci_extend_L1timer(pc, true); | 320 | bcma_core_pci_extend_L1timer(pc, true); |
316 | } | 321 | } |
317 | EXPORT_SYMBOL_GPL(bcma_core_pci_up); | 322 | EXPORT_SYMBOL_GPL(bcma_core_pci_up); |
@@ -326,7 +331,5 @@ void bcma_core_pci_down(struct bcma_bus *bus) | |||
326 | pc = &bus->drv_pci[0]; | 331 | pc = &bus->drv_pci[0]; |
327 | 332 | ||
328 | bcma_core_pci_extend_L1timer(pc, false); | 333 | bcma_core_pci_extend_L1timer(pc, false); |
329 | |||
330 | bcma_core_pci_power_save(pc, false); | ||
331 | } | 334 | } |
332 | EXPORT_SYMBOL_GPL(bcma_core_pci_down); | 335 | EXPORT_SYMBOL_GPL(bcma_core_pci_down); |
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a12b923bbaca..0a327f4154a2 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -85,6 +85,7 @@ static struct usb_device_id ath3k_table[] = { | |||
85 | { USB_DEVICE(0x04CA, 0x3008) }, | 85 | { USB_DEVICE(0x04CA, 0x3008) }, |
86 | { USB_DEVICE(0x13d3, 0x3362) }, | 86 | { USB_DEVICE(0x13d3, 0x3362) }, |
87 | { USB_DEVICE(0x0CF3, 0xE004) }, | 87 | { USB_DEVICE(0x0CF3, 0xE004) }, |
88 | { USB_DEVICE(0x0CF3, 0xE005) }, | ||
88 | { USB_DEVICE(0x0930, 0x0219) }, | 89 | { USB_DEVICE(0x0930, 0x0219) }, |
89 | { USB_DEVICE(0x0489, 0xe057) }, | 90 | { USB_DEVICE(0x0489, 0xe057) }, |
90 | { USB_DEVICE(0x13d3, 0x3393) }, | 91 | { USB_DEVICE(0x13d3, 0x3393) }, |
@@ -126,6 +127,7 @@ static struct usb_device_id ath3k_blist_tbl[] = { | |||
126 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, | 127 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, |
127 | { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, | 128 | { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, |
128 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, | 129 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, |
130 | { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, | ||
129 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, | 131 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, |
130 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, | 132 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, |
131 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, | 133 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8e16f0af6358..f3dfc0a88fdc 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,6 +102,7 @@ static struct usb_device_id btusb_table[] = { | |||
102 | 102 | ||
103 | /* Broadcom BCM20702A0 */ | 103 | /* Broadcom BCM20702A0 */ |
104 | { USB_DEVICE(0x0b05, 0x17b5) }, | 104 | { USB_DEVICE(0x0b05, 0x17b5) }, |
105 | { USB_DEVICE(0x0b05, 0x17cb) }, | ||
105 | { USB_DEVICE(0x04ca, 0x2003) }, | 106 | { USB_DEVICE(0x04ca, 0x2003) }, |
106 | { USB_DEVICE(0x0489, 0xe042) }, | 107 | { USB_DEVICE(0x0489, 0xe042) }, |
107 | { USB_DEVICE(0x413c, 0x8197) }, | 108 | { USB_DEVICE(0x413c, 0x8197) }, |
@@ -112,6 +113,9 @@ static struct usb_device_id btusb_table[] = { | |||
112 | /*Broadcom devices with vendor specific id */ | 113 | /*Broadcom devices with vendor specific id */ |
113 | { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, | 114 | { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, |
114 | 115 | ||
116 | /* Belkin F8065bf - Broadcom based */ | ||
117 | { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) }, | ||
118 | |||
115 | { } /* Terminating entry */ | 119 | { } /* Terminating entry */ |
116 | }; | 120 | }; |
117 | 121 | ||
@@ -148,6 +152,7 @@ static struct usb_device_id blacklist_table[] = { | |||
148 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, | 152 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, |
149 | { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, | 153 | { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, |
150 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, | 154 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, |
155 | { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, | ||
151 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, | 156 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, |
152 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, | 157 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, |
153 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, | 158 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 55bbb8b8200c..e883bfe2e727 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1724,6 +1724,7 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
1724 | struct bonding *bond = netdev_priv(bond_dev); | 1724 | struct bonding *bond = netdev_priv(bond_dev); |
1725 | struct slave *slave, *oldcurrent; | 1725 | struct slave *slave, *oldcurrent; |
1726 | struct sockaddr addr; | 1726 | struct sockaddr addr; |
1727 | int old_flags = bond_dev->flags; | ||
1727 | netdev_features_t old_features = bond_dev->features; | 1728 | netdev_features_t old_features = bond_dev->features; |
1728 | 1729 | ||
1729 | /* slave is not a slave or master is not master of this slave */ | 1730 | /* slave is not a slave or master is not master of this slave */ |
@@ -1855,12 +1856,18 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
1855 | * bond_change_active_slave(..., NULL) | 1856 | * bond_change_active_slave(..., NULL) |
1856 | */ | 1857 | */ |
1857 | if (!USES_PRIMARY(bond->params.mode)) { | 1858 | if (!USES_PRIMARY(bond->params.mode)) { |
1858 | /* unset promiscuity level from slave */ | 1859 | /* unset promiscuity level from slave |
1859 | if (bond_dev->flags & IFF_PROMISC) | 1860 | * NOTE: The NETDEV_CHANGEADDR call above may change the value |
1861 | * of the IFF_PROMISC flag in the bond_dev, but we need the | ||
1862 | * value of that flag before that change, as that was the value | ||
1863 | * when this slave was attached, so we cache at the start of the | ||
1864 | * function and use it here. Same goes for ALLMULTI below | ||
1865 | */ | ||
1866 | if (old_flags & IFF_PROMISC) | ||
1860 | dev_set_promiscuity(slave_dev, -1); | 1867 | dev_set_promiscuity(slave_dev, -1); |
1861 | 1868 | ||
1862 | /* unset allmulti level from slave */ | 1869 | /* unset allmulti level from slave */ |
1863 | if (bond_dev->flags & IFF_ALLMULTI) | 1870 | if (old_flags & IFF_ALLMULTI) |
1864 | dev_set_allmulti(slave_dev, -1); | 1871 | dev_set_allmulti(slave_dev, -1); |
1865 | 1872 | ||
1866 | bond_hw_addr_flush(bond_dev, slave_dev); | 1873 | bond_hw_addr_flush(bond_dev, slave_dev); |
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 71c677e651d7..3f21142138b7 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -702,7 +702,6 @@ static int flexcan_chip_start(struct net_device *dev) | |||
702 | { | 702 | { |
703 | struct flexcan_priv *priv = netdev_priv(dev); | 703 | struct flexcan_priv *priv = netdev_priv(dev); |
704 | struct flexcan_regs __iomem *regs = priv->base; | 704 | struct flexcan_regs __iomem *regs = priv->base; |
705 | unsigned int i; | ||
706 | int err; | 705 | int err; |
707 | u32 reg_mcr, reg_ctrl; | 706 | u32 reg_mcr, reg_ctrl; |
708 | 707 | ||
@@ -772,17 +771,6 @@ static int flexcan_chip_start(struct net_device *dev) | |||
772 | netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); | 771 | netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); |
773 | flexcan_write(reg_ctrl, ®s->ctrl); | 772 | flexcan_write(reg_ctrl, ®s->ctrl); |
774 | 773 | ||
775 | for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) { | ||
776 | flexcan_write(0, ®s->cantxfg[i].can_ctrl); | ||
777 | flexcan_write(0, ®s->cantxfg[i].can_id); | ||
778 | flexcan_write(0, ®s->cantxfg[i].data[0]); | ||
779 | flexcan_write(0, ®s->cantxfg[i].data[1]); | ||
780 | |||
781 | /* put MB into rx queue */ | ||
782 | flexcan_write(FLEXCAN_MB_CNT_CODE(0x4), | ||
783 | ®s->cantxfg[i].can_ctrl); | ||
784 | } | ||
785 | |||
786 | /* acceptance mask/acceptance code (accept everything) */ | 774 | /* acceptance mask/acceptance code (accept everything) */ |
787 | flexcan_write(0x0, ®s->rxgmask); | 775 | flexcan_write(0x0, ®s->rxgmask); |
788 | flexcan_write(0x0, ®s->rx14mask); | 776 | flexcan_write(0x0, ®s->rx14mask); |
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 874188ba06f7..25377e547f9b 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -76,6 +76,10 @@ MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces"); | |||
76 | /* maximum rx buffer len: extended CAN frame with timestamp */ | 76 | /* maximum rx buffer len: extended CAN frame with timestamp */ |
77 | #define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1) | 77 | #define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1) |
78 | 78 | ||
79 | #define SLC_CMD_LEN 1 | ||
80 | #define SLC_SFF_ID_LEN 3 | ||
81 | #define SLC_EFF_ID_LEN 8 | ||
82 | |||
79 | struct slcan { | 83 | struct slcan { |
80 | int magic; | 84 | int magic; |
81 | 85 | ||
@@ -142,47 +146,63 @@ static void slc_bump(struct slcan *sl) | |||
142 | { | 146 | { |
143 | struct sk_buff *skb; | 147 | struct sk_buff *skb; |
144 | struct can_frame cf; | 148 | struct can_frame cf; |
145 | int i, dlc_pos, tmp; | 149 | int i, tmp; |
146 | unsigned long ultmp; | 150 | u32 tmpid; |
147 | char cmd = sl->rbuff[0]; | 151 | char *cmd = sl->rbuff; |
148 | 152 | ||
149 | if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R')) | 153 | cf.can_id = 0; |
154 | |||
155 | switch (*cmd) { | ||
156 | case 'r': | ||
157 | cf.can_id = CAN_RTR_FLAG; | ||
158 | /* fallthrough */ | ||
159 | case 't': | ||
160 | /* store dlc ASCII value and terminate SFF CAN ID string */ | ||
161 | cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN]; | ||
162 | sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0; | ||
163 | /* point to payload data behind the dlc */ | ||
164 | cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1; | ||
165 | break; | ||
166 | case 'R': | ||
167 | cf.can_id = CAN_RTR_FLAG; | ||
168 | /* fallthrough */ | ||
169 | case 'T': | ||
170 | cf.can_id |= CAN_EFF_FLAG; | ||
171 | /* store dlc ASCII value and terminate EFF CAN ID string */ | ||
172 | cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN]; | ||
173 | sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0; | ||
174 | /* point to payload data behind the dlc */ | ||
175 | cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1; | ||
176 | break; | ||
177 | default: | ||
150 | return; | 178 | return; |
179 | } | ||
151 | 180 | ||
152 | if (cmd & 0x20) /* tiny chars 'r' 't' => standard frame format */ | 181 | if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid)) |
153 | dlc_pos = 4; /* dlc position tiiid */ | ||
154 | else | ||
155 | dlc_pos = 9; /* dlc position Tiiiiiiiid */ | ||
156 | |||
157 | if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9'))) | ||
158 | return; | 182 | return; |
159 | 183 | ||
160 | cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */ | 184 | cf.can_id |= tmpid; |
161 | 185 | ||
162 | sl->rbuff[dlc_pos] = 0; /* terminate can_id string */ | 186 | /* get can_dlc from sanitized ASCII value */ |
163 | 187 | if (cf.can_dlc >= '0' && cf.can_dlc < '9') | |
164 | if (kstrtoul(sl->rbuff+1, 16, &ultmp)) | 188 | cf.can_dlc -= '0'; |
189 | else | ||
165 | return; | 190 | return; |
166 | 191 | ||
167 | cf.can_id = ultmp; | ||
168 | |||
169 | if (!(cmd & 0x20)) /* NO tiny chars => extended frame format */ | ||
170 | cf.can_id |= CAN_EFF_FLAG; | ||
171 | |||
172 | if ((cmd | 0x20) == 'r') /* RTR frame */ | ||
173 | cf.can_id |= CAN_RTR_FLAG; | ||
174 | |||
175 | *(u64 *) (&cf.data) = 0; /* clear payload */ | 192 | *(u64 *) (&cf.data) = 0; /* clear payload */ |
176 | 193 | ||
177 | for (i = 0, dlc_pos++; i < cf.can_dlc; i++) { | 194 | /* RTR frames may have a dlc > 0 but they never have any data bytes */ |
178 | tmp = hex_to_bin(sl->rbuff[dlc_pos++]); | 195 | if (!(cf.can_id & CAN_RTR_FLAG)) { |
179 | if (tmp < 0) | 196 | for (i = 0; i < cf.can_dlc; i++) { |
180 | return; | 197 | tmp = hex_to_bin(*cmd++); |
181 | cf.data[i] = (tmp << 4); | 198 | if (tmp < 0) |
182 | tmp = hex_to_bin(sl->rbuff[dlc_pos++]); | 199 | return; |
183 | if (tmp < 0) | 200 | cf.data[i] = (tmp << 4); |
184 | return; | 201 | tmp = hex_to_bin(*cmd++); |
185 | cf.data[i] |= tmp; | 202 | if (tmp < 0) |
203 | return; | ||
204 | cf.data[i] |= tmp; | ||
205 | } | ||
186 | } | 206 | } |
187 | 207 | ||
188 | skb = dev_alloc_skb(sizeof(struct can_frame) + | 208 | skb = dev_alloc_skb(sizeof(struct can_frame) + |
@@ -209,7 +229,6 @@ static void slc_bump(struct slcan *sl) | |||
209 | /* parse tty input stream */ | 229 | /* parse tty input stream */ |
210 | static void slcan_unesc(struct slcan *sl, unsigned char s) | 230 | static void slcan_unesc(struct slcan *sl, unsigned char s) |
211 | { | 231 | { |
212 | |||
213 | if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ | 232 | if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ |
214 | if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && | 233 | if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && |
215 | (sl->rcount > 4)) { | 234 | (sl->rcount > 4)) { |
@@ -236,27 +255,46 @@ static void slcan_unesc(struct slcan *sl, unsigned char s) | |||
236 | /* Encapsulate one can_frame and stuff into a TTY queue. */ | 255 | /* Encapsulate one can_frame and stuff into a TTY queue. */ |
237 | static void slc_encaps(struct slcan *sl, struct can_frame *cf) | 256 | static void slc_encaps(struct slcan *sl, struct can_frame *cf) |
238 | { | 257 | { |
239 | int actual, idx, i; | 258 | int actual, i; |
240 | char cmd; | 259 | unsigned char *pos; |
260 | unsigned char *endpos; | ||
261 | canid_t id = cf->can_id; | ||
262 | |||
263 | pos = sl->xbuff; | ||
241 | 264 | ||
242 | if (cf->can_id & CAN_RTR_FLAG) | 265 | if (cf->can_id & CAN_RTR_FLAG) |
243 | cmd = 'R'; /* becomes 'r' in standard frame format */ | 266 | *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */ |
244 | else | 267 | else |
245 | cmd = 'T'; /* becomes 't' in standard frame format */ | 268 | *pos = 'T'; /* becomes 't' in standard frame format (SFF) */ |
246 | 269 | ||
247 | if (cf->can_id & CAN_EFF_FLAG) | 270 | /* determine number of chars for the CAN-identifier */ |
248 | sprintf(sl->xbuff, "%c%08X%d", cmd, | 271 | if (cf->can_id & CAN_EFF_FLAG) { |
249 | cf->can_id & CAN_EFF_MASK, cf->can_dlc); | 272 | id &= CAN_EFF_MASK; |
250 | else | 273 | endpos = pos + SLC_EFF_ID_LEN; |
251 | sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20, | 274 | } else { |
252 | cf->can_id & CAN_SFF_MASK, cf->can_dlc); | 275 | *pos |= 0x20; /* convert R/T to lower case for SFF */ |
276 | id &= CAN_SFF_MASK; | ||
277 | endpos = pos + SLC_SFF_ID_LEN; | ||
278 | } | ||
253 | 279 | ||
254 | idx = strlen(sl->xbuff); | 280 | /* build 3 (SFF) or 8 (EFF) digit CAN identifier */ |
281 | pos++; | ||
282 | while (endpos >= pos) { | ||
283 | *endpos-- = hex_asc_upper[id & 0xf]; | ||
284 | id >>= 4; | ||
285 | } | ||
286 | |||
287 | pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN; | ||
255 | 288 | ||
256 | for (i = 0; i < cf->can_dlc; i++) | 289 | *pos++ = cf->can_dlc + '0'; |
257 | sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]); | 290 | |
291 | /* RTR frames may have a dlc > 0 but they never have any data bytes */ | ||
292 | if (!(cf->can_id & CAN_RTR_FLAG)) { | ||
293 | for (i = 0; i < cf->can_dlc; i++) | ||
294 | pos = hex_byte_pack_upper(pos, cf->data[i]); | ||
295 | } | ||
258 | 296 | ||
259 | strcat(sl->xbuff, "\r"); /* add terminating character */ | 297 | *pos++ = '\r'; |
260 | 298 | ||
261 | /* Order of next two lines is *very* important. | 299 | /* Order of next two lines is *very* important. |
262 | * When we are sending a little amount of data, | 300 | * When we are sending a little amount of data, |
@@ -267,8 +305,8 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf) | |||
267 | * 14 Oct 1994 Dmitry Gorodchanin. | 305 | * 14 Oct 1994 Dmitry Gorodchanin. |
268 | */ | 306 | */ |
269 | set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); | 307 | set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); |
270 | actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff)); | 308 | actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff); |
271 | sl->xleft = strlen(sl->xbuff) - actual; | 309 | sl->xleft = (pos - sl->xbuff) - actual; |
272 | sl->xhead = sl->xbuff + actual; | 310 | sl->xhead = sl->xbuff + actual; |
273 | sl->dev->stats.tx_bytes += cf->can_dlc; | 311 | sl->dev->stats.tx_bytes += cf->can_dlc; |
274 | } | 312 | } |
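For reference, the ASCII framing handled by slc_bump() and slc_encaps() above is <cmd><id><dlc><data>'\r': cmd is 't'/'T' for data frames and 'r'/'R' for RTR frames, the identifier is 3 (SFF) or 8 (EFF) upper-case hex digits, and each data byte is two hex digits. The user-space sketch below builds one such frame; the CAN ID and payload are made-up example values:

    /* Builds an SFF data frame in the slcan ASCII format described above.
     * The CAN ID and payload are arbitrary example values. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int id = 0x123;                  /* 11-bit CAN ID (example) */
            unsigned char data[] = { 0xde, 0xad, 0xbe, 0xef };
            char buf[32];
            int n, i;

            n = sprintf(buf, "t%03X%u", id & 0x7ff, (unsigned int)sizeof(data));
            for (i = 0; i < (int)sizeof(data); i++)
                    n += sprintf(buf + n, "%02X", data[i]);
            buf[n++] = '\r';
            buf[n] = '\0';

            printf("%s\n", buf);    /* t1234DEADBEEF followed by CR */
            return 0;
    }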
@@ -286,11 +324,13 @@ static void slcan_write_wakeup(struct tty_struct *tty) | |||
286 | if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) | 324 | if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) |
287 | return; | 325 | return; |
288 | 326 | ||
327 | spin_lock(&sl->lock); | ||
289 | if (sl->xleft <= 0) { | 328 | if (sl->xleft <= 0) { |
290 | /* Now serial buffer is almost free & we can start | 329 | /* Now serial buffer is almost free & we can start |
291 | * transmission of another packet */ | 330 | * transmission of another packet */ |
292 | sl->dev->stats.tx_packets++; | 331 | sl->dev->stats.tx_packets++; |
293 | clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); | 332 | clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); |
333 | spin_unlock(&sl->lock); | ||
294 | netif_wake_queue(sl->dev); | 334 | netif_wake_queue(sl->dev); |
295 | return; | 335 | return; |
296 | } | 336 | } |
@@ -298,6 +338,7 @@ static void slcan_write_wakeup(struct tty_struct *tty) | |||
298 | actual = tty->ops->write(tty, sl->xhead, sl->xleft); | 338 | actual = tty->ops->write(tty, sl->xhead, sl->xleft); |
299 | sl->xleft -= actual; | 339 | sl->xleft -= actual; |
300 | sl->xhead += actual; | 340 | sl->xhead += actual; |
341 | spin_unlock(&sl->lock); | ||
301 | } | 342 | } |
302 | 343 | ||
303 | /* Send a can_frame to a TTY queue. */ | 344 | /* Send a can_frame to a TTY queue. */ |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index a0f647f92bf5..0b7a4c3b01a2 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -463,7 +463,7 @@ static int peak_usb_start(struct peak_usb_device *dev) | |||
463 | if (i < PCAN_USB_MAX_TX_URBS) { | 463 | if (i < PCAN_USB_MAX_TX_URBS) { |
464 | if (i == 0) { | 464 | if (i == 0) { |
465 | netdev_err(netdev, "couldn't setup any tx URB\n"); | 465 | netdev_err(netdev, "couldn't setup any tx URB\n"); |
466 | return err; | 466 | goto err_tx; |
467 | } | 467 | } |
468 | 468 | ||
469 | netdev_warn(netdev, "tx performance may be slow\n"); | 469 | netdev_warn(netdev, "tx performance may be slow\n"); |
@@ -472,7 +472,7 @@ static int peak_usb_start(struct peak_usb_device *dev) | |||
472 | if (dev->adapter->dev_start) { | 472 | if (dev->adapter->dev_start) { |
473 | err = dev->adapter->dev_start(dev); | 473 | err = dev->adapter->dev_start(dev); |
474 | if (err) | 474 | if (err) |
475 | goto failed; | 475 | goto err_adapter; |
476 | } | 476 | } |
477 | 477 | ||
478 | dev->state |= PCAN_USB_STATE_STARTED; | 478 | dev->state |= PCAN_USB_STATE_STARTED; |
@@ -481,19 +481,26 @@ static int peak_usb_start(struct peak_usb_device *dev) | |||
481 | if (dev->adapter->dev_set_bus) { | 481 | if (dev->adapter->dev_set_bus) { |
482 | err = dev->adapter->dev_set_bus(dev, 1); | 482 | err = dev->adapter->dev_set_bus(dev, 1); |
483 | if (err) | 483 | if (err) |
484 | goto failed; | 484 | goto err_adapter; |
485 | } | 485 | } |
486 | 486 | ||
487 | dev->can.state = CAN_STATE_ERROR_ACTIVE; | 487 | dev->can.state = CAN_STATE_ERROR_ACTIVE; |
488 | 488 | ||
489 | return 0; | 489 | return 0; |
490 | 490 | ||
491 | failed: | 491 | err_adapter: |
492 | if (err == -ENODEV) | 492 | if (err == -ENODEV) |
493 | netif_device_detach(dev->netdev); | 493 | netif_device_detach(dev->netdev); |
494 | 494 | ||
495 | netdev_warn(netdev, "couldn't submit control: %d\n", err); | 495 | netdev_warn(netdev, "couldn't submit control: %d\n", err); |
496 | 496 | ||
497 | for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) { | ||
498 | usb_free_urb(dev->tx_contexts[i].urb); | ||
499 | dev->tx_contexts[i].urb = NULL; | ||
500 | } | ||
501 | err_tx: | ||
502 | usb_kill_anchored_urbs(&dev->rx_submitted); | ||
503 | |||
497 | return err; | 504 | return err; |
498 | } | 505 | } |
499 | 506 | ||
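The reworked error path above follows the usual kernel goto-unwind pattern: each later failure jumps to a label that releases everything set up by the earlier steps, in reverse order. A generic sketch of that idiom, using purely illustrative stub names rather than the driver's real helpers:

    #include <stdio.h>

    /* Stubs standing in for real setup/teardown steps (illustrative only). */
    static int setup_rx(void)      { puts("rx ready");     return 0; }
    static int setup_tx(void)      { puts("tx ready");     return 0; }
    static int start_adapter(void) { puts("start failed"); return -1; } /* simulate failure */
    static void free_tx(void)      { puts("tx freed"); }
    static void stop_rx(void)      { puts("rx stopped"); }

    static int example_start(void)
    {
            int err;

            err = setup_rx();
            if (err)
                    return err;          /* nothing to undo yet */

            err = setup_tx();
            if (err)
                    goto err_rx;

            err = start_adapter();
            if (err)
                    goto err_tx;

            return 0;

    err_tx:
            free_tx();                   /* undo step 2 */
    err_rx:
            stop_rx();                   /* undo step 1 */
            return err;
    }

    int main(void)
    {
            return example_start() ? 1 : 0;
    }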
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 61726af1de6e..e66beff2704d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2481,8 +2481,7 @@ load_error_cnic2: | |||
2481 | load_error_cnic1: | 2481 | load_error_cnic1: |
2482 | bnx2x_napi_disable_cnic(bp); | 2482 | bnx2x_napi_disable_cnic(bp); |
2483 | /* Update the number of queues without the cnic queues */ | 2483 | /* Update the number of queues without the cnic queues */ |
2484 | rc = bnx2x_set_real_num_queues(bp, 0); | 2484 | if (bnx2x_set_real_num_queues(bp, 0)) |
2485 | if (rc) | ||
2486 | BNX2X_ERR("Unable to set real_num_queues not including cnic\n"); | 2485 | BNX2X_ERR("Unable to set real_num_queues not including cnic\n"); |
2487 | load_error_cnic0: | 2486 | load_error_cnic0: |
2488 | BNX2X_ERR("CNIC-related load failed\n"); | 2487 | BNX2X_ERR("CNIC-related load failed\n"); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d60a2ea3da19..51468227bf3b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -175,6 +175,7 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy, | |||
175 | #define EDC_MODE_LINEAR 0x0022 | 175 | #define EDC_MODE_LINEAR 0x0022 |
176 | #define EDC_MODE_LIMITING 0x0044 | 176 | #define EDC_MODE_LIMITING 0x0044 |
177 | #define EDC_MODE_PASSIVE_DAC 0x0055 | 177 | #define EDC_MODE_PASSIVE_DAC 0x0055 |
178 | #define EDC_MODE_ACTIVE_DAC 0x0066 | ||
178 | 179 | ||
179 | /* ETS defines*/ | 180 | /* ETS defines*/ |
180 | #define DCBX_INVALID_COS (0xFF) | 181 | #define DCBX_INVALID_COS (0xFF) |
@@ -3684,6 +3685,41 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy, | |||
3684 | bnx2x_update_link_attr(params, vars->link_attr_sync); | 3685 | bnx2x_update_link_attr(params, vars->link_attr_sync); |
3685 | } | 3686 | } |
3686 | 3687 | ||
3688 | static void bnx2x_disable_kr2(struct link_params *params, | ||
3689 | struct link_vars *vars, | ||
3690 | struct bnx2x_phy *phy) | ||
3691 | { | ||
3692 | struct bnx2x *bp = params->bp; | ||
3693 | int i; | ||
3694 | static struct bnx2x_reg_set reg_set[] = { | ||
3695 | /* Step 1 - Program the TX/RX alignment markers */ | ||
3696 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690}, | ||
3697 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647}, | ||
3698 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0}, | ||
3699 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690}, | ||
3700 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647}, | ||
3701 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0}, | ||
3702 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c}, | ||
3703 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000}, | ||
3704 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000}, | ||
3705 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002}, | ||
3706 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000}, | ||
3707 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7}, | ||
3708 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7}, | ||
3709 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002}, | ||
3710 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000} | ||
3711 | }; | ||
3712 | DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n"); | ||
3713 | |||
3714 | for (i = 0; i < ARRAY_SIZE(reg_set); i++) | ||
3715 | bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, | ||
3716 | reg_set[i].val); | ||
3717 | vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; | ||
3718 | bnx2x_update_link_attr(params, vars->link_attr_sync); | ||
3719 | |||
3720 | vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; | ||
3721 | } | ||
3722 | |||
3687 | static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, | 3723 | static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, |
3688 | struct link_params *params) | 3724 | struct link_params *params) |
3689 | { | 3725 | { |
@@ -3715,7 +3751,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, | |||
3715 | struct link_params *params, | 3751 | struct link_params *params, |
3716 | struct link_vars *vars) { | 3752 | struct link_vars *vars) { |
3717 | u16 lane, i, cl72_ctrl, an_adv = 0; | 3753 | u16 lane, i, cl72_ctrl, an_adv = 0; |
3718 | u16 ucode_ver; | ||
3719 | struct bnx2x *bp = params->bp; | 3754 | struct bnx2x *bp = params->bp; |
3720 | static struct bnx2x_reg_set reg_set[] = { | 3755 | static struct bnx2x_reg_set reg_set[] = { |
3721 | {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, | 3756 | {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, |
@@ -3806,15 +3841,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, | |||
3806 | 3841 | ||
3807 | /* Advertise pause */ | 3842 | /* Advertise pause */ |
3808 | bnx2x_ext_phy_set_pause(params, phy, vars); | 3843 | bnx2x_ext_phy_set_pause(params, phy, vars); |
3809 | /* Set KR Autoneg Work-Around flag for Warpcore version older than D108 | 3844 | vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; |
3810 | */ | ||
3811 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3812 | MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver); | ||
3813 | if (ucode_ver < 0xd108) { | ||
3814 | DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n", | ||
3815 | ucode_ver); | ||
3816 | vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; | ||
3817 | } | ||
3818 | bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, | 3845 | bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, |
3819 | MDIO_WC_REG_DIGITAL5_MISC7, 0x100); | 3846 | MDIO_WC_REG_DIGITAL5_MISC7, 0x100); |
3820 | 3847 | ||
@@ -3838,6 +3865,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, | |||
3838 | bnx2x_set_aer_mmd(params, phy); | 3865 | bnx2x_set_aer_mmd(params, phy); |
3839 | 3866 | ||
3840 | bnx2x_warpcore_enable_AN_KR2(phy, params, vars); | 3867 | bnx2x_warpcore_enable_AN_KR2(phy, params, vars); |
3868 | } else { | ||
3869 | bnx2x_disable_kr2(params, vars, phy); | ||
3841 | } | 3870 | } |
3842 | 3871 | ||
3843 | /* Enable Autoneg: only on the main lane */ | 3872 | /* Enable Autoneg: only on the main lane */ |
@@ -4347,20 +4376,14 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, | |||
4347 | struct bnx2x *bp = params->bp; | 4376 | struct bnx2x *bp = params->bp; |
4348 | u32 serdes_net_if; | 4377 | u32 serdes_net_if; |
4349 | u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0; | 4378 | u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0; |
4350 | u16 lane = bnx2x_get_warpcore_lane(phy, params); | ||
4351 | 4379 | ||
4352 | vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1; | 4380 | vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1; |
4353 | 4381 | ||
4354 | if (!vars->turn_to_run_wc_rt) | 4382 | if (!vars->turn_to_run_wc_rt) |
4355 | return; | 4383 | return; |
4356 | 4384 | ||
4357 | /* Return if there is no link partner */ | ||
4358 | if (!(bnx2x_warpcore_get_sigdet(phy, params))) { | ||
4359 | DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n"); | ||
4360 | return; | ||
4361 | } | ||
4362 | |||
4363 | if (vars->rx_tx_asic_rst) { | 4385 | if (vars->rx_tx_asic_rst) { |
4386 | u16 lane = bnx2x_get_warpcore_lane(phy, params); | ||
4364 | serdes_net_if = (REG_RD(bp, params->shmem_base + | 4387 | serdes_net_if = (REG_RD(bp, params->shmem_base + |
4365 | offsetof(struct shmem_region, dev_info. | 4388 | offsetof(struct shmem_region, dev_info. |
4366 | port_hw_config[params->port].default_cfg)) & | 4389 | port_hw_config[params->port].default_cfg)) & |
@@ -4375,14 +4398,8 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, | |||
4375 | /*10G KR*/ | 4398 | /*10G KR*/ |
4376 | lnkup_kr = (gp_status1 >> (12+lane)) & 0x1; | 4399 | lnkup_kr = (gp_status1 >> (12+lane)) & 0x1; |
4377 | 4400 | ||
4378 | DP(NETIF_MSG_LINK, | ||
4379 | "gp_status1 0x%x\n", gp_status1); | ||
4380 | |||
4381 | if (lnkup_kr || lnkup) { | 4401 | if (lnkup_kr || lnkup) { |
4382 | vars->rx_tx_asic_rst = 0; | 4402 | vars->rx_tx_asic_rst = 0; |
4383 | DP(NETIF_MSG_LINK, | ||
4384 | "link up, rx_tx_asic_rst 0x%x\n", | ||
4385 | vars->rx_tx_asic_rst); | ||
4386 | } else { | 4403 | } else { |
4387 | /* Reset the lane to see if link comes up.*/ | 4404 | /* Reset the lane to see if link comes up.*/ |
4388 | bnx2x_warpcore_reset_lane(bp, phy, 1); | 4405 | bnx2x_warpcore_reset_lane(bp, phy, 1); |
@@ -4507,10 +4524,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, | |||
4507 | * enabled transmitter to avoid current leakage in case | 4524 | * enabled transmitter to avoid current leakage in case |
4508 | * no module is connected | 4525 | * no module is connected |
4509 | */ | 4526 | */ |
4510 | if (bnx2x_is_sfp_module_plugged(phy, params)) | 4527 | if ((params->loopback_mode == LOOPBACK_NONE) || |
4511 | bnx2x_sfp_module_detection(phy, params); | 4528 | (params->loopback_mode == LOOPBACK_EXT)) { |
4512 | else | 4529 | if (bnx2x_is_sfp_module_plugged(phy, params)) |
4513 | bnx2x_sfp_e3_set_transmitter(params, phy, 1); | 4530 | bnx2x_sfp_module_detection(phy, params); |
4531 | else | ||
4532 | bnx2x_sfp_e3_set_transmitter(params, | ||
4533 | phy, 1); | ||
4534 | } | ||
4514 | 4535 | ||
4515 | bnx2x_warpcore_config_sfi(phy, params); | 4536 | bnx2x_warpcore_config_sfi(phy, params); |
4516 | break; | 4537 | break; |
@@ -5757,6 +5778,11 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, | |||
5757 | rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, | 5778 | rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, |
5758 | duplex); | 5779 | duplex); |
5759 | 5780 | ||
5781 | /* In case of KR link down, start up the recovering procedure */ | ||
5782 | if ((!link_up) && (phy->media_type == ETH_PHY_KR) && | ||
5783 | (!(phy->flags & FLAGS_WC_DUAL_MODE))) | ||
5784 | vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; | ||
5785 | |||
5760 | DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", | 5786 | DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", |
5761 | vars->duplex, vars->flow_ctrl, vars->link_status); | 5787 | vars->duplex, vars->flow_ctrl, vars->link_status); |
5762 | return rc; | 5788 | return rc; |
@@ -6507,6 +6533,11 @@ static int bnx2x_link_initialize(struct link_params *params, | |||
6507 | params->phy[INT_PHY].config_init(phy, params, vars); | 6533 | params->phy[INT_PHY].config_init(phy, params, vars); |
6508 | } | 6534 | } |
6509 | 6535 | ||
6536 | /* Re-read this value in case it was changed inside config_init due to | ||
6537 | * limitations of optic module | ||
6538 | */ | ||
6539 | vars->line_speed = params->phy[INT_PHY].req_line_speed; | ||
6540 | |||
6510 | /* Init external phy*/ | 6541 | /* Init external phy*/ |
6511 | if (non_ext_phy) { | 6542 | if (non_ext_phy) { |
6512 | if (params->phy[INT_PHY].supported & | 6543 | if (params->phy[INT_PHY].supported & |
@@ -8080,7 +8111,10 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
8080 | if (copper_module_type & | 8111 | if (copper_module_type & |
8081 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { | 8112 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { |
8082 | DP(NETIF_MSG_LINK, "Active Copper cable detected\n"); | 8113 | DP(NETIF_MSG_LINK, "Active Copper cable detected\n"); |
8083 | check_limiting_mode = 1; | 8114 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) |
8115 | *edc_mode = EDC_MODE_ACTIVE_DAC; | ||
8116 | else | ||
8117 | check_limiting_mode = 1; | ||
8084 | } else if (copper_module_type & | 8118 | } else if (copper_module_type & |
8085 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { | 8119 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { |
8086 | DP(NETIF_MSG_LINK, | 8120 | DP(NETIF_MSG_LINK, |
@@ -8555,6 +8589,7 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params, | |||
8555 | mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; | 8589 | mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; |
8556 | break; | 8590 | break; |
8557 | case EDC_MODE_PASSIVE_DAC: | 8591 | case EDC_MODE_PASSIVE_DAC: |
8592 | case EDC_MODE_ACTIVE_DAC: | ||
8558 | mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; | 8593 | mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; |
8559 | break; | 8594 | break; |
8560 | default: | 8595 | default: |
@@ -9730,32 +9765,41 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, | |||
9730 | MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, | 9765 | MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, |
9731 | an_1000_val); | 9766 | an_1000_val); |
9732 | 9767 | ||
9733 | /* set 100 speed advertisement */ | 9768 | /* Set 10/100 speed advertisement */ |
9734 | if ((phy->req_line_speed == SPEED_AUTO_NEG) && | 9769 | if (phy->req_line_speed == SPEED_AUTO_NEG) { |
9735 | (phy->speed_cap_mask & | 9770 | if (phy->speed_cap_mask & |
9736 | (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | | 9771 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { |
9737 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) { | 9772 | /* Enable autoneg and restart autoneg for legacy speeds |
9738 | an_10_100_val |= (1<<7); | 9773 | */ |
9739 | /* Enable autoneg and restart autoneg for legacy speeds */ | 9774 | autoneg_val |= (1<<9 | 1<<12); |
9740 | autoneg_val |= (1<<9 | 1<<12); | ||
9741 | |||
9742 | if (phy->req_duplex == DUPLEX_FULL) | ||
9743 | an_10_100_val |= (1<<8); | 9775 | an_10_100_val |= (1<<8); |
9744 | DP(NETIF_MSG_LINK, "Advertising 100M\n"); | 9776 | DP(NETIF_MSG_LINK, "Advertising 100M-FD\n"); |
9745 | } | 9777 | } |
9746 | /* set 10 speed advertisement */ | 9778 | |
9747 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && | 9779 | if (phy->speed_cap_mask & |
9748 | (phy->speed_cap_mask & | 9780 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { |
9749 | (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | | 9781 | /* Enable autoneg and restart autoneg for legacy speeds |
9750 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) && | 9782 | */ |
9751 | (phy->supported & | 9783 | autoneg_val |= (1<<9 | 1<<12); |
9752 | (SUPPORTED_10baseT_Half | | 9784 | an_10_100_val |= (1<<7); |
9753 | SUPPORTED_10baseT_Full)))) { | 9785 | DP(NETIF_MSG_LINK, "Advertising 100M-HD\n"); |
9754 | an_10_100_val |= (1<<5); | 9786 | } |
9755 | autoneg_val |= (1<<9 | 1<<12); | 9787 | |
9756 | if (phy->req_duplex == DUPLEX_FULL) | 9788 | if ((phy->speed_cap_mask & |
9789 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && | ||
9790 | (phy->supported & SUPPORTED_10baseT_Full)) { | ||
9757 | an_10_100_val |= (1<<6); | 9791 | an_10_100_val |= (1<<6); |
9758 | DP(NETIF_MSG_LINK, "Advertising 10M\n"); | 9792 | autoneg_val |= (1<<9 | 1<<12); |
9793 | DP(NETIF_MSG_LINK, "Advertising 10M-FD\n"); | ||
9794 | } | ||
9795 | |||
9796 | if ((phy->speed_cap_mask & | ||
9797 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) && | ||
9798 | (phy->supported & SUPPORTED_10baseT_Half)) { | ||
9799 | an_10_100_val |= (1<<5); | ||
9800 | autoneg_val |= (1<<9 | 1<<12); | ||
9801 | DP(NETIF_MSG_LINK, "Advertising 10M-HD\n"); | ||
9802 | } | ||
9759 | } | 9803 | } |
9760 | 9804 | ||
9761 | /* Only 10/100 are allowed to work in FORCE mode */ | 9805 | /* Only 10/100 are allowed to work in FORCE mode */ |
@@ -13432,43 +13476,6 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy, | |||
13432 | } | 13476 | } |
13433 | } | 13477 | } |
13434 | } | 13478 | } |
13435 | static void bnx2x_disable_kr2(struct link_params *params, | ||
13436 | struct link_vars *vars, | ||
13437 | struct bnx2x_phy *phy) | ||
13438 | { | ||
13439 | struct bnx2x *bp = params->bp; | ||
13440 | int i; | ||
13441 | static struct bnx2x_reg_set reg_set[] = { | ||
13442 | /* Step 1 - Program the TX/RX alignment markers */ | ||
13443 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690}, | ||
13444 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647}, | ||
13445 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0}, | ||
13446 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690}, | ||
13447 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647}, | ||
13448 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0}, | ||
13449 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c}, | ||
13450 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000}, | ||
13451 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000}, | ||
13452 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002}, | ||
13453 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000}, | ||
13454 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7}, | ||
13455 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7}, | ||
13456 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002}, | ||
13457 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000} | ||
13458 | }; | ||
13459 | DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n"); | ||
13460 | |||
13461 | for (i = 0; i < ARRAY_SIZE(reg_set); i++) | ||
13462 | bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, | ||
13463 | reg_set[i].val); | ||
13464 | vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; | ||
13465 | bnx2x_update_link_attr(params, vars->link_attr_sync); | ||
13466 | |||
13467 | vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; | ||
13468 | /* Restart AN on leading lane */ | ||
13469 | bnx2x_warpcore_restart_AN_KR(phy, params); | ||
13470 | } | ||
13471 | |||
13472 | static void bnx2x_kr2_recovery(struct link_params *params, | 13479 | static void bnx2x_kr2_recovery(struct link_params *params, |
13473 | struct link_vars *vars, | 13480 | struct link_vars *vars, |
13474 | struct bnx2x_phy *phy) | 13481 | struct bnx2x_phy *phy) |
@@ -13546,6 +13553,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13546 | /* Disable KR2 on both lanes */ | 13553 | /* Disable KR2 on both lanes */ |
13547 | DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page); | 13554 | DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page); |
13548 | bnx2x_disable_kr2(params, vars, phy); | 13555 | bnx2x_disable_kr2(params, vars, phy); |
13556 | /* Restart AN on leading lane */ | ||
13557 | bnx2x_warpcore_restart_AN_KR(phy, params); | ||
13549 | return; | 13558 | return; |
13550 | } | 13559 | } |
13551 | } | 13560 | } |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a6704b555042..82b658d8c04c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4703,6 +4703,14 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) | |||
4703 | attn.sig[3] = REG_RD(bp, | 4703 | attn.sig[3] = REG_RD(bp, |
4704 | MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + | 4704 | MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + |
4705 | port*4); | 4705 | port*4); |
4706 | /* Since MCP attentions can't be disabled inside the block, we need to | ||
4707 | * read AEU registers to see whether they're currently disabled | ||
4708 | */ | ||
4709 | attn.sig[3] &= ((REG_RD(bp, | ||
4710 | !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 | ||
4711 | : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) & | ||
4712 | MISC_AEU_ENABLE_MCP_PRTY_BITS) | | ||
4713 | ~MISC_AEU_ENABLE_MCP_PRTY_BITS); | ||
4706 | 4714 | ||
4707 | if (!CHIP_IS_E1x(bp)) | 4715 | if (!CHIP_IS_E1x(bp)) |
4708 | attn.sig[4] = REG_RD(bp, | 4716 | attn.sig[4] = REG_RD(bp, |
@@ -5447,26 +5455,24 @@ static void bnx2x_timer(unsigned long data) | |||
5447 | if (IS_PF(bp) && | 5455 | if (IS_PF(bp) && |
5448 | !BP_NOMCP(bp)) { | 5456 | !BP_NOMCP(bp)) { |
5449 | int mb_idx = BP_FW_MB_IDX(bp); | 5457 | int mb_idx = BP_FW_MB_IDX(bp); |
5450 | u32 drv_pulse; | 5458 | u16 drv_pulse; |
5451 | u32 mcp_pulse; | 5459 | u16 mcp_pulse; |
5452 | 5460 | ||
5453 | ++bp->fw_drv_pulse_wr_seq; | 5461 | ++bp->fw_drv_pulse_wr_seq; |
5454 | bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; | 5462 | bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; |
5455 | /* TBD - add SYSTEM_TIME */ | ||
5456 | drv_pulse = bp->fw_drv_pulse_wr_seq; | 5463 | drv_pulse = bp->fw_drv_pulse_wr_seq; |
5457 | bnx2x_drv_pulse(bp); | 5464 | bnx2x_drv_pulse(bp); |
5458 | 5465 | ||
5459 | mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & | 5466 | mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & |
5460 | MCP_PULSE_SEQ_MASK); | 5467 | MCP_PULSE_SEQ_MASK); |
5461 | /* The delta between driver pulse and mcp response | 5468 | /* The delta between driver pulse and mcp response |
5462 | * should be 1 (before mcp response) or 0 (after mcp response) | 5469 | * should not get too big. If the MFW is more than 5 pulses |
5470 | * behind, we should worry about it enough to generate an error | ||
5471 | * log. | ||
5463 | */ | 5472 | */ |
5464 | if ((drv_pulse != mcp_pulse) && | 5473 | if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5) |
5465 | (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { | 5474 | BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n", |
5466 | /* someone lost a heartbeat... */ | ||
5467 | BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n", | ||
5468 | drv_pulse, mcp_pulse); | 5475 | drv_pulse, mcp_pulse); |
5469 | } | ||
5470 | } | 5476 | } |
5471 | 5477 | ||
5472 | if (bp->state == BNX2X_STATE_OPEN) | 5478 | if (bp->state == BNX2X_STATE_OPEN) |
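The new pulse check relies on the masked-difference idiom for wrapping sequence counters: (drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK yields the forward distance from the MCP pulse to the driver pulse even across a wrap. A small stand-alone sketch of that arithmetic; the 0x7fff mask here is an assumed value for illustration, not necessarily the driver's constant:

    #include <stdio.h>

    #define SEQ_MASK 0x7fff   /* assumed width of the pulse sequence counter */

    static unsigned int seq_delta(unsigned int drv, unsigned int mcp)
    {
            return (drv - mcp) & SEQ_MASK;
    }

    int main(void)
    {
            printf("%u\n", seq_delta(103, 100));        /* 3: no wrap, driver 3 ahead */
            printf("%u\n", seq_delta(0x0002, 0x7ffe));  /* 4: driver wrapped, still 4 ahead */
            return 0;
    }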
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 2604b6204abe..9ad012bdd915 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1819,7 +1819,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) | |||
1819 | fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); | 1819 | fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); |
1820 | if (fid & IGU_FID_ENCODE_IS_PF) | 1820 | if (fid & IGU_FID_ENCODE_IS_PF) |
1821 | current_pf = fid & IGU_FID_PF_NUM_MASK; | 1821 | current_pf = fid & IGU_FID_PF_NUM_MASK; |
1822 | else if (current_pf == BP_ABS_FUNC(bp)) | 1822 | else if (current_pf == BP_FUNC(bp)) |
1823 | bnx2x_vf_set_igu_info(bp, sb_id, | 1823 | bnx2x_vf_set_igu_info(bp, sb_id, |
1824 | (fid & IGU_FID_VF_NUM_MASK)); | 1824 | (fid & IGU_FID_VF_NUM_MASK)); |
1825 | DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", | 1825 | DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", |
@@ -3180,6 +3180,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp) | |||
3180 | /* set local queue arrays */ | 3180 | /* set local queue arrays */ |
3181 | vf->vfqs = &bp->vfdb->vfqs[qcount]; | 3181 | vf->vfqs = &bp->vfdb->vfqs[qcount]; |
3182 | qcount += vf_sb_count(vf); | 3182 | qcount += vf_sb_count(vf); |
3183 | bnx2x_iov_static_resc(bp, vf); | ||
3183 | } | 3184 | } |
3184 | 3185 | ||
3185 | /* prepare msix vectors in VF configuration space */ | 3186 | /* prepare msix vectors in VF configuration space */ |
@@ -3187,6 +3188,8 @@ int bnx2x_enable_sriov(struct bnx2x *bp) | |||
3187 | bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); | 3188 | bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); |
3188 | REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, | 3189 | REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, |
3189 | num_vf_queues); | 3190 | num_vf_queues); |
3191 | DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", | ||
3192 | vf_idx, num_vf_queues); | ||
3190 | } | 3193 | } |
3191 | bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); | 3194 | bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); |
3192 | 3195 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 6cfb88732452..da16953eb2ec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -1765,28 +1765,28 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
1765 | switch (mbx->first_tlv.tl.type) { | 1765 | switch (mbx->first_tlv.tl.type) { |
1766 | case CHANNEL_TLV_ACQUIRE: | 1766 | case CHANNEL_TLV_ACQUIRE: |
1767 | bnx2x_vf_mbx_acquire(bp, vf, mbx); | 1767 | bnx2x_vf_mbx_acquire(bp, vf, mbx); |
1768 | break; | 1768 | return; |
1769 | case CHANNEL_TLV_INIT: | 1769 | case CHANNEL_TLV_INIT: |
1770 | bnx2x_vf_mbx_init_vf(bp, vf, mbx); | 1770 | bnx2x_vf_mbx_init_vf(bp, vf, mbx); |
1771 | break; | 1771 | return; |
1772 | case CHANNEL_TLV_SETUP_Q: | 1772 | case CHANNEL_TLV_SETUP_Q: |
1773 | bnx2x_vf_mbx_setup_q(bp, vf, mbx); | 1773 | bnx2x_vf_mbx_setup_q(bp, vf, mbx); |
1774 | break; | 1774 | return; |
1775 | case CHANNEL_TLV_SET_Q_FILTERS: | 1775 | case CHANNEL_TLV_SET_Q_FILTERS: |
1776 | bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); | 1776 | bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); |
1777 | break; | 1777 | return; |
1778 | case CHANNEL_TLV_TEARDOWN_Q: | 1778 | case CHANNEL_TLV_TEARDOWN_Q: |
1779 | bnx2x_vf_mbx_teardown_q(bp, vf, mbx); | 1779 | bnx2x_vf_mbx_teardown_q(bp, vf, mbx); |
1780 | break; | 1780 | return; |
1781 | case CHANNEL_TLV_CLOSE: | 1781 | case CHANNEL_TLV_CLOSE: |
1782 | bnx2x_vf_mbx_close_vf(bp, vf, mbx); | 1782 | bnx2x_vf_mbx_close_vf(bp, vf, mbx); |
1783 | break; | 1783 | return; |
1784 | case CHANNEL_TLV_RELEASE: | 1784 | case CHANNEL_TLV_RELEASE: |
1785 | bnx2x_vf_mbx_release_vf(bp, vf, mbx); | 1785 | bnx2x_vf_mbx_release_vf(bp, vf, mbx); |
1786 | break; | 1786 | return; |
1787 | case CHANNEL_TLV_UPDATE_RSS: | 1787 | case CHANNEL_TLV_UPDATE_RSS: |
1788 | bnx2x_vf_mbx_update_rss(bp, vf, mbx); | 1788 | bnx2x_vf_mbx_update_rss(bp, vf, mbx); |
1789 | break; | 1789 | return; |
1790 | } | 1790 | } |
1791 | 1791 | ||
1792 | } else { | 1792 | } else { |
@@ -1802,26 +1802,24 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
1802 | for (i = 0; i < 20; i++) | 1802 | for (i = 0; i < 20; i++) |
1803 | DP_CONT(BNX2X_MSG_IOV, "%x ", | 1803 | DP_CONT(BNX2X_MSG_IOV, "%x ", |
1804 | mbx->msg->req.tlv_buf_size.tlv_buffer[i]); | 1804 | mbx->msg->req.tlv_buf_size.tlv_buffer[i]); |
1805 | } | ||
1805 | 1806 | ||
1806 | /* test whether we can respond to the VF (do we have an address | 1807 | /* can we respond to VF (do we have an address for it?) */ |
1807 | * for it?) | 1808 | if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { |
1808 | */ | 1809 | /* mbx_resp uses the op_rc of the VF */ |
1809 | if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { | 1810 | vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; |
1810 | /* mbx_resp uses the op_rc of the VF */ | ||
1811 | vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; | ||
1812 | 1811 | ||
1813 | /* notify the VF that we do not support this request */ | 1812 | /* notify the VF that we do not support this request */ |
1814 | bnx2x_vf_mbx_resp(bp, vf); | 1813 | bnx2x_vf_mbx_resp(bp, vf); |
1815 | } else { | 1814 | } else { |
1816 | /* can't send a response since this VF is unknown to us | 1815 | /* can't send a response since this VF is unknown to us |
1817 | * just ack the FW to release the mailbox and unlock | 1816 | * just ack the FW to release the mailbox and unlock |
1818 | * the channel. | 1817 | * the channel. |
1819 | */ | 1818 | */ |
1820 | storm_memset_vf_mbx_ack(bp, vf->abs_vfid); | 1819 | storm_memset_vf_mbx_ack(bp, vf->abs_vfid); |
1821 | mmiowb(); | 1820 | /* Firmware ack should be written before unlocking channel */ |
1822 | bnx2x_unlock_vf_pf_channel(bp, vf, | 1821 | mmiowb(); |
1823 | mbx->first_tlv.tl.type); | 1822 | bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); |
1824 | } | ||
1825 | } | 1823 | } |
1826 | } | 1824 | } |
1827 | 1825 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index ace5050dba38..db020230bd0b 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -88,6 +88,7 @@ static inline char *nic_name(struct pci_dev *pdev) | |||
88 | #define BE_MIN_MTU 256 | 88 | #define BE_MIN_MTU 256 |
89 | 89 | ||
90 | #define BE_NUM_VLANS_SUPPORTED 64 | 90 | #define BE_NUM_VLANS_SUPPORTED 64 |
91 | #define BE_UMC_NUM_VLANS_SUPPORTED 15 | ||
91 | #define BE_MAX_EQD 96u | 92 | #define BE_MAX_EQD 96u |
92 | #define BE_MAX_TX_FRAG_COUNT 30 | 93 | #define BE_MAX_TX_FRAG_COUNT 30 |
93 | 94 | ||
@@ -333,6 +334,7 @@ enum vf_state { | |||
333 | 334 | ||
334 | #define BE_FLAGS_LINK_STATUS_INIT 1 | 335 | #define BE_FLAGS_LINK_STATUS_INIT 1 |
335 | #define BE_FLAGS_WORKER_SCHEDULED (1 << 3) | 336 | #define BE_FLAGS_WORKER_SCHEDULED (1 << 3) |
337 | #define BE_FLAGS_VLAN_PROMISC (1 << 4) | ||
336 | #define BE_FLAGS_NAPI_ENABLED (1 << 9) | 338 | #define BE_FLAGS_NAPI_ENABLED (1 << 9) |
337 | #define BE_UC_PMAC_COUNT 30 | 339 | #define BE_UC_PMAC_COUNT 30 |
338 | #define BE_VF_UC_PMAC_COUNT 2 | 340 | #define BE_VF_UC_PMAC_COUNT 2 |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1ab5dab11eff..bd0e0c0bbcd8 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -180,6 +180,9 @@ static int be_mcc_compl_process(struct be_adapter *adapter, | |||
180 | dev_err(&adapter->pdev->dev, | 180 | dev_err(&adapter->pdev->dev, |
181 | "opcode %d-%d failed:status %d-%d\n", | 181 | "opcode %d-%d failed:status %d-%d\n", |
182 | opcode, subsystem, compl_status, extd_status); | 182 | opcode, subsystem, compl_status, extd_status); |
183 | |||
184 | if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES) | ||
185 | return extd_status; | ||
183 | } | 186 | } |
184 | } | 187 | } |
185 | done: | 188 | done: |
@@ -1812,6 +1815,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) | |||
1812 | } else if (flags & IFF_ALLMULTI) { | 1815 | } else if (flags & IFF_ALLMULTI) { |
1813 | req->if_flags_mask = req->if_flags = | 1816 | req->if_flags_mask = req->if_flags = |
1814 | cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); | 1817 | cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); |
1818 | } else if (flags & BE_FLAGS_VLAN_PROMISC) { | ||
1819 | req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS); | ||
1820 | |||
1821 | if (value == ON) | ||
1822 | req->if_flags = | ||
1823 | cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS); | ||
1815 | } else { | 1824 | } else { |
1816 | struct netdev_hw_addr *ha; | 1825 | struct netdev_hw_addr *ha; |
1817 | int i = 0; | 1826 | int i = 0; |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index d026226db88c..108ca8abf0af 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h | |||
@@ -60,6 +60,8 @@ enum { | |||
60 | MCC_STATUS_NOT_SUPPORTED = 66 | 60 | MCC_STATUS_NOT_SUPPORTED = 66 |
61 | }; | 61 | }; |
62 | 62 | ||
63 | #define MCC_ADDL_STS_INSUFFICIENT_RESOURCES 0x16 | ||
64 | |||
63 | #define CQE_STATUS_COMPL_MASK 0xFFFF | 65 | #define CQE_STATUS_COMPL_MASK 0xFFFF |
64 | #define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ | 66 | #define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ |
65 | #define CQE_STATUS_EXTD_MASK 0xFFFF | 67 | #define CQE_STATUS_EXTD_MASK 0xFFFF |
@@ -1791,7 +1793,7 @@ struct be_nic_res_desc { | |||
1791 | u8 acpi_params; | 1793 | u8 acpi_params; |
1792 | u8 wol_param; | 1794 | u8 wol_param; |
1793 | u16 rsvd7; | 1795 | u16 rsvd7; |
1794 | u32 rsvd8[3]; | 1796 | u32 rsvd8[7]; |
1795 | } __packed; | 1797 | } __packed; |
1796 | 1798 | ||
1797 | struct be_cmd_req_get_func_config { | 1799 | struct be_cmd_req_get_func_config { |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 100b528b9bd0..2c38cc402119 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -855,11 +855,11 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, | |||
855 | unsigned int eth_hdr_len; | 855 | unsigned int eth_hdr_len; |
856 | struct iphdr *ip; | 856 | struct iphdr *ip; |
857 | 857 | ||
858 | /* Lancer ASIC has a bug wherein packets that are 32 bytes or less | 858 | /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less |
859 | * may cause a transmit stall on that port. So the work-around is to | 859 | * may cause a transmit stall on that port. So the work-around is to |
860 | * pad such packets to a 36-byte length. | 860 | * pad short packets (<= 32 bytes) to a 36-byte length. |
861 | */ | 861 | */ |
862 | if (unlikely(lancer_chip(adapter) && skb->len <= 32)) { | 862 | if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { |
863 | if (skb_padto(skb, 36)) | 863 | if (skb_padto(skb, 36)) |
864 | goto tx_drop; | 864 | goto tx_drop; |
865 | skb->len = 36; | 865 | skb->len = 36; |
@@ -1013,18 +1013,40 @@ static int be_vid_config(struct be_adapter *adapter) | |||
1013 | status = be_cmd_vlan_config(adapter, adapter->if_handle, | 1013 | status = be_cmd_vlan_config(adapter, adapter->if_handle, |
1014 | vids, num, 1, 0); | 1014 | vids, num, 1, 0); |
1015 | 1015 | ||
1016 | /* Set to VLAN promisc mode as setting VLAN filter failed */ | ||
1017 | if (status) { | 1016 | if (status) { |
1018 | dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n"); | 1017 | /* Set to VLAN promisc mode as setting VLAN filter failed */ |
1019 | dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n"); | 1018 | if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES) |
1020 | goto set_vlan_promisc; | 1019 | goto set_vlan_promisc; |
1020 | dev_err(&adapter->pdev->dev, | ||
1021 | "Setting HW VLAN filtering failed.\n"); | ||
1022 | } else { | ||
1023 | if (adapter->flags & BE_FLAGS_VLAN_PROMISC) { | ||
1024 | /* hw VLAN filtering re-enabled. */ | ||
1025 | status = be_cmd_rx_filter(adapter, | ||
1026 | BE_FLAGS_VLAN_PROMISC, OFF); | ||
1027 | if (!status) { | ||
1028 | dev_info(&adapter->pdev->dev, | ||
1029 | "Disabling VLAN Promiscuous mode.\n"); | ||
1030 | adapter->flags &= ~BE_FLAGS_VLAN_PROMISC; | ||
1031 | dev_info(&adapter->pdev->dev, | ||
1032 | "Re-Enabling HW VLAN filtering\n"); | ||
1033 | } | ||
1034 | } | ||
1021 | } | 1035 | } |
1022 | 1036 | ||
1023 | return status; | 1037 | return status; |
1024 | 1038 | ||
1025 | set_vlan_promisc: | 1039 | set_vlan_promisc: |
1026 | status = be_cmd_vlan_config(adapter, adapter->if_handle, | 1040 | dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n"); |
1027 | NULL, 0, 1, 1); | 1041 | |
1042 | status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON); | ||
1043 | if (!status) { | ||
1044 | dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n"); | ||
1045 | dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n"); | ||
1046 | adapter->flags |= BE_FLAGS_VLAN_PROMISC; | ||
1047 | } else | ||
1048 | dev_err(&adapter->pdev->dev, | ||
1049 | "Failed to enable VLAN Promiscuous mode.\n"); | ||
1028 | return status; | 1050 | return status; |
1029 | } | 1051 | } |
1030 | 1052 | ||
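The be_vid_config() rework above turns VLAN-filter exhaustion into an explicit fallback: when the firmware reports MCC_ADDL_STS_INSUFFICIENT_RESOURCES the driver switches the interface into VLAN-promiscuous mode through be_cmd_rx_filter() and remembers that in BE_FLAGS_VLAN_PROMISC, then returns to hardware filtering once a later programming attempt succeeds. The userspace C sketch below mirrors that flow; program_vlan_filters() and set_vlan_promisc() are invented stand-ins for the firmware commands, and the 64-filter limit is only illustrative.

#include <stdio.h>
#include <stdbool.h>

#define STS_OK 0
#define STS_INSUFFICIENT_RESOURCES 0x16   /* mirrors MCC_ADDL_STS_INSUFFICIENT_RESOURCES */
#define FLAG_VLAN_PROMISC (1 << 4)        /* mirrors BE_FLAGS_VLAN_PROMISC */

struct adapter { unsigned int flags; };

/* Stand-ins for the firmware commands; they return STS_* codes. */
static int program_vlan_filters(int nvids)
{
	return nvids > 64 ? STS_INSUFFICIENT_RESOURCES : STS_OK;
}

static int set_vlan_promisc(struct adapter *ad, bool on)
{
	(void)ad;
	printf("%s VLAN promiscuous mode\n", on ? "enable" : "disable");
	return STS_OK;
}

static int vid_config(struct adapter *ad, int nvids)
{
	int status = program_vlan_filters(nvids);

	if (status == STS_INSUFFICIENT_RESOURCES) {
		/* Filter table exhausted: fall back to VLAN promiscuous mode. */
		if (!set_vlan_promisc(ad, true))
			ad->flags |= FLAG_VLAN_PROMISC;
		return status;
	}

	/* Filters programmed again: leave promiscuous mode if we were in it. */
	if (!status && (ad->flags & FLAG_VLAN_PROMISC)) {
		if (!set_vlan_promisc(ad, false))
			ad->flags &= ~FLAG_VLAN_PROMISC;
	}
	return status;
}

int main(void)
{
	struct adapter ad = { 0 };

	vid_config(&ad, 100);  /* too many VIDs -> promiscuous fallback */
	vid_config(&ad, 10);   /* fits again -> filtering restored */
	return 0;
}
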
@@ -1033,10 +1055,6 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid) | |||
1033 | struct be_adapter *adapter = netdev_priv(netdev); | 1055 | struct be_adapter *adapter = netdev_priv(netdev); |
1034 | int status = 0; | 1056 | int status = 0; |
1035 | 1057 | ||
1036 | if (!lancer_chip(adapter) && !be_physfn(adapter)) { | ||
1037 | status = -EINVAL; | ||
1038 | goto ret; | ||
1039 | } | ||
1040 | 1058 | ||
1041 | /* Packets with VID 0 are always received by Lancer by default */ | 1059 | /* Packets with VID 0 are always received by Lancer by default */ |
1042 | if (lancer_chip(adapter) && vid == 0) | 1060 | if (lancer_chip(adapter) && vid == 0) |
@@ -1059,11 +1077,6 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid) | |||
1059 | struct be_adapter *adapter = netdev_priv(netdev); | 1077 | struct be_adapter *adapter = netdev_priv(netdev); |
1060 | int status = 0; | 1078 | int status = 0; |
1061 | 1079 | ||
1062 | if (!lancer_chip(adapter) && !be_physfn(adapter)) { | ||
1063 | status = -EINVAL; | ||
1064 | goto ret; | ||
1065 | } | ||
1066 | |||
1067 | /* Packets with VID 0 are always received by Lancer by default */ | 1080 | /* Packets with VID 0 are always received by Lancer by default */ |
1068 | if (lancer_chip(adapter) && vid == 0) | 1081 | if (lancer_chip(adapter) && vid == 0) |
1069 | goto ret; | 1082 | goto ret; |
@@ -1188,8 +1201,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf, | |||
1188 | 1201 | ||
1189 | vi->vf = vf; | 1202 | vi->vf = vf; |
1190 | vi->tx_rate = vf_cfg->tx_rate; | 1203 | vi->tx_rate = vf_cfg->tx_rate; |
1191 | vi->vlan = vf_cfg->vlan_tag; | 1204 | vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK; |
1192 | vi->qos = 0; | 1205 | vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT; |
1193 | memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); | 1206 | memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); |
1194 | 1207 | ||
1195 | return 0; | 1208 | return 0; |
@@ -1199,28 +1212,29 @@ static int be_set_vf_vlan(struct net_device *netdev, | |||
1199 | int vf, u16 vlan, u8 qos) | 1212 | int vf, u16 vlan, u8 qos) |
1200 | { | 1213 | { |
1201 | struct be_adapter *adapter = netdev_priv(netdev); | 1214 | struct be_adapter *adapter = netdev_priv(netdev); |
1215 | struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; | ||
1202 | int status = 0; | 1216 | int status = 0; |
1203 | 1217 | ||
1204 | if (!sriov_enabled(adapter)) | 1218 | if (!sriov_enabled(adapter)) |
1205 | return -EPERM; | 1219 | return -EPERM; |
1206 | 1220 | ||
1207 | if (vf >= adapter->num_vfs || vlan > 4095) | 1221 | if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7) |
1208 | return -EINVAL; | 1222 | return -EINVAL; |
1209 | 1223 | ||
1210 | if (vlan) { | 1224 | if (vlan || qos) { |
1211 | if (adapter->vf_cfg[vf].vlan_tag != vlan) { | 1225 | vlan |= qos << VLAN_PRIO_SHIFT; |
1226 | if (vf_cfg->vlan_tag != vlan) { | ||
1212 | /* If this is new value, program it. Else skip. */ | 1227 | /* If this is new value, program it. Else skip. */ |
1213 | adapter->vf_cfg[vf].vlan_tag = vlan; | 1228 | vf_cfg->vlan_tag = vlan; |
1214 | 1229 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, | |
1215 | status = be_cmd_set_hsw_config(adapter, vlan, | 1230 | vf_cfg->if_handle, 0); |
1216 | vf + 1, adapter->vf_cfg[vf].if_handle, 0); | ||
1217 | } | 1231 | } |
1218 | } else { | 1232 | } else { |
1219 | /* Reset Transparent Vlan Tagging. */ | 1233 | /* Reset Transparent Vlan Tagging. */ |
1220 | adapter->vf_cfg[vf].vlan_tag = 0; | 1234 | vf_cfg->vlan_tag = 0; |
1221 | vlan = adapter->vf_cfg[vf].def_vid; | 1235 | vlan = vf_cfg->def_vid; |
1222 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, | 1236 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, |
1223 | adapter->vf_cfg[vf].if_handle, 0); | 1237 | vf_cfg->if_handle, 0); |
1224 | } | 1238 | } |
1225 | 1239 | ||
1226 | 1240 | ||
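be_set_vf_vlan() now folds the requested 802.1p priority into the stored tag (vlan |= qos << VLAN_PRIO_SHIFT) and be_get_vf_config() unpacks it again with VLAN_VID_MASK and the same shift. A short, self-contained sketch of that packing, using the usual values from linux/if_vlan.h (mask 0x0fff, shift 13):

#include <stdio.h>
#include <stdint.h>

#define VLAN_VID_MASK   0x0fff  /* bits 0-11: VLAN ID */
#define VLAN_PRIO_SHIFT 13      /* bits 13-15: 802.1p priority (PCP) */

/* Pack a VLAN ID and a 0-7 priority into one TCI-style 16-bit tag. */
static uint16_t pack_vlan_tag(uint16_t vid, uint8_t qos)
{
	return (uint16_t)((vid & VLAN_VID_MASK) | ((uint16_t)qos << VLAN_PRIO_SHIFT));
}

int main(void)
{
	uint16_t tag = pack_vlan_tag(100, 5);

	/* Unpack the way be_get_vf_config() now reports it. */
	printf("vlan=%u qos=%u (tag=0x%04x)\n",
	       (unsigned)(tag & VLAN_VID_MASK),
	       (unsigned)(tag >> VLAN_PRIO_SHIFT),
	       (unsigned)tag);
	return 0;
}

Keeping both fields in the one tag is what lets the qos > 7 check above reject out-of-range priorities before they would collide with the VID bits.
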
@@ -2963,6 +2977,8 @@ static void BEx_get_resources(struct be_adapter *adapter, | |||
2963 | 2977 | ||
2964 | if (adapter->function_mode & FLEX10_MODE) | 2978 | if (adapter->function_mode & FLEX10_MODE) |
2965 | res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; | 2979 | res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; |
2980 | else if (adapter->function_mode & UMC_ENABLED) | ||
2981 | res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED; | ||
2966 | else | 2982 | else |
2967 | res->max_vlans = BE_NUM_VLANS_SUPPORTED; | 2983 | res->max_vlans = BE_NUM_VLANS_SUPPORTED; |
2968 | res->max_mcast_mac = BE_MAX_MC; | 2984 | res->max_mcast_mac = BE_MAX_MC; |
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 098f133908ae..e006a09ba899 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c | |||
@@ -452,7 +452,9 @@ static int gianfar_ptp_probe(struct platform_device *dev) | |||
452 | err = -ENODEV; | 452 | err = -ENODEV; |
453 | 453 | ||
454 | etsects->caps = ptp_gianfar_caps; | 454 | etsects->caps = ptp_gianfar_caps; |
455 | etsects->cksel = DEFAULT_CKSEL; | 455 | |
456 | if (get_of_u32(node, "fsl,cksel", &etsects->cksel)) | ||
457 | etsects->cksel = DEFAULT_CKSEL; | ||
456 | 458 | ||
457 | if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) || | 459 | if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) || |
458 | get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) || | 460 | get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) || |
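The gianfar_ptp change reads the new optional "fsl,cksel" property and only falls back to DEFAULT_CKSEL when it is absent, matching the binding text added at the top of this series. A tiny sketch of that optional-property-with-default pattern follows; read_u32_prop() is a stand-in for the driver's get_of_u32() helper, not a real device-tree API.

#include <stdio.h>
#include <stdint.h>

#define DEFAULT_CKSEL 1  /* eTSEC system clock */

/* Stand-in for get_of_u32(): returns 0 and fills *val when the property
 * exists, non-zero when it is missing. Here we pretend it is missing. */
static int read_u32_prop(const char *name, uint32_t *val)
{
	(void)name;
	(void)val;
	return -1;
}

int main(void)
{
	uint32_t cksel;

	if (read_u32_prop("fsl,cksel", &cksel))
		cksel = DEFAULT_CKSEL;  /* property absent: keep the old default */

	printf("timer reference clock select: %u\n", cksel);
	return 0;
}
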
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 0c524fa9f811..cfef7fc32cdd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c | |||
@@ -701,8 +701,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, | |||
701 | 701 | ||
702 | details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); | 702 | details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); |
703 | if (cmd_details) { | 703 | if (cmd_details) { |
704 | memcpy(details, cmd_details, | 704 | *details = *cmd_details; |
705 | sizeof(struct i40e_asq_cmd_details)); | ||
706 | 705 | ||
707 | /* If the cmd_details are defined copy the cookie. The | 706 | /* If the cmd_details are defined copy the cookie. The |
708 | * cpu_to_le32 is not needed here because the data is ignored | 707 | * cpu_to_le32 is not needed here because the data is ignored |
@@ -760,7 +759,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, | |||
760 | desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); | 759 | desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); |
761 | 760 | ||
762 | /* if the desc is available copy the temp desc to the right place */ | 761 | /* if the desc is available copy the temp desc to the right place */ |
763 | memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc)); | 762 | *desc_on_ring = *desc; |
764 | 763 | ||
765 | /* if buff is not NULL assume indirect command */ | 764 | /* if buff is not NULL assume indirect command */ |
766 | if (buff != NULL) { | 765 | if (buff != NULL) { |
@@ -807,7 +806,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, | |||
807 | 806 | ||
808 | /* if ready, copy the desc back to temp */ | 807 | /* if ready, copy the desc back to temp */ |
809 | if (i40e_asq_done(hw)) { | 808 | if (i40e_asq_done(hw)) { |
810 | memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc)); | 809 | *desc = *desc_on_ring; |
811 | if (buff != NULL) | 810 | if (buff != NULL) |
812 | memcpy(buff, dma_buff->va, buff_size); | 811 | memcpy(buff, dma_buff->va, buff_size); |
813 | retval = le16_to_cpu(desc->retval); | 812 | retval = le16_to_cpu(desc->retval); |
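The i40e_adminq.c (and i40e_common.c) changes replace memcpy() calls between objects of the same type with plain structure assignment; the bytes copied are the same, but the compiler now type-checks the operands and picks the copy itself. A small illustration, using a made-up descriptor layout rather than the real struct i40e_aq_desc:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct aq_desc {            /* illustrative only, not the real i40e layout */
	uint16_t opcode;
	uint16_t retval;
	uint32_t cookie;
};

int main(void)
{
	struct aq_desc tmpl = { .opcode = 0x0701, .retval = 0, .cookie = 42 };
	struct aq_desc on_ring;

	/* Equivalent to memcpy(&on_ring, &tmpl, sizeof(tmpl)), but a
	 * mismatched type or size becomes a compile error instead of a
	 * silent overrun. */
	on_ring = tmpl;

	printf("opcode=0x%04x cookie=%u, same bytes: %d\n",
	       on_ring.opcode, on_ring.cookie,
	       memcmp(&on_ring, &tmpl, sizeof(tmpl)) == 0);
	return 0;
}
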
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index c21df7bc3b1d..1e4ea134975a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c | |||
@@ -507,7 +507,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, | |||
507 | 507 | ||
508 | /* save link status information */ | 508 | /* save link status information */ |
509 | if (link) | 509 | if (link) |
510 | memcpy(link, hw_link_info, sizeof(struct i40e_link_status)); | 510 | *link = *hw_link_info; |
511 | 511 | ||
512 | /* flag cleared so helper functions don't call AQ again */ | 512 | /* flag cleared so helper functions don't call AQ again */ |
513 | hw->phy.get_link_info = false; | 513 | hw->phy.get_link_info = false; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 601d482694ea..221aa4795017 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -101,10 +101,10 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, | |||
101 | mem->size = ALIGN(size, alignment); | 101 | mem->size = ALIGN(size, alignment); |
102 | mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, | 102 | mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, |
103 | &mem->pa, GFP_KERNEL); | 103 | &mem->pa, GFP_KERNEL); |
104 | if (mem->va) | 104 | if (!mem->va) |
105 | return 0; | 105 | return -ENOMEM; |
106 | 106 | ||
107 | return -ENOMEM; | 107 | return 0; |
108 | } | 108 | } |
109 | 109 | ||
110 | /** | 110 | /** |
@@ -136,10 +136,10 @@ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem, | |||
136 | mem->size = size; | 136 | mem->size = size; |
137 | mem->va = kzalloc(size, GFP_KERNEL); | 137 | mem->va = kzalloc(size, GFP_KERNEL); |
138 | 138 | ||
139 | if (mem->va) | 139 | if (!mem->va) |
140 | return 0; | 140 | return -ENOMEM; |
141 | 141 | ||
142 | return -ENOMEM; | 142 | return 0; |
143 | } | 143 | } |
144 | 144 | ||
145 | /** | 145 | /** |
@@ -174,8 +174,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, | |||
174 | u16 needed, u16 id) | 174 | u16 needed, u16 id) |
175 | { | 175 | { |
176 | int ret = -ENOMEM; | 176 | int ret = -ENOMEM; |
177 | int i = 0; | 177 | int i, j; |
178 | int j = 0; | ||
179 | 178 | ||
180 | if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { | 179 | if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { |
181 | dev_info(&pf->pdev->dev, | 180 | dev_info(&pf->pdev->dev, |
@@ -186,7 +185,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, | |||
186 | 185 | ||
187 | /* start the linear search with an imperfect hint */ | 186 | /* start the linear search with an imperfect hint */ |
188 | i = pile->search_hint; | 187 | i = pile->search_hint; |
189 | while (i < pile->num_entries && ret < 0) { | 188 | while (i < pile->num_entries) { |
190 | /* skip already allocated entries */ | 189 | /* skip already allocated entries */ |
191 | if (pile->list[i] & I40E_PILE_VALID_BIT) { | 190 | if (pile->list[i] & I40E_PILE_VALID_BIT) { |
192 | i++; | 191 | i++; |
@@ -205,6 +204,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, | |||
205 | pile->list[i+j] = id | I40E_PILE_VALID_BIT; | 204 | pile->list[i+j] = id | I40E_PILE_VALID_BIT; |
206 | ret = i; | 205 | ret = i; |
207 | pile->search_hint = i + j; | 206 | pile->search_hint = i + j; |
207 | break; | ||
208 | } else { | 208 | } else { |
209 | /* not enough, so skip over it and continue looking */ | 209 | /* not enough, so skip over it and continue looking */ |
210 | i += j; | 210 | i += j; |
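The reworked i40e_get_lump() keeps the same first-fit scan over the resource pile but breaks out of the loop as soon as a large-enough run has been claimed, which is also what lets the redundant initialisers of i and j go away. The sketch below reproduces that search over a plain array with a search hint; the list contents and the VALID bit value are illustrative only.

#include <stdio.h>

#define PILE_VALID_BIT 0x8000  /* entry already allocated (illustrative value) */

/* First-fit search for `needed` consecutive free entries, starting at *hint.
 * Returns the start index and marks the entries, or -1 if nothing fits. */
static int get_lump(unsigned short *list, int num_entries, int *hint,
		    int needed, unsigned short id)
{
	int i = *hint;

	while (i < num_entries) {
		int j;

		if (list[i] & PILE_VALID_BIT) {  /* skip allocated entries */
			i++;
			continue;
		}

		/* count how many free entries follow */
		for (j = 0; (i + j) < num_entries && j < needed; j++)
			if (list[i + j] & PILE_VALID_BIT)
				break;

		if (j == needed) {               /* big enough: claim it and stop */
			for (j = 0; j < needed; j++)
				list[i + j] = id | PILE_VALID_BIT;
			*hint = i + needed;
			return i;
		}
		i += j;                          /* j >= 1 here: list[i] itself was free */
	}
	return -1;
}

int main(void)
{
	unsigned short pile[8] = { PILE_VALID_BIT, 0, 0, PILE_VALID_BIT, 0, 0, 0, 0 };
	int hint = 0;

	printf("lump of 3 starts at index %d\n", get_lump(pile, 8, &hint, 3, 7));
	return 0;
}
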
@@ -1388,7 +1388,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) | |||
1388 | bool add_happened = false; | 1388 | bool add_happened = false; |
1389 | int filter_list_len = 0; | 1389 | int filter_list_len = 0; |
1390 | u32 changed_flags = 0; | 1390 | u32 changed_flags = 0; |
1391 | i40e_status ret = 0; | 1391 | i40e_status aq_ret = 0; |
1392 | struct i40e_pf *pf; | 1392 | struct i40e_pf *pf; |
1393 | int num_add = 0; | 1393 | int num_add = 0; |
1394 | int num_del = 0; | 1394 | int num_del = 0; |
@@ -1449,28 +1449,28 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) | |||
1449 | 1449 | ||
1450 | /* flush a full buffer */ | 1450 | /* flush a full buffer */ |
1451 | if (num_del == filter_list_len) { | 1451 | if (num_del == filter_list_len) { |
1452 | ret = i40e_aq_remove_macvlan(&pf->hw, | 1452 | aq_ret = i40e_aq_remove_macvlan(&pf->hw, |
1453 | vsi->seid, del_list, num_del, | 1453 | vsi->seid, del_list, num_del, |
1454 | NULL); | 1454 | NULL); |
1455 | num_del = 0; | 1455 | num_del = 0; |
1456 | memset(del_list, 0, sizeof(*del_list)); | 1456 | memset(del_list, 0, sizeof(*del_list)); |
1457 | 1457 | ||
1458 | if (ret) | 1458 | if (aq_ret) |
1459 | dev_info(&pf->pdev->dev, | 1459 | dev_info(&pf->pdev->dev, |
1460 | "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", | 1460 | "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", |
1461 | ret, | 1461 | aq_ret, |
1462 | pf->hw.aq.asq_last_status); | 1462 | pf->hw.aq.asq_last_status); |
1463 | } | 1463 | } |
1464 | } | 1464 | } |
1465 | if (num_del) { | 1465 | if (num_del) { |
1466 | ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, | 1466 | aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, |
1467 | del_list, num_del, NULL); | 1467 | del_list, num_del, NULL); |
1468 | num_del = 0; | 1468 | num_del = 0; |
1469 | 1469 | ||
1470 | if (ret) | 1470 | if (aq_ret) |
1471 | dev_info(&pf->pdev->dev, | 1471 | dev_info(&pf->pdev->dev, |
1472 | "ignoring delete macvlan error, err %d, aq_err %d\n", | 1472 | "ignoring delete macvlan error, err %d, aq_err %d\n", |
1473 | ret, pf->hw.aq.asq_last_status); | 1473 | aq_ret, pf->hw.aq.asq_last_status); |
1474 | } | 1474 | } |
1475 | 1475 | ||
1476 | kfree(del_list); | 1476 | kfree(del_list); |
@@ -1515,32 +1515,30 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) | |||
1515 | 1515 | ||
1516 | /* flush a full buffer */ | 1516 | /* flush a full buffer */ |
1517 | if (num_add == filter_list_len) { | 1517 | if (num_add == filter_list_len) { |
1518 | ret = i40e_aq_add_macvlan(&pf->hw, | 1518 | aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, |
1519 | vsi->seid, | 1519 | add_list, num_add, |
1520 | add_list, | 1520 | NULL); |
1521 | num_add, | ||
1522 | NULL); | ||
1523 | num_add = 0; | 1521 | num_add = 0; |
1524 | 1522 | ||
1525 | if (ret) | 1523 | if (aq_ret) |
1526 | break; | 1524 | break; |
1527 | memset(add_list, 0, sizeof(*add_list)); | 1525 | memset(add_list, 0, sizeof(*add_list)); |
1528 | } | 1526 | } |
1529 | } | 1527 | } |
1530 | if (num_add) { | 1528 | if (num_add) { |
1531 | ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, | 1529 | aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, |
1532 | add_list, num_add, NULL); | 1530 | add_list, num_add, NULL); |
1533 | num_add = 0; | 1531 | num_add = 0; |
1534 | } | 1532 | } |
1535 | kfree(add_list); | 1533 | kfree(add_list); |
1536 | add_list = NULL; | 1534 | add_list = NULL; |
1537 | 1535 | ||
1538 | if (add_happened && (!ret)) { | 1536 | if (add_happened && (!aq_ret)) { |
1539 | /* do nothing */; | 1537 | /* do nothing */; |
1540 | } else if (add_happened && (ret)) { | 1538 | } else if (add_happened && (aq_ret)) { |
1541 | dev_info(&pf->pdev->dev, | 1539 | dev_info(&pf->pdev->dev, |
1542 | "add filter failed, err %d, aq_err %d\n", | 1540 | "add filter failed, err %d, aq_err %d\n", |
1543 | ret, pf->hw.aq.asq_last_status); | 1541 | aq_ret, pf->hw.aq.asq_last_status); |
1544 | if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && | 1542 | if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && |
1545 | !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, | 1543 | !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, |
1546 | &vsi->state)) { | 1544 | &vsi->state)) { |
@@ -1556,28 +1554,27 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) | |||
1556 | if (changed_flags & IFF_ALLMULTI) { | 1554 | if (changed_flags & IFF_ALLMULTI) { |
1557 | bool cur_multipromisc; | 1555 | bool cur_multipromisc; |
1558 | cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); | 1556 | cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); |
1559 | ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, | 1557 | aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, |
1560 | vsi->seid, | 1558 | vsi->seid, |
1561 | cur_multipromisc, | 1559 | cur_multipromisc, |
1562 | NULL); | 1560 | NULL); |
1563 | if (ret) | 1561 | if (aq_ret) |
1564 | dev_info(&pf->pdev->dev, | 1562 | dev_info(&pf->pdev->dev, |
1565 | "set multi promisc failed, err %d, aq_err %d\n", | 1563 | "set multi promisc failed, err %d, aq_err %d\n", |
1566 | ret, pf->hw.aq.asq_last_status); | 1564 | aq_ret, pf->hw.aq.asq_last_status); |
1567 | } | 1565 | } |
1568 | if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { | 1566 | if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { |
1569 | bool cur_promisc; | 1567 | bool cur_promisc; |
1570 | cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || | 1568 | cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || |
1571 | test_bit(__I40E_FILTER_OVERFLOW_PROMISC, | 1569 | test_bit(__I40E_FILTER_OVERFLOW_PROMISC, |
1572 | &vsi->state)); | 1570 | &vsi->state)); |
1573 | ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, | 1571 | aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, |
1574 | vsi->seid, | 1572 | vsi->seid, |
1575 | cur_promisc, | 1573 | cur_promisc, NULL); |
1576 | NULL); | 1574 | if (aq_ret) |
1577 | if (ret) | ||
1578 | dev_info(&pf->pdev->dev, | 1575 | dev_info(&pf->pdev->dev, |
1579 | "set uni promisc failed, err %d, aq_err %d\n", | 1576 | "set uni promisc failed, err %d, aq_err %d\n", |
1580 | ret, pf->hw.aq.asq_last_status); | 1577 | aq_ret, pf->hw.aq.asq_last_status); |
1581 | } | 1578 | } |
1582 | 1579 | ||
1583 | clear_bit(__I40E_CONFIG_BUSY, &vsi->state); | 1580 | clear_bit(__I40E_CONFIG_BUSY, &vsi->state); |
@@ -1790,6 +1787,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) | |||
1790 | * i40e_vsi_kill_vlan - Remove vsi membership for given vlan | 1787 | * i40e_vsi_kill_vlan - Remove vsi membership for given vlan |
1791 | * @vsi: the vsi being configured | 1788 | * @vsi: the vsi being configured |
1792 | * @vid: vlan id to be removed (0 = untagged only , -1 = any) | 1789 | * @vid: vlan id to be removed (0 = untagged only , -1 = any) |
1790 | * | ||
1791 | * Return: 0 on success or negative otherwise | ||
1793 | **/ | 1792 | **/ |
1794 | int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) | 1793 | int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) |
1795 | { | 1794 | { |
@@ -1863,37 +1862,39 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) | |||
1863 | * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload | 1862 | * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload |
1864 | * @netdev: network interface to be adjusted | 1863 | * @netdev: network interface to be adjusted |
1865 | * @vid: vlan id to be added | 1864 | * @vid: vlan id to be added |
1865 | * | ||
1866 | * net_device_ops implementation for adding vlan ids | ||
1866 | **/ | 1867 | **/ |
1867 | static int i40e_vlan_rx_add_vid(struct net_device *netdev, | 1868 | static int i40e_vlan_rx_add_vid(struct net_device *netdev, |
1868 | __always_unused __be16 proto, u16 vid) | 1869 | __always_unused __be16 proto, u16 vid) |
1869 | { | 1870 | { |
1870 | struct i40e_netdev_priv *np = netdev_priv(netdev); | 1871 | struct i40e_netdev_priv *np = netdev_priv(netdev); |
1871 | struct i40e_vsi *vsi = np->vsi; | 1872 | struct i40e_vsi *vsi = np->vsi; |
1872 | int ret; | 1873 | int ret = 0; |
1873 | 1874 | ||
1874 | if (vid > 4095) | 1875 | if (vid > 4095) |
1875 | return 0; | 1876 | return -EINVAL; |
1877 | |||
1878 | netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); | ||
1876 | 1879 | ||
1877 | netdev_info(vsi->netdev, "adding %pM vid=%d\n", | ||
1878 | netdev->dev_addr, vid); | ||
1879 | /* If the network stack called us with vid = 0, we should | 1880 | /* If the network stack called us with vid = 0, we should |
1880 | * indicate to i40e_vsi_add_vlan() that we want to receive | 1881 | * indicate to i40e_vsi_add_vlan() that we want to receive |
1881 | * any traffic (i.e. with any vlan tag, or untagged) | 1882 | * any traffic (i.e. with any vlan tag, or untagged) |
1882 | */ | 1883 | */ |
1883 | ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY); | 1884 | ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY); |
1884 | 1885 | ||
1885 | if (!ret) { | 1886 | if (!ret && (vid < VLAN_N_VID)) |
1886 | if (vid < VLAN_N_VID) | 1887 | set_bit(vid, vsi->active_vlans); |
1887 | set_bit(vid, vsi->active_vlans); | ||
1888 | } | ||
1889 | 1888 | ||
1890 | return 0; | 1889 | return ret; |
1891 | } | 1890 | } |
1892 | 1891 | ||
1893 | /** | 1892 | /** |
1894 | * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload | 1893 | * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload |
1895 | * @netdev: network interface to be adjusted | 1894 | * @netdev: network interface to be adjusted |
1896 | * @vid: vlan id to be removed | 1895 | * @vid: vlan id to be removed |
1896 | * | ||
1897 | * net_device_ops implementation for adding vlan ids | ||
1897 | **/ | 1898 | **/ |
1898 | static int i40e_vlan_rx_kill_vid(struct net_device *netdev, | 1899 | static int i40e_vlan_rx_kill_vid(struct net_device *netdev, |
1899 | __always_unused __be16 proto, u16 vid) | 1900 | __always_unused __be16 proto, u16 vid) |
@@ -1901,15 +1902,16 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev, | |||
1901 | struct i40e_netdev_priv *np = netdev_priv(netdev); | 1902 | struct i40e_netdev_priv *np = netdev_priv(netdev); |
1902 | struct i40e_vsi *vsi = np->vsi; | 1903 | struct i40e_vsi *vsi = np->vsi; |
1903 | 1904 | ||
1904 | netdev_info(vsi->netdev, "removing %pM vid=%d\n", | 1905 | netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); |
1905 | netdev->dev_addr, vid); | 1906 | |
1906 | /* return code is ignored as there is nothing a user | 1907 | /* return code is ignored as there is nothing a user |
1907 | * can do about failure to remove and a log message was | 1908 | * can do about failure to remove and a log message was |
1908 | * already printed from another function | 1909 | * already printed from the other function |
1909 | */ | 1910 | */ |
1910 | i40e_vsi_kill_vlan(vsi, vid); | 1911 | i40e_vsi_kill_vlan(vsi, vid); |
1911 | 1912 | ||
1912 | clear_bit(vid, vsi->active_vlans); | 1913 | clear_bit(vid, vsi->active_vlans); |
1914 | |||
1913 | return 0; | 1915 | return 0; |
1914 | } | 1916 | } |
1915 | 1917 | ||
@@ -1936,10 +1938,10 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi) | |||
1936 | * @vsi: the vsi being adjusted | 1938 | * @vsi: the vsi being adjusted |
1937 | * @vid: the vlan id to set as a PVID | 1939 | * @vid: the vlan id to set as a PVID |
1938 | **/ | 1940 | **/ |
1939 | i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) | 1941 | int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) |
1940 | { | 1942 | { |
1941 | struct i40e_vsi_context ctxt; | 1943 | struct i40e_vsi_context ctxt; |
1942 | i40e_status ret; | 1944 | i40e_status aq_ret; |
1943 | 1945 | ||
1944 | vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); | 1946 | vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); |
1945 | vsi->info.pvid = cpu_to_le16(vid); | 1947 | vsi->info.pvid = cpu_to_le16(vid); |
@@ -1948,14 +1950,15 @@ i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) | |||
1948 | 1950 | ||
1949 | ctxt.seid = vsi->seid; | 1951 | ctxt.seid = vsi->seid; |
1950 | memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); | 1952 | memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); |
1951 | ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); | 1953 | aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); |
1952 | if (ret) { | 1954 | if (aq_ret) { |
1953 | dev_info(&vsi->back->pdev->dev, | 1955 | dev_info(&vsi->back->pdev->dev, |
1954 | "%s: update vsi failed, aq_err=%d\n", | 1956 | "%s: update vsi failed, aq_err=%d\n", |
1955 | __func__, vsi->back->hw.aq.asq_last_status); | 1957 | __func__, vsi->back->hw.aq.asq_last_status); |
1958 | return -ENOENT; | ||
1956 | } | 1959 | } |
1957 | 1960 | ||
1958 | return ret; | 1961 | return 0; |
1959 | } | 1962 | } |
1960 | 1963 | ||
1961 | /** | 1964 | /** |
@@ -3326,7 +3329,8 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) | |||
3326 | **/ | 3329 | **/ |
3327 | static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) | 3330 | static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) |
3328 | { | 3331 | { |
3329 | int num_tc = 0, i; | 3332 | u8 num_tc = 0; |
3333 | int i; | ||
3330 | 3334 | ||
3331 | /* Scan the ETS Config Priority Table to find | 3335 | /* Scan the ETS Config Priority Table to find |
3332 | * traffic class enabled for a given priority | 3336 | * traffic class enabled for a given priority |
@@ -3341,9 +3345,7 @@ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) | |||
3341 | /* Traffic class index starts from zero so | 3345 | /* Traffic class index starts from zero so |
3342 | * increment to return the actual count | 3346 | * increment to return the actual count |
3343 | */ | 3347 | */ |
3344 | num_tc++; | 3348 | return num_tc + 1; |
3345 | |||
3346 | return num_tc; | ||
3347 | } | 3349 | } |
3348 | 3350 | ||
3349 | /** | 3351 | /** |
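i40e_dcb_get_num_tc() now returns the count directly as the highest traffic class found in the ETS priority table plus one, with a u8 return type instead of a signed local that fell through. A compact sketch of the same calculation; the table values are made up.

#include <stdio.h>
#include <stdint.h>

#define MAX_USER_PRIORITY 8

/* prio_table[p] holds the traffic class assigned to priority p. The number
 * of traffic classes in use is the highest TC seen, plus one. */
static uint8_t dcb_get_num_tc(const uint8_t *prio_table)
{
	uint8_t num_tc = 0;
	int i;

	for (i = 0; i < MAX_USER_PRIORITY; i++)
		if (prio_table[i] > num_tc)
			num_tc = prio_table[i];

	return num_tc + 1;  /* TC indices start at zero */
}

int main(void)
{
	uint8_t prio_table[MAX_USER_PRIORITY] = { 0, 0, 1, 1, 2, 2, 0, 0 };

	printf("traffic classes in use: %u\n", (unsigned)dcb_get_num_tc(prio_table));
	return 0;
}
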
@@ -3451,28 +3453,27 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) | |||
3451 | struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; | 3453 | struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; |
3452 | struct i40e_pf *pf = vsi->back; | 3454 | struct i40e_pf *pf = vsi->back; |
3453 | struct i40e_hw *hw = &pf->hw; | 3455 | struct i40e_hw *hw = &pf->hw; |
3456 | i40e_status aq_ret; | ||
3454 | u32 tc_bw_max; | 3457 | u32 tc_bw_max; |
3455 | int ret; | ||
3456 | int i; | 3458 | int i; |
3457 | 3459 | ||
3458 | /* Get the VSI level BW configuration */ | 3460 | /* Get the VSI level BW configuration */ |
3459 | ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); | 3461 | aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); |
3460 | if (ret) { | 3462 | if (aq_ret) { |
3461 | dev_info(&pf->pdev->dev, | 3463 | dev_info(&pf->pdev->dev, |
3462 | "couldn't get pf vsi bw config, err %d, aq_err %d\n", | 3464 | "couldn't get pf vsi bw config, err %d, aq_err %d\n", |
3463 | ret, pf->hw.aq.asq_last_status); | 3465 | aq_ret, pf->hw.aq.asq_last_status); |
3464 | return ret; | 3466 | return -EINVAL; |
3465 | } | 3467 | } |
3466 | 3468 | ||
3467 | /* Get the VSI level BW configuration per TC */ | 3469 | /* Get the VSI level BW configuration per TC */ |
3468 | ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, | 3470 | aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, |
3469 | &bw_ets_config, | 3471 | NULL); |
3470 | NULL); | 3472 | if (aq_ret) { |
3471 | if (ret) { | ||
3472 | dev_info(&pf->pdev->dev, | 3473 | dev_info(&pf->pdev->dev, |
3473 | "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", | 3474 | "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", |
3474 | ret, pf->hw.aq.asq_last_status); | 3475 | aq_ret, pf->hw.aq.asq_last_status); |
3475 | return ret; | 3476 | return -EINVAL; |
3476 | } | 3477 | } |
3477 | 3478 | ||
3478 | if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { | 3479 | if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { |
@@ -3494,7 +3495,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) | |||
3494 | /* 3 bits out of 4 for each TC */ | 3495 | /* 3 bits out of 4 for each TC */ |
3495 | vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); | 3496 | vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); |
3496 | } | 3497 | } |
3497 | return ret; | 3498 | |
3499 | return 0; | ||
3498 | } | 3500 | } |
3499 | 3501 | ||
3500 | /** | 3502 | /** |
@@ -3505,30 +3507,30 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) | |||
3505 | * | 3507 | * |
3506 | * Returns 0 on success, negative value on failure | 3508 | * Returns 0 on success, negative value on failure |
3507 | **/ | 3509 | **/ |
3508 | static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, | 3510 | static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, |
3509 | u8 enabled_tc, | ||
3510 | u8 *bw_share) | 3511 | u8 *bw_share) |
3511 | { | 3512 | { |
3512 | struct i40e_aqc_configure_vsi_tc_bw_data bw_data; | 3513 | struct i40e_aqc_configure_vsi_tc_bw_data bw_data; |
3513 | int i, ret = 0; | 3514 | i40e_status aq_ret; |
3515 | int i; | ||
3514 | 3516 | ||
3515 | bw_data.tc_valid_bits = enabled_tc; | 3517 | bw_data.tc_valid_bits = enabled_tc; |
3516 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) | 3518 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) |
3517 | bw_data.tc_bw_credits[i] = bw_share[i]; | 3519 | bw_data.tc_bw_credits[i] = bw_share[i]; |
3518 | 3520 | ||
3519 | ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, | 3521 | aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, |
3520 | &bw_data, NULL); | 3522 | NULL); |
3521 | if (ret) { | 3523 | if (aq_ret) { |
3522 | dev_info(&vsi->back->pdev->dev, | 3524 | dev_info(&vsi->back->pdev->dev, |
3523 | "%s: AQ command Config VSI BW allocation per TC failed = %d\n", | 3525 | "%s: AQ command Config VSI BW allocation per TC failed = %d\n", |
3524 | __func__, vsi->back->hw.aq.asq_last_status); | 3526 | __func__, vsi->back->hw.aq.asq_last_status); |
3525 | return ret; | 3527 | return -EINVAL; |
3526 | } | 3528 | } |
3527 | 3529 | ||
3528 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) | 3530 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) |
3529 | vsi->info.qs_handle[i] = bw_data.qs_handles[i]; | 3531 | vsi->info.qs_handle[i] = bw_data.qs_handles[i]; |
3530 | 3532 | ||
3531 | return ret; | 3533 | return 0; |
3532 | } | 3534 | } |
3533 | 3535 | ||
3534 | /** | 3536 | /** |
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 48cbc833b051..86d51429a189 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c | |||
@@ -1607,6 +1607,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) | |||
1607 | igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); | 1607 | igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); |
1608 | igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); | 1608 | igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); |
1609 | } | 1609 | } |
1610 | } else if (hw->phy.type == e1000_phy_82580) { | ||
1611 | /* enable MII loopback */ | ||
1612 | igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); | ||
1610 | } | 1613 | } |
1611 | 1614 | ||
1612 | /* add small delay to avoid loopback test failure */ | 1615 | /* add small delay to avoid loopback test failure */ |
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 1a9c4f6269ea..ecc7f7b696b8 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c | |||
@@ -3086,13 +3086,16 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
3086 | PCI_DMA_FROMDEVICE); | 3086 | PCI_DMA_FROMDEVICE); |
3087 | skge_rx_reuse(e, skge->rx_buf_size); | 3087 | skge_rx_reuse(e, skge->rx_buf_size); |
3088 | } else { | 3088 | } else { |
3089 | struct skge_element ee; | ||
3089 | struct sk_buff *nskb; | 3090 | struct sk_buff *nskb; |
3090 | 3091 | ||
3091 | nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); | 3092 | nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); |
3092 | if (!nskb) | 3093 | if (!nskb) |
3093 | goto resubmit; | 3094 | goto resubmit; |
3094 | 3095 | ||
3095 | skb = e->skb; | 3096 | ee = *e; |
3097 | |||
3098 | skb = ee.skb; | ||
3096 | prefetch(skb->data); | 3099 | prefetch(skb->data); |
3097 | 3100 | ||
3098 | if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { | 3101 | if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { |
@@ -3101,8 +3104,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
3101 | } | 3104 | } |
3102 | 3105 | ||
3103 | pci_unmap_single(skge->hw->pdev, | 3106 | pci_unmap_single(skge->hw->pdev, |
3104 | dma_unmap_addr(e, mapaddr), | 3107 | dma_unmap_addr(&ee, mapaddr), |
3105 | dma_unmap_len(e, maplen), | 3108 | dma_unmap_len(&ee, maplen), |
3106 | PCI_DMA_FROMDEVICE); | 3109 | PCI_DMA_FROMDEVICE); |
3107 | } | 3110 | } |
3108 | 3111 | ||
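The skge_rx_get() fix snapshots the ring element (ee = *e) before skge_rx_setup() points the slot at a fresh buffer, so the later pci_unmap_single() uses the old DMA address and length rather than values the refill may already have overwritten. A small sketch of the copy-before-recycle idea; the descriptor fields are illustrative, not the skge layout.

#include <stdio.h>
#include <stdint.h>

struct rx_desc {                /* illustrative ring-slot bookkeeping */
	uint64_t dma_addr;
	uint32_t len;
};

static void unmap_buffer(uint64_t addr, uint32_t len)
{
	printf("unmap addr=0x%llx len=%u\n", (unsigned long long)addr, len);
}

static void reuse_slot(struct rx_desc *e, uint64_t new_addr, uint32_t new_len)
{
	e->dma_addr = new_addr;     /* the slot now describes the new buffer */
	e->len = new_len;
}

int main(void)
{
	struct rx_desc slot = { .dma_addr = 0x1000, .len = 1500 };

	struct rx_desc old = slot;  /* snapshot before the slot is recycled */
	reuse_slot(&slot, 0x2000, 1500);

	/* Unmapping must use the old mapping, not the refilled slot. */
	unmap_buffer(old.dma_addr, old.len);
	return 0;
}
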
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 83c2091c9c23..bd1a2d2bc2ae 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c | |||
@@ -543,7 +543,7 @@ static const struct of_device_id moxart_mac_match[] = { | |||
543 | { } | 543 | { } |
544 | }; | 544 | }; |
545 | 545 | ||
546 | struct __initdata platform_driver moxart_mac_driver = { | 546 | static struct platform_driver moxart_mac_driver = { |
547 | .probe = moxart_mac_probe, | 547 | .probe = moxart_mac_probe, |
548 | .remove = moxart_remove, | 548 | .remove = moxart_remove, |
549 | .driver = { | 549 | .driver = { |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 4d7ad0074d1c..ebe4c86e5230 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | |||
@@ -1794,3 +1794,11 @@ const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = { | |||
1794 | .set_msglevel = qlcnic_set_msglevel, | 1794 | .set_msglevel = qlcnic_set_msglevel, |
1795 | .get_msglevel = qlcnic_get_msglevel, | 1795 | .get_msglevel = qlcnic_get_msglevel, |
1796 | }; | 1796 | }; |
1797 | |||
1798 | const struct ethtool_ops qlcnic_ethtool_failed_ops = { | ||
1799 | .get_settings = qlcnic_get_settings, | ||
1800 | .get_drvinfo = qlcnic_get_drvinfo, | ||
1801 | .set_msglevel = qlcnic_set_msglevel, | ||
1802 | .get_msglevel = qlcnic_get_msglevel, | ||
1803 | .set_dump = qlcnic_set_dump, | ||
1804 | }; | ||
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index c4c5023e1fdf..21d00a0449a1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
@@ -431,6 +431,9 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter) | |||
431 | while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) | 431 | while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) |
432 | usleep_range(10000, 11000); | 432 | usleep_range(10000, 11000); |
433 | 433 | ||
434 | if (!adapter->fw_work.work.func) | ||
435 | return; | ||
436 | |||
434 | cancel_delayed_work_sync(&adapter->fw_work); | 437 | cancel_delayed_work_sync(&adapter->fw_work); |
435 | } | 438 | } |
436 | 439 | ||
@@ -2275,8 +2278,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2275 | adapter->portnum = adapter->ahw->pci_func; | 2278 | adapter->portnum = adapter->ahw->pci_func; |
2276 | err = qlcnic_start_firmware(adapter); | 2279 | err = qlcnic_start_firmware(adapter); |
2277 | if (err) { | 2280 | if (err) { |
2278 | dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); | 2281 | dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n" |
2279 | goto err_out_free_hw; | 2282 | "\t\tIf reboot doesn't help, try flashing the card\n"); |
2283 | goto err_out_maintenance_mode; | ||
2280 | } | 2284 | } |
2281 | 2285 | ||
2282 | qlcnic_get_multiq_capability(adapter); | 2286 | qlcnic_get_multiq_capability(adapter); |
@@ -2408,6 +2412,22 @@ err_out_disable_pdev: | |||
2408 | pci_set_drvdata(pdev, NULL); | 2412 | pci_set_drvdata(pdev, NULL); |
2409 | pci_disable_device(pdev); | 2413 | pci_disable_device(pdev); |
2410 | return err; | 2414 | return err; |
2415 | |||
2416 | err_out_maintenance_mode: | ||
2417 | netdev->netdev_ops = &qlcnic_netdev_failed_ops; | ||
2418 | SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops); | ||
2419 | err = register_netdev(netdev); | ||
2420 | |||
2421 | if (err) { | ||
2422 | dev_err(&pdev->dev, "Failed to register net device\n"); | ||
2423 | qlcnic_clr_all_drv_state(adapter, 0); | ||
2424 | goto err_out_free_hw; | ||
2425 | } | ||
2426 | |||
2427 | pci_set_drvdata(pdev, adapter); | ||
2428 | qlcnic_add_sysfs(adapter); | ||
2429 | |||
2430 | return 0; | ||
2411 | } | 2431 | } |
2412 | 2432 | ||
2413 | static void qlcnic_remove(struct pci_dev *pdev) | 2433 | static void qlcnic_remove(struct pci_dev *pdev) |
@@ -2518,8 +2538,16 @@ static int qlcnic_resume(struct pci_dev *pdev) | |||
2518 | static int qlcnic_open(struct net_device *netdev) | 2538 | static int qlcnic_open(struct net_device *netdev) |
2519 | { | 2539 | { |
2520 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 2540 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
2541 | u32 state; | ||
2521 | int err; | 2542 | int err; |
2522 | 2543 | ||
2544 | state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); | ||
2545 | if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) { | ||
2546 | netdev_err(netdev, "%s: Device is in FAILED state\n", __func__); | ||
2547 | |||
2548 | return -EIO; | ||
2549 | } | ||
2550 | |||
2523 | netif_carrier_off(netdev); | 2551 | netif_carrier_off(netdev); |
2524 | 2552 | ||
2525 | err = qlcnic_attach(adapter); | 2553 | err = qlcnic_attach(adapter); |
@@ -3228,6 +3256,13 @@ void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key) | |||
3228 | return; | 3256 | return; |
3229 | 3257 | ||
3230 | state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); | 3258 | state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); |
3259 | if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) { | ||
3260 | netdev_err(adapter->netdev, "%s: Device is in FAILED state\n", | ||
3261 | __func__); | ||
3262 | qlcnic_api_unlock(adapter); | ||
3263 | |||
3264 | return; | ||
3265 | } | ||
3231 | 3266 | ||
3232 | if (state == QLCNIC_DEV_READY) { | 3267 | if (state == QLCNIC_DEV_READY) { |
3233 | QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, | 3268 | QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, |
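The qlcnic_main.c changes all follow one guard pattern: read the shared device-state register and bail out early when it reports FAILED or BADBAD, so probe drops into a minimal maintenance mode, ndo_open() returns -EIO, and the reset-request path unlocks and returns. A toy sketch of that guard; the state encoding and the register read are stand-ins, not the QLCNIC_DEV_* values.

#include <stdio.h>
#include <errno.h>

#define DEV_READY  1   /* illustrative values only */
#define DEV_FAILED 6
#define DEV_BADBAD 7

static int read_dev_state(void) { return DEV_FAILED; }  /* pretend the firmware died */

static int ndo_open(void)
{
	int state = read_dev_state();

	/* Refuse to bring the interface up while the device is unusable;
	 * diagnostic access can still work in maintenance mode. */
	if (state == DEV_FAILED || state == DEV_BADBAD) {
		fprintf(stderr, "device is in FAILED state\n");
		return -EIO;
	}
	return 0;
}

int main(void)
{
	printf("open -> %d\n", ndo_open());
	return 0;
}
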
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 330d9a8774ad..686f460b1502 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | |||
@@ -397,6 +397,7 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter) | |||
397 | { | 397 | { |
398 | struct net_device *netdev = adapter->netdev; | 398 | struct net_device *netdev = adapter->netdev; |
399 | 399 | ||
400 | rtnl_lock(); | ||
400 | if (netif_running(netdev)) | 401 | if (netif_running(netdev)) |
401 | __qlcnic_down(adapter, netdev); | 402 | __qlcnic_down(adapter, netdev); |
402 | 403 | ||
@@ -407,12 +408,15 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter) | |||
407 | /* After disabling SRIOV re-init the driver in default mode | 408 | /* After disabling SRIOV re-init the driver in default mode |
408 | configure opmode based on op_mode of function | 409 | configure opmode based on op_mode of function |
409 | */ | 410 | */ |
410 | if (qlcnic_83xx_configure_opmode(adapter)) | 411 | if (qlcnic_83xx_configure_opmode(adapter)) { |
412 | rtnl_unlock(); | ||
411 | return -EIO; | 413 | return -EIO; |
414 | } | ||
412 | 415 | ||
413 | if (netif_running(netdev)) | 416 | if (netif_running(netdev)) |
414 | __qlcnic_up(adapter, netdev); | 417 | __qlcnic_up(adapter, netdev); |
415 | 418 | ||
419 | rtnl_unlock(); | ||
416 | return 0; | 420 | return 0; |
417 | } | 421 | } |
418 | 422 | ||
@@ -533,6 +537,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs) | |||
533 | return -EIO; | 537 | return -EIO; |
534 | } | 538 | } |
535 | 539 | ||
540 | rtnl_lock(); | ||
536 | if (netif_running(netdev)) | 541 | if (netif_running(netdev)) |
537 | __qlcnic_down(adapter, netdev); | 542 | __qlcnic_down(adapter, netdev); |
538 | 543 | ||
@@ -555,6 +560,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs) | |||
555 | __qlcnic_up(adapter, netdev); | 560 | __qlcnic_up(adapter, netdev); |
556 | 561 | ||
557 | error: | 562 | error: |
563 | rtnl_unlock(); | ||
558 | return err; | 564 | return err; |
559 | } | 565 | } |
560 | 566 | ||
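The qlcnic_sriov_pf.c hunks wrap the interface down/reconfigure/up sequence in rtnl_lock()/rtnl_unlock(), and the new error return in qlcnic_pci_sriov_disable() has to drop the lock before leaving. The sketch below uses a pthread mutex in place of the rtnl lock to show the balanced-exit pattern; it is an illustration, not the driver's code.

#include <pthread.h>
#include <stdio.h>
#include <errno.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;  /* plays the role of rtnl_lock */

static int reconfigure(int fail_opmode)
{
	int err = 0;

	pthread_mutex_lock(&cfg_lock);
	printf("bring interface down\n");

	if (fail_opmode) {
		err = -EIO;   /* configuration failed */
		goto out;     /* still must release the lock */
	}

	printf("bring interface up\n");
out:
	pthread_mutex_unlock(&cfg_lock);
	return err;
}

int main(void)
{
	printf("ok path: %d\n", reconfigure(0));
	printf("error path: %d\n", reconfigure(1));
	return 0;
}
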
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index c6165d05cc13..019f4377307f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | |||
@@ -1272,6 +1272,7 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter) | |||
1272 | void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) | 1272 | void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) |
1273 | { | 1273 | { |
1274 | struct device *dev = &adapter->pdev->dev; | 1274 | struct device *dev = &adapter->pdev->dev; |
1275 | u32 state; | ||
1275 | 1276 | ||
1276 | if (device_create_bin_file(dev, &bin_attr_port_stats)) | 1277 | if (device_create_bin_file(dev, &bin_attr_port_stats)) |
1277 | dev_info(dev, "failed to create port stats sysfs entry"); | 1278 | dev_info(dev, "failed to create port stats sysfs entry"); |
@@ -1285,8 +1286,13 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) | |||
1285 | if (device_create_bin_file(dev, &bin_attr_mem)) | 1286 | if (device_create_bin_file(dev, &bin_attr_mem)) |
1286 | dev_info(dev, "failed to create mem sysfs entry\n"); | 1287 | dev_info(dev, "failed to create mem sysfs entry\n"); |
1287 | 1288 | ||
1289 | state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); | ||
1290 | if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) | ||
1291 | return; | ||
1292 | |||
1288 | if (device_create_bin_file(dev, &bin_attr_pci_config)) | 1293 | if (device_create_bin_file(dev, &bin_attr_pci_config)) |
1289 | dev_info(dev, "failed to create pci config sysfs entry"); | 1294 | dev_info(dev, "failed to create pci config sysfs entry"); |
1295 | |||
1290 | if (device_create_file(dev, &dev_attr_beacon)) | 1296 | if (device_create_file(dev, &dev_attr_beacon)) |
1291 | dev_info(dev, "failed to create beacon sysfs entry"); | 1297 | dev_info(dev, "failed to create beacon sysfs entry"); |
1292 | 1298 | ||
@@ -1307,6 +1313,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) | |||
1307 | void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) | 1313 | void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) |
1308 | { | 1314 | { |
1309 | struct device *dev = &adapter->pdev->dev; | 1315 | struct device *dev = &adapter->pdev->dev; |
1316 | u32 state; | ||
1310 | 1317 | ||
1311 | device_remove_bin_file(dev, &bin_attr_port_stats); | 1318 | device_remove_bin_file(dev, &bin_attr_port_stats); |
1312 | 1319 | ||
@@ -1315,6 +1322,11 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) | |||
1315 | device_remove_file(dev, &dev_attr_diag_mode); | 1322 | device_remove_file(dev, &dev_attr_diag_mode); |
1316 | device_remove_bin_file(dev, &bin_attr_crb); | 1323 | device_remove_bin_file(dev, &bin_attr_crb); |
1317 | device_remove_bin_file(dev, &bin_attr_mem); | 1324 | device_remove_bin_file(dev, &bin_attr_mem); |
1325 | |||
1326 | state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); | ||
1327 | if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) | ||
1328 | return; | ||
1329 | |||
1318 | device_remove_bin_file(dev, &bin_attr_pci_config); | 1330 | device_remove_bin_file(dev, &bin_attr_pci_config); |
1319 | device_remove_file(dev, &dev_attr_beacon); | 1331 | device_remove_file(dev, &dev_attr_beacon); |
1320 | if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) | 1332 | if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c index 10093f0c4c0f..6bc5db703920 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | |||
@@ -740,8 +740,8 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) | |||
740 | int i; | 740 | int i; |
741 | 741 | ||
742 | if (!mpi_coredump) { | 742 | if (!mpi_coredump) { |
743 | netif_err(qdev, drv, qdev->ndev, "No memory available\n"); | 743 | netif_err(qdev, drv, qdev->ndev, "No memory allocated\n"); |
744 | return -ENOMEM; | 744 | return -EINVAL; |
745 | } | 745 | } |
746 | 746 | ||
747 | /* Try to get the spinlock, but dont worry if | 747 | /* Try to get the spinlock, but dont worry if |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c index ff2bf8a4e247..7ad146080c36 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c | |||
@@ -1274,7 +1274,7 @@ void ql_mpi_reset_work(struct work_struct *work) | |||
1274 | return; | 1274 | return; |
1275 | } | 1275 | } |
1276 | 1276 | ||
1277 | if (!ql_core_dump(qdev, qdev->mpi_coredump)) { | 1277 | if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) { |
1278 | netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); | 1278 | netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); |
1279 | qdev->core_is_dumped = 1; | 1279 | qdev->core_is_dumped = 1; |
1280 | queue_delayed_work(qdev->workqueue, | 1280 | queue_delayed_work(qdev->workqueue, |
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 128d7cdf9eb2..c082562dbf4e 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c | |||
@@ -27,10 +27,10 @@ | |||
27 | 27 | ||
28 | /* A reboot/assertion causes the MCDI status word to be set after the | 28 | /* A reboot/assertion causes the MCDI status word to be set after the |
29 | * command word is set or a REBOOT event is sent. If we notice a reboot | 29 | * command word is set or a REBOOT event is sent. If we notice a reboot |
30 | * via these mechanisms then wait 20ms for the status word to be set. | 30 | * via these mechanisms then wait 250ms for the status word to be set. |
31 | */ | 31 | */ |
32 | #define MCDI_STATUS_DELAY_US 100 | 32 | #define MCDI_STATUS_DELAY_US 100 |
33 | #define MCDI_STATUS_DELAY_COUNT 200 | 33 | #define MCDI_STATUS_DELAY_COUNT 2500 |
34 | #define MCDI_STATUS_SLEEP_MS \ | 34 | #define MCDI_STATUS_SLEEP_MS \ |
35 | (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) | 35 | (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) |
36 | 36 | ||
@@ -800,9 +800,6 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) | |||
800 | } else { | 800 | } else { |
801 | int count; | 801 | int count; |
802 | 802 | ||
803 | /* Nobody was waiting for an MCDI request, so trigger a reset */ | ||
804 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | ||
805 | |||
806 | /* Consume the status word since efx_mcdi_rpc_finish() won't */ | 803 | /* Consume the status word since efx_mcdi_rpc_finish() won't */ |
807 | for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { | 804 | for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { |
808 | if (efx_mcdi_poll_reboot(efx)) | 805 | if (efx_mcdi_poll_reboot(efx)) |
@@ -810,6 +807,9 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) | |||
810 | udelay(MCDI_STATUS_DELAY_US); | 807 | udelay(MCDI_STATUS_DELAY_US); |
811 | } | 808 | } |
812 | mcdi->new_epoch = true; | 809 | mcdi->new_epoch = true; |
810 | |||
811 | /* Nobody was waiting for an MCDI request, so trigger a reset */ | ||
812 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | ||
813 | } | 813 | } |
814 | 814 | ||
815 | spin_unlock(&mcdi->iface_lock); | 815 | spin_unlock(&mcdi->iface_lock); |
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index c8f088ab5fdf..bdf697b184ae 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 32 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
33 | 33 | ||
34 | #define DRV_NAME "via-rhine" | 34 | #define DRV_NAME "via-rhine" |
35 | #define DRV_VERSION "1.5.0" | 35 | #define DRV_VERSION "1.5.1" |
36 | #define DRV_RELDATE "2010-10-09" | 36 | #define DRV_RELDATE "2010-10-09" |
37 | 37 | ||
38 | #include <linux/types.h> | 38 | #include <linux/types.h> |
@@ -1704,7 +1704,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, | |||
1704 | cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); | 1704 | cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); |
1705 | 1705 | ||
1706 | if (unlikely(vlan_tx_tag_present(skb))) { | 1706 | if (unlikely(vlan_tx_tag_present(skb))) { |
1707 | rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16); | 1707 | u16 vid_pcp = vlan_tx_tag_get(skb); |
1708 | |||
1709 | /* drop CFI/DEI bit, register needs VID and PCP */ | ||
1710 | vid_pcp = (vid_pcp & VLAN_VID_MASK) | | ||
1711 | ((vid_pcp & VLAN_PRIO_MASK) >> 1); | ||
1712 | rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16); | ||
1708 | /* request tagging */ | 1713 | /* request tagging */ |
1709 | rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); | 1714 | rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); |
1710 | } | 1715 | } |
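The via-rhine change converts the 16-bit TCI from vlan_tx_tag_get() (PCP, CFI/DEI, VID) into the descriptor format the chip expects, VID in bits 0-11 with PCP directly above it, by dropping the CFI bit and shifting the priority field down by one. A sketch of that bit manipulation with the standard masks from linux/if_vlan.h (VLAN_VID_MASK 0x0fff, VLAN_PRIO_MASK 0xe000):

#include <stdio.h>
#include <stdint.h>

#define VLAN_VID_MASK  0x0fff  /* bits 0-11  */
#define VLAN_PRIO_MASK 0xe000  /* bits 13-15 */

/* A TCI packs PCP (3 bits), CFI/DEI (1 bit) and VID (12 bits); the register
 * wants VID with PCP immediately above it, i.e. the CFI bit removed. */
static uint16_t tci_to_reg(uint16_t tci)
{
	return (uint16_t)((tci & VLAN_VID_MASK) | ((tci & VLAN_PRIO_MASK) >> 1));
}

int main(void)
{
	uint16_t tci = (5 << 13) | (1 << 12) | 100;  /* PCP=5, CFI=1, VID=100 */
	uint16_t reg = tci_to_reg(tci);

	printf("tci=0x%04x -> reg=0x%04x (vid=%u pcp=%u)\n",
	       (unsigned)tci, (unsigned)reg,
	       (unsigned)(reg & VLAN_VID_MASK), (unsigned)(reg >> 12));
	return 0;
}
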
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index b88121f240ca..0029148077a9 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c | |||
@@ -297,6 +297,12 @@ static int temac_dma_bd_init(struct net_device *ndev) | |||
297 | lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); | 297 | lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); |
298 | lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); | 298 | lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); |
299 | 299 | ||
300 | /* Init descriptor indexes */ | ||
301 | lp->tx_bd_ci = 0; | ||
302 | lp->tx_bd_next = 0; | ||
303 | lp->tx_bd_tail = 0; | ||
304 | lp->rx_bd_ci = 0; | ||
305 | |||
300 | return 0; | 306 | return 0; |
301 | 307 | ||
302 | out: | 308 | out: |
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index a34d6bf5e43b..cc70ecfc7062 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c | |||
@@ -429,11 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty) | |||
429 | if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) | 429 | if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) |
430 | return; | 430 | return; |
431 | 431 | ||
432 | spin_lock(&sl->lock); | ||
432 | if (sl->xleft <= 0) { | 433 | if (sl->xleft <= 0) { |
433 | /* Now serial buffer is almost free & we can start | 434 | /* Now serial buffer is almost free & we can start |
434 | * transmission of another packet */ | 435 | * transmission of another packet */ |
435 | sl->dev->stats.tx_packets++; | 436 | sl->dev->stats.tx_packets++; |
436 | clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); | 437 | clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); |
438 | spin_unlock(&sl->lock); | ||
437 | sl_unlock(sl); | 439 | sl_unlock(sl); |
438 | return; | 440 | return; |
439 | } | 441 | } |
@@ -441,6 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty) | |||
441 | actual = tty->ops->write(tty, sl->xhead, sl->xleft); | 443 | actual = tty->ops->write(tty, sl->xhead, sl->xleft); |
442 | sl->xleft -= actual; | 444 | sl->xleft -= actual; |
443 | sl->xhead += actual; | 445 | sl->xhead += actual; |
446 | spin_unlock(&sl->lock); | ||
444 | } | 447 | } |
445 | 448 | ||
446 | static void sl_tx_timeout(struct net_device *dev) | 449 | static void sl_tx_timeout(struct net_device *dev) |
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 2dbb9460349d..c6867f926cff 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c | |||
@@ -303,7 +303,7 @@ static void dm9601_set_multicast(struct net_device *net) | |||
303 | rx_ctl |= 0x02; | 303 | rx_ctl |= 0x02; |
304 | } else if (net->flags & IFF_ALLMULTI || | 304 | } else if (net->flags & IFF_ALLMULTI || |
305 | netdev_mc_count(net) > DM_MAX_MCAST) { | 305 | netdev_mc_count(net) > DM_MAX_MCAST) { |
306 | rx_ctl |= 0x04; | 306 | rx_ctl |= 0x08; |
307 | } else if (!netdev_mc_empty(net)) { | 307 | } else if (!netdev_mc_empty(net)) { |
308 | struct netdev_hw_addr *ha; | 308 | struct netdev_hw_addr *ha; |
309 | 309 | ||
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 6312332afeba..3d6aaf79d8b2 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -714,7 +714,7 @@ static const struct usb_device_id products[] = { | |||
714 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ | 714 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ |
715 | {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ | 715 | {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ |
716 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ | 716 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ |
717 | {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)}, /* Cinterion PLxx */ | 717 | {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ |
718 | 718 | ||
719 | /* 4. Gobi 1000 devices */ | 719 | /* 4. Gobi 1000 devices */ |
720 | {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ | 720 | {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ |
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 7b331e613e02..bf94e10a37c8 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -1241,7 +1241,9 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb) | |||
1241 | if (num_sgs == 1) | 1241 | if (num_sgs == 1) |
1242 | return 0; | 1242 | return 0; |
1243 | 1243 | ||
1244 | urb->sg = kmalloc(num_sgs * sizeof(struct scatterlist), GFP_ATOMIC); | 1244 | /* reserve one for zero packet */ |
1245 | urb->sg = kmalloc((num_sgs + 1) * sizeof(struct scatterlist), | ||
1246 | GFP_ATOMIC); | ||
1245 | if (!urb->sg) | 1247 | if (!urb->sg) |
1246 | return -ENOMEM; | 1248 | return -ENOMEM; |
1247 | 1249 | ||
@@ -1305,7 +1307,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, | |||
1305 | if (build_dma_sg(skb, urb) < 0) | 1307 | if (build_dma_sg(skb, urb) < 0) |
1306 | goto drop; | 1308 | goto drop; |
1307 | } | 1309 | } |
1308 | entry->length = length = urb->transfer_buffer_length; | 1310 | length = urb->transfer_buffer_length; |
1309 | 1311 | ||
1310 | /* don't assume the hardware handles USB_ZERO_PACKET | 1312 | /* don't assume the hardware handles USB_ZERO_PACKET |
1311 | * NOTE: strictly conforming cdc-ether devices should expect | 1313 | * NOTE: strictly conforming cdc-ether devices should expect |
@@ -1317,15 +1319,18 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, | |||
1317 | if (length % dev->maxpacket == 0) { | 1319 | if (length % dev->maxpacket == 0) { |
1318 | if (!(info->flags & FLAG_SEND_ZLP)) { | 1320 | if (!(info->flags & FLAG_SEND_ZLP)) { |
1319 | if (!(info->flags & FLAG_MULTI_PACKET)) { | 1321 | if (!(info->flags & FLAG_MULTI_PACKET)) { |
1320 | urb->transfer_buffer_length++; | 1322 | length++; |
1321 | if (skb_tailroom(skb)) { | 1323 | if (skb_tailroom(skb) && !urb->num_sgs) { |
1322 | skb->data[skb->len] = 0; | 1324 | skb->data[skb->len] = 0; |
1323 | __skb_put(skb, 1); | 1325 | __skb_put(skb, 1); |
1324 | } | 1326 | } else if (urb->num_sgs) |
1327 | sg_set_buf(&urb->sg[urb->num_sgs++], | ||
1328 | dev->padding_pkt, 1); | ||
1325 | } | 1329 | } |
1326 | } else | 1330 | } else |
1327 | urb->transfer_flags |= URB_ZERO_PACKET; | 1331 | urb->transfer_flags |= URB_ZERO_PACKET; |
1328 | } | 1332 | } |
1333 | entry->length = urb->transfer_buffer_length = length; | ||
1329 | 1334 | ||
1330 | spin_lock_irqsave(&dev->txq.lock, flags); | 1335 | spin_lock_irqsave(&dev->txq.lock, flags); |
1331 | retval = usb_autopm_get_interface_async(dev->intf); | 1336 | retval = usb_autopm_get_interface_async(dev->intf); |
@@ -1509,6 +1514,7 @@ void usbnet_disconnect (struct usb_interface *intf) | |||
1509 | 1514 | ||
1510 | usb_kill_urb(dev->interrupt); | 1515 | usb_kill_urb(dev->interrupt); |
1511 | usb_free_urb(dev->interrupt); | 1516 | usb_free_urb(dev->interrupt); |
1517 | kfree(dev->padding_pkt); | ||
1512 | 1518 | ||
1513 | free_netdev(net); | 1519 | free_netdev(net); |
1514 | } | 1520 | } |
@@ -1679,9 +1685,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) | |||
1679 | /* initialize max rx_qlen and tx_qlen */ | 1685 | /* initialize max rx_qlen and tx_qlen */ |
1680 | usbnet_update_max_qlen(dev); | 1686 | usbnet_update_max_qlen(dev); |
1681 | 1687 | ||
1688 | if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) && | ||
1689 | !(info->flags & FLAG_MULTI_PACKET)) { | ||
1690 | dev->padding_pkt = kzalloc(1, GFP_KERNEL); | ||
1691 | if (!dev->padding_pkt) | ||
1692 | goto out4; | ||
1693 | } | ||
1694 | |||
1682 | status = register_netdev (net); | 1695 | status = register_netdev (net); |
1683 | if (status) | 1696 | if (status) |
1684 | goto out4; | 1697 | goto out5; |
1685 | netif_info(dev, probe, dev->net, | 1698 | netif_info(dev, probe, dev->net, |
1686 | "register '%s' at usb-%s-%s, %s, %pM\n", | 1699 | "register '%s' at usb-%s-%s, %s, %pM\n", |
1687 | udev->dev.driver->name, | 1700 | udev->dev.driver->name, |
@@ -1699,6 +1712,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) | |||
1699 | 1712 | ||
1700 | return 0; | 1713 | return 0; |
1701 | 1714 | ||
1715 | out5: | ||
1716 | kfree(dev->padding_pkt); | ||
1702 | out4: | 1717 | out4: |
1703 | usb_free_urb(dev->interrupt); | 1718 | usb_free_urb(dev->interrupt); |
1704 | out3: | 1719 | out3: |
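The usbnet hunks above reserve one extra scatterlist slot and a 1-byte padding_pkt buffer for the zero-length-packet workaround. A small sketch of the rule they implement, with illustrative names (the real driver checks FLAG_SEND_ZLP and FLAG_MULTI_PACKET in info->flags):

#include <stdbool.h>
#include <stddef.h>

/* A transfer that is an exact multiple of wMaxPacketSize needs one explicit
 * padding byte when the device neither tolerates a real ZLP nor does its
 * own packet framing.
 */
static bool needs_pad_byte(size_t len, size_t maxpacket,
                           bool send_zlp, bool multi_packet)
{
	return len && (len % maxpacket == 0) && !send_zlp && !multi_packet;
}

With maxpacket 512 and a 1024-byte frame the predicate is true, so the length becomes 1025: either a zero byte is appended in the skb tailroom, or, for scatter-gather urbs, dev->padding_pkt fills the extra sg entry reserved by the (num_sgs + 1) allocation.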
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index d1292fe746bc..2ef5b6219f3f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -952,8 +952,7 @@ void vxlan_sock_release(struct vxlan_sock *vs) | |||
952 | 952 | ||
953 | spin_lock(&vn->sock_lock); | 953 | spin_lock(&vn->sock_lock); |
954 | hlist_del_rcu(&vs->hlist); | 954 | hlist_del_rcu(&vs->hlist); |
955 | smp_wmb(); | 955 | rcu_assign_sk_user_data(vs->sock->sk, NULL); |
956 | vs->sock->sk->sk_user_data = NULL; | ||
957 | vxlan_notify_del_rx_port(sk); | 956 | vxlan_notify_del_rx_port(sk); |
958 | spin_unlock(&vn->sock_lock); | 957 | spin_unlock(&vn->sock_lock); |
959 | 958 | ||
@@ -1048,8 +1047,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | |||
1048 | 1047 | ||
1049 | port = inet_sk(sk)->inet_sport; | 1048 | port = inet_sk(sk)->inet_sport; |
1050 | 1049 | ||
1051 | smp_read_barrier_depends(); | 1050 | vs = rcu_dereference_sk_user_data(sk); |
1052 | vs = (struct vxlan_sock *)sk->sk_user_data; | ||
1053 | if (!vs) | 1051 | if (!vs) |
1054 | goto drop; | 1052 | goto drop; |
1055 | 1053 | ||
@@ -2302,8 +2300,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, | |||
2302 | atomic_set(&vs->refcnt, 1); | 2300 | atomic_set(&vs->refcnt, 1); |
2303 | vs->rcv = rcv; | 2301 | vs->rcv = rcv; |
2304 | vs->data = data; | 2302 | vs->data = data; |
2305 | smp_wmb(); | 2303 | rcu_assign_sk_user_data(vs->sock->sk, vs); |
2306 | vs->sock->sk->sk_user_data = vs; | ||
2307 | 2304 | ||
2308 | spin_lock(&vn->sock_lock); | 2305 | spin_lock(&vn->sock_lock); |
2309 | hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); | 2306 | hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); |
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 4ee472a5a4e4..ab9e3a8410bc 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c | |||
@@ -1270,13 +1270,6 @@ static void ath9k_antenna_check(struct ath_softc *sc, | |||
1270 | return; | 1270 | return; |
1271 | 1271 | ||
1272 | /* | 1272 | /* |
1273 | * All MPDUs in an aggregate will use the same LNA | ||
1274 | * as the first MPDU. | ||
1275 | */ | ||
1276 | if (rs->rs_isaggr && !rs->rs_firstaggr) | ||
1277 | return; | ||
1278 | |||
1279 | /* | ||
1280 | * Change the default rx antenna if rx diversity | 1273 | * Change the default rx antenna if rx diversity |
1281 | * chooses the other antenna 3 times in a row. | 1274 | * chooses the other antenna 3 times in a row. |
1282 | */ | 1275 | */ |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 35b515fe3ffa..5ac713d2ff5d 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -399,6 +399,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf) | |||
399 | tbf->bf_buf_addr = bf->bf_buf_addr; | 399 | tbf->bf_buf_addr = bf->bf_buf_addr; |
400 | memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); | 400 | memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); |
401 | tbf->bf_state = bf->bf_state; | 401 | tbf->bf_state = bf->bf_state; |
402 | tbf->bf_state.stale = false; | ||
402 | 403 | ||
403 | return tbf; | 404 | return tbf; |
404 | } | 405 | } |
@@ -1389,11 +1390,15 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, | |||
1389 | u16 tid, u16 *ssn) | 1390 | u16 tid, u16 *ssn) |
1390 | { | 1391 | { |
1391 | struct ath_atx_tid *txtid; | 1392 | struct ath_atx_tid *txtid; |
1393 | struct ath_txq *txq; | ||
1392 | struct ath_node *an; | 1394 | struct ath_node *an; |
1393 | u8 density; | 1395 | u8 density; |
1394 | 1396 | ||
1395 | an = (struct ath_node *)sta->drv_priv; | 1397 | an = (struct ath_node *)sta->drv_priv; |
1396 | txtid = ATH_AN_2_TID(an, tid); | 1398 | txtid = ATH_AN_2_TID(an, tid); |
1399 | txq = txtid->ac->txq; | ||
1400 | |||
1401 | ath_txq_lock(sc, txq); | ||
1397 | 1402 | ||
1398 | /* update ampdu factor/density, they may have changed. This may happen | 1403 | /* update ampdu factor/density, they may have changed. This may happen |
1399 | * in HT IBSS when a beacon with HT-info is received after the station | 1404 | * in HT IBSS when a beacon with HT-info is received after the station |
@@ -1417,6 +1422,8 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, | |||
1417 | memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); | 1422 | memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); |
1418 | txtid->baw_head = txtid->baw_tail = 0; | 1423 | txtid->baw_head = txtid->baw_tail = 0; |
1419 | 1424 | ||
1425 | ath_txq_unlock_complete(sc, txq); | ||
1426 | |||
1420 | return 0; | 1427 | return 0; |
1421 | } | 1428 | } |
1422 | 1429 | ||
@@ -1555,8 +1562,10 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, | |||
1555 | __skb_unlink(bf->bf_mpdu, tid_q); | 1562 | __skb_unlink(bf->bf_mpdu, tid_q); |
1556 | list_add_tail(&bf->list, &bf_q); | 1563 | list_add_tail(&bf->list, &bf_q); |
1557 | ath_set_rates(tid->an->vif, tid->an->sta, bf); | 1564 | ath_set_rates(tid->an->vif, tid->an->sta, bf); |
1558 | ath_tx_addto_baw(sc, tid, bf); | 1565 | if (bf_isampdu(bf)) { |
1559 | bf->bf_state.bf_type &= ~BUF_AGGR; | 1566 | ath_tx_addto_baw(sc, tid, bf); |
1567 | bf->bf_state.bf_type &= ~BUF_AGGR; | ||
1568 | } | ||
1560 | if (bf_tail) | 1569 | if (bf_tail) |
1561 | bf_tail->bf_next = bf; | 1570 | bf_tail->bf_next = bf; |
1562 | 1571 | ||
@@ -1950,7 +1959,9 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, | |||
1950 | if (bf_is_ampdu_not_probing(bf)) | 1959 | if (bf_is_ampdu_not_probing(bf)) |
1951 | txq->axq_ampdu_depth++; | 1960 | txq->axq_ampdu_depth++; |
1952 | 1961 | ||
1953 | bf = bf->bf_lastbf->bf_next; | 1962 | bf_last = bf->bf_lastbf; |
1963 | bf = bf_last->bf_next; | ||
1964 | bf_last->bf_next = NULL; | ||
1954 | } | 1965 | } |
1955 | } | 1966 | } |
1956 | } | 1967 | } |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index 64f4a2bc8dde..c3462b75bd08 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c | |||
@@ -464,8 +464,6 @@ static struct sdio_driver brcmf_sdmmc_driver = { | |||
464 | 464 | ||
465 | static int brcmf_sdio_pd_probe(struct platform_device *pdev) | 465 | static int brcmf_sdio_pd_probe(struct platform_device *pdev) |
466 | { | 466 | { |
467 | int ret; | ||
468 | |||
469 | brcmf_dbg(SDIO, "Enter\n"); | 467 | brcmf_dbg(SDIO, "Enter\n"); |
470 | 468 | ||
471 | brcmfmac_sdio_pdata = pdev->dev.platform_data; | 469 | brcmfmac_sdio_pdata = pdev->dev.platform_data; |
@@ -473,11 +471,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev) | |||
473 | if (brcmfmac_sdio_pdata->power_on) | 471 | if (brcmfmac_sdio_pdata->power_on) |
474 | brcmfmac_sdio_pdata->power_on(); | 472 | brcmfmac_sdio_pdata->power_on(); |
475 | 473 | ||
476 | ret = sdio_register_driver(&brcmf_sdmmc_driver); | 474 | return 0; |
477 | if (ret) | ||
478 | brcmf_err("sdio_register_driver failed: %d\n", ret); | ||
479 | |||
480 | return ret; | ||
481 | } | 475 | } |
482 | 476 | ||
483 | static int brcmf_sdio_pd_remove(struct platform_device *pdev) | 477 | static int brcmf_sdio_pd_remove(struct platform_device *pdev) |
@@ -500,6 +494,15 @@ static struct platform_driver brcmf_sdio_pd = { | |||
500 | } | 494 | } |
501 | }; | 495 | }; |
502 | 496 | ||
497 | void brcmf_sdio_register(void) | ||
498 | { | ||
499 | int ret; | ||
500 | |||
501 | ret = sdio_register_driver(&brcmf_sdmmc_driver); | ||
502 | if (ret) | ||
503 | brcmf_err("sdio_register_driver failed: %d\n", ret); | ||
504 | } | ||
505 | |||
503 | void brcmf_sdio_exit(void) | 506 | void brcmf_sdio_exit(void) |
504 | { | 507 | { |
505 | brcmf_dbg(SDIO, "Enter\n"); | 508 | brcmf_dbg(SDIO, "Enter\n"); |
@@ -510,18 +513,13 @@ void brcmf_sdio_exit(void) | |||
510 | sdio_unregister_driver(&brcmf_sdmmc_driver); | 513 | sdio_unregister_driver(&brcmf_sdmmc_driver); |
511 | } | 514 | } |
512 | 515 | ||
513 | void brcmf_sdio_init(void) | 516 | void __init brcmf_sdio_init(void) |
514 | { | 517 | { |
515 | int ret; | 518 | int ret; |
516 | 519 | ||
517 | brcmf_dbg(SDIO, "Enter\n"); | 520 | brcmf_dbg(SDIO, "Enter\n"); |
518 | 521 | ||
519 | ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe); | 522 | ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe); |
520 | if (ret == -ENODEV) { | 523 | if (ret == -ENODEV) |
521 | brcmf_dbg(SDIO, "No platform data available, registering without.\n"); | 524 | brcmf_dbg(SDIO, "No platform data available.\n"); |
522 | ret = sdio_register_driver(&brcmf_sdmmc_driver); | ||
523 | } | ||
524 | |||
525 | if (ret) | ||
526 | brcmf_err("driver registration failed: %d\n", ret); | ||
527 | } | 525 | } |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h index f7c1985844e4..74156f84180c 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h | |||
@@ -156,10 +156,11 @@ extern int brcmf_bus_start(struct device *dev); | |||
156 | #ifdef CONFIG_BRCMFMAC_SDIO | 156 | #ifdef CONFIG_BRCMFMAC_SDIO |
157 | extern void brcmf_sdio_exit(void); | 157 | extern void brcmf_sdio_exit(void); |
158 | extern void brcmf_sdio_init(void); | 158 | extern void brcmf_sdio_init(void); |
159 | extern void brcmf_sdio_register(void); | ||
159 | #endif | 160 | #endif |
160 | #ifdef CONFIG_BRCMFMAC_USB | 161 | #ifdef CONFIG_BRCMFMAC_USB |
161 | extern void brcmf_usb_exit(void); | 162 | extern void brcmf_usb_exit(void); |
162 | extern void brcmf_usb_init(void); | 163 | extern void brcmf_usb_register(void); |
163 | #endif | 164 | #endif |
164 | 165 | ||
165 | #endif /* _BRCMF_BUS_H_ */ | 166 | #endif /* _BRCMF_BUS_H_ */ |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index e067aec1fbf1..40e7f854e10f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | |||
@@ -1231,21 +1231,23 @@ u32 brcmf_get_chip_info(struct brcmf_if *ifp) | |||
1231 | return bus->chip << 4 | bus->chiprev; | 1231 | return bus->chip << 4 | bus->chiprev; |
1232 | } | 1232 | } |
1233 | 1233 | ||
1234 | static void brcmf_driver_init(struct work_struct *work) | 1234 | static void brcmf_driver_register(struct work_struct *work) |
1235 | { | 1235 | { |
1236 | brcmf_debugfs_init(); | ||
1237 | |||
1238 | #ifdef CONFIG_BRCMFMAC_SDIO | 1236 | #ifdef CONFIG_BRCMFMAC_SDIO |
1239 | brcmf_sdio_init(); | 1237 | brcmf_sdio_register(); |
1240 | #endif | 1238 | #endif |
1241 | #ifdef CONFIG_BRCMFMAC_USB | 1239 | #ifdef CONFIG_BRCMFMAC_USB |
1242 | brcmf_usb_init(); | 1240 | brcmf_usb_register(); |
1243 | #endif | 1241 | #endif |
1244 | } | 1242 | } |
1245 | static DECLARE_WORK(brcmf_driver_work, brcmf_driver_init); | 1243 | static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register); |
1246 | 1244 | ||
1247 | static int __init brcmfmac_module_init(void) | 1245 | static int __init brcmfmac_module_init(void) |
1248 | { | 1246 | { |
1247 | brcmf_debugfs_init(); | ||
1248 | #ifdef CONFIG_BRCMFMAC_SDIO | ||
1249 | brcmf_sdio_init(); | ||
1250 | #endif | ||
1249 | if (!schedule_work(&brcmf_driver_work)) | 1251 | if (!schedule_work(&brcmf_driver_work)) |
1250 | return -EBUSY; | 1252 | return -EBUSY; |
1251 | 1253 | ||
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c index 39e01a7c8556..f4aea47e0730 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c | |||
@@ -1539,7 +1539,7 @@ void brcmf_usb_exit(void) | |||
1539 | brcmf_release_fw(&fw_image_list); | 1539 | brcmf_release_fw(&fw_image_list); |
1540 | } | 1540 | } |
1541 | 1541 | ||
1542 | void brcmf_usb_init(void) | 1542 | void brcmf_usb_register(void) |
1543 | { | 1543 | { |
1544 | brcmf_dbg(USB, "Enter\n"); | 1544 | brcmf_dbg(USB, "Enter\n"); |
1545 | INIT_LIST_HEAD(&fw_image_list); | 1545 | INIT_LIST_HEAD(&fw_image_list); |
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 3a6544710c8a..edc5d105ff98 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | |||
@@ -457,6 +457,8 @@ static int brcms_ops_start(struct ieee80211_hw *hw) | |||
457 | if (err != 0) | 457 | if (err != 0) |
458 | brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n", | 458 | brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n", |
459 | __func__, err); | 459 | __func__, err); |
460 | |||
461 | bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, true); | ||
460 | return err; | 462 | return err; |
461 | } | 463 | } |
462 | 464 | ||
@@ -479,6 +481,8 @@ static void brcms_ops_stop(struct ieee80211_hw *hw) | |||
479 | return; | 481 | return; |
480 | } | 482 | } |
481 | 483 | ||
484 | bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, false); | ||
485 | |||
482 | /* put driver in down state */ | 486 | /* put driver in down state */ |
483 | spin_lock_bh(&wl->lock); | 487 | spin_lock_bh(&wl->lock); |
484 | brcms_down(wl); | 488 | brcms_down(wl); |
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c index f5e6b489ed32..899cad34ccd3 100644 --- a/drivers/net/wireless/cw1200/cw1200_spi.c +++ b/drivers/net/wireless/cw1200/cw1200_spi.c | |||
@@ -42,7 +42,6 @@ struct hwbus_priv { | |||
42 | spinlock_t lock; /* Serialize all bus operations */ | 42 | spinlock_t lock; /* Serialize all bus operations */ |
43 | wait_queue_head_t wq; | 43 | wait_queue_head_t wq; |
44 | int claimed; | 44 | int claimed; |
45 | int irq_disabled; | ||
46 | }; | 45 | }; |
47 | 46 | ||
48 | #define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2) | 47 | #define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2) |
@@ -238,8 +237,6 @@ static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id) | |||
238 | struct hwbus_priv *self = dev_id; | 237 | struct hwbus_priv *self = dev_id; |
239 | 238 | ||
240 | if (self->core) { | 239 | if (self->core) { |
241 | disable_irq_nosync(self->func->irq); | ||
242 | self->irq_disabled = 1; | ||
243 | cw1200_irq_handler(self->core); | 240 | cw1200_irq_handler(self->core); |
244 | return IRQ_HANDLED; | 241 | return IRQ_HANDLED; |
245 | } else { | 242 | } else { |
@@ -253,9 +250,10 @@ static int cw1200_spi_irq_subscribe(struct hwbus_priv *self) | |||
253 | 250 | ||
254 | pr_debug("SW IRQ subscribe\n"); | 251 | pr_debug("SW IRQ subscribe\n"); |
255 | 252 | ||
256 | ret = request_any_context_irq(self->func->irq, cw1200_spi_irq_handler, | 253 | ret = request_threaded_irq(self->func->irq, NULL, |
257 | IRQF_TRIGGER_HIGH, | 254 | cw1200_spi_irq_handler, |
258 | "cw1200_wlan_irq", self); | 255 | IRQF_TRIGGER_HIGH | IRQF_ONESHOT, |
256 | "cw1200_wlan_irq", self); | ||
259 | if (WARN_ON(ret < 0)) | 257 | if (WARN_ON(ret < 0)) |
260 | goto exit; | 258 | goto exit; |
261 | 259 | ||
@@ -273,22 +271,13 @@ exit: | |||
273 | 271 | ||
274 | static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self) | 272 | static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self) |
275 | { | 273 | { |
274 | int ret = 0; | ||
275 | |||
276 | pr_debug("SW IRQ unsubscribe\n"); | 276 | pr_debug("SW IRQ unsubscribe\n"); |
277 | disable_irq_wake(self->func->irq); | 277 | disable_irq_wake(self->func->irq); |
278 | free_irq(self->func->irq, self); | 278 | free_irq(self->func->irq, self); |
279 | 279 | ||
280 | return 0; | 280 | return ret; |
281 | } | ||
282 | |||
283 | static int cw1200_spi_irq_enable(struct hwbus_priv *self, int enable) | ||
284 | { | ||
285 | /* Disables are handled by the interrupt handler */ | ||
286 | if (enable && self->irq_disabled) { | ||
287 | enable_irq(self->func->irq); | ||
288 | self->irq_disabled = 0; | ||
289 | } | ||
290 | |||
291 | return 0; | ||
292 | } | 281 | } |
293 | 282 | ||
294 | static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata) | 283 | static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata) |
@@ -368,7 +357,6 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = { | |||
368 | .unlock = cw1200_spi_unlock, | 357 | .unlock = cw1200_spi_unlock, |
369 | .align_size = cw1200_spi_align_size, | 358 | .align_size = cw1200_spi_align_size, |
370 | .power_mgmt = cw1200_spi_pm, | 359 | .power_mgmt = cw1200_spi_pm, |
371 | .irq_enable = cw1200_spi_irq_enable, | ||
372 | }; | 360 | }; |
373 | 361 | ||
374 | /* Probe Function to be called by SPI stack when device is discovered */ | 362 | /* Probe Function to be called by SPI stack when device is discovered */ |
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c index 0b2061bbc68b..acdff0f7f952 100644 --- a/drivers/net/wireless/cw1200/fwio.c +++ b/drivers/net/wireless/cw1200/fwio.c | |||
@@ -485,7 +485,7 @@ int cw1200_load_firmware(struct cw1200_common *priv) | |||
485 | 485 | ||
486 | /* Enable interrupt signalling */ | 486 | /* Enable interrupt signalling */ |
487 | priv->hwbus_ops->lock(priv->hwbus_priv); | 487 | priv->hwbus_ops->lock(priv->hwbus_priv); |
488 | ret = __cw1200_irq_enable(priv, 2); | 488 | ret = __cw1200_irq_enable(priv, 1); |
489 | priv->hwbus_ops->unlock(priv->hwbus_priv); | 489 | priv->hwbus_ops->unlock(priv->hwbus_priv); |
490 | if (ret < 0) | 490 | if (ret < 0) |
491 | goto unsubscribe; | 491 | goto unsubscribe; |
diff --git a/drivers/net/wireless/cw1200/hwbus.h b/drivers/net/wireless/cw1200/hwbus.h index 51dfb3a90735..8b2fc831c3de 100644 --- a/drivers/net/wireless/cw1200/hwbus.h +++ b/drivers/net/wireless/cw1200/hwbus.h | |||
@@ -28,7 +28,6 @@ struct hwbus_ops { | |||
28 | void (*unlock)(struct hwbus_priv *self); | 28 | void (*unlock)(struct hwbus_priv *self); |
29 | size_t (*align_size)(struct hwbus_priv *self, size_t size); | 29 | size_t (*align_size)(struct hwbus_priv *self, size_t size); |
30 | int (*power_mgmt)(struct hwbus_priv *self, bool suspend); | 30 | int (*power_mgmt)(struct hwbus_priv *self, bool suspend); |
31 | int (*irq_enable)(struct hwbus_priv *self, int enable); | ||
32 | }; | 31 | }; |
33 | 32 | ||
34 | #endif /* CW1200_HWBUS_H */ | 33 | #endif /* CW1200_HWBUS_H */ |
diff --git a/drivers/net/wireless/cw1200/hwio.c b/drivers/net/wireless/cw1200/hwio.c index 41bd7615ccaa..ff230b7aeedd 100644 --- a/drivers/net/wireless/cw1200/hwio.c +++ b/drivers/net/wireless/cw1200/hwio.c | |||
@@ -273,21 +273,6 @@ int __cw1200_irq_enable(struct cw1200_common *priv, int enable) | |||
273 | u16 val16; | 273 | u16 val16; |
274 | int ret; | 274 | int ret; |
275 | 275 | ||
276 | /* We need to do this hack because the SPI layer can sleep on I/O | ||
277 | and the general path involves I/O to the device in interrupt | ||
278 | context. | ||
279 | |||
280 | However, the initial enable call needs to go to the hardware. | ||
281 | |||
282 | We don't worry about shutdown because we do a full reset which | ||
283 | clears the interrupt enabled bits. | ||
284 | */ | ||
285 | if (priv->hwbus_ops->irq_enable) { | ||
286 | ret = priv->hwbus_ops->irq_enable(priv->hwbus_priv, enable); | ||
287 | if (ret || enable < 2) | ||
288 | return ret; | ||
289 | } | ||
290 | |||
291 | if (HIF_8601_SILICON == priv->hw_type) { | 276 | if (HIF_8601_SILICON == priv->hw_type) { |
292 | ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); | 277 | ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); |
293 | if (ret < 0) { | 278 | if (ret < 0) { |
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c index 21c688264708..1214c587fd08 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/mwifiex/11n_aggr.c | |||
@@ -150,7 +150,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv, | |||
150 | */ | 150 | */ |
151 | int | 151 | int |
152 | mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, | 152 | mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, |
153 | struct mwifiex_ra_list_tbl *pra_list, int headroom, | 153 | struct mwifiex_ra_list_tbl *pra_list, |
154 | int ptrindex, unsigned long ra_list_flags) | 154 | int ptrindex, unsigned long ra_list_flags) |
155 | __releases(&priv->wmm.ra_list_spinlock) | 155 | __releases(&priv->wmm.ra_list_spinlock) |
156 | { | 156 | { |
@@ -160,6 +160,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, | |||
160 | int pad = 0, ret; | 160 | int pad = 0, ret; |
161 | struct mwifiex_tx_param tx_param; | 161 | struct mwifiex_tx_param tx_param; |
162 | struct txpd *ptx_pd = NULL; | 162 | struct txpd *ptx_pd = NULL; |
163 | int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN; | ||
163 | 164 | ||
164 | skb_src = skb_peek(&pra_list->skb_head); | 165 | skb_src = skb_peek(&pra_list->skb_head); |
165 | if (!skb_src) { | 166 | if (!skb_src) { |
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.h b/drivers/net/wireless/mwifiex/11n_aggr.h index 900e1c62a0cc..892098d6a696 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.h +++ b/drivers/net/wireless/mwifiex/11n_aggr.h | |||
@@ -26,7 +26,7 @@ | |||
26 | int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv, | 26 | int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv, |
27 | struct sk_buff *skb); | 27 | struct sk_buff *skb); |
28 | int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, | 28 | int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, |
29 | struct mwifiex_ra_list_tbl *ptr, int headroom, | 29 | struct mwifiex_ra_list_tbl *ptr, |
30 | int ptr_index, unsigned long flags) | 30 | int ptr_index, unsigned long flags) |
31 | __releases(&priv->wmm.ra_list_spinlock); | 31 | __releases(&priv->wmm.ra_list_spinlock); |
32 | 32 | ||
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 2d761477d15e..a6c46f3b6e3a 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c | |||
@@ -1155,7 +1155,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv, | |||
1155 | uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions); | 1155 | uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions); |
1156 | 1156 | ||
1157 | if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) && | 1157 | if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) && |
1158 | adapter->iface_type == MWIFIEX_SDIO) { | 1158 | adapter->iface_type != MWIFIEX_USB) { |
1159 | mwifiex_hs_activated_event(priv, true); | 1159 | mwifiex_hs_activated_event(priv, true); |
1160 | return 0; | 1160 | return 0; |
1161 | } else { | 1161 | } else { |
@@ -1167,8 +1167,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv, | |||
1167 | } | 1167 | } |
1168 | if (conditions != HS_CFG_CANCEL) { | 1168 | if (conditions != HS_CFG_CANCEL) { |
1169 | adapter->is_hs_configured = true; | 1169 | adapter->is_hs_configured = true; |
1170 | if (adapter->iface_type == MWIFIEX_USB || | 1170 | if (adapter->iface_type == MWIFIEX_USB) |
1171 | adapter->iface_type == MWIFIEX_PCIE) | ||
1172 | mwifiex_hs_activated_event(priv, true); | 1171 | mwifiex_hs_activated_event(priv, true); |
1173 | } else { | 1172 | } else { |
1174 | adapter->is_hs_configured = false; | 1173 | adapter->is_hs_configured = false; |
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index 2472d4b7f00e..1c70b8d09227 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c | |||
@@ -447,9 +447,6 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message) | |||
447 | */ | 447 | */ |
448 | adapter->is_suspended = true; | 448 | adapter->is_suspended = true; |
449 | 449 | ||
450 | for (i = 0; i < adapter->priv_num; i++) | ||
451 | netif_carrier_off(adapter->priv[i]->netdev); | ||
452 | |||
453 | if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb) | 450 | if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb) |
454 | usb_kill_urb(card->rx_cmd.urb); | 451 | usb_kill_urb(card->rx_cmd.urb); |
455 | 452 | ||
@@ -509,10 +506,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf) | |||
509 | MWIFIEX_RX_CMD_BUF_SIZE); | 506 | MWIFIEX_RX_CMD_BUF_SIZE); |
510 | } | 507 | } |
511 | 508 | ||
512 | for (i = 0; i < adapter->priv_num; i++) | ||
513 | if (adapter->priv[i]->media_connected) | ||
514 | netif_carrier_on(adapter->priv[i]->netdev); | ||
515 | |||
516 | /* Disable Host Sleep */ | 509 | /* Disable Host Sleep */ |
517 | if (adapter->hs_activated) | 510 | if (adapter->hs_activated) |
518 | mwifiex_cancel_hs(mwifiex_get_priv(adapter, | 511 | mwifiex_cancel_hs(mwifiex_get_priv(adapter, |
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 2e8f9cdea54d..95fa3599b407 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c | |||
@@ -1239,8 +1239,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter) | |||
1239 | if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) && | 1239 | if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) && |
1240 | mwifiex_is_11n_aggragation_possible(priv, ptr, | 1240 | mwifiex_is_11n_aggragation_possible(priv, ptr, |
1241 | adapter->tx_buf_size)) | 1241 | adapter->tx_buf_size)) |
1242 | mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN, | 1242 | mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags); |
1243 | ptr_index, flags); | ||
1244 | /* ra_list_spinlock has been freed in | 1243 | /* ra_list_spinlock has been freed in |
1245 | mwifiex_11n_aggregate_pkt() */ | 1244 | mwifiex_11n_aggregate_pkt() */ |
1246 | else | 1245 | else |
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index b9deef66cf4b..e328d3058c41 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c | |||
@@ -83,6 +83,7 @@ static struct usb_device_id p54u_table[] = { | |||
83 | {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ | 83 | {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ |
84 | {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ | 84 | {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ |
85 | {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ | 85 | {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ |
86 | {USB_DEVICE(0x07aa, 0x0020)}, /* Corega WLUSB2GTST USB */ | ||
86 | {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ | 87 | {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ |
87 | {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ | 88 | {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ |
88 | {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ | 89 | {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ |
@@ -979,6 +980,7 @@ static int p54u_load_firmware(struct ieee80211_hw *dev, | |||
979 | if (err) { | 980 | if (err) { |
980 | dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s " | 981 | dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s " |
981 | "(%d)!\n", p54u_fwlist[i].fw, err); | 982 | "(%d)!\n", p54u_fwlist[i].fw, err); |
983 | usb_put_dev(udev); | ||
982 | } | 984 | } |
983 | 985 | ||
984 | return err; | 986 | return err; |
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index cc03e7c87cbe..703258742d28 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h | |||
@@ -2057,7 +2057,7 @@ struct rtl_priv { | |||
2057 | that it points to the data allocated | 2057 | that it points to the data allocated |
2058 | beyond this structure like: | 2058 | beyond this structure like: |
2059 | rtl_pci_priv or rtl_usb_priv */ | 2059 | rtl_pci_priv or rtl_usb_priv */ |
2060 | u8 priv[0]; | 2060 | u8 priv[0] __aligned(sizeof(void *)); |
2061 | }; | 2061 | }; |
2062 | 2062 | ||
2063 | #define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv)) | 2063 | #define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv)) |
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index a53782ef1540..b45bce20ad76 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c | |||
@@ -24,6 +24,12 @@ | |||
24 | struct backend_info { | 24 | struct backend_info { |
25 | struct xenbus_device *dev; | 25 | struct xenbus_device *dev; |
26 | struct xenvif *vif; | 26 | struct xenvif *vif; |
27 | |||
28 | /* This is the state that will be reflected in xenstore when any | ||
29 | * active hotplug script completes. | ||
30 | */ | ||
31 | enum xenbus_state state; | ||
32 | |||
27 | enum xenbus_state frontend_state; | 33 | enum xenbus_state frontend_state; |
28 | struct xenbus_watch hotplug_status_watch; | 34 | struct xenbus_watch hotplug_status_watch; |
29 | u8 have_hotplug_status_watch:1; | 35 | u8 have_hotplug_status_watch:1; |
@@ -136,6 +142,8 @@ static int netback_probe(struct xenbus_device *dev, | |||
136 | if (err) | 142 | if (err) |
137 | goto fail; | 143 | goto fail; |
138 | 144 | ||
145 | be->state = XenbusStateInitWait; | ||
146 | |||
139 | /* This kicks hotplug scripts, so do it immediately. */ | 147 | /* This kicks hotplug scripts, so do it immediately. */ |
140 | backend_create_xenvif(be); | 148 | backend_create_xenvif(be); |
141 | 149 | ||
@@ -208,24 +216,113 @@ static void backend_create_xenvif(struct backend_info *be) | |||
208 | kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); | 216 | kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); |
209 | } | 217 | } |
210 | 218 | ||
211 | 219 | static void backend_disconnect(struct backend_info *be) | |
212 | static void disconnect_backend(struct xenbus_device *dev) | ||
213 | { | 220 | { |
214 | struct backend_info *be = dev_get_drvdata(&dev->dev); | ||
215 | |||
216 | if (be->vif) | 221 | if (be->vif) |
217 | xenvif_disconnect(be->vif); | 222 | xenvif_disconnect(be->vif); |
218 | } | 223 | } |
219 | 224 | ||
220 | static void destroy_backend(struct xenbus_device *dev) | 225 | static void backend_connect(struct backend_info *be) |
221 | { | 226 | { |
222 | struct backend_info *be = dev_get_drvdata(&dev->dev); | 227 | if (be->vif) |
228 | connect(be); | ||
229 | } | ||
223 | 230 | ||
224 | if (be->vif) { | 231 | static inline void backend_switch_state(struct backend_info *be, |
225 | kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); | 232 | enum xenbus_state state) |
226 | xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); | 233 | { |
227 | xenvif_free(be->vif); | 234 | struct xenbus_device *dev = be->dev; |
228 | be->vif = NULL; | 235 | |
236 | pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state)); | ||
237 | be->state = state; | ||
238 | |||
239 | /* If we are waiting for a hotplug script then defer the | ||
240 | * actual xenbus state change. | ||
241 | */ | ||
242 | if (!be->have_hotplug_status_watch) | ||
243 | xenbus_switch_state(dev, state); | ||
244 | } | ||
245 | |||
246 | /* Handle backend state transitions: | ||
247 | * | ||
248 | * The backend state starts in InitWait and the following transitions are | ||
249 | * allowed. | ||
250 | * | ||
251 | * InitWait -> Connected | ||
252 | * | ||
253 | *    ^    \         | | ||
254 | *    |     \        | | ||
255 | *    |      \       | | ||
256 | *    |       \      | | ||
257 | *    |        \     | | ||
258 | *    |         \    | | ||
259 | *    |          V   V | ||
260 | * | ||
261 | *  Closed  <-> Closing | ||
262 | * | ||
263 | * The state argument specifies the eventual state of the backend and the | ||
264 | * function transitions to that state via the shortest path. | ||
265 | */ | ||
266 | static void set_backend_state(struct backend_info *be, | ||
267 | enum xenbus_state state) | ||
268 | { | ||
269 | while (be->state != state) { | ||
270 | switch (be->state) { | ||
271 | case XenbusStateClosed: | ||
272 | switch (state) { | ||
273 | case XenbusStateInitWait: | ||
274 | case XenbusStateConnected: | ||
275 | pr_info("%s: prepare for reconnect\n", | ||
276 | be->dev->nodename); | ||
277 | backend_switch_state(be, XenbusStateInitWait); | ||
278 | break; | ||
279 | case XenbusStateClosing: | ||
280 | backend_switch_state(be, XenbusStateClosing); | ||
281 | break; | ||
282 | default: | ||
283 | BUG(); | ||
284 | } | ||
285 | break; | ||
286 | case XenbusStateInitWait: | ||
287 | switch (state) { | ||
288 | case XenbusStateConnected: | ||
289 | backend_connect(be); | ||
290 | backend_switch_state(be, XenbusStateConnected); | ||
291 | break; | ||
292 | case XenbusStateClosing: | ||
293 | case XenbusStateClosed: | ||
294 | backend_switch_state(be, XenbusStateClosing); | ||
295 | break; | ||
296 | default: | ||
297 | BUG(); | ||
298 | } | ||
299 | break; | ||
300 | case XenbusStateConnected: | ||
301 | switch (state) { | ||
302 | case XenbusStateInitWait: | ||
303 | case XenbusStateClosing: | ||
304 | case XenbusStateClosed: | ||
305 | backend_disconnect(be); | ||
306 | backend_switch_state(be, XenbusStateClosing); | ||
307 | break; | ||
308 | default: | ||
309 | BUG(); | ||
310 | } | ||
311 | break; | ||
312 | case XenbusStateClosing: | ||
313 | switch (state) { | ||
314 | case XenbusStateInitWait: | ||
315 | case XenbusStateConnected: | ||
316 | case XenbusStateClosed: | ||
317 | backend_switch_state(be, XenbusStateClosed); | ||
318 | break; | ||
319 | default: | ||
320 | BUG(); | ||
321 | } | ||
322 | break; | ||
323 | default: | ||
324 | BUG(); | ||
325 | } | ||
229 | } | 326 | } |
230 | } | 327 | } |
231 | 328 | ||
@@ -237,40 +334,33 @@ static void frontend_changed(struct xenbus_device *dev, | |||
237 | { | 334 | { |
238 | struct backend_info *be = dev_get_drvdata(&dev->dev); | 335 | struct backend_info *be = dev_get_drvdata(&dev->dev); |
239 | 336 | ||
240 | pr_debug("frontend state %s\n", xenbus_strstate(frontend_state)); | 337 | pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state)); |
241 | 338 | ||
242 | be->frontend_state = frontend_state; | 339 | be->frontend_state = frontend_state; |
243 | 340 | ||
244 | switch (frontend_state) { | 341 | switch (frontend_state) { |
245 | case XenbusStateInitialising: | 342 | case XenbusStateInitialising: |
246 | if (dev->state == XenbusStateClosed) { | 343 | set_backend_state(be, XenbusStateInitWait); |
247 | pr_info("%s: prepare for reconnect\n", dev->nodename); | ||
248 | xenbus_switch_state(dev, XenbusStateInitWait); | ||
249 | } | ||
250 | break; | 344 | break; |
251 | 345 | ||
252 | case XenbusStateInitialised: | 346 | case XenbusStateInitialised: |
253 | break; | 347 | break; |
254 | 348 | ||
255 | case XenbusStateConnected: | 349 | case XenbusStateConnected: |
256 | if (dev->state == XenbusStateConnected) | 350 | set_backend_state(be, XenbusStateConnected); |
257 | break; | ||
258 | if (be->vif) | ||
259 | connect(be); | ||
260 | break; | 351 | break; |
261 | 352 | ||
262 | case XenbusStateClosing: | 353 | case XenbusStateClosing: |
263 | disconnect_backend(dev); | 354 | set_backend_state(be, XenbusStateClosing); |
264 | xenbus_switch_state(dev, XenbusStateClosing); | ||
265 | break; | 355 | break; |
266 | 356 | ||
267 | case XenbusStateClosed: | 357 | case XenbusStateClosed: |
268 | xenbus_switch_state(dev, XenbusStateClosed); | 358 | set_backend_state(be, XenbusStateClosed); |
269 | if (xenbus_dev_is_online(dev)) | 359 | if (xenbus_dev_is_online(dev)) |
270 | break; | 360 | break; |
271 | destroy_backend(dev); | ||
272 | /* fall through if not online */ | 361 | /* fall through if not online */ |
273 | case XenbusStateUnknown: | 362 | case XenbusStateUnknown: |
363 | set_backend_state(be, XenbusStateClosed); | ||
274 | device_unregister(&dev->dev); | 364 | device_unregister(&dev->dev); |
275 | break; | 365 | break; |
276 | 366 | ||
@@ -363,7 +453,9 @@ static void hotplug_status_changed(struct xenbus_watch *watch, | |||
363 | if (IS_ERR(str)) | 453 | if (IS_ERR(str)) |
364 | return; | 454 | return; |
365 | if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) { | 455 | if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) { |
366 | xenbus_switch_state(be->dev, XenbusStateConnected); | 456 | /* Complete any pending state change */ |
457 | xenbus_switch_state(be->dev, be->state); | ||
458 | |||
367 | /* Not interested in this watch anymore. */ | 459 | /* Not interested in this watch anymore. */ |
368 | unregister_hotplug_status_watch(be); | 460 | unregister_hotplug_status_watch(be); |
369 | } | 461 | } |
@@ -393,12 +485,8 @@ static void connect(struct backend_info *be) | |||
393 | err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, | 485 | err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, |
394 | hotplug_status_changed, | 486 | hotplug_status_changed, |
395 | "%s/%s", dev->nodename, "hotplug-status"); | 487 | "%s/%s", dev->nodename, "hotplug-status"); |
396 | if (err) { | 488 | if (!err) |
397 | /* Switch now, since we can't do a watch. */ | ||
398 | xenbus_switch_state(dev, XenbusStateConnected); | ||
399 | } else { | ||
400 | be->have_hotplug_status_watch = 1; | 489 | be->have_hotplug_status_watch = 1; |
401 | } | ||
402 | 490 | ||
403 | netif_wake_queue(be->vif->dev); | 491 | netif_wake_queue(be->vif->dev); |
404 | } | 492 | } |
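A worked walk-through of the new state machine (hypothetical sequence, not part of the patch): with the backend sitting in Closed, the frontend writing XenbusStateConnected makes frontend_changed() call set_backend_state(be, XenbusStateConnected), which loops one edge at a time:

  Closed   -> backend_switch_state(be, XenbusStateInitWait)   /* "prepare for reconnect" */
  InitWait -> backend_connect(be); backend_switch_state(be, XenbusStateConnected)

Each backend_switch_state() only records be->state while a hotplug script is still pending; hotplug_status_changed() later completes the deferred xenbus_switch_state() with that recorded state.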
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index d66033f418c9..0333e605ea0d 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h | |||
@@ -242,6 +242,7 @@ extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, | |||
242 | struct bcma_device *core, bool enable); | 242 | struct bcma_device *core, bool enable); |
243 | extern void bcma_core_pci_up(struct bcma_bus *bus); | 243 | extern void bcma_core_pci_up(struct bcma_bus *bus); |
244 | extern void bcma_core_pci_down(struct bcma_bus *bus); | 244 | extern void bcma_core_pci_down(struct bcma_bus *bus); |
245 | extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up); | ||
245 | 246 | ||
246 | extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); | 247 | extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); |
247 | extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev); | 248 | extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev); |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 482ad2d84a32..672ddc4de4af 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -439,6 +439,17 @@ static inline char *hex_byte_pack(char *buf, u8 byte) | |||
439 | return buf; | 439 | return buf; |
440 | } | 440 | } |
441 | 441 | ||
442 | extern const char hex_asc_upper[]; | ||
443 | #define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)] | ||
444 | #define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4] | ||
445 | |||
446 | static inline char *hex_byte_pack_upper(char *buf, u8 byte) | ||
447 | { | ||
448 | *buf++ = hex_asc_upper_hi(byte); | ||
449 | *buf++ = hex_asc_upper_lo(byte); | ||
450 | return buf; | ||
451 | } | ||
452 | |||
442 | static inline char * __deprecated pack_hex_byte(char *buf, u8 byte) | 453 | static inline char * __deprecated pack_hex_byte(char *buf, u8 byte) |
443 | { | 454 | { |
444 | return hex_byte_pack(buf, byte); | 455 | return hex_byte_pack(buf, byte); |
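The new helper mirrors hex_byte_pack() but uses the uppercase table added to lib/hexdump.c further down. A self-contained sketch of what it emits (the array contents match hex_asc_upper; the main() harness is illustrative only):

#include <stdio.h>

static const char hex_asc_upper[] = "0123456789ABCDEF";

static char *hex_byte_pack_upper(char *buf, unsigned char byte)
{
	*buf++ = hex_asc_upper[(byte & 0xf0) >> 4];   /* high nibble first */
	*buf++ = hex_asc_upper[byte & 0x0f];          /* then low nibble   */
	return buf;
}

int main(void)
{
	char out[3] = "";
	hex_byte_pack_upper(out, 0x3f);
	printf("%s\n", out);   /* prints "3F" */
	return 0;
}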
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2ddb48d9312c..c2d89335f637 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -498,7 +498,7 @@ struct sk_buff { | |||
498 | * headers if needed | 498 | * headers if needed |
499 | */ | 499 | */ |
500 | __u8 encapsulation:1; | 500 | __u8 encapsulation:1; |
501 | /* 7/9 bit hole (depending on ndisc_nodetype presence) */ | 501 | /* 6/8 bit hole (depending on ndisc_nodetype presence) */ |
502 | kmemcheck_bitfield_end(flags2); | 502 | kmemcheck_bitfield_end(flags2); |
503 | 503 | ||
504 | #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL | 504 | #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL |
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 9cb2fe8ca944..e303eef94dd5 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h | |||
@@ -42,6 +42,7 @@ struct usbnet { | |||
42 | struct usb_host_endpoint *status; | 42 | struct usb_host_endpoint *status; |
43 | unsigned maxpacket; | 43 | unsigned maxpacket; |
44 | struct timer_list delay; | 44 | struct timer_list delay; |
45 | const char *padding_pkt; | ||
45 | 46 | ||
46 | /* protocol/interface state */ | 47 | /* protocol/interface state */ |
47 | struct net_device *net; | 48 | struct net_device *net; |
diff --git a/include/net/addrconf.h b/include/net/addrconf.h index fb314de2b61b..86505bfa5d2c 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h | |||
@@ -67,6 +67,10 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr, | |||
67 | int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr); | 67 | int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr); |
68 | #endif | 68 | #endif |
69 | 69 | ||
70 | bool ipv6_chk_custom_prefix(const struct in6_addr *addr, | ||
71 | const unsigned int prefix_len, | ||
72 | struct net_device *dev); | ||
73 | |||
70 | int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev); | 74 | int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev); |
71 | 75 | ||
72 | struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, | 76 | struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, |
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index aaeaf0938ec0..15f10841e2b5 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h | |||
@@ -104,6 +104,7 @@ enum { | |||
104 | enum { | 104 | enum { |
105 | HCI_SETUP, | 105 | HCI_SETUP, |
106 | HCI_AUTO_OFF, | 106 | HCI_AUTO_OFF, |
107 | HCI_RFKILLED, | ||
107 | HCI_MGMT, | 108 | HCI_MGMT, |
108 | HCI_PAIRABLE, | 109 | HCI_PAIRABLE, |
109 | HCI_SERVICE_CACHE, | 110 | HCI_SERVICE_CACHE, |
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index f0d70f066f3d..9c4d37ec45a1 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
@@ -723,8 +723,6 @@ struct ip_vs_dest_dst { | |||
723 | struct rcu_head rcu_head; | 723 | struct rcu_head rcu_head; |
724 | }; | 724 | }; |
725 | 725 | ||
726 | /* In grace period after removing */ | ||
727 | #define IP_VS_DEST_STATE_REMOVING 0x01 | ||
728 | /* | 726 | /* |
729 | * The real server destination forwarding entry | 727 | * The real server destination forwarding entry |
730 | * with ip address, port number, and so on. | 728 | * with ip address, port number, and so on. |
@@ -742,7 +740,7 @@ struct ip_vs_dest { | |||
742 | 740 | ||
743 | atomic_t refcnt; /* reference counter */ | 741 | atomic_t refcnt; /* reference counter */ |
744 | struct ip_vs_stats stats; /* statistics */ | 742 | struct ip_vs_stats stats; /* statistics */ |
745 | unsigned long state; /* state flags */ | 743 | unsigned long idle_start; /* start time, jiffies */ |
746 | 744 | ||
747 | /* connection counters and thresholds */ | 745 | /* connection counters and thresholds */ |
748 | atomic_t activeconns; /* active connections */ | 746 | atomic_t activeconns; /* active connections */ |
@@ -756,14 +754,13 @@ struct ip_vs_dest { | |||
756 | struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */ | 754 | struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */ |
757 | 755 | ||
758 | /* for virtual service */ | 756 | /* for virtual service */ |
759 | struct ip_vs_service *svc; /* service it belongs to */ | 757 | struct ip_vs_service __rcu *svc; /* service it belongs to */ |
760 | __u16 protocol; /* which protocol (TCP/UDP) */ | 758 | __u16 protocol; /* which protocol (TCP/UDP) */ |
761 | __be16 vport; /* virtual port number */ | 759 | __be16 vport; /* virtual port number */ |
762 | union nf_inet_addr vaddr; /* virtual IP address */ | 760 | union nf_inet_addr vaddr; /* virtual IP address */ |
763 | __u32 vfwmark; /* firewall mark of service */ | 761 | __u32 vfwmark; /* firewall mark of service */ |
764 | 762 | ||
765 | struct list_head t_list; /* in dest_trash */ | 763 | struct list_head t_list; /* in dest_trash */ |
766 | struct rcu_head rcu_head; | ||
767 | unsigned int in_rs_table:1; /* we are in rs_table */ | 764 | unsigned int in_rs_table:1; /* we are in rs_table */ |
768 | }; | 765 | }; |
769 | 766 | ||
@@ -1649,7 +1646,7 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) | |||
1649 | /* CONFIG_IP_VS_NFCT */ | 1646 | /* CONFIG_IP_VS_NFCT */ |
1650 | #endif | 1647 | #endif |
1651 | 1648 | ||
1652 | static inline unsigned int | 1649 | static inline int |
1653 | ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) | 1650 | ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) |
1654 | { | 1651 | { |
1655 | /* | 1652 | /* |
diff --git a/include/net/mrp.h b/include/net/mrp.h index 4fbf02aa2ec1..0f7558b638ae 100644 --- a/include/net/mrp.h +++ b/include/net/mrp.h | |||
@@ -112,6 +112,7 @@ struct mrp_applicant { | |||
112 | struct mrp_application *app; | 112 | struct mrp_application *app; |
113 | struct net_device *dev; | 113 | struct net_device *dev; |
114 | struct timer_list join_timer; | 114 | struct timer_list join_timer; |
115 | struct timer_list periodic_timer; | ||
115 | 116 | ||
116 | spinlock_t lock; | 117 | spinlock_t lock; |
117 | struct sk_buff_head queue; | 118 | struct sk_buff_head queue; |
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 1313456a0994..9d22f08896c6 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h | |||
@@ -74,6 +74,7 @@ struct net { | |||
74 | struct hlist_head *dev_index_head; | 74 | struct hlist_head *dev_index_head; |
75 | unsigned int dev_base_seq; /* protected by rtnl_mutex */ | 75 | unsigned int dev_base_seq; /* protected by rtnl_mutex */ |
76 | int ifindex; | 76 | int ifindex; |
77 | unsigned int dev_unreg_count; | ||
77 | 78 | ||
78 | /* core fib_rules */ | 79 | /* core fib_rules */ |
79 | struct list_head rules_ops; | 80 | struct list_head rules_ops; |
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h index 806f54a290d6..f572f313d6f1 100644 --- a/include/net/netfilter/nf_conntrack_synproxy.h +++ b/include/net/netfilter/nf_conntrack_synproxy.h | |||
@@ -56,7 +56,7 @@ struct synproxy_options { | |||
56 | 56 | ||
57 | struct tcphdr; | 57 | struct tcphdr; |
58 | struct xt_synproxy_info; | 58 | struct xt_synproxy_info; |
59 | extern void synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, | 59 | extern bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, |
60 | const struct tcphdr *th, | 60 | const struct tcphdr *th, |
61 | struct synproxy_options *opts); | 61 | struct synproxy_options *opts); |
62 | extern unsigned int synproxy_options_size(const struct synproxy_options *opts); | 62 | extern unsigned int synproxy_options_size(const struct synproxy_options *opts); |
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h index 6ca975bebd37..c2e542b27a5a 100644 --- a/include/net/secure_seq.h +++ b/include/net/secure_seq.h | |||
@@ -3,7 +3,6 @@ | |||
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | 5 | ||
6 | extern void net_secret_init(void); | ||
7 | extern __u32 secure_ip_id(__be32 daddr); | 6 | extern __u32 secure_ip_id(__be32 daddr); |
8 | extern __u32 secure_ipv6_id(const __be32 daddr[4]); | 7 | extern __u32 secure_ipv6_id(const __be32 daddr[4]); |
9 | extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); | 8 | extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); |
diff --git a/include/net/sock.h b/include/net/sock.h index 6ba2e7b0e2b1..1d37a8086bed 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -409,6 +409,11 @@ struct sock { | |||
409 | void (*sk_destruct)(struct sock *sk); | 409 | void (*sk_destruct)(struct sock *sk); |
410 | }; | 410 | }; |
411 | 411 | ||
412 | #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) | ||
413 | |||
414 | #define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk))) | ||
415 | #define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr) | ||
416 | |||
412 | /* | 417 | /* |
413 | * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK | 418 | * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK |
414 | * or not whether his port will be reused by someone else. SK_FORCE_REUSE | 419 | * or not whether his port will be reused by someone else. SK_FORCE_REUSE |
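The vxlan hunks earlier in this series use these helpers in place of the open-coded smp_wmb()/smp_read_barrier_depends() pair. A kernel-style sketch of the intended pairing (function names are illustrative, not from the patch; the reader must be inside an RCU read-side critical section):

/* Writer: publish the per-socket state with release semantics. */
static void publish_sock_state(struct sock *sk, struct vxlan_sock *vs)
{
	rcu_assign_sk_user_data(sk, vs);
}

/* Reader, e.g. the UDP encap receive path: the address-dependency
 * ordering formerly provided by smp_read_barrier_depends() is implied.
 */
static struct vxlan_sock *lookup_sock_state(struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}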
diff --git a/lib/hexdump.c b/lib/hexdump.c index 3f0494c9d57a..8499c810909a 100644 --- a/lib/hexdump.c +++ b/lib/hexdump.c | |||
@@ -14,6 +14,8 @@ | |||
14 | 14 | ||
15 | const char hex_asc[] = "0123456789abcdef"; | 15 | const char hex_asc[] = "0123456789abcdef"; |
16 | EXPORT_SYMBOL(hex_asc); | 16 | EXPORT_SYMBOL(hex_asc); |
17 | const char hex_asc_upper[] = "0123456789ABCDEF"; | ||
18 | EXPORT_SYMBOL(hex_asc_upper); | ||
17 | 19 | ||
18 | /** | 20 | /** |
19 | * hex_to_bin - convert a hex digit to its real value | 21 | * hex_to_bin - convert a hex digit to its real value |
diff --git a/net/802/mrp.c b/net/802/mrp.c index 1eb05d80b07b..3ed616215870 100644 --- a/net/802/mrp.c +++ b/net/802/mrp.c | |||
@@ -24,6 +24,11 @@ | |||
24 | static unsigned int mrp_join_time __read_mostly = 200; | 24 | static unsigned int mrp_join_time __read_mostly = 200; |
25 | module_param(mrp_join_time, uint, 0644); | 25 | module_param(mrp_join_time, uint, 0644); |
26 | MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)"); | 26 | MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)"); |
27 | |||
28 | static unsigned int mrp_periodic_time __read_mostly = 1000; | ||
29 | module_param(mrp_periodic_time, uint, 0644); | ||
30 | MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)"); | ||
31 | |||
27 | MODULE_LICENSE("GPL"); | 32 | MODULE_LICENSE("GPL"); |
28 | 33 | ||
29 | static const u8 | 34 | static const u8 |
@@ -595,6 +600,24 @@ static void mrp_join_timer(unsigned long data) | |||
595 | mrp_join_timer_arm(app); | 600 | mrp_join_timer_arm(app); |
596 | } | 601 | } |
597 | 602 | ||
603 | static void mrp_periodic_timer_arm(struct mrp_applicant *app) | ||
604 | { | ||
605 | mod_timer(&app->periodic_timer, | ||
606 | jiffies + msecs_to_jiffies(mrp_periodic_time)); | ||
607 | } | ||
608 | |||
609 | static void mrp_periodic_timer(unsigned long data) | ||
610 | { | ||
611 | struct mrp_applicant *app = (struct mrp_applicant *)data; | ||
612 | |||
613 | spin_lock(&app->lock); | ||
614 | mrp_mad_event(app, MRP_EVENT_PERIODIC); | ||
615 | mrp_pdu_queue(app); | ||
616 | spin_unlock(&app->lock); | ||
617 | |||
618 | mrp_periodic_timer_arm(app); | ||
619 | } | ||
620 | |||
598 | static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset) | 621 | static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset) |
599 | { | 622 | { |
600 | __be16 endmark; | 623 | __be16 endmark; |
@@ -845,6 +868,9 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl) | |||
845 | rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app); | 868 | rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app); |
846 | setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app); | 869 | setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app); |
847 | mrp_join_timer_arm(app); | 870 | mrp_join_timer_arm(app); |
871 | setup_timer(&app->periodic_timer, mrp_periodic_timer, | ||
872 | (unsigned long)app); | ||
873 | mrp_periodic_timer_arm(app); | ||
848 | return 0; | 874 | return 0; |
849 | 875 | ||
850 | err3: | 876 | err3: |
@@ -870,6 +896,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl) | |||
870 | * all pending messages before the applicant is gone. | 896 | * all pending messages before the applicant is gone. |
871 | */ | 897 | */ |
872 | del_timer_sync(&app->join_timer); | 898 | del_timer_sync(&app->join_timer); |
899 | del_timer_sync(&app->periodic_timer); | ||
873 | 900 | ||
874 | spin_lock_bh(&app->lock); | 901 | spin_lock_bh(&app->lock); |
875 | mrp_mad_event(app, MRP_EVENT_TX); | 902 | mrp_mad_event(app, MRP_EVENT_TX); |
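The MRP periodic timer added above uses the classic pre-4.15 timer idiom: setup_timer() binds the callback plus an unsigned long cookie, and the callback re-arms itself with mod_timer() until del_timer_sync() stops it. A stripped-down sketch of that idiom, with a hypothetical struct example as the owning object:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example {			/* hypothetical object owning the timer */
	struct timer_list timer;
};

static void example_timer_fn(unsigned long data)
{
	struct example *ex = (struct example *)data;

	pr_debug("periodic tick for %p\n", ex);	/* the periodic work goes here */
	/* re-arm: the timer keeps firing until someone deletes it */
	mod_timer(&ex->timer, jiffies + msecs_to_jiffies(1000));
}

static void example_start(struct example *ex)
{
	setup_timer(&ex->timer, example_timer_fn, (unsigned long)ex);
	mod_timer(&ex->timer, jiffies + msecs_to_jiffies(1000));
}

static void example_stop(struct example *ex)
{
	del_timer_sync(&ex->timer);	/* waits for a running callback to finish */
}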
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 634debab4d54..fb7356fcfe51 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c | |||
@@ -1146,7 +1146,11 @@ int hci_dev_open(__u16 dev) | |||
1146 | goto done; | 1146 | goto done; |
1147 | } | 1147 | } |
1148 | 1148 | ||
1149 | if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) { | 1149 | /* Check for rfkill but allow the HCI setup stage to proceed |
1150 | * (which in itself doesn't cause any RF activity). | ||
1151 | */ | ||
1152 | if (test_bit(HCI_RFKILLED, &hdev->dev_flags) && | ||
1153 | !test_bit(HCI_SETUP, &hdev->dev_flags)) { | ||
1150 | ret = -ERFKILL; | 1154 | ret = -ERFKILL; |
1151 | goto done; | 1155 | goto done; |
1152 | } | 1156 | } |
@@ -1566,10 +1570,13 @@ static int hci_rfkill_set_block(void *data, bool blocked) | |||
1566 | 1570 | ||
1567 | BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); | 1571 | BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); |
1568 | 1572 | ||
1569 | if (!blocked) | 1573 | if (blocked) { |
1570 | return 0; | 1574 | set_bit(HCI_RFKILLED, &hdev->dev_flags); |
1571 | 1575 | if (!test_bit(HCI_SETUP, &hdev->dev_flags)) | |
1572 | hci_dev_do_close(hdev); | 1576 | hci_dev_do_close(hdev); |
1577 | } else { | ||
1578 | clear_bit(HCI_RFKILLED, &hdev->dev_flags); | ||
1579 | } | ||
1573 | 1580 | ||
1574 | return 0; | 1581 | return 0; |
1575 | } | 1582 | } |
@@ -1591,9 +1598,13 @@ static void hci_power_on(struct work_struct *work) | |||
1591 | return; | 1598 | return; |
1592 | } | 1599 | } |
1593 | 1600 | ||
1594 | if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) | 1601 | if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) { |
1602 | clear_bit(HCI_AUTO_OFF, &hdev->dev_flags); | ||
1603 | hci_dev_do_close(hdev); | ||
1604 | } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { | ||
1595 | queue_delayed_work(hdev->req_workqueue, &hdev->power_off, | 1605 | queue_delayed_work(hdev->req_workqueue, &hdev->power_off, |
1596 | HCI_AUTO_OFF_TIMEOUT); | 1606 | HCI_AUTO_OFF_TIMEOUT); |
1607 | } | ||
1597 | 1608 | ||
1598 | if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) | 1609 | if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) |
1599 | mgmt_index_added(hdev); | 1610 | mgmt_index_added(hdev); |
@@ -2209,6 +2220,9 @@ int hci_register_dev(struct hci_dev *hdev) | |||
2209 | } | 2220 | } |
2210 | } | 2221 | } |
2211 | 2222 | ||
2223 | if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) | ||
2224 | set_bit(HCI_RFKILLED, &hdev->dev_flags); | ||
2225 | |||
2212 | set_bit(HCI_SETUP, &hdev->dev_flags); | 2226 | set_bit(HCI_SETUP, &hdev->dev_flags); |
2213 | 2227 | ||
2214 | if (hdev->dev_type != HCI_AMP) | 2228 | if (hdev->dev_type != HCI_AMP) |
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 94aab73f89d4..8db3e89fae35 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c | |||
@@ -3557,7 +3557,11 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
3557 | cp.handle = cpu_to_le16(conn->handle); | 3557 | cp.handle = cpu_to_le16(conn->handle); |
3558 | 3558 | ||
3559 | if (ltk->authenticated) | 3559 | if (ltk->authenticated) |
3560 | conn->sec_level = BT_SECURITY_HIGH; | 3560 | conn->pending_sec_level = BT_SECURITY_HIGH; |
3561 | else | ||
3562 | conn->pending_sec_level = BT_SECURITY_MEDIUM; | ||
3563 | |||
3564 | conn->enc_key_size = ltk->enc_size; | ||
3561 | 3565 | ||
3562 | hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); | 3566 | hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); |
3563 | 3567 | ||
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index b3bb7bca8e60..63fa11109a1c 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c | |||
@@ -3755,6 +3755,13 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn, | |||
3755 | 3755 | ||
3756 | sk = chan->sk; | 3756 | sk = chan->sk; |
3757 | 3757 | ||
3758 | /* For certain devices (e.g. a HID mouse), support for authentication, | ||
3759 | * pairing and bonding is optional. For such devices, in order to avoid | ||
3760 | * keeping the ACL alive for too long after L2CAP disconnection, reset the ACL | ||
3761 | * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect. | ||
3762 | */ | ||
3763 | conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT; | ||
3764 | |||
3758 | bacpy(&bt_sk(sk)->src, conn->src); | 3765 | bacpy(&bt_sk(sk)->src, conn->src); |
3759 | bacpy(&bt_sk(sk)->dst, conn->dst); | 3766 | bacpy(&bt_sk(sk)->dst, conn->dst); |
3760 | chan->psm = psm; | 3767 | chan->psm = psm; |
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 6d126faf145f..84fcf9fff3ea 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c | |||
@@ -569,7 +569,6 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb) | |||
569 | static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) | 569 | static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) |
570 | { | 570 | { |
571 | struct rfcomm_dev *dev = dlc->owner; | 571 | struct rfcomm_dev *dev = dlc->owner; |
572 | struct tty_struct *tty; | ||
573 | if (!dev) | 572 | if (!dev) |
574 | return; | 573 | return; |
575 | 574 | ||
@@ -581,38 +580,8 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) | |||
581 | DPM_ORDER_DEV_AFTER_PARENT); | 580 | DPM_ORDER_DEV_AFTER_PARENT); |
582 | 581 | ||
583 | wake_up_interruptible(&dev->port.open_wait); | 582 | wake_up_interruptible(&dev->port.open_wait); |
584 | } else if (dlc->state == BT_CLOSED) { | 583 | } else if (dlc->state == BT_CLOSED) |
585 | tty = tty_port_tty_get(&dev->port); | 584 | tty_port_tty_hangup(&dev->port, false); |
586 | if (!tty) { | ||
587 | if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) { | ||
588 | /* Drop DLC lock here to avoid deadlock | ||
589 | * 1. rfcomm_dev_get will take rfcomm_dev_lock | ||
590 | * but in rfcomm_dev_add there's lock order: | ||
591 | * rfcomm_dev_lock -> dlc lock | ||
592 | * 2. tty_port_put will deadlock if it's | ||
593 | * the last reference | ||
594 | * | ||
595 | * FIXME: when we release the lock anything | ||
596 | * could happen to dev, even its destruction | ||
597 | */ | ||
598 | rfcomm_dlc_unlock(dlc); | ||
599 | if (rfcomm_dev_get(dev->id) == NULL) { | ||
600 | rfcomm_dlc_lock(dlc); | ||
601 | return; | ||
602 | } | ||
603 | |||
604 | if (!test_and_set_bit(RFCOMM_TTY_RELEASED, | ||
605 | &dev->flags)) | ||
606 | tty_port_put(&dev->port); | ||
607 | |||
608 | tty_port_put(&dev->port); | ||
609 | rfcomm_dlc_lock(dlc); | ||
610 | } | ||
611 | } else { | ||
612 | tty_hangup(tty); | ||
613 | tty_kref_put(tty); | ||
614 | } | ||
615 | } | ||
616 | } | 585 | } |
617 | 586 | ||
618 | static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) | 587 | static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) |
diff --git a/net/core/dev.c b/net/core/dev.c index 5c713f2239cc..65f829cfd928 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -5247,10 +5247,12 @@ static int dev_new_index(struct net *net) | |||
5247 | 5247 | ||
5248 | /* Delayed registration/unregisteration */ | 5248 | /* Delayed registration/unregisteration */ |
5249 | static LIST_HEAD(net_todo_list); | 5249 | static LIST_HEAD(net_todo_list); |
5250 | static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); | ||
5250 | 5251 | ||
5251 | static void net_set_todo(struct net_device *dev) | 5252 | static void net_set_todo(struct net_device *dev) |
5252 | { | 5253 | { |
5253 | list_add_tail(&dev->todo_list, &net_todo_list); | 5254 | list_add_tail(&dev->todo_list, &net_todo_list); |
5255 | dev_net(dev)->dev_unreg_count++; | ||
5254 | } | 5256 | } |
5255 | 5257 | ||
5256 | static void rollback_registered_many(struct list_head *head) | 5258 | static void rollback_registered_many(struct list_head *head) |
@@ -5918,6 +5920,12 @@ void netdev_run_todo(void) | |||
5918 | if (dev->destructor) | 5920 | if (dev->destructor) |
5919 | dev->destructor(dev); | 5921 | dev->destructor(dev); |
5920 | 5922 | ||
5923 | /* Report a network device has been unregistered */ | ||
5924 | rtnl_lock(); | ||
5925 | dev_net(dev)->dev_unreg_count--; | ||
5926 | __rtnl_unlock(); | ||
5927 | wake_up(&netdev_unregistering_wq); | ||
5928 | |||
5921 | /* Free network device */ | 5929 | /* Free network device */ |
5922 | kobject_put(&dev->dev.kobj); | 5930 | kobject_put(&dev->dev.kobj); |
5923 | } | 5931 | } |
@@ -6603,6 +6611,34 @@ static void __net_exit default_device_exit(struct net *net) | |||
6603 | rtnl_unlock(); | 6611 | rtnl_unlock(); |
6604 | } | 6612 | } |
6605 | 6613 | ||
6614 | static void __net_exit rtnl_lock_unregistering(struct list_head *net_list) | ||
6615 | { | ||
6616 | /* Return with the rtnl_lock held when there are no network | ||
6617 | * devices unregistering in any network namespace in net_list. | ||
6618 | */ | ||
6619 | struct net *net; | ||
6620 | bool unregistering; | ||
6621 | DEFINE_WAIT(wait); | ||
6622 | |||
6623 | for (;;) { | ||
6624 | prepare_to_wait(&netdev_unregistering_wq, &wait, | ||
6625 | TASK_UNINTERRUPTIBLE); | ||
6626 | unregistering = false; | ||
6627 | rtnl_lock(); | ||
6628 | list_for_each_entry(net, net_list, exit_list) { | ||
6629 | if (net->dev_unreg_count > 0) { | ||
6630 | unregistering = true; | ||
6631 | break; | ||
6632 | } | ||
6633 | } | ||
6634 | if (!unregistering) | ||
6635 | break; | ||
6636 | __rtnl_unlock(); | ||
6637 | schedule(); | ||
6638 | } | ||
6639 | finish_wait(&netdev_unregistering_wq, &wait); | ||
6640 | } | ||
6641 | |||
6606 | static void __net_exit default_device_exit_batch(struct list_head *net_list) | 6642 | static void __net_exit default_device_exit_batch(struct list_head *net_list) |
6607 | { | 6643 | { |
6608 | /* At exit all network devices must be removed from a network | 6644 | /* At exit all network devices must be removed from a network |
@@ -6614,7 +6650,18 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list) | |||
6614 | struct net *net; | 6650 | struct net *net; |
6615 | LIST_HEAD(dev_kill_list); | 6651 | LIST_HEAD(dev_kill_list); |
6616 | 6652 | ||
6617 | rtnl_lock(); | 6653 | /* To prevent network device cleanup code from dereferencing |
6654 | * loopback devices or network devices that have been freed, | ||
6655 | * wait here for all pending unregistrations to complete | ||
6656 | * before unregistering the loopback device and allowing the | ||
6657 | * network namespace to be freed. | ||
6658 | * | ||
6659 | * The netdev todo list, containing all network device | ||
6660 | * unregistrations that happen in default_device_exit_batch, | ||
6661 | * will run in the rtnl_unlock() at the end of | ||
6662 | * default_device_exit_batch. | ||
6663 | */ | ||
6664 | rtnl_lock_unregistering(net_list); | ||
6618 | list_for_each_entry(net, net_list, exit_list) { | 6665 | list_for_each_entry(net, net_list, exit_list) { |
6619 | for_each_netdev_reverse(net, dev) { | 6666 | for_each_netdev_reverse(net, dev) { |
6620 | if (dev->rtnl_link_ops) | 6667 | if (dev->rtnl_link_ops) |
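rtnl_lock_unregistering() above follows the standard wait-queue idiom: register on the wait queue with prepare_to_wait() before testing the condition, so a wake_up() issued in between cannot be missed, and only schedule() while the condition is still false. A generic sketch of the same pattern (example_done and example_wq are placeholders, not from the patch):

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_done;		/* set to true by the other side */

static void example_wait(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&example_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (example_done)	/* test the condition *after* queueing */
			break;
		schedule();
	}
	finish_wait(&example_wq, &wait);
}

/* the other side does: example_done = true; wake_up(&example_wq); */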
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 1929af87b260..8d7d0dd72db2 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
@@ -154,8 +154,8 @@ ipv6: | |||
154 | if (poff >= 0) { | 154 | if (poff >= 0) { |
155 | __be32 *ports, _ports; | 155 | __be32 *ports, _ports; |
156 | 156 | ||
157 | nhoff += poff; | 157 | ports = skb_header_pointer(skb, nhoff + poff, |
158 | ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports); | 158 | sizeof(_ports), &_ports); |
159 | if (ports) | 159 | if (ports) |
160 | flow->ports = *ports; | 160 | flow->ports = *ports; |
161 | } | 161 | } |
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index 6a2f13cee86a..3f1ec1586ae1 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c | |||
@@ -10,11 +10,24 @@ | |||
10 | 10 | ||
11 | #include <net/secure_seq.h> | 11 | #include <net/secure_seq.h> |
12 | 12 | ||
13 | static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; | 13 | #define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4) |
14 | 14 | ||
15 | void net_secret_init(void) | 15 | static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned; |
16 | |||
17 | static void net_secret_init(void) | ||
16 | { | 18 | { |
17 | get_random_bytes(net_secret, sizeof(net_secret)); | 19 | u32 tmp; |
20 | int i; | ||
21 | |||
22 | if (likely(net_secret[0])) | ||
23 | return; | ||
24 | |||
25 | for (i = NET_SECRET_SIZE; i > 0;) { | ||
26 | do { | ||
27 | get_random_bytes(&tmp, sizeof(tmp)); | ||
28 | } while (!tmp); | ||
29 | cmpxchg(&net_secret[--i], 0, tmp); | ||
30 | } | ||
18 | } | 31 | } |
19 | 32 | ||
20 | #ifdef CONFIG_INET | 33 | #ifdef CONFIG_INET |
@@ -42,6 +55,7 @@ __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, | |||
42 | u32 hash[MD5_DIGEST_WORDS]; | 55 | u32 hash[MD5_DIGEST_WORDS]; |
43 | u32 i; | 56 | u32 i; |
44 | 57 | ||
58 | net_secret_init(); | ||
45 | memcpy(hash, saddr, 16); | 59 | memcpy(hash, saddr, 16); |
46 | for (i = 0; i < 4; i++) | 60 | for (i = 0; i < 4; i++) |
47 | secret[i] = net_secret[i] + (__force u32)daddr[i]; | 61 | secret[i] = net_secret[i] + (__force u32)daddr[i]; |
@@ -63,6 +77,7 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, | |||
63 | u32 hash[MD5_DIGEST_WORDS]; | 77 | u32 hash[MD5_DIGEST_WORDS]; |
64 | u32 i; | 78 | u32 i; |
65 | 79 | ||
80 | net_secret_init(); | ||
66 | memcpy(hash, saddr, 16); | 81 | memcpy(hash, saddr, 16); |
67 | for (i = 0; i < 4; i++) | 82 | for (i = 0; i < 4; i++) |
68 | secret[i] = net_secret[i] + (__force u32) daddr[i]; | 83 | secret[i] = net_secret[i] + (__force u32) daddr[i]; |
@@ -82,6 +97,7 @@ __u32 secure_ip_id(__be32 daddr) | |||
82 | { | 97 | { |
83 | u32 hash[MD5_DIGEST_WORDS]; | 98 | u32 hash[MD5_DIGEST_WORDS]; |
84 | 99 | ||
100 | net_secret_init(); | ||
85 | hash[0] = (__force __u32) daddr; | 101 | hash[0] = (__force __u32) daddr; |
86 | hash[1] = net_secret[13]; | 102 | hash[1] = net_secret[13]; |
87 | hash[2] = net_secret[14]; | 103 | hash[2] = net_secret[14]; |
@@ -96,6 +112,7 @@ __u32 secure_ipv6_id(const __be32 daddr[4]) | |||
96 | { | 112 | { |
97 | __u32 hash[4]; | 113 | __u32 hash[4]; |
98 | 114 | ||
115 | net_secret_init(); | ||
99 | memcpy(hash, daddr, 16); | 116 | memcpy(hash, daddr, 16); |
100 | md5_transform(hash, net_secret); | 117 | md5_transform(hash, net_secret); |
101 | 118 | ||
@@ -107,6 +124,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, | |||
107 | { | 124 | { |
108 | u32 hash[MD5_DIGEST_WORDS]; | 125 | u32 hash[MD5_DIGEST_WORDS]; |
109 | 126 | ||
127 | net_secret_init(); | ||
110 | hash[0] = (__force u32)saddr; | 128 | hash[0] = (__force u32)saddr; |
111 | hash[1] = (__force u32)daddr; | 129 | hash[1] = (__force u32)daddr; |
112 | hash[2] = ((__force u16)sport << 16) + (__force u16)dport; | 130 | hash[2] = ((__force u16)sport << 16) + (__force u16)dport; |
@@ -121,6 +139,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) | |||
121 | { | 139 | { |
122 | u32 hash[MD5_DIGEST_WORDS]; | 140 | u32 hash[MD5_DIGEST_WORDS]; |
123 | 141 | ||
142 | net_secret_init(); | ||
124 | hash[0] = (__force u32)saddr; | 143 | hash[0] = (__force u32)saddr; |
125 | hash[1] = (__force u32)daddr; | 144 | hash[1] = (__force u32)daddr; |
126 | hash[2] = (__force u32)dport ^ net_secret[14]; | 145 | hash[2] = (__force u32)dport ^ net_secret[14]; |
@@ -140,6 +159,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, | |||
140 | u32 hash[MD5_DIGEST_WORDS]; | 159 | u32 hash[MD5_DIGEST_WORDS]; |
141 | u64 seq; | 160 | u64 seq; |
142 | 161 | ||
162 | net_secret_init(); | ||
143 | hash[0] = (__force u32)saddr; | 163 | hash[0] = (__force u32)saddr; |
144 | hash[1] = (__force u32)daddr; | 164 | hash[1] = (__force u32)daddr; |
145 | hash[2] = ((__force u16)sport << 16) + (__force u16)dport; | 165 | hash[2] = ((__force u16)sport << 16) + (__force u16)dport; |
@@ -164,6 +184,7 @@ u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, | |||
164 | u64 seq; | 184 | u64 seq; |
165 | u32 i; | 185 | u32 i; |
166 | 186 | ||
187 | net_secret_init(); | ||
167 | memcpy(hash, saddr, 16); | 188 | memcpy(hash, saddr, 16); |
168 | for (i = 0; i < 4; i++) | 189 | for (i = 0; i < 4; i++) |
169 | secret[i] = net_secret[i] + daddr[i]; | 190 | secret[i] = net_secret[i] + daddr[i]; |
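The reworked net_secret_init() is a lock-free, idempotent initializer: any number of callers may race into it, but cmpxchg(..., 0, tmp) installs only the first non-zero value, and the non-zero test on net_secret[0] keeps later calls cheap. The same idea reduced to a single word, as an illustrative sketch:

#include <linux/random.h>

static u32 example_secret;

static void example_secret_init(void)
{
	u32 tmp;

	if (likely(example_secret))		/* fast path once initialised */
		return;

	do {
		get_random_bytes(&tmp, sizeof(tmp));
	} while (!tmp);				/* zero is reserved for "unset" */

	cmpxchg(&example_secret, 0, tmp);	/* only the first writer wins */
}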
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 7a1874b7b8fd..cfeb85cff4f0 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -263,10 +263,8 @@ void build_ehash_secret(void) | |||
263 | get_random_bytes(&rnd, sizeof(rnd)); | 263 | get_random_bytes(&rnd, sizeof(rnd)); |
264 | } while (rnd == 0); | 264 | } while (rnd == 0); |
265 | 265 | ||
266 | if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) { | 266 | if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) |
267 | get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret)); | 267 | get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret)); |
268 | net_secret_init(); | ||
269 | } | ||
270 | } | 268 | } |
271 | EXPORT_SYMBOL(build_ehash_secret); | 269 | EXPORT_SYMBOL(build_ehash_secret); |
272 | 270 | ||
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index dace87f06e5f..7defdc9ba167 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -736,7 +736,7 @@ static void igmp_gq_timer_expire(unsigned long data) | |||
736 | 736 | ||
737 | in_dev->mr_gq_running = 0; | 737 | in_dev->mr_gq_running = 0; |
738 | igmpv3_send_report(in_dev, NULL); | 738 | igmpv3_send_report(in_dev, NULL); |
739 | __in_dev_put(in_dev); | 739 | in_dev_put(in_dev); |
740 | } | 740 | } |
741 | 741 | ||
742 | static void igmp_ifc_timer_expire(unsigned long data) | 742 | static void igmp_ifc_timer_expire(unsigned long data) |
@@ -749,7 +749,7 @@ static void igmp_ifc_timer_expire(unsigned long data) | |||
749 | igmp_ifc_start_timer(in_dev, | 749 | igmp_ifc_start_timer(in_dev, |
750 | unsolicited_report_interval(in_dev)); | 750 | unsolicited_report_interval(in_dev)); |
751 | } | 751 | } |
752 | __in_dev_put(in_dev); | 752 | in_dev_put(in_dev); |
753 | } | 753 | } |
754 | 754 | ||
755 | static void igmp_ifc_event(struct in_device *in_dev) | 755 | static void igmp_ifc_event(struct in_device *in_dev) |
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index ac9fabe0300f..63a6d6d6b875 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
@@ -623,6 +623,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
623 | tunnel->err_count = 0; | 623 | tunnel->err_count = 0; |
624 | } | 624 | } |
625 | 625 | ||
626 | tos = ip_tunnel_ecn_encap(tos, inner_iph, skb); | ||
626 | ttl = tnl_params->ttl; | 627 | ttl = tnl_params->ttl; |
627 | if (ttl == 0) { | 628 | if (ttl == 0) { |
628 | if (skb->protocol == htons(ETH_P_IP)) | 629 | if (skb->protocol == htons(ETH_P_IP)) |
@@ -641,18 +642,17 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
641 | 642 | ||
642 | max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) | 643 | max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) |
643 | + rt->dst.header_len; | 644 | + rt->dst.header_len; |
644 | if (max_headroom > dev->needed_headroom) { | 645 | if (max_headroom > dev->needed_headroom) |
645 | dev->needed_headroom = max_headroom; | 646 | dev->needed_headroom = max_headroom; |
646 | if (skb_cow_head(skb, dev->needed_headroom)) { | 647 | |
647 | dev->stats.tx_dropped++; | 648 | if (skb_cow_head(skb, dev->needed_headroom)) { |
648 | dev_kfree_skb(skb); | 649 | dev->stats.tx_dropped++; |
649 | return; | 650 | dev_kfree_skb(skb); |
650 | } | 651 | return; |
651 | } | 652 | } |
652 | 653 | ||
653 | err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol, | 654 | err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol, |
654 | ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df, | 655 | tos, ttl, df, !net_eq(tunnel->net, dev_net(dev))); |
655 | !net_eq(tunnel->net, dev_net(dev))); | ||
656 | iptunnel_xmit_stats(err, &dev->stats, dev->tstats); | 656 | iptunnel_xmit_stats(err, &dev->stats, dev->tstats); |
657 | 657 | ||
658 | return; | 658 | return; |
@@ -853,8 +853,10 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, | |||
853 | /* FB netdevice is special: we have one, and only one per netns. | 853 | /* FB netdevice is special: we have one, and only one per netns. |
854 | * Allowing to move it to another netns is clearly unsafe. | 854 | * Allowing to move it to another netns is clearly unsafe. |
855 | */ | 855 | */ |
856 | if (!IS_ERR(itn->fb_tunnel_dev)) | 856 | if (!IS_ERR(itn->fb_tunnel_dev)) { |
857 | itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; | 857 | itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; |
858 | ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev)); | ||
859 | } | ||
858 | rtnl_unlock(); | 860 | rtnl_unlock(); |
859 | 861 | ||
860 | return PTR_RET(itn->fb_tunnel_dev); | 862 | return PTR_RET(itn->fb_tunnel_dev); |
@@ -884,8 +886,6 @@ static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head, | |||
884 | if (!net_eq(dev_net(t->dev), net)) | 886 | if (!net_eq(dev_net(t->dev), net)) |
885 | unregister_netdevice_queue(t->dev, head); | 887 | unregister_netdevice_queue(t->dev, head); |
886 | } | 888 | } |
887 | if (itn->fb_tunnel_dev) | ||
888 | unregister_netdevice_queue(itn->fb_tunnel_dev, head); | ||
889 | } | 889 | } |
890 | 890 | ||
891 | void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops) | 891 | void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops) |
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index d6c856b17fd4..c31e3ad98ef2 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c | |||
@@ -61,7 +61,7 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb, | |||
61 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); | 61 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); |
62 | 62 | ||
63 | /* Push down and install the IP header. */ | 63 | /* Push down and install the IP header. */ |
64 | __skb_push(skb, sizeof(struct iphdr)); | 64 | skb_push(skb, sizeof(struct iphdr)); |
65 | skb_reset_network_header(skb); | 65 | skb_reset_network_header(skb); |
66 | 66 | ||
67 | iph = ip_hdr(skb); | 67 | iph = ip_hdr(skb); |
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index 67e17dcda65e..b6346bf2fde3 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c | |||
@@ -267,7 +267,8 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) | |||
267 | if (th == NULL) | 267 | if (th == NULL) |
268 | return NF_DROP; | 268 | return NF_DROP; |
269 | 269 | ||
270 | synproxy_parse_options(skb, par->thoff, th, &opts); | 270 | if (!synproxy_parse_options(skb, par->thoff, th, &opts)) |
271 | return NF_DROP; | ||
271 | 272 | ||
272 | if (th->syn && !(th->ack || th->fin || th->rst)) { | 273 | if (th->syn && !(th->ack || th->fin || th->rst)) { |
273 | /* Initial SYN from client */ | 274 | /* Initial SYN from client */ |
@@ -350,7 +351,8 @@ static unsigned int ipv4_synproxy_hook(unsigned int hooknum, | |||
350 | 351 | ||
351 | /* fall through */ | 352 | /* fall through */ |
352 | case TCP_CONNTRACK_SYN_SENT: | 353 | case TCP_CONNTRACK_SYN_SENT: |
353 | synproxy_parse_options(skb, thoff, th, &opts); | 354 | if (!synproxy_parse_options(skb, thoff, th, &opts)) |
355 | return NF_DROP; | ||
354 | 356 | ||
355 | if (!th->syn && th->ack && | 357 | if (!th->syn && th->ack && |
356 | CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { | 358 | CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { |
@@ -373,7 +375,9 @@ static unsigned int ipv4_synproxy_hook(unsigned int hooknum, | |||
373 | if (!th->syn || !th->ack) | 375 | if (!th->syn || !th->ack) |
374 | break; | 376 | break; |
375 | 377 | ||
376 | synproxy_parse_options(skb, thoff, th, &opts); | 378 | if (!synproxy_parse_options(skb, thoff, th, &opts)) |
379 | return NF_DROP; | ||
380 | |||
377 | if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) | 381 | if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) |
378 | synproxy->tsoff = opts.tsval - synproxy->its; | 382 | synproxy->tsoff = opts.tsval - synproxy->its; |
379 | 383 | ||
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index bfec521c717f..193db03540ad 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -218,8 +218,10 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info) | |||
218 | 218 | ||
219 | if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) | 219 | if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) |
220 | ipv4_sk_update_pmtu(skb, sk, info); | 220 | ipv4_sk_update_pmtu(skb, sk, info); |
221 | else if (type == ICMP_REDIRECT) | 221 | else if (type == ICMP_REDIRECT) { |
222 | ipv4_sk_redirect(skb, sk); | 222 | ipv4_sk_redirect(skb, sk); |
223 | return; | ||
224 | } | ||
223 | 225 | ||
224 | /* Report error on raw socket, if: | 226 | /* Report error on raw socket, if: |
225 | 1. User requested ip_recverr. | 227 | 1. User requested ip_recverr. |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 7c83cb8bf137..e6bb8256e59f 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -895,8 +895,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
895 | 895 | ||
896 | skb_orphan(skb); | 896 | skb_orphan(skb); |
897 | skb->sk = sk; | 897 | skb->sk = sk; |
898 | skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ? | 898 | skb->destructor = tcp_wfree; |
899 | tcp_wfree : sock_wfree; | ||
900 | atomic_add(skb->truesize, &sk->sk_wmem_alloc); | 899 | atomic_add(skb->truesize, &sk->sk_wmem_alloc); |
901 | 900 | ||
902 | /* Build TCP header and checksum it. */ | 901 | /* Build TCP header and checksum it. */ |
@@ -1840,7 +1839,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
1840 | while ((skb = tcp_send_head(sk))) { | 1839 | while ((skb = tcp_send_head(sk))) { |
1841 | unsigned int limit; | 1840 | unsigned int limit; |
1842 | 1841 | ||
1843 | |||
1844 | tso_segs = tcp_init_tso_segs(sk, skb, mss_now); | 1842 | tso_segs = tcp_init_tso_segs(sk, skb, mss_now); |
1845 | BUG_ON(!tso_segs); | 1843 | BUG_ON(!tso_segs); |
1846 | 1844 | ||
@@ -1869,13 +1867,20 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
1869 | break; | 1867 | break; |
1870 | } | 1868 | } |
1871 | 1869 | ||
1872 | /* TSQ : sk_wmem_alloc accounts skb truesize, | 1870 | /* TCP Small Queues : |
1873 | * including skb overhead. But thats OK. | 1871 | * Control number of packets in qdisc/devices to two packets / or ~1 ms. |
1872 | * This allows for : | ||
1873 | * - better RTT estimation and ACK scheduling | ||
1874 | * - faster recovery | ||
1875 | * - high rates | ||
1874 | */ | 1876 | */ |
1875 | if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) { | 1877 | limit = max(skb->truesize, sk->sk_pacing_rate >> 10); |
1878 | |||
1879 | if (atomic_read(&sk->sk_wmem_alloc) > limit) { | ||
1876 | set_bit(TSQ_THROTTLED, &tp->tsq_flags); | 1880 | set_bit(TSQ_THROTTLED, &tp->tsq_flags); |
1877 | break; | 1881 | break; |
1878 | } | 1882 | } |
1883 | |||
1879 | limit = mss_now; | 1884 | limit = mss_now; |
1880 | if (tso_segs > 1 && !tcp_urg_mode(tp)) | 1885 | if (tso_segs > 1 && !tcp_urg_mode(tp)) |
1881 | limit = tcp_mss_split_point(sk, skb, mss_now, | 1886 | limit = tcp_mss_split_point(sk, skb, mss_now, |
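On the new TCP Small Queues limit above: sk_pacing_rate is in bytes per second, so sk_pacing_rate >> 10 approximates the bytes sent in 1/1024 s (~1 ms), and taking max() with skb->truesize keeps at least one full skb in flight for slow flows. Rough worked numbers, purely illustrative:

/* Illustrative numbers only:
 *   sk_pacing_rate = 12,500,000 B/s (~100 Mbit/s) -> 12500000 >> 10 ~= 12207 bytes
 *   sk_pacing_rate = 125,000,000 B/s (~1 Gbit/s)  -> 125000000 >> 10 ~= 122070 bytes
 * i.e. roughly one millisecond of data; for slow flows the max() with
 * skb->truesize still allows one full skb to be queued.
 */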
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 74d2c95db57f..0ca44df51ee9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -658,7 +658,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) | |||
658 | break; | 658 | break; |
659 | case ICMP_REDIRECT: | 659 | case ICMP_REDIRECT: |
660 | ipv4_sk_redirect(skb, sk); | 660 | ipv4_sk_redirect(skb, sk); |
661 | break; | 661 | goto out; |
662 | } | 662 | } |
663 | 663 | ||
664 | /* | 664 | /* |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d6ff12617f36..cd3fb301da38 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1499,6 +1499,33 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, | |||
1499 | return false; | 1499 | return false; |
1500 | } | 1500 | } |
1501 | 1501 | ||
1502 | /* Compares an address/prefix_len with addresses on device @dev. | ||
1503 | * If one is found it returns true. | ||
1504 | */ | ||
1505 | bool ipv6_chk_custom_prefix(const struct in6_addr *addr, | ||
1506 | const unsigned int prefix_len, struct net_device *dev) | ||
1507 | { | ||
1508 | struct inet6_dev *idev; | ||
1509 | struct inet6_ifaddr *ifa; | ||
1510 | bool ret = false; | ||
1511 | |||
1512 | rcu_read_lock(); | ||
1513 | idev = __in6_dev_get(dev); | ||
1514 | if (idev) { | ||
1515 | read_lock_bh(&idev->lock); | ||
1516 | list_for_each_entry(ifa, &idev->addr_list, if_list) { | ||
1517 | ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len); | ||
1518 | if (ret) | ||
1519 | break; | ||
1520 | } | ||
1521 | read_unlock_bh(&idev->lock); | ||
1522 | } | ||
1523 | rcu_read_unlock(); | ||
1524 | |||
1525 | return ret; | ||
1526 | } | ||
1527 | EXPORT_SYMBOL(ipv6_chk_custom_prefix); | ||
1528 | |||
1502 | int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev) | 1529 | int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev) |
1503 | { | 1530 | { |
1504 | struct inet6_dev *idev; | 1531 | struct inet6_dev *idev; |
@@ -2193,43 +2220,21 @@ ok: | |||
2193 | else | 2220 | else |
2194 | stored_lft = 0; | 2221 | stored_lft = 0; |
2195 | if (!update_lft && !create && stored_lft) { | 2222 | if (!update_lft && !create && stored_lft) { |
2196 | if (valid_lft > MIN_VALID_LIFETIME || | 2223 | const u32 minimum_lft = min( |
2197 | valid_lft > stored_lft) | 2224 | stored_lft, (u32)MIN_VALID_LIFETIME); |
2198 | update_lft = 1; | 2225 | valid_lft = max(valid_lft, minimum_lft); |
2199 | else if (stored_lft <= MIN_VALID_LIFETIME) { | 2226 | |
2200 | /* valid_lft <= stored_lft is always true */ | 2227 | /* RFC4862 Section 5.5.3e: |
2201 | /* | 2228 | * "Note that the preferred lifetime of the |
2202 | * RFC 4862 Section 5.5.3e: | 2229 | * corresponding address is always reset to |
2203 | * "Note that the preferred lifetime of | 2230 | * the Preferred Lifetime in the received |
2204 | * the corresponding address is always | 2231 | * Prefix Information option, regardless of |
2205 | * reset to the Preferred Lifetime in | 2232 | * whether the valid lifetime is also reset or |
2206 | * the received Prefix Information | 2233 | * ignored." |
2207 | * option, regardless of whether the | 2234 | * |
2208 | * valid lifetime is also reset or | 2235 | * So we should always update prefered_lft here. |
2209 | * ignored." | 2236 | */ |
2210 | * | 2237 | update_lft = 1; |
2211 | * So if the preferred lifetime in | ||
2212 | * this advertisement is different | ||
2213 | * than what we have stored, but the | ||
2214 | * valid lifetime is invalid, just | ||
2215 | * reset prefered_lft. | ||
2216 | * | ||
2217 | * We must set the valid lifetime | ||
2218 | * to the stored lifetime since we'll | ||
2219 | * be updating the timestamp below, | ||
2220 | * else we'll set it back to the | ||
2221 | * minimum. | ||
2222 | */ | ||
2223 | if (prefered_lft != ifp->prefered_lft) { | ||
2224 | valid_lft = stored_lft; | ||
2225 | update_lft = 1; | ||
2226 | } | ||
2227 | } else { | ||
2228 | valid_lft = MIN_VALID_LIFETIME; | ||
2229 | if (valid_lft < prefered_lft) | ||
2230 | prefered_lft = valid_lft; | ||
2231 | update_lft = 1; | ||
2232 | } | ||
2233 | } | 2238 | } |
2234 | 2239 | ||
2235 | if (update_lft) { | 2240 | if (update_lft) { |
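On the rewritten lifetime handling above: the clamp means a router advertisement can never push valid_lft below min(stored_lft, MIN_VALID_LIFETIME), i.e. below what the address already has, capped at the RFC 4862 two-hour floor. A worked example with assumed numbers (MIN_VALID_LIFETIME is 2*3600 s in addrconf.c):

/* Assumed values: the address currently has stored_lft = 3600 s and a
 * (possibly forged) RA advertises valid_lft = 60 s:
 *   minimum_lft = min(3600, 7200) = 3600
 *   valid_lft   = max(60, 3600)   = 3600
 * so the lifetime cannot be pushed down; a longer advertised lifetime,
 * say 86400 s, still wins: max(86400, 3600) = 86400.
 */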
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 6b26e9feafb9..7bb5446b9d73 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -618,7 +618,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, | |||
618 | struct ip6_tnl *tunnel = netdev_priv(dev); | 618 | struct ip6_tnl *tunnel = netdev_priv(dev); |
619 | struct net_device *tdev; /* Device to other host */ | 619 | struct net_device *tdev; /* Device to other host */ |
620 | struct ipv6hdr *ipv6h; /* Our new IP header */ | 620 | struct ipv6hdr *ipv6h; /* Our new IP header */ |
621 | unsigned int max_headroom; /* The extra header space needed */ | 621 | unsigned int max_headroom = 0; /* The extra header space needed */ |
622 | int gre_hlen; | 622 | int gre_hlen; |
623 | struct ipv6_tel_txoption opt; | 623 | struct ipv6_tel_txoption opt; |
624 | int mtu; | 624 | int mtu; |
@@ -693,7 +693,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, | |||
693 | 693 | ||
694 | skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev))); | 694 | skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev))); |
695 | 695 | ||
696 | max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len; | 696 | max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len; |
697 | 697 | ||
698 | if (skb_headroom(skb) < max_headroom || skb_shared(skb) || | 698 | if (skb_headroom(skb) < max_headroom || skb_shared(skb) || |
699 | (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { | 699 | (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 3a692d529163..a54c45ce4a48 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -1015,6 +1015,8 @@ static inline int ip6_ufo_append_data(struct sock *sk, | |||
1015 | * udp datagram | 1015 | * udp datagram |
1016 | */ | 1016 | */ |
1017 | if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { | 1017 | if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { |
1018 | struct frag_hdr fhdr; | ||
1019 | |||
1018 | skb = sock_alloc_send_skb(sk, | 1020 | skb = sock_alloc_send_skb(sk, |
1019 | hh_len + fragheaderlen + transhdrlen + 20, | 1021 | hh_len + fragheaderlen + transhdrlen + 20, |
1020 | (flags & MSG_DONTWAIT), &err); | 1022 | (flags & MSG_DONTWAIT), &err); |
@@ -1036,12 +1038,6 @@ static inline int ip6_ufo_append_data(struct sock *sk, | |||
1036 | skb->protocol = htons(ETH_P_IPV6); | 1038 | skb->protocol = htons(ETH_P_IPV6); |
1037 | skb->ip_summed = CHECKSUM_PARTIAL; | 1039 | skb->ip_summed = CHECKSUM_PARTIAL; |
1038 | skb->csum = 0; | 1040 | skb->csum = 0; |
1039 | } | ||
1040 | |||
1041 | err = skb_append_datato_frags(sk,skb, getfrag, from, | ||
1042 | (length - transhdrlen)); | ||
1043 | if (!err) { | ||
1044 | struct frag_hdr fhdr; | ||
1045 | 1041 | ||
1046 | /* Specify the length of each IPv6 datagram fragment. | 1042 | /* Specify the length of each IPv6 datagram fragment. |
1047 | * It has to be a multiple of 8. | 1043 | * It has to be a multiple of 8. |
@@ -1052,15 +1048,10 @@ static inline int ip6_ufo_append_data(struct sock *sk, | |||
1052 | ipv6_select_ident(&fhdr, rt); | 1048 | ipv6_select_ident(&fhdr, rt); |
1053 | skb_shinfo(skb)->ip6_frag_id = fhdr.identification; | 1049 | skb_shinfo(skb)->ip6_frag_id = fhdr.identification; |
1054 | __skb_queue_tail(&sk->sk_write_queue, skb); | 1050 | __skb_queue_tail(&sk->sk_write_queue, skb); |
1055 | |||
1056 | return 0; | ||
1057 | } | 1051 | } |
1058 | /* There is not enough support do UPD LSO, | ||
1059 | * so follow normal path | ||
1060 | */ | ||
1061 | kfree_skb(skb); | ||
1062 | 1052 | ||
1063 | return err; | 1053 | return skb_append_datato_frags(sk, skb, getfrag, from, |
1054 | (length - transhdrlen)); | ||
1064 | } | 1055 | } |
1065 | 1056 | ||
1066 | static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src, | 1057 | static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src, |
@@ -1227,27 +1218,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
1227 | * --yoshfuji | 1218 | * --yoshfuji |
1228 | */ | 1219 | */ |
1229 | 1220 | ||
1230 | cork->length += length; | 1221 | if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP || |
1231 | if (length > mtu) { | 1222 | sk->sk_protocol == IPPROTO_RAW)) { |
1232 | int proto = sk->sk_protocol; | 1223 | ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen); |
1233 | if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){ | 1224 | return -EMSGSIZE; |
1234 | ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen); | 1225 | } |
1235 | return -EMSGSIZE; | ||
1236 | } | ||
1237 | |||
1238 | if (proto == IPPROTO_UDP && | ||
1239 | (rt->dst.dev->features & NETIF_F_UFO)) { | ||
1240 | 1226 | ||
1241 | err = ip6_ufo_append_data(sk, getfrag, from, length, | 1227 | skb = skb_peek_tail(&sk->sk_write_queue); |
1242 | hh_len, fragheaderlen, | 1228 | cork->length += length; |
1243 | transhdrlen, mtu, flags, rt); | 1229 | if (((length > mtu) || |
1244 | if (err) | 1230 | (skb && skb_is_gso(skb))) && |
1245 | goto error; | 1231 | (sk->sk_protocol == IPPROTO_UDP) && |
1246 | return 0; | 1232 | (rt->dst.dev->features & NETIF_F_UFO)) { |
1247 | } | 1233 | err = ip6_ufo_append_data(sk, getfrag, from, length, |
1234 | hh_len, fragheaderlen, | ||
1235 | transhdrlen, mtu, flags, rt); | ||
1236 | if (err) | ||
1237 | goto error; | ||
1238 | return 0; | ||
1248 | } | 1239 | } |
1249 | 1240 | ||
1250 | if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) | 1241 | if (!skb) |
1251 | goto alloc_new_skb; | 1242 | goto alloc_new_skb; |
1252 | 1243 | ||
1253 | while (length > 0) { | 1244 | while (length > 0) { |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 2d8f4829575b..a791552e0422 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1731,8 +1731,6 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n) | |||
1731 | } | 1731 | } |
1732 | } | 1732 | } |
1733 | 1733 | ||
1734 | t = rtnl_dereference(ip6n->tnls_wc[0]); | ||
1735 | unregister_netdevice_queue(t->dev, &list); | ||
1736 | unregister_netdevice_many(&list); | 1734 | unregister_netdevice_many(&list); |
1737 | } | 1735 | } |
1738 | 1736 | ||
@@ -1752,6 +1750,7 @@ static int __net_init ip6_tnl_init_net(struct net *net) | |||
1752 | if (!ip6n->fb_tnl_dev) | 1750 | if (!ip6n->fb_tnl_dev) |
1753 | goto err_alloc_dev; | 1751 | goto err_alloc_dev; |
1754 | dev_net_set(ip6n->fb_tnl_dev, net); | 1752 | dev_net_set(ip6n->fb_tnl_dev, net); |
1753 | ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops; | ||
1755 | /* FB netdevice is special: we have one, and only one per netns. | 1754 | /* FB netdevice is special: we have one, and only one per netns. |
1756 | * Allowing to move it to another netns is clearly unsafe. | 1755 | * Allowing to move it to another netns is clearly unsafe. |
1757 | */ | 1756 | */ |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 096cd67b737c..d18f9f903db6 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -2034,7 +2034,7 @@ static void mld_dad_timer_expire(unsigned long data) | |||
2034 | if (idev->mc_dad_count) | 2034 | if (idev->mc_dad_count) |
2035 | mld_dad_start_timer(idev, idev->mc_maxdelay); | 2035 | mld_dad_start_timer(idev, idev->mc_maxdelay); |
2036 | } | 2036 | } |
2037 | __in6_dev_put(idev); | 2037 | in6_dev_put(idev); |
2038 | } | 2038 | } |
2039 | 2039 | ||
2040 | static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, | 2040 | static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, |
@@ -2379,7 +2379,7 @@ static void mld_gq_timer_expire(unsigned long data) | |||
2379 | 2379 | ||
2380 | idev->mc_gq_running = 0; | 2380 | idev->mc_gq_running = 0; |
2381 | mld_send_report(idev, NULL); | 2381 | mld_send_report(idev, NULL); |
2382 | __in6_dev_put(idev); | 2382 | in6_dev_put(idev); |
2383 | } | 2383 | } |
2384 | 2384 | ||
2385 | static void mld_ifc_timer_expire(unsigned long data) | 2385 | static void mld_ifc_timer_expire(unsigned long data) |
@@ -2392,7 +2392,7 @@ static void mld_ifc_timer_expire(unsigned long data) | |||
2392 | if (idev->mc_ifc_count) | 2392 | if (idev->mc_ifc_count) |
2393 | mld_ifc_start_timer(idev, idev->mc_maxdelay); | 2393 | mld_ifc_start_timer(idev, idev->mc_maxdelay); |
2394 | } | 2394 | } |
2395 | __in6_dev_put(idev); | 2395 | in6_dev_put(idev); |
2396 | } | 2396 | } |
2397 | 2397 | ||
2398 | static void mld_ifc_event(struct inet6_dev *idev) | 2398 | static void mld_ifc_event(struct inet6_dev *idev) |
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index 19cfea8dbcaa..2748b042da72 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c | |||
@@ -282,7 +282,8 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) | |||
282 | if (th == NULL) | 282 | if (th == NULL) |
283 | return NF_DROP; | 283 | return NF_DROP; |
284 | 284 | ||
285 | synproxy_parse_options(skb, par->thoff, th, &opts); | 285 | if (!synproxy_parse_options(skb, par->thoff, th, &opts)) |
286 | return NF_DROP; | ||
286 | 287 | ||
287 | if (th->syn && !(th->ack || th->fin || th->rst)) { | 288 | if (th->syn && !(th->ack || th->fin || th->rst)) { |
288 | /* Initial SYN from client */ | 289 | /* Initial SYN from client */ |
@@ -372,7 +373,8 @@ static unsigned int ipv6_synproxy_hook(unsigned int hooknum, | |||
372 | 373 | ||
373 | /* fall through */ | 374 | /* fall through */ |
374 | case TCP_CONNTRACK_SYN_SENT: | 375 | case TCP_CONNTRACK_SYN_SENT: |
375 | synproxy_parse_options(skb, thoff, th, &opts); | 376 | if (!synproxy_parse_options(skb, thoff, th, &opts)) |
377 | return NF_DROP; | ||
376 | 378 | ||
377 | if (!th->syn && th->ack && | 379 | if (!th->syn && th->ack && |
378 | CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { | 380 | CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { |
@@ -395,7 +397,9 @@ static unsigned int ipv6_synproxy_hook(unsigned int hooknum, | |||
395 | if (!th->syn || !th->ack) | 397 | if (!th->syn || !th->ack) |
396 | break; | 398 | break; |
397 | 399 | ||
398 | synproxy_parse_options(skb, thoff, th, &opts); | 400 | if (!synproxy_parse_options(skb, thoff, th, &opts)) |
401 | return NF_DROP; | ||
402 | |||
399 | if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) | 403 | if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) |
400 | synproxy->tsoff = opts.tsval - synproxy->its; | 404 | synproxy->tsoff = opts.tsval - synproxy->its; |
401 | 405 | ||
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 58916bbb1728..a4ed2416399e 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -335,8 +335,10 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb, | |||
335 | ip6_sk_update_pmtu(skb, sk, info); | 335 | ip6_sk_update_pmtu(skb, sk, info); |
336 | harderr = (np->pmtudisc == IPV6_PMTUDISC_DO); | 336 | harderr = (np->pmtudisc == IPV6_PMTUDISC_DO); |
337 | } | 337 | } |
338 | if (type == NDISC_REDIRECT) | 338 | if (type == NDISC_REDIRECT) { |
339 | ip6_sk_redirect(skb, sk); | 339 | ip6_sk_redirect(skb, sk); |
340 | return; | ||
341 | } | ||
340 | if (np->recverr) { | 342 | if (np->recverr) { |
341 | u8 *payload = skb->data; | 343 | u8 *payload = skb->data; |
342 | if (!inet->hdrincl) | 344 | if (!inet->hdrincl) |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 7ee5cb96db34..19269453a8ea 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -566,6 +566,70 @@ static inline bool is_spoofed_6rd(struct ip_tunnel *tunnel, const __be32 v4addr, | |||
566 | return false; | 566 | return false; |
567 | } | 567 | } |
568 | 568 | ||
569 | /* Checks if an address matches an address on the tunnel interface. | ||
570 | * Used to detect the NAT of proto 41 packets and let them pass spoofing test. | ||
571 | * Long story: | ||
572 | * This function is called after we considered the packet as spoofed | ||
573 | * in is_spoofed_6rd. | ||
574 | * We may have a router that is doing NAT for proto 41 packets | ||
575 | * for an internal station. Destination a.a.a.a/PREFIX:bbbb:bbbb | ||
576 | * will be translated to n.n.n.n/PREFIX:bbbb:bbbb. And is_spoofed_6rd | ||
577 | * function will return true, dropping the packet. | ||
578 | * But, we can still check if is spoofed against the IP | ||
579 | * addresses associated with the interface. | ||
580 | */ | ||
581 | static bool only_dnatted(const struct ip_tunnel *tunnel, | ||
582 | const struct in6_addr *v6dst) | ||
583 | { | ||
584 | int prefix_len; | ||
585 | |||
586 | #ifdef CONFIG_IPV6_SIT_6RD | ||
587 | prefix_len = tunnel->ip6rd.prefixlen + 32 | ||
588 | - tunnel->ip6rd.relay_prefixlen; | ||
589 | #else | ||
590 | prefix_len = 48; | ||
591 | #endif | ||
592 | return ipv6_chk_custom_prefix(v6dst, prefix_len, tunnel->dev); | ||
593 | } | ||
594 | |||
595 | /* Returns true if a packet is spoofed */ | ||
596 | static bool packet_is_spoofed(struct sk_buff *skb, | ||
597 | const struct iphdr *iph, | ||
598 | struct ip_tunnel *tunnel) | ||
599 | { | ||
600 | const struct ipv6hdr *ipv6h; | ||
601 | |||
602 | if (tunnel->dev->priv_flags & IFF_ISATAP) { | ||
603 | if (!isatap_chksrc(skb, iph, tunnel)) | ||
604 | return true; | ||
605 | |||
606 | return false; | ||
607 | } | ||
608 | |||
609 | if (tunnel->dev->flags & IFF_POINTOPOINT) | ||
610 | return false; | ||
611 | |||
612 | ipv6h = ipv6_hdr(skb); | ||
613 | |||
614 | if (unlikely(is_spoofed_6rd(tunnel, iph->saddr, &ipv6h->saddr))) { | ||
615 | net_warn_ratelimited("Src spoofed %pI4/%pI6c -> %pI4/%pI6c\n", | ||
616 | &iph->saddr, &ipv6h->saddr, | ||
617 | &iph->daddr, &ipv6h->daddr); | ||
618 | return true; | ||
619 | } | ||
620 | |||
621 | if (likely(!is_spoofed_6rd(tunnel, iph->daddr, &ipv6h->daddr))) | ||
622 | return false; | ||
623 | |||
624 | if (only_dnatted(tunnel, &ipv6h->daddr)) | ||
625 | return false; | ||
626 | |||
627 | net_warn_ratelimited("Dst spoofed %pI4/%pI6c -> %pI4/%pI6c\n", | ||
628 | &iph->saddr, &ipv6h->saddr, | ||
629 | &iph->daddr, &ipv6h->daddr); | ||
630 | return true; | ||
631 | } | ||
632 | |||
569 | static int ipip6_rcv(struct sk_buff *skb) | 633 | static int ipip6_rcv(struct sk_buff *skb) |
570 | { | 634 | { |
571 | const struct iphdr *iph = ip_hdr(skb); | 635 | const struct iphdr *iph = ip_hdr(skb); |
@@ -586,19 +650,9 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
586 | IPCB(skb)->flags = 0; | 650 | IPCB(skb)->flags = 0; |
587 | skb->protocol = htons(ETH_P_IPV6); | 651 | skb->protocol = htons(ETH_P_IPV6); |
588 | 652 | ||
589 | if (tunnel->dev->priv_flags & IFF_ISATAP) { | 653 | if (packet_is_spoofed(skb, iph, tunnel)) { |
590 | if (!isatap_chksrc(skb, iph, tunnel)) { | 654 | tunnel->dev->stats.rx_errors++; |
591 | tunnel->dev->stats.rx_errors++; | 655 | goto out; |
592 | goto out; | ||
593 | } | ||
594 | } else if (!(tunnel->dev->flags&IFF_POINTOPOINT)) { | ||
595 | if (is_spoofed_6rd(tunnel, iph->saddr, | ||
596 | &ipv6_hdr(skb)->saddr) || | ||
597 | is_spoofed_6rd(tunnel, iph->daddr, | ||
598 | &ipv6_hdr(skb)->daddr)) { | ||
599 | tunnel->dev->stats.rx_errors++; | ||
600 | goto out; | ||
601 | } | ||
602 | } | 656 | } |
603 | 657 | ||
604 | __skb_tunnel_rx(skb, tunnel->dev, tunnel->net); | 658 | __skb_tunnel_rx(skb, tunnel->dev, tunnel->net); |
@@ -748,7 +802,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
748 | neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); | 802 | neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); |
749 | 803 | ||
750 | if (neigh == NULL) { | 804 | if (neigh == NULL) { |
751 | net_dbg_ratelimited("sit: nexthop == NULL\n"); | 805 | net_dbg_ratelimited("nexthop == NULL\n"); |
752 | goto tx_error; | 806 | goto tx_error; |
753 | } | 807 | } |
754 | 808 | ||
@@ -777,7 +831,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
777 | neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); | 831 | neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); |
778 | 832 | ||
779 | if (neigh == NULL) { | 833 | if (neigh == NULL) { |
780 | net_dbg_ratelimited("sit: nexthop == NULL\n"); | 834 | net_dbg_ratelimited("nexthop == NULL\n"); |
781 | goto tx_error; | 835 | goto tx_error; |
782 | } | 836 | } |
783 | 837 | ||
@@ -1612,6 +1666,7 @@ static int __net_init sit_init_net(struct net *net) | |||
1612 | goto err_alloc_dev; | 1666 | goto err_alloc_dev; |
1613 | } | 1667 | } |
1614 | dev_net_set(sitn->fb_tunnel_dev, net); | 1668 | dev_net_set(sitn->fb_tunnel_dev, net); |
1669 | sitn->fb_tunnel_dev->rtnl_link_ops = &sit_link_ops; | ||
1615 | /* FB netdevice is special: we have one, and only one per netns. | 1670 | /* FB netdevice is special: we have one, and only one per netns. |
1616 | * Allowing to move it to another netns is clearly unsafe. | 1671 | * Allowing to move it to another netns is clearly unsafe. |
1617 | */ | 1672 | */ |
@@ -1646,7 +1701,6 @@ static void __net_exit sit_exit_net(struct net *net) | |||
1646 | 1701 | ||
1647 | rtnl_lock(); | 1702 | rtnl_lock(); |
1648 | sit_destroy_tunnels(sitn, &list); | 1703 | sit_destroy_tunnels(sitn, &list); |
1649 | unregister_netdevice_queue(sitn->fb_tunnel_dev, &list); | ||
1650 | unregister_netdevice_many(&list); | 1704 | unregister_netdevice_many(&list); |
1651 | rtnl_unlock(); | 1705 | rtnl_unlock(); |
1652 | } | 1706 | } |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index f4058150262b..72b7eaaf3ca0 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -525,8 +525,10 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
525 | 525 | ||
526 | if (type == ICMPV6_PKT_TOOBIG) | 526 | if (type == ICMPV6_PKT_TOOBIG) |
527 | ip6_sk_update_pmtu(skb, sk, info); | 527 | ip6_sk_update_pmtu(skb, sk, info); |
528 | if (type == NDISC_REDIRECT) | 528 | if (type == NDISC_REDIRECT) { |
529 | ip6_sk_redirect(skb, sk); | 529 | ip6_sk_redirect(skb, sk); |
530 | goto out; | ||
531 | } | ||
530 | 532 | ||
531 | np = inet6_sk(sk); | 533 | np = inet6_sk(sk); |
532 | 534 | ||
diff --git a/net/lapb/lapb_timer.c b/net/lapb/lapb_timer.c index 54563ad8aeb1..355cc3b6fa4d 100644 --- a/net/lapb/lapb_timer.c +++ b/net/lapb/lapb_timer.c | |||
@@ -154,6 +154,7 @@ static void lapb_t1timer_expiry(unsigned long param) | |||
154 | } else { | 154 | } else { |
155 | lapb->n2count++; | 155 | lapb->n2count++; |
156 | lapb_requeue_frames(lapb); | 156 | lapb_requeue_frames(lapb); |
157 | lapb_kick(lapb); | ||
157 | } | 158 | } |
158 | break; | 159 | break; |
159 | 160 | ||
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 4f69e83ff836..74fd00c27210 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -116,6 +116,7 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb) | |||
116 | 116 | ||
117 | if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { | 117 | if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { |
118 | struct ip_vs_cpu_stats *s; | 118 | struct ip_vs_cpu_stats *s; |
119 | struct ip_vs_service *svc; | ||
119 | 120 | ||
120 | s = this_cpu_ptr(dest->stats.cpustats); | 121 | s = this_cpu_ptr(dest->stats.cpustats); |
121 | s->ustats.inpkts++; | 122 | s->ustats.inpkts++; |
@@ -123,11 +124,14 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb) | |||
123 | s->ustats.inbytes += skb->len; | 124 | s->ustats.inbytes += skb->len; |
124 | u64_stats_update_end(&s->syncp); | 125 | u64_stats_update_end(&s->syncp); |
125 | 126 | ||
126 | s = this_cpu_ptr(dest->svc->stats.cpustats); | 127 | rcu_read_lock(); |
128 | svc = rcu_dereference(dest->svc); | ||
129 | s = this_cpu_ptr(svc->stats.cpustats); | ||
127 | s->ustats.inpkts++; | 130 | s->ustats.inpkts++; |
128 | u64_stats_update_begin(&s->syncp); | 131 | u64_stats_update_begin(&s->syncp); |
129 | s->ustats.inbytes += skb->len; | 132 | s->ustats.inbytes += skb->len; |
130 | u64_stats_update_end(&s->syncp); | 133 | u64_stats_update_end(&s->syncp); |
134 | rcu_read_unlock(); | ||
131 | 135 | ||
132 | s = this_cpu_ptr(ipvs->tot_stats.cpustats); | 136 | s = this_cpu_ptr(ipvs->tot_stats.cpustats); |
133 | s->ustats.inpkts++; | 137 | s->ustats.inpkts++; |
@@ -146,6 +150,7 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb) | |||
146 | 150 | ||
147 | if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { | 151 | if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { |
148 | struct ip_vs_cpu_stats *s; | 152 | struct ip_vs_cpu_stats *s; |
153 | struct ip_vs_service *svc; | ||
149 | 154 | ||
150 | s = this_cpu_ptr(dest->stats.cpustats); | 155 | s = this_cpu_ptr(dest->stats.cpustats); |
151 | s->ustats.outpkts++; | 156 | s->ustats.outpkts++; |
@@ -153,11 +158,14 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb) | |||
153 | s->ustats.outbytes += skb->len; | 158 | s->ustats.outbytes += skb->len; |
154 | u64_stats_update_end(&s->syncp); | 159 | u64_stats_update_end(&s->syncp); |
155 | 160 | ||
156 | s = this_cpu_ptr(dest->svc->stats.cpustats); | 161 | rcu_read_lock(); |
162 | svc = rcu_dereference(dest->svc); | ||
163 | s = this_cpu_ptr(svc->stats.cpustats); | ||
157 | s->ustats.outpkts++; | 164 | s->ustats.outpkts++; |
158 | u64_stats_update_begin(&s->syncp); | 165 | u64_stats_update_begin(&s->syncp); |
159 | s->ustats.outbytes += skb->len; | 166 | s->ustats.outbytes += skb->len; |
160 | u64_stats_update_end(&s->syncp); | 167 | u64_stats_update_end(&s->syncp); |
168 | rcu_read_unlock(); | ||
161 | 169 | ||
162 | s = this_cpu_ptr(ipvs->tot_stats.cpustats); | 170 | s = this_cpu_ptr(ipvs->tot_stats.cpustats); |
163 | s->ustats.outpkts++; | 171 | s->ustats.outpkts++; |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index c8148e487386..a3df9bddc4f7 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -460,7 +460,7 @@ static inline void | |||
460 | __ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc) | 460 | __ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc) |
461 | { | 461 | { |
462 | atomic_inc(&svc->refcnt); | 462 | atomic_inc(&svc->refcnt); |
463 | dest->svc = svc; | 463 | rcu_assign_pointer(dest->svc, svc); |
464 | } | 464 | } |
465 | 465 | ||
466 | static void ip_vs_service_free(struct ip_vs_service *svc) | 466 | static void ip_vs_service_free(struct ip_vs_service *svc) |
@@ -470,18 +470,25 @@ static void ip_vs_service_free(struct ip_vs_service *svc) | |||
470 | kfree(svc); | 470 | kfree(svc); |
471 | } | 471 | } |
472 | 472 | ||
473 | static void | 473 | static void ip_vs_service_rcu_free(struct rcu_head *head) |
474 | __ip_vs_unbind_svc(struct ip_vs_dest *dest) | ||
475 | { | 474 | { |
476 | struct ip_vs_service *svc = dest->svc; | 475 | struct ip_vs_service *svc; |
476 | |||
477 | svc = container_of(head, struct ip_vs_service, rcu_head); | ||
478 | ip_vs_service_free(svc); | ||
479 | } | ||
477 | 480 | ||
478 | dest->svc = NULL; | 481 | static void __ip_vs_svc_put(struct ip_vs_service *svc, bool do_delay) |
482 | { | ||
479 | if (atomic_dec_and_test(&svc->refcnt)) { | 483 | if (atomic_dec_and_test(&svc->refcnt)) { |
480 | IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n", | 484 | IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n", |
481 | svc->fwmark, | 485 | svc->fwmark, |
482 | IP_VS_DBG_ADDR(svc->af, &svc->addr), | 486 | IP_VS_DBG_ADDR(svc->af, &svc->addr), |
483 | ntohs(svc->port)); | 487 | ntohs(svc->port)); |
484 | ip_vs_service_free(svc); | 488 | if (do_delay) |
489 | call_rcu(&svc->rcu_head, ip_vs_service_rcu_free); | ||
490 | else | ||
491 | ip_vs_service_free(svc); | ||
485 | } | 492 | } |
486 | } | 493 | } |
487 | 494 | ||
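__ip_vs_svc_put() above pairs with the rcu_read_lock()/rcu_dereference(dest->svc) readers added in ip_vs_core.c: an old service is only released through call_rcu(), so a reader that found it under RCU can never touch freed memory. The generic shape of that deferred-free pattern, sketched with placeholder names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {			/* placeholder payload */
	struct rcu_head rcu_head;
	int value;
};

static void example_obj_rcu_free(struct rcu_head *head)
{
	struct example_obj *obj = container_of(head, struct example_obj, rcu_head);

	kfree(obj);	/* runs only after every pre-existing RCU reader is done */
}

/* writer: unpublish the last pointer to obj, then defer the free */
static void example_obj_put(struct example_obj *obj)
{
	call_rcu(&obj->rcu_head, example_obj_rcu_free);
}

/* readers access the object between rcu_read_lock()/rcu_read_unlock() only */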
@@ -667,11 +674,6 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, | |||
667 | IP_VS_DBG_ADDR(svc->af, &dest->addr), | 674 | IP_VS_DBG_ADDR(svc->af, &dest->addr), |
668 | ntohs(dest->port), | 675 | ntohs(dest->port), |
669 | atomic_read(&dest->refcnt)); | 676 | atomic_read(&dest->refcnt)); |
670 | /* We can not reuse dest while in grace period | ||
671 | * because conns still can use dest->svc | ||
672 | */ | ||
673 | if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state)) | ||
674 | continue; | ||
675 | if (dest->af == svc->af && | 677 | if (dest->af == svc->af && |
676 | ip_vs_addr_equal(svc->af, &dest->addr, daddr) && | 678 | ip_vs_addr_equal(svc->af, &dest->addr, daddr) && |
677 | dest->port == dport && | 679 | dest->port == dport && |
@@ -697,8 +699,10 @@ out: | |||
697 | 699 | ||
698 | static void ip_vs_dest_free(struct ip_vs_dest *dest) | 700 | static void ip_vs_dest_free(struct ip_vs_dest *dest) |
699 | { | 701 | { |
702 | struct ip_vs_service *svc = rcu_dereference_protected(dest->svc, 1); | ||
703 | |||
700 | __ip_vs_dst_cache_reset(dest); | 704 | __ip_vs_dst_cache_reset(dest); |
701 | __ip_vs_unbind_svc(dest); | 705 | __ip_vs_svc_put(svc, false); |
702 | free_percpu(dest->stats.cpustats); | 706 | free_percpu(dest->stats.cpustats); |
703 | kfree(dest); | 707 | kfree(dest); |
704 | } | 708 | } |
@@ -771,6 +775,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, | |||
771 | struct ip_vs_dest_user_kern *udest, int add) | 775 | struct ip_vs_dest_user_kern *udest, int add) |
772 | { | 776 | { |
773 | struct netns_ipvs *ipvs = net_ipvs(svc->net); | 777 | struct netns_ipvs *ipvs = net_ipvs(svc->net); |
778 | struct ip_vs_service *old_svc; | ||
774 | struct ip_vs_scheduler *sched; | 779 | struct ip_vs_scheduler *sched; |
775 | int conn_flags; | 780 | int conn_flags; |
776 | 781 | ||
@@ -792,13 +797,14 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, | |||
792 | atomic_set(&dest->conn_flags, conn_flags); | 797 | atomic_set(&dest->conn_flags, conn_flags); |
793 | 798 | ||
794 | /* bind the service */ | 799 | /* bind the service */ |
795 | if (!dest->svc) { | 800 | old_svc = rcu_dereference_protected(dest->svc, 1); |
801 | if (!old_svc) { | ||
796 | __ip_vs_bind_svc(dest, svc); | 802 | __ip_vs_bind_svc(dest, svc); |
797 | } else { | 803 | } else { |
798 | if (dest->svc != svc) { | 804 | if (old_svc != svc) { |
799 | __ip_vs_unbind_svc(dest); | ||
800 | ip_vs_zero_stats(&dest->stats); | 805 | ip_vs_zero_stats(&dest->stats); |
801 | __ip_vs_bind_svc(dest, svc); | 806 | __ip_vs_bind_svc(dest, svc); |
807 | __ip_vs_svc_put(old_svc, true); | ||
802 | } | 808 | } |
803 | } | 809 | } |
804 | 810 | ||
@@ -998,16 +1004,6 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) | |||
998 | return 0; | 1004 | return 0; |
999 | } | 1005 | } |
1000 | 1006 | ||
1001 | static void ip_vs_dest_wait_readers(struct rcu_head *head) | ||
1002 | { | ||
1003 | struct ip_vs_dest *dest = container_of(head, struct ip_vs_dest, | ||
1004 | rcu_head); | ||
1005 | |||
1006 | /* End of grace period after unlinking */ | ||
1007 | clear_bit(IP_VS_DEST_STATE_REMOVING, &dest->state); | ||
1008 | } | ||
1009 | |||
1010 | |||
1011 | /* | 1007 | /* |
1012 | * Delete a destination (must be already unlinked from the service) | 1008 | * Delete a destination (must be already unlinked from the service) |
1013 | */ | 1009 | */ |
@@ -1023,20 +1019,16 @@ static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest, | |||
1023 | */ | 1019 | */ |
1024 | ip_vs_rs_unhash(dest); | 1020 | ip_vs_rs_unhash(dest); |
1025 | 1021 | ||
1026 | if (!cleanup) { | ||
1027 | set_bit(IP_VS_DEST_STATE_REMOVING, &dest->state); | ||
1028 | call_rcu(&dest->rcu_head, ip_vs_dest_wait_readers); | ||
1029 | } | ||
1030 | |||
1031 | spin_lock_bh(&ipvs->dest_trash_lock); | 1022 | spin_lock_bh(&ipvs->dest_trash_lock); |
1032 | IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n", | 1023 | IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n", |
1033 | IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), | 1024 | IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), |
1034 | atomic_read(&dest->refcnt)); | 1025 | atomic_read(&dest->refcnt)); |
1035 | if (list_empty(&ipvs->dest_trash) && !cleanup) | 1026 | if (list_empty(&ipvs->dest_trash) && !cleanup) |
1036 | mod_timer(&ipvs->dest_trash_timer, | 1027 | mod_timer(&ipvs->dest_trash_timer, |
1037 | jiffies + IP_VS_DEST_TRASH_PERIOD); | 1028 | jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1)); |
1038 | /* dest lives in trash without reference */ | 1029 | /* dest lives in trash without reference */ |
1039 | list_add(&dest->t_list, &ipvs->dest_trash); | 1030 | list_add(&dest->t_list, &ipvs->dest_trash); |
1031 | dest->idle_start = 0; | ||
1040 | spin_unlock_bh(&ipvs->dest_trash_lock); | 1032 | spin_unlock_bh(&ipvs->dest_trash_lock); |
1041 | ip_vs_dest_put(dest); | 1033 | ip_vs_dest_put(dest); |
1042 | } | 1034 | } |
@@ -1108,24 +1100,30 @@ static void ip_vs_dest_trash_expire(unsigned long data) | |||
1108 | struct net *net = (struct net *) data; | 1100 | struct net *net = (struct net *) data; |
1109 | struct netns_ipvs *ipvs = net_ipvs(net); | 1101 | struct netns_ipvs *ipvs = net_ipvs(net); |
1110 | struct ip_vs_dest *dest, *next; | 1102 | struct ip_vs_dest *dest, *next; |
1103 | unsigned long now = jiffies; | ||
1111 | 1104 | ||
1112 | spin_lock(&ipvs->dest_trash_lock); | 1105 | spin_lock(&ipvs->dest_trash_lock); |
1113 | list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) { | 1106 | list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) { |
1114 | /* Skip if dest is in grace period */ | ||
1115 | if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state)) | ||
1116 | continue; | ||
1117 | if (atomic_read(&dest->refcnt) > 0) | 1107 | if (atomic_read(&dest->refcnt) > 0) |
1118 | continue; | 1108 | continue; |
1109 | if (dest->idle_start) { | ||
1110 | if (time_before(now, dest->idle_start + | ||
1111 | IP_VS_DEST_TRASH_PERIOD)) | ||
1112 | continue; | ||
1113 | } else { | ||
1114 | dest->idle_start = max(1UL, now); | ||
1115 | continue; | ||
1116 | } | ||
1119 | IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n", | 1117 | IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n", |
1120 | dest->vfwmark, | 1118 | dest->vfwmark, |
1121 | IP_VS_DBG_ADDR(dest->svc->af, &dest->addr), | 1119 | IP_VS_DBG_ADDR(dest->af, &dest->addr), |
1122 | ntohs(dest->port)); | 1120 | ntohs(dest->port)); |
1123 | list_del(&dest->t_list); | 1121 | list_del(&dest->t_list); |
1124 | ip_vs_dest_free(dest); | 1122 | ip_vs_dest_free(dest); |
1125 | } | 1123 | } |
1126 | if (!list_empty(&ipvs->dest_trash)) | 1124 | if (!list_empty(&ipvs->dest_trash)) |
1127 | mod_timer(&ipvs->dest_trash_timer, | 1125 | mod_timer(&ipvs->dest_trash_timer, |
1128 | jiffies + IP_VS_DEST_TRASH_PERIOD); | 1126 | jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1)); |
1129 | spin_unlock(&ipvs->dest_trash_lock); | 1127 | spin_unlock(&ipvs->dest_trash_lock); |
1130 | } | 1128 | } |
1131 | 1129 | ||
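
The trash expiry above now works in two passes instead of using the removed IP_VS_DEST_STATE_REMOVING bit: the first pass over an unreferenced entry only stamps idle_start, and a later pass frees it once a full IP_VS_DEST_TRASH_PERIOD has elapsed; with the timer rearmed at half the period, an entry waits between 1x and 1.5x the period. A small plain-C sketch of that two-pass decision, simplified to one entry and wall-clock seconds rather than jiffies:

/* Two-pass expiry sketch: stamp the idle time first, free one full
 * period later.  Illustrative only; not the kernel code.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define TRASH_PERIOD 120 /* seconds, stand-in for IP_VS_DEST_TRASH_PERIOD */

struct dest {
	int refcnt;
	time_t idle_start;       /* 0 = not stamped yet */
};

/* Returns true when the entry may be freed on this pass. */
static bool trash_expire_pass(struct dest *d, time_t now)
{
	if (d->refcnt > 0)
		return false;                    /* still referenced, skip */
	if (!d->idle_start) {
		d->idle_start = now ? now : 1;   /* first idle sighting: stamp only */
		return false;
	}
	return now >= d->idle_start + TRASH_PERIOD;
}

int main(void)
{
	struct dest d = { .refcnt = 0, .idle_start = 0 };
	time_t now = time(NULL);

	printf("pass 1: free=%d\n", trash_expire_pass(&d, now));                /* 0 */
	printf("pass 2: free=%d\n", trash_expire_pass(&d, now + TRASH_PERIOD)); /* 1 */
	return 0;
}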
@@ -1320,14 +1318,6 @@ out: | |||
1320 | return ret; | 1318 | return ret; |
1321 | } | 1319 | } |
1322 | 1320 | ||
1323 | static void ip_vs_service_rcu_free(struct rcu_head *head) | ||
1324 | { | ||
1325 | struct ip_vs_service *svc; | ||
1326 | |||
1327 | svc = container_of(head, struct ip_vs_service, rcu_head); | ||
1328 | ip_vs_service_free(svc); | ||
1329 | } | ||
1330 | |||
1331 | /* | 1321 | /* |
1332 | * Delete a service from the service list | 1322 | * Delete a service from the service list |
1333 | * - The service must be unlinked, unlocked and not referenced! | 1323 | * - The service must be unlinked, unlocked and not referenced! |
@@ -1376,13 +1366,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup) | |||
1376 | /* | 1366 | /* |
1377 | * Free the service if nobody refers to it | 1367 | * Free the service if nobody refers to it |
1378 | */ | 1368 | */ |
1379 | if (atomic_dec_and_test(&svc->refcnt)) { | 1369 | __ip_vs_svc_put(svc, true); |
1380 | IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n", | ||
1381 | svc->fwmark, | ||
1382 | IP_VS_DBG_ADDR(svc->af, &svc->addr), | ||
1383 | ntohs(svc->port)); | ||
1384 | call_rcu(&svc->rcu_head, ip_vs_service_rcu_free); | ||
1385 | } | ||
1386 | 1370 | ||
1387 | /* decrease the module use count */ | 1371 | /* decrease the module use count */ |
1388 | ip_vs_use_count_dec(); | 1372 | ip_vs_use_count_dec(); |
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c index 6bee6d0c73a5..1425e9a924c4 100644 --- a/net/netfilter/ipvs/ip_vs_est.c +++ b/net/netfilter/ipvs/ip_vs_est.c | |||
@@ -59,12 +59,13 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum, | |||
59 | struct ip_vs_cpu_stats __percpu *stats) | 59 | struct ip_vs_cpu_stats __percpu *stats) |
60 | { | 60 | { |
61 | int i; | 61 | int i; |
62 | bool add = false; | ||
62 | 63 | ||
63 | for_each_possible_cpu(i) { | 64 | for_each_possible_cpu(i) { |
64 | struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i); | 65 | struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i); |
65 | unsigned int start; | 66 | unsigned int start; |
66 | __u64 inbytes, outbytes; | 67 | __u64 inbytes, outbytes; |
67 | if (i) { | 68 | if (add) { |
68 | sum->conns += s->ustats.conns; | 69 | sum->conns += s->ustats.conns; |
69 | sum->inpkts += s->ustats.inpkts; | 70 | sum->inpkts += s->ustats.inpkts; |
70 | sum->outpkts += s->ustats.outpkts; | 71 | sum->outpkts += s->ustats.outpkts; |
@@ -76,6 +77,7 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum, | |||
76 | sum->inbytes += inbytes; | 77 | sum->inbytes += inbytes; |
77 | sum->outbytes += outbytes; | 78 | sum->outbytes += outbytes; |
78 | } else { | 79 | } else { |
80 | add = true; | ||
79 | sum->conns = s->ustats.conns; | 81 | sum->conns = s->ustats.conns; |
80 | sum->inpkts = s->ustats.inpkts; | 82 | sum->inpkts = s->ustats.inpkts; |
81 | sum->outpkts = s->ustats.outpkts; | 83 | sum->outpkts = s->ustats.outpkts; |
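
The estimator fix above replaces the `if (i)` test with a `bool add`, so the running sums are seeded by the first CPU that is actually present rather than by CPU 0, which need not be in the possible-CPU mask. A plain-C sketch of the same accumulation shape, skipping "offline" slots; the struct layout is illustrative:

/* Sum per-CPU counters where the first *present* slot seeds the totals.
 * Slot 0 being absent is exactly the case the fix handles.
 */
#include <stdbool.h>
#include <stdio.h>

struct cpu_stats { unsigned long inpkts, outpkts; bool online; };

static void read_cpu_stats(unsigned long *in, unsigned long *out,
			   const struct cpu_stats *stats, int ncpu)
{
	bool add = false;

	for (int i = 0; i < ncpu; i++) {
		const struct cpu_stats *s = &stats[i];

		if (!s->online)
			continue;        /* like iterating only possible CPUs */
		if (add) {
			*in  += s->inpkts;
			*out += s->outpkts;
		} else {
			add = true;      /* first present CPU seeds the sums */
			*in  = s->inpkts;
			*out = s->outpkts;
		}
	}
}

int main(void)
{
	struct cpu_stats stats[3] = {
		{ .online = false },                            /* "CPU 0" absent */
		{ .inpkts = 5, .outpkts = 7, .online = true },
		{ .inpkts = 1, .outpkts = 2, .online = true },
	};
	unsigned long in = 0, out = 0;

	read_cpu_stats(&in, &out, stats, 3);
	printf("in=%lu out=%lu\n", in, out);                    /* 6 and 9 */
	return 0;
}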
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index 1383b0eadc0e..eff13c94498e 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c | |||
@@ -93,7 +93,7 @@ struct ip_vs_lblc_entry { | |||
93 | struct hlist_node list; | 93 | struct hlist_node list; |
94 | int af; /* address family */ | 94 | int af; /* address family */ |
95 | union nf_inet_addr addr; /* destination IP address */ | 95 | union nf_inet_addr addr; /* destination IP address */ |
96 | struct ip_vs_dest __rcu *dest; /* real server (cache) */ | 96 | struct ip_vs_dest *dest; /* real server (cache) */ |
97 | unsigned long lastuse; /* last used time */ | 97 | unsigned long lastuse; /* last used time */ |
98 | struct rcu_head rcu_head; | 98 | struct rcu_head rcu_head; |
99 | }; | 99 | }; |
@@ -130,20 +130,21 @@ static struct ctl_table vs_vars_table[] = { | |||
130 | }; | 130 | }; |
131 | #endif | 131 | #endif |
132 | 132 | ||
133 | static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) | 133 | static void ip_vs_lblc_rcu_free(struct rcu_head *head) |
134 | { | 134 | { |
135 | struct ip_vs_dest *dest; | 135 | struct ip_vs_lblc_entry *en = container_of(head, |
136 | struct ip_vs_lblc_entry, | ||
137 | rcu_head); | ||
136 | 138 | ||
137 | hlist_del_rcu(&en->list); | 139 | ip_vs_dest_put(en->dest); |
138 | /* | 140 | kfree(en); |
139 | * We don't kfree dest because it is referred either by its service | ||
140 | * or the trash dest list. | ||
141 | */ | ||
142 | dest = rcu_dereference_protected(en->dest, 1); | ||
143 | ip_vs_dest_put(dest); | ||
144 | kfree_rcu(en, rcu_head); | ||
145 | } | 141 | } |
146 | 142 | ||
143 | static inline void ip_vs_lblc_del(struct ip_vs_lblc_entry *en) | ||
144 | { | ||
145 | hlist_del_rcu(&en->list); | ||
146 | call_rcu(&en->rcu_head, ip_vs_lblc_rcu_free); | ||
147 | } | ||
147 | 148 | ||
148 | /* | 149 | /* |
149 | * Returns hash value for IPVS LBLC entry | 150 | * Returns hash value for IPVS LBLC entry |
@@ -203,30 +204,23 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr, | |||
203 | struct ip_vs_lblc_entry *en; | 204 | struct ip_vs_lblc_entry *en; |
204 | 205 | ||
205 | en = ip_vs_lblc_get(dest->af, tbl, daddr); | 206 | en = ip_vs_lblc_get(dest->af, tbl, daddr); |
206 | if (!en) { | 207 | if (en) { |
207 | en = kmalloc(sizeof(*en), GFP_ATOMIC); | 208 | if (en->dest == dest) |
208 | if (!en) | 209 | return en; |
209 | return NULL; | 210 | ip_vs_lblc_del(en); |
210 | 211 | } | |
211 | en->af = dest->af; | 212 | en = kmalloc(sizeof(*en), GFP_ATOMIC); |
212 | ip_vs_addr_copy(dest->af, &en->addr, daddr); | 213 | if (!en) |
213 | en->lastuse = jiffies; | 214 | return NULL; |
214 | 215 | ||
215 | ip_vs_dest_hold(dest); | 216 | en->af = dest->af; |
216 | RCU_INIT_POINTER(en->dest, dest); | 217 | ip_vs_addr_copy(dest->af, &en->addr, daddr); |
218 | en->lastuse = jiffies; | ||
217 | 219 | ||
218 | ip_vs_lblc_hash(tbl, en); | 220 | ip_vs_dest_hold(dest); |
219 | } else { | 221 | en->dest = dest; |
220 | struct ip_vs_dest *old_dest; | ||
221 | 222 | ||
222 | old_dest = rcu_dereference_protected(en->dest, 1); | 223 | ip_vs_lblc_hash(tbl, en); |
223 | if (old_dest != dest) { | ||
224 | ip_vs_dest_put(old_dest); | ||
225 | ip_vs_dest_hold(dest); | ||
226 | /* No ordering constraints for refcnt */ | ||
227 | RCU_INIT_POINTER(en->dest, dest); | ||
228 | } | ||
229 | } | ||
230 | 224 | ||
231 | return en; | 225 | return en; |
232 | } | 226 | } |
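
In the rewritten ip_vs_lblc_new() above, a cache entry that points at a different real server is unlinked (and freed after a grace period) and a fresh entry is allocated, instead of rewriting en->dest inside a live entry; readers therefore only ever see entries whose dest never changes. A simplified single-threaded sketch of that lookup-or-replace flow over a toy list (no RCU, immediate free, all names illustrative):

/* Lookup-or-replace sketch: never mutate a published entry's dest,
 * drop it and insert a new one instead.
 */
#include <stdio.h>
#include <stdlib.h>

struct entry { int addr; int dest; struct entry *next; };

static struct entry *bucket;

static struct entry *lookup(int addr)
{
	for (struct entry *e = bucket; e; e = e->next)
		if (e->addr == addr)
			return e;
	return NULL;
}

static void del_entry(struct entry *en)
{
	struct entry **pp = &bucket;

	while (*pp && *pp != en)
		pp = &(*pp)->next;
	if (*pp)
		*pp = en->next;
	free(en);                 /* the kernel defers this with call_rcu() */
}

static struct entry *get_or_replace(int addr, int dest)
{
	struct entry *en = lookup(addr);

	if (en) {
		if (en->dest == dest)
			return en;       /* cache hit, same real server */
		del_entry(en);           /* stale: replace, don't rewrite in place */
	}
	en = malloc(sizeof(*en));
	if (!en)
		return NULL;
	en->addr = addr;
	en->dest = dest;
	en->next = bucket;
	bucket = en;
	return en;
}

int main(void)
{
	get_or_replace(10, 1);
	struct entry *e = get_or_replace(10, 2);   /* old entry replaced */

	printf("addr=%d dest=%d\n", e->addr, e->dest);
	return 0;
}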
@@ -246,7 +240,7 @@ static void ip_vs_lblc_flush(struct ip_vs_service *svc) | |||
246 | tbl->dead = 1; | 240 | tbl->dead = 1; |
247 | for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { | 241 | for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { |
248 | hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) { | 242 | hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) { |
249 | ip_vs_lblc_free(en); | 243 | ip_vs_lblc_del(en); |
250 | atomic_dec(&tbl->entries); | 244 | atomic_dec(&tbl->entries); |
251 | } | 245 | } |
252 | } | 246 | } |
@@ -281,7 +275,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc) | |||
281 | sysctl_lblc_expiration(svc))) | 275 | sysctl_lblc_expiration(svc))) |
282 | continue; | 276 | continue; |
283 | 277 | ||
284 | ip_vs_lblc_free(en); | 278 | ip_vs_lblc_del(en); |
285 | atomic_dec(&tbl->entries); | 279 | atomic_dec(&tbl->entries); |
286 | } | 280 | } |
287 | spin_unlock(&svc->sched_lock); | 281 | spin_unlock(&svc->sched_lock); |
@@ -335,7 +329,7 @@ static void ip_vs_lblc_check_expire(unsigned long data) | |||
335 | if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) | 329 | if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) |
336 | continue; | 330 | continue; |
337 | 331 | ||
338 | ip_vs_lblc_free(en); | 332 | ip_vs_lblc_del(en); |
339 | atomic_dec(&tbl->entries); | 333 | atomic_dec(&tbl->entries); |
340 | goal--; | 334 | goal--; |
341 | } | 335 | } |
@@ -443,8 +437,8 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc) | |||
443 | continue; | 437 | continue; |
444 | 438 | ||
445 | doh = ip_vs_dest_conn_overhead(dest); | 439 | doh = ip_vs_dest_conn_overhead(dest); |
446 | if (loh * atomic_read(&dest->weight) > | 440 | if ((__s64)loh * atomic_read(&dest->weight) > |
447 | doh * atomic_read(&least->weight)) { | 441 | (__s64)doh * atomic_read(&least->weight)) { |
448 | least = dest; | 442 | least = dest; |
449 | loh = doh; | 443 | loh = doh; |
450 | } | 444 | } |
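
The (__s64) casts above (and the matching ones in the other schedulers below) matter because the connection overhead and the weight are 32-bit values whose product can exceed 32 bits; the unmodified comparison then wraps and can pick the wrong server. A two-comparison demonstration, using int64_t for the kernel's __s64 and values chosen only to make the wrap visible:

/* Show how the 32-bit product wraps while the widened one does not. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	unsigned int loh = 70000, doh = 300000;   /* overheads */
	int lw = 65535, dw = 1000;                /* weights */

	/* 32-bit arithmetic: loh * lw wraps around 2^32 */
	printf("32-bit: %u > %u -> %d\n",
	       loh * (unsigned int)lw, doh * (unsigned int)dw,
	       loh * (unsigned int)lw > doh * (unsigned int)dw);

	/* widened arithmetic, as after the (__s64) casts */
	printf("64-bit: %" PRId64 " > %" PRId64 " -> %d\n",
	       (int64_t)loh * lw, (int64_t)doh * dw,
	       (int64_t)loh * lw > (int64_t)doh * dw);
	return 0;
}

The first comparison is false only because of the wrap; the widened one is true, which is the answer the scheduler needs.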
@@ -511,7 +505,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, | |||
511 | * free up entries from the trash at any time. | 505 | * free up entries from the trash at any time. |
512 | */ | 506 | */ |
513 | 507 | ||
514 | dest = rcu_dereference(en->dest); | 508 | dest = en->dest; |
515 | if ((dest->flags & IP_VS_DEST_F_AVAILABLE) && | 509 | if ((dest->flags & IP_VS_DEST_F_AVAILABLE) && |
516 | atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc)) | 510 | atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc)) |
517 | goto out; | 511 | goto out; |
@@ -631,7 +625,7 @@ static void __exit ip_vs_lblc_cleanup(void) | |||
631 | { | 625 | { |
632 | unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler); | 626 | unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler); |
633 | unregister_pernet_subsys(&ip_vs_lblc_ops); | 627 | unregister_pernet_subsys(&ip_vs_lblc_ops); |
634 | synchronize_rcu(); | 628 | rcu_barrier(); |
635 | } | 629 | } |
636 | 630 | ||
637 | 631 | ||
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index 5199448697f6..0b8550089a2e 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c | |||
@@ -89,7 +89,7 @@ | |||
89 | */ | 89 | */ |
90 | struct ip_vs_dest_set_elem { | 90 | struct ip_vs_dest_set_elem { |
91 | struct list_head list; /* list link */ | 91 | struct list_head list; /* list link */ |
92 | struct ip_vs_dest __rcu *dest; /* destination server */ | 92 | struct ip_vs_dest *dest; /* destination server */ |
93 | struct rcu_head rcu_head; | 93 | struct rcu_head rcu_head; |
94 | }; | 94 | }; |
95 | 95 | ||
@@ -107,11 +107,7 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set, | |||
107 | 107 | ||
108 | if (check) { | 108 | if (check) { |
109 | list_for_each_entry(e, &set->list, list) { | 109 | list_for_each_entry(e, &set->list, list) { |
110 | struct ip_vs_dest *d; | 110 | if (e->dest == dest) |
111 | |||
112 | d = rcu_dereference_protected(e->dest, 1); | ||
113 | if (d == dest) | ||
114 | /* already existed */ | ||
115 | return; | 111 | return; |
116 | } | 112 | } |
117 | } | 113 | } |
@@ -121,7 +117,7 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set, | |||
121 | return; | 117 | return; |
122 | 118 | ||
123 | ip_vs_dest_hold(dest); | 119 | ip_vs_dest_hold(dest); |
124 | RCU_INIT_POINTER(e->dest, dest); | 120 | e->dest = dest; |
125 | 121 | ||
126 | list_add_rcu(&e->list, &set->list); | 122 | list_add_rcu(&e->list, &set->list); |
127 | atomic_inc(&set->size); | 123 | atomic_inc(&set->size); |
@@ -129,22 +125,27 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set, | |||
129 | set->lastmod = jiffies; | 125 | set->lastmod = jiffies; |
130 | } | 126 | } |
131 | 127 | ||
128 | static void ip_vs_lblcr_elem_rcu_free(struct rcu_head *head) | ||
129 | { | ||
130 | struct ip_vs_dest_set_elem *e; | ||
131 | |||
132 | e = container_of(head, struct ip_vs_dest_set_elem, rcu_head); | ||
133 | ip_vs_dest_put(e->dest); | ||
134 | kfree(e); | ||
135 | } | ||
136 | |||
132 | static void | 137 | static void |
133 | ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) | 138 | ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) |
134 | { | 139 | { |
135 | struct ip_vs_dest_set_elem *e; | 140 | struct ip_vs_dest_set_elem *e; |
136 | 141 | ||
137 | list_for_each_entry(e, &set->list, list) { | 142 | list_for_each_entry(e, &set->list, list) { |
138 | struct ip_vs_dest *d; | 143 | if (e->dest == dest) { |
139 | |||
140 | d = rcu_dereference_protected(e->dest, 1); | ||
141 | if (d == dest) { | ||
142 | /* HIT */ | 144 | /* HIT */ |
143 | atomic_dec(&set->size); | 145 | atomic_dec(&set->size); |
144 | set->lastmod = jiffies; | 146 | set->lastmod = jiffies; |
145 | ip_vs_dest_put(dest); | ||
146 | list_del_rcu(&e->list); | 147 | list_del_rcu(&e->list); |
147 | kfree_rcu(e, rcu_head); | 148 | call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free); |
148 | break; | 149 | break; |
149 | } | 150 | } |
150 | } | 151 | } |
@@ -155,16 +156,8 @@ static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set) | |||
155 | struct ip_vs_dest_set_elem *e, *ep; | 156 | struct ip_vs_dest_set_elem *e, *ep; |
156 | 157 | ||
157 | list_for_each_entry_safe(e, ep, &set->list, list) { | 158 | list_for_each_entry_safe(e, ep, &set->list, list) { |
158 | struct ip_vs_dest *d; | ||
159 | |||
160 | d = rcu_dereference_protected(e->dest, 1); | ||
161 | /* | ||
162 | * We don't kfree dest because it is referred either | ||
163 | * by its service or by the trash dest list. | ||
164 | */ | ||
165 | ip_vs_dest_put(d); | ||
166 | list_del_rcu(&e->list); | 159 | list_del_rcu(&e->list); |
167 | kfree_rcu(e, rcu_head); | 160 | call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free); |
168 | } | 161 | } |
169 | } | 162 | } |
170 | 163 | ||
@@ -175,12 +168,9 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) | |||
175 | struct ip_vs_dest *dest, *least; | 168 | struct ip_vs_dest *dest, *least; |
176 | int loh, doh; | 169 | int loh, doh; |
177 | 170 | ||
178 | if (set == NULL) | ||
179 | return NULL; | ||
180 | |||
181 | /* select the first destination server, whose weight > 0 */ | 171 | /* select the first destination server, whose weight > 0 */ |
182 | list_for_each_entry_rcu(e, &set->list, list) { | 172 | list_for_each_entry_rcu(e, &set->list, list) { |
183 | least = rcu_dereference(e->dest); | 173 | least = e->dest; |
184 | if (least->flags & IP_VS_DEST_F_OVERLOAD) | 174 | if (least->flags & IP_VS_DEST_F_OVERLOAD) |
185 | continue; | 175 | continue; |
186 | 176 | ||
@@ -195,13 +185,13 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) | |||
195 | /* find the destination with the weighted least load */ | 185 | /* find the destination with the weighted least load */ |
196 | nextstage: | 186 | nextstage: |
197 | list_for_each_entry_continue_rcu(e, &set->list, list) { | 187 | list_for_each_entry_continue_rcu(e, &set->list, list) { |
198 | dest = rcu_dereference(e->dest); | 188 | dest = e->dest; |
199 | if (dest->flags & IP_VS_DEST_F_OVERLOAD) | 189 | if (dest->flags & IP_VS_DEST_F_OVERLOAD) |
200 | continue; | 190 | continue; |
201 | 191 | ||
202 | doh = ip_vs_dest_conn_overhead(dest); | 192 | doh = ip_vs_dest_conn_overhead(dest); |
203 | if ((loh * atomic_read(&dest->weight) > | 193 | if (((__s64)loh * atomic_read(&dest->weight) > |
204 | doh * atomic_read(&least->weight)) | 194 | (__s64)doh * atomic_read(&least->weight)) |
205 | && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { | 195 | && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { |
206 | least = dest; | 196 | least = dest; |
207 | loh = doh; | 197 | loh = doh; |
@@ -232,7 +222,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) | |||
232 | 222 | ||
233 | /* select the first destination server, whose weight > 0 */ | 223 | /* select the first destination server, whose weight > 0 */ |
234 | list_for_each_entry(e, &set->list, list) { | 224 | list_for_each_entry(e, &set->list, list) { |
235 | most = rcu_dereference_protected(e->dest, 1); | 225 | most = e->dest; |
236 | if (atomic_read(&most->weight) > 0) { | 226 | if (atomic_read(&most->weight) > 0) { |
237 | moh = ip_vs_dest_conn_overhead(most); | 227 | moh = ip_vs_dest_conn_overhead(most); |
238 | goto nextstage; | 228 | goto nextstage; |
@@ -243,11 +233,11 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) | |||
243 | /* find the destination with the weighted most load */ | 233 | /* find the destination with the weighted most load */ |
244 | nextstage: | 234 | nextstage: |
245 | list_for_each_entry_continue(e, &set->list, list) { | 235 | list_for_each_entry_continue(e, &set->list, list) { |
246 | dest = rcu_dereference_protected(e->dest, 1); | 236 | dest = e->dest; |
247 | doh = ip_vs_dest_conn_overhead(dest); | 237 | doh = ip_vs_dest_conn_overhead(dest); |
248 | /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */ | 238 | /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */ |
249 | if ((moh * atomic_read(&dest->weight) < | 239 | if (((__s64)moh * atomic_read(&dest->weight) < |
250 | doh * atomic_read(&most->weight)) | 240 | (__s64)doh * atomic_read(&most->weight)) |
251 | && (atomic_read(&dest->weight) > 0)) { | 241 | && (atomic_read(&dest->weight) > 0)) { |
252 | most = dest; | 242 | most = dest; |
253 | moh = doh; | 243 | moh = doh; |
@@ -611,8 +601,8 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc) | |||
611 | continue; | 601 | continue; |
612 | 602 | ||
613 | doh = ip_vs_dest_conn_overhead(dest); | 603 | doh = ip_vs_dest_conn_overhead(dest); |
614 | if (loh * atomic_read(&dest->weight) > | 604 | if ((__s64)loh * atomic_read(&dest->weight) > |
615 | doh * atomic_read(&least->weight)) { | 605 | (__s64)doh * atomic_read(&least->weight)) { |
616 | least = dest; | 606 | least = dest; |
617 | loh = doh; | 607 | loh = doh; |
618 | } | 608 | } |
@@ -819,7 +809,7 @@ static void __exit ip_vs_lblcr_cleanup(void) | |||
819 | { | 809 | { |
820 | unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler); | 810 | unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler); |
821 | unregister_pernet_subsys(&ip_vs_lblcr_ops); | 811 | unregister_pernet_subsys(&ip_vs_lblcr_ops); |
822 | synchronize_rcu(); | 812 | rcu_barrier(); |
823 | } | 813 | } |
824 | 814 | ||
825 | 815 | ||
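
The loops patched above all compare load-per-weight ratios by cross-multiplication (loh/lw > doh/dw becomes loh*dw > doh*lw for positive weights), which avoids integer division but only stays exact when the products are computed in a wide enough type, hence the casts. A plain-C sketch of a weighted least-load pick done this way over an array; the kernel walks an RCU list instead, and the struct here is illustrative:

/* Weighted least-load pick: choose the entry minimizing overhead/weight,
 * compared by 64-bit cross-multiplication (no division, no overflow).
 */
#include <inttypes.h>
#include <stdio.h>

struct dst { int overhead; int weight; int overloaded; };

static int pick_least(const struct dst *d, int n)
{
	int least = -1;
	int loh = 0;

	for (int i = 0; i < n; i++) {
		if (d[i].overloaded || d[i].weight <= 0)
			continue;
		int doh = d[i].overhead;

		/* loh/lw > doh/dw  <=>  loh*dw > doh*lw  (weights > 0) */
		if (least < 0 ||
		    (int64_t)loh * d[i].weight > (int64_t)doh * d[least].weight) {
			least = i;
			loh = doh;
		}
	}
	return least;
}

int main(void)
{
	struct dst d[] = {
		{ .overhead = 2560, .weight = 1 },
		{ .overhead = 5120, .weight = 4 },   /* lighter per unit weight */
		{ .overhead =  256, .weight = 1, .overloaded = 1 },
	};

	printf("picked index %d\n", pick_least(d, 3));   /* 1 */
	return 0;
}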
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c index d8d9860934fe..961a6de9bb29 100644 --- a/net/netfilter/ipvs/ip_vs_nq.c +++ b/net/netfilter/ipvs/ip_vs_nq.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <net/ip_vs.h> | 40 | #include <net/ip_vs.h> |
41 | 41 | ||
42 | 42 | ||
43 | static inline unsigned int | 43 | static inline int |
44 | ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) | 44 | ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) |
45 | { | 45 | { |
46 | /* | 46 | /* |
@@ -59,7 +59,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, | |||
59 | struct ip_vs_iphdr *iph) | 59 | struct ip_vs_iphdr *iph) |
60 | { | 60 | { |
61 | struct ip_vs_dest *dest, *least = NULL; | 61 | struct ip_vs_dest *dest, *least = NULL; |
62 | unsigned int loh = 0, doh; | 62 | int loh = 0, doh; |
63 | 63 | ||
64 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); | 64 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); |
65 | 65 | ||
@@ -92,8 +92,8 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, | |||
92 | } | 92 | } |
93 | 93 | ||
94 | if (!least || | 94 | if (!least || |
95 | (loh * atomic_read(&dest->weight) > | 95 | ((__s64)loh * atomic_read(&dest->weight) > |
96 | doh * atomic_read(&least->weight))) { | 96 | (__s64)doh * atomic_read(&least->weight))) { |
97 | least = dest; | 97 | least = dest; |
98 | loh = doh; | 98 | loh = doh; |
99 | } | 99 | } |
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c index a5284cc3d882..e446b9fa7424 100644 --- a/net/netfilter/ipvs/ip_vs_sed.c +++ b/net/netfilter/ipvs/ip_vs_sed.c | |||
@@ -44,7 +44,7 @@ | |||
44 | #include <net/ip_vs.h> | 44 | #include <net/ip_vs.h> |
45 | 45 | ||
46 | 46 | ||
47 | static inline unsigned int | 47 | static inline int |
48 | ip_vs_sed_dest_overhead(struct ip_vs_dest *dest) | 48 | ip_vs_sed_dest_overhead(struct ip_vs_dest *dest) |
49 | { | 49 | { |
50 | /* | 50 | /* |
@@ -63,7 +63,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, | |||
63 | struct ip_vs_iphdr *iph) | 63 | struct ip_vs_iphdr *iph) |
64 | { | 64 | { |
65 | struct ip_vs_dest *dest, *least; | 65 | struct ip_vs_dest *dest, *least; |
66 | unsigned int loh, doh; | 66 | int loh, doh; |
67 | 67 | ||
68 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); | 68 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); |
69 | 69 | ||
@@ -99,8 +99,8 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, | |||
99 | if (dest->flags & IP_VS_DEST_F_OVERLOAD) | 99 | if (dest->flags & IP_VS_DEST_F_OVERLOAD) |
100 | continue; | 100 | continue; |
101 | doh = ip_vs_sed_dest_overhead(dest); | 101 | doh = ip_vs_sed_dest_overhead(dest); |
102 | if (loh * atomic_read(&dest->weight) > | 102 | if ((__s64)loh * atomic_read(&dest->weight) > |
103 | doh * atomic_read(&least->weight)) { | 103 | (__s64)doh * atomic_read(&least->weight)) { |
104 | least = dest; | 104 | least = dest; |
105 | loh = doh; | 105 | loh = doh; |
106 | } | 106 | } |
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c index 6dc1fa128840..b5b4650d50a9 100644 --- a/net/netfilter/ipvs/ip_vs_wlc.c +++ b/net/netfilter/ipvs/ip_vs_wlc.c | |||
@@ -35,7 +35,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, | |||
35 | struct ip_vs_iphdr *iph) | 35 | struct ip_vs_iphdr *iph) |
36 | { | 36 | { |
37 | struct ip_vs_dest *dest, *least; | 37 | struct ip_vs_dest *dest, *least; |
38 | unsigned int loh, doh; | 38 | int loh, doh; |
39 | 39 | ||
40 | IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n"); | 40 | IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n"); |
41 | 41 | ||
@@ -71,8 +71,8 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, | |||
71 | if (dest->flags & IP_VS_DEST_F_OVERLOAD) | 71 | if (dest->flags & IP_VS_DEST_F_OVERLOAD) |
72 | continue; | 72 | continue; |
73 | doh = ip_vs_dest_conn_overhead(dest); | 73 | doh = ip_vs_dest_conn_overhead(dest); |
74 | if (loh * atomic_read(&dest->weight) > | 74 | if ((__s64)loh * atomic_read(&dest->weight) > |
75 | doh * atomic_read(&least->weight)) { | 75 | (__s64)doh * atomic_read(&least->weight)) { |
76 | least = dest; | 76 | least = dest; |
77 | loh = doh; | 77 | loh = doh; |
78 | } | 78 | } |
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index 6fd967c6278c..cdf4567ba9b3 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c | |||
@@ -24,7 +24,7 @@ | |||
24 | int synproxy_net_id; | 24 | int synproxy_net_id; |
25 | EXPORT_SYMBOL_GPL(synproxy_net_id); | 25 | EXPORT_SYMBOL_GPL(synproxy_net_id); |
26 | 26 | ||
27 | void | 27 | bool |
28 | synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, | 28 | synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, |
29 | const struct tcphdr *th, struct synproxy_options *opts) | 29 | const struct tcphdr *th, struct synproxy_options *opts) |
30 | { | 30 | { |
@@ -32,7 +32,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, | |||
32 | u8 buf[40], *ptr; | 32 | u8 buf[40], *ptr; |
33 | 33 | ||
34 | ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf); | 34 | ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf); |
35 | BUG_ON(ptr == NULL); | 35 | if (ptr == NULL) |
36 | return false; | ||
36 | 37 | ||
37 | opts->options = 0; | 38 | opts->options = 0; |
38 | while (length > 0) { | 39 | while (length > 0) { |
@@ -41,16 +42,16 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, | |||
41 | 42 | ||
42 | switch (opcode) { | 43 | switch (opcode) { |
43 | case TCPOPT_EOL: | 44 | case TCPOPT_EOL: |
44 | return; | 45 | return true; |
45 | case TCPOPT_NOP: | 46 | case TCPOPT_NOP: |
46 | length--; | 47 | length--; |
47 | continue; | 48 | continue; |
48 | default: | 49 | default: |
49 | opsize = *ptr++; | 50 | opsize = *ptr++; |
50 | if (opsize < 2) | 51 | if (opsize < 2) |
51 | return; | 52 | return true; |
52 | if (opsize > length) | 53 | if (opsize > length) |
53 | return; | 54 | return true; |
54 | 55 | ||
55 | switch (opcode) { | 56 | switch (opcode) { |
56 | case TCPOPT_MSS: | 57 | case TCPOPT_MSS: |
@@ -84,6 +85,7 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, | |||
84 | length -= opsize; | 85 | length -= opsize; |
85 | } | 86 | } |
86 | } | 87 | } |
88 | return true; | ||
87 | } | 89 | } |
88 | EXPORT_SYMBOL_GPL(synproxy_parse_options); | 90 | EXPORT_SYMBOL_GPL(synproxy_parse_options); |
89 | 91 | ||
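
With the change above, synproxy_parse_options() reports failure instead of hitting BUG_ON() when the TCP option bytes cannot be pulled from the packet, so callers can simply drop it; malformed option lengths still just stop the walk. A standalone sketch of the same defensive option walk over a raw byte buffer, slightly more conservative than the kernel version (it also refuses to read an opsize byte past the end); constants and names are the standard TCP ones, the function itself is illustrative:

/* Defensive TCP option walk: stop quietly on malformed lengths, fail only
 * when the option bytes are not available at all.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TCPOPT_EOL 0
#define TCPOPT_NOP 1
#define TCPOPT_MSS 2

static bool parse_options(const uint8_t *ptr, int length, uint16_t *mss)
{
	if (!ptr)
		return false;             /* options not in the buffer: caller drops */

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return true;
		case TCPOPT_NOP:
			length--;
			continue;
		default:
			if (length < 2)
				return true;       /* no room for an opsize byte */
			opsize = *ptr++;
			if (opsize < 2 || opsize > length)
				return true;       /* malformed: keep what we have */
			if (opcode == TCPOPT_MSS && opsize == 4)
				*mss = (uint16_t)(ptr[0] << 8 | ptr[1]);
			ptr += opsize - 2;
			length -= opsize;
		}
	}
	return true;
}

int main(void)
{
	const uint8_t opts[] = { TCPOPT_MSS, 4, 0x05, 0xb4, TCPOPT_NOP, TCPOPT_EOL };
	uint16_t mss = 0;

	if (parse_options(opts, sizeof(opts), &mss))
		printf("mss=%u\n", mss);      /* 1460 */
	return 0;
}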
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 32ad015ee8ce..a2fef8b10b96 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c | |||
@@ -285,7 +285,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) | |||
285 | 285 | ||
286 | 286 | ||
287 | /* remove one skb from head of flow queue */ | 287 | /* remove one skb from head of flow queue */ |
288 | static struct sk_buff *fq_dequeue_head(struct fq_flow *flow) | 288 | static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow) |
289 | { | 289 | { |
290 | struct sk_buff *skb = flow->head; | 290 | struct sk_buff *skb = flow->head; |
291 | 291 | ||
@@ -293,6 +293,8 @@ static struct sk_buff *fq_dequeue_head(struct fq_flow *flow) | |||
293 | flow->head = skb->next; | 293 | flow->head = skb->next; |
294 | skb->next = NULL; | 294 | skb->next = NULL; |
295 | flow->qlen--; | 295 | flow->qlen--; |
296 | sch->qstats.backlog -= qdisc_pkt_len(skb); | ||
297 | sch->q.qlen--; | ||
296 | } | 298 | } |
297 | return skb; | 299 | return skb; |
298 | } | 300 | } |
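
fq_dequeue_head() now takes the qdisc and adjusts sch->q.qlen and the backlog itself, so every path that removes an skb from a flow (the normal dequeue and the rewritten fq_reset() further below) keeps the queue statistics consistent without duplicated bookkeeping. A toy sketch of the idea, with one helper owning the counters and both the consumer and the drain path going through it; all names are illustrative:

/* One dequeue helper owns the length/backlog accounting so every
 * removal path (consume or drain) stays consistent.
 */
#include <stdio.h>
#include <stdlib.h>

struct pkt { int len; struct pkt *next; };

struct queue {
	struct pkt *head;
	unsigned int qlen;        /* packets */
	unsigned int backlog;     /* bytes */
};

static struct pkt *dequeue_head(struct queue *q)
{
	struct pkt *p = q->head;

	if (p) {
		q->head = p->next;
		q->qlen--;
		q->backlog -= p->len;   /* accounting lives here, nowhere else */
	}
	return p;
}

static void queue_reset(struct queue *q)
{
	struct pkt *p;

	while ((p = dequeue_head(q)) != NULL)
		free(p);
}

int main(void)
{
	struct queue q = { 0 };
	struct pkt *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	p->len = 1500;
	q.head = p; q.qlen = 1; q.backlog = 1500;

	queue_reset(&q);
	printf("qlen=%u backlog=%u\n", q.qlen, q.backlog);   /* 0 0 */
	return 0;
}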
@@ -418,8 +420,9 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch) | |||
418 | struct fq_flow_head *head; | 420 | struct fq_flow_head *head; |
419 | struct sk_buff *skb; | 421 | struct sk_buff *skb; |
420 | struct fq_flow *f; | 422 | struct fq_flow *f; |
423 | u32 rate; | ||
421 | 424 | ||
422 | skb = fq_dequeue_head(&q->internal); | 425 | skb = fq_dequeue_head(sch, &q->internal); |
423 | if (skb) | 426 | if (skb) |
424 | goto out; | 427 | goto out; |
425 | fq_check_throttled(q, now); | 428 | fq_check_throttled(q, now); |
@@ -449,7 +452,7 @@ begin: | |||
449 | goto begin; | 452 | goto begin; |
450 | } | 453 | } |
451 | 454 | ||
452 | skb = fq_dequeue_head(f); | 455 | skb = fq_dequeue_head(sch, f); |
453 | if (!skb) { | 456 | if (!skb) { |
454 | head->first = f->next; | 457 | head->first = f->next; |
455 | /* force a pass through old_flows to prevent starvation */ | 458 | /* force a pass through old_flows to prevent starvation */ |
@@ -466,43 +469,74 @@ begin: | |||
466 | f->time_next_packet = now; | 469 | f->time_next_packet = now; |
467 | f->credit -= qdisc_pkt_len(skb); | 470 | f->credit -= qdisc_pkt_len(skb); |
468 | 471 | ||
469 | if (f->credit <= 0 && | 472 | if (f->credit > 0 || !q->rate_enable) |
470 | q->rate_enable && | 473 | goto out; |
471 | skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) { | ||
472 | u32 rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate; | ||
473 | 474 | ||
474 | rate = min(rate, q->flow_max_rate); | 475 | if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) { |
475 | if (rate) { | 476 | rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate; |
476 | u64 len = (u64)qdisc_pkt_len(skb) * NSEC_PER_SEC; | ||
477 | |||
478 | do_div(len, rate); | ||
479 | /* Since socket rate can change later, | ||
480 | * clamp the delay to 125 ms. | ||
481 | * TODO: maybe segment the too big skb, as in commit | ||
482 | * e43ac79a4bc ("sch_tbf: segment too big GSO packets") | ||
483 | */ | ||
484 | if (unlikely(len > 125 * NSEC_PER_MSEC)) { | ||
485 | len = 125 * NSEC_PER_MSEC; | ||
486 | q->stat_pkts_too_long++; | ||
487 | } | ||
488 | 477 | ||
489 | f->time_next_packet = now + len; | 478 | rate = min(rate, q->flow_max_rate); |
479 | } else { | ||
480 | rate = q->flow_max_rate; | ||
481 | if (rate == ~0U) | ||
482 | goto out; | ||
483 | } | ||
484 | if (rate) { | ||
485 | u32 plen = max(qdisc_pkt_len(skb), q->quantum); | ||
486 | u64 len = (u64)plen * NSEC_PER_SEC; | ||
487 | |||
488 | do_div(len, rate); | ||
489 | /* Since socket rate can change later, | ||
490 | * clamp the delay to 125 ms. | ||
491 | * TODO: maybe segment the too big skb, as in commit | ||
492 | * e43ac79a4bc ("sch_tbf: segment too big GSO packets") | ||
493 | */ | ||
494 | if (unlikely(len > 125 * NSEC_PER_MSEC)) { | ||
495 | len = 125 * NSEC_PER_MSEC; | ||
496 | q->stat_pkts_too_long++; | ||
490 | } | 497 | } |
498 | |||
499 | f->time_next_packet = now + len; | ||
491 | } | 500 | } |
492 | out: | 501 | out: |
493 | sch->qstats.backlog -= qdisc_pkt_len(skb); | ||
494 | qdisc_bstats_update(sch, skb); | 502 | qdisc_bstats_update(sch, skb); |
495 | sch->q.qlen--; | ||
496 | qdisc_unthrottled(sch); | 503 | qdisc_unthrottled(sch); |
497 | return skb; | 504 | return skb; |
498 | } | 505 | } |
499 | 506 | ||
500 | static void fq_reset(struct Qdisc *sch) | 507 | static void fq_reset(struct Qdisc *sch) |
501 | { | 508 | { |
509 | struct fq_sched_data *q = qdisc_priv(sch); | ||
510 | struct rb_root *root; | ||
502 | struct sk_buff *skb; | 511 | struct sk_buff *skb; |
512 | struct rb_node *p; | ||
513 | struct fq_flow *f; | ||
514 | unsigned int idx; | ||
503 | 515 | ||
504 | while ((skb = fq_dequeue(sch)) != NULL) | 516 | while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL) |
505 | kfree_skb(skb); | 517 | kfree_skb(skb); |
518 | |||
519 | if (!q->fq_root) | ||
520 | return; | ||
521 | |||
522 | for (idx = 0; idx < (1U << q->fq_trees_log); idx++) { | ||
523 | root = &q->fq_root[idx]; | ||
524 | while ((p = rb_first(root)) != NULL) { | ||
525 | f = container_of(p, struct fq_flow, fq_node); | ||
526 | rb_erase(p, root); | ||
527 | |||
528 | while ((skb = fq_dequeue_head(sch, f)) != NULL) | ||
529 | kfree_skb(skb); | ||
530 | |||
531 | kmem_cache_free(fq_flow_cachep, f); | ||
532 | } | ||
533 | } | ||
534 | q->new_flows.first = NULL; | ||
535 | q->old_flows.first = NULL; | ||
536 | q->delayed = RB_ROOT; | ||
537 | q->flows = 0; | ||
538 | q->inactive_flows = 0; | ||
539 | q->throttled_flows = 0; | ||
506 | } | 540 | } |
507 | 541 | ||
508 | static void fq_rehash(struct fq_sched_data *q, | 542 | static void fq_rehash(struct fq_sched_data *q, |
@@ -645,6 +679,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt) | |||
645 | while (sch->q.qlen > sch->limit) { | 679 | while (sch->q.qlen > sch->limit) { |
646 | struct sk_buff *skb = fq_dequeue(sch); | 680 | struct sk_buff *skb = fq_dequeue(sch); |
647 | 681 | ||
682 | if (!skb) | ||
683 | break; | ||
648 | kfree_skb(skb); | 684 | kfree_skb(skb); |
649 | drop_count++; | 685 | drop_count++; |
650 | } | 686 | } |
@@ -657,21 +693,9 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt) | |||
657 | static void fq_destroy(struct Qdisc *sch) | 693 | static void fq_destroy(struct Qdisc *sch) |
658 | { | 694 | { |
659 | struct fq_sched_data *q = qdisc_priv(sch); | 695 | struct fq_sched_data *q = qdisc_priv(sch); |
660 | struct rb_root *root; | ||
661 | struct rb_node *p; | ||
662 | unsigned int idx; | ||
663 | 696 | ||
664 | if (q->fq_root) { | 697 | fq_reset(sch); |
665 | for (idx = 0; idx < (1U << q->fq_trees_log); idx++) { | 698 | kfree(q->fq_root); |
666 | root = &q->fq_root[idx]; | ||
667 | while ((p = rb_first(root)) != NULL) { | ||
668 | rb_erase(p, root); | ||
669 | kmem_cache_free(fq_flow_cachep, | ||
670 | container_of(p, struct fq_flow, fq_node)); | ||
671 | } | ||
672 | } | ||
673 | kfree(q->fq_root); | ||
674 | } | ||
675 | qdisc_watchdog_cancel(&q->watchdog); | 699 | qdisc_watchdog_cancel(&q->watchdog); |
676 | } | 700 | } |
677 | 701 | ||
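
The reworked fq_dequeue() above spaces packets according to the pacing rate using at least a quantum's worth of bytes per gap, and clamps the result to 125 ms because the socket's rate may change later. A small sketch of just that arithmetic (nanoseconds, 64-bit intermediate to avoid overflow; rate assumed nonzero, names illustrative):

/* Pacing gap: at least one quantum of bytes at `rate` bytes/sec,
 * clamped to 125 ms.
 */
#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC  1000000000ULL
#define NSEC_PER_MSEC 1000000ULL

static uint64_t pacing_delay_ns(uint32_t pkt_len, uint32_t quantum, uint32_t rate)
{
	uint32_t plen = pkt_len > quantum ? pkt_len : quantum;
	uint64_t len = (uint64_t)plen * NSEC_PER_SEC / rate;   /* rate > 0 */

	if (len > 125 * NSEC_PER_MSEC)   /* socket rate can change later */
		len = 125 * NSEC_PER_MSEC;
	return len;
}

int main(void)
{
	/* 1500-byte packet, 3028-byte quantum, 1 MB/s pacing rate */
	printf("%" PRIu64 " ns\n", pacing_delay_ns(1500, 3028, 1000000));
	return 0;
}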