38 files changed, 413 insertions, 261 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index a3e0d10e386e..f01f54f27750 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1843,6 +1843,12 @@ S: Orphan | |||
1843 | F: Documentation/filesystems/befs.txt | 1843 | F: Documentation/filesystems/befs.txt |
1844 | F: fs/befs/ | 1844 | F: fs/befs/ |
1845 | 1845 | ||
1846 | BECKHOFF CX5020 ETHERCAT MASTER DRIVER | ||
1847 | M: Dariusz Marcinkiewicz <reksio@newterm.pl> | ||
1848 | L: netdev@vger.kernel.org | ||
1849 | S: Maintained | ||
1850 | F: drivers/net/ethernet/ec_bhf.c | ||
1851 | |||
1846 | BFS FILE SYSTEM | 1852 | BFS FILE SYSTEM |
1847 | M: "Tigran A. Aivazian" <tigran@aivazian.fsnet.co.uk> | 1853 | M: "Tigran A. Aivazian" <tigran@aivazian.fsnet.co.uk> |
1848 | S: Maintained | 1854 | S: Maintained |
@@ -5982,6 +5988,12 @@ T: git git://linuxtv.org/media_tree.git | |||
5982 | S: Maintained | 5988 | S: Maintained |
5983 | F: drivers/media/radio/radio-mr800.c | 5989 | F: drivers/media/radio/radio-mr800.c |
5984 | 5990 | ||
5991 | MRF24J40 IEEE 802.15.4 RADIO DRIVER | ||
5992 | M: Alan Ott <alan@signal11.us> | ||
5993 | L: linux-wpan@vger.kernel.org | ||
5994 | S: Maintained | ||
5995 | F: drivers/net/ieee802154/mrf24j40.c | ||
5996 | |||
5985 | MSI LAPTOP SUPPORT | 5997 | MSI LAPTOP SUPPORT |
5986 | M: "Lee, Chun-Yi" <jlee@suse.com> | 5998 | M: "Lee, Chun-Yi" <jlee@suse.com> |
5987 | L: platform-driver-x86@vger.kernel.org | 5999 | L: platform-driver-x86@vger.kernel.org |
diff --git a/drivers/isdn/hardware/eicon/xdi_msg.h b/drivers/isdn/hardware/eicon/xdi_msg.h
index 58368f7b5cba..2498c349a32e 100644
--- a/drivers/isdn/hardware/eicon/xdi_msg.h
+++ b/drivers/isdn/hardware/eicon/xdi_msg.h
@@ -1,6 +1,6 @@ | |||
1 | /* $Id: xdi_msg.h,v 1.1.2.2 2001/02/16 08:40:36 armin Exp $ */ | 1 | /* $Id: xdi_msg.h,v 1.1.2.2 2001/02/16 08:40:36 armin Exp $ */ |
2 | 2 | ||
3 | #ifndef __DIVA_XDI_UM_CFG_MESSSGE_H__ | 3 | #ifndef __DIVA_XDI_UM_CFG_MESSAGE_H__ |
4 | #define __DIVA_XDI_UM_CFG_MESSAGE_H__ | 4 | #define __DIVA_XDI_UM_CFG_MESSAGE_H__ |
5 | 5 | ||
6 | /* | 6 | /* |
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 5dede6e64376..109cb44291f5 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -280,7 +280,7 @@ static int c_can_plat_probe(struct platform_device *pdev) | |||
280 | 280 | ||
281 | priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start, | 281 | priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start, |
282 | resource_size(res)); | 282 | resource_size(res)); |
283 | if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0) | 283 | if (!priv->raminit_ctrlreg || priv->instance < 0) |
284 | dev_info(&pdev->dev, "control memory is not used for raminit\n"); | 284 | dev_info(&pdev->dev, "control memory is not used for raminit\n"); |
285 | else | 285 | else |
286 | priv->raminit = c_can_hw_raminit_ti; | 286 | priv->raminit = c_can_hw_raminit_ti; |
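Why this hunk is correct: devm_ioremap() signals failure by returning NULL, not an ERR_PTR() value, so the previous IS_ERR() test could never fire. For illustration only, a minimal probe fragment contrasting the two error conventions (hypothetical example_probe(), not the c_can driver's code):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *a, *b;

	if (!res)
		return -ENODEV;

	/* devm_ioremap() returns NULL on failure: test with !ptr */
	a = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!a)
		return -ENOMEM;

	/* devm_ioremap_resource() returns an ERR_PTR(): test with IS_ERR() */
	b = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(b))
		return PTR_ERR(b);

	return 0;
}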
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index f425ec2c7839..944aa5d3af6e 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -549,6 +549,13 @@ static void do_state(struct net_device *dev, | |||
549 | 549 | ||
550 | /* process state changes depending on the new state */ | 550 | /* process state changes depending on the new state */ |
551 | switch (new_state) { | 551 | switch (new_state) { |
552 | case CAN_STATE_ERROR_WARNING: | ||
553 | netdev_dbg(dev, "Error Warning\n"); | ||
554 | cf->can_id |= CAN_ERR_CRTL; | ||
555 | cf->data[1] = (bec.txerr > bec.rxerr) ? | ||
556 | CAN_ERR_CRTL_TX_WARNING : | ||
557 | CAN_ERR_CRTL_RX_WARNING; | ||
558 | break; | ||
552 | case CAN_STATE_ERROR_ACTIVE: | 559 | case CAN_STATE_ERROR_ACTIVE: |
553 | netdev_dbg(dev, "Error Active\n"); | 560 | netdev_dbg(dev, "Error Active\n"); |
554 | cf->can_id |= CAN_ERR_PROT; | 561 | cf->can_id |= CAN_ERR_PROT; |
@@ -852,6 +859,8 @@ static int flexcan_chip_start(struct net_device *dev) | |||
852 | if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE || | 859 | if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE || |
853 | priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) | 860 | priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) |
854 | reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; | 861 | reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; |
862 | else | ||
863 | reg_ctrl &= ~FLEXCAN_CTRL_ERR_MSK; | ||
855 | 864 | ||
856 | /* save for later use */ | 865 | /* save for later use */ |
857 | priv->reg_ctrl_default = reg_ctrl; | 866 | priv->reg_ctrl_default = reg_ctrl; |
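The new CAN_STATE_ERROR_WARNING case reports which direction crossed the warning threshold by comparing the TX and RX error counters. A standalone sketch of that mapping, using a hypothetical helper rather than flexcan's do_state(), assuming only the uapi CAN error definitions:

#include <linux/can.h>
#include <linux/can/error.h>

/* Hypothetical helper: record the warning direction from the counters. */
static void can_fill_warning_frame(struct can_frame *cf, u16 txerr, u16 rxerr)
{
	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_WARNING :
					CAN_ERR_CRTL_RX_WARNING;
}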
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index d1692154ed1b..b27ac6074afb 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -172,6 +172,35 @@ static void set_normal_mode(struct net_device *dev) | |||
172 | netdev_err(dev, "setting SJA1000 into normal mode failed!\n"); | 172 | netdev_err(dev, "setting SJA1000 into normal mode failed!\n"); |
173 | } | 173 | } |
174 | 174 | ||
175 | /* | ||
176 | * initialize SJA1000 chip: | ||
177 | * - reset chip | ||
178 | * - set output mode | ||
179 | * - set baudrate | ||
180 | * - enable interrupts | ||
181 | * - start operating mode | ||
182 | */ | ||
183 | static void chipset_init(struct net_device *dev) | ||
184 | { | ||
185 | struct sja1000_priv *priv = netdev_priv(dev); | ||
186 | |||
187 | /* set clock divider and output control register */ | ||
188 | priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN); | ||
189 | |||
190 | /* set acceptance filter (accept all) */ | ||
191 | priv->write_reg(priv, SJA1000_ACCC0, 0x00); | ||
192 | priv->write_reg(priv, SJA1000_ACCC1, 0x00); | ||
193 | priv->write_reg(priv, SJA1000_ACCC2, 0x00); | ||
194 | priv->write_reg(priv, SJA1000_ACCC3, 0x00); | ||
195 | |||
196 | priv->write_reg(priv, SJA1000_ACCM0, 0xFF); | ||
197 | priv->write_reg(priv, SJA1000_ACCM1, 0xFF); | ||
198 | priv->write_reg(priv, SJA1000_ACCM2, 0xFF); | ||
199 | priv->write_reg(priv, SJA1000_ACCM3, 0xFF); | ||
200 | |||
201 | priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL); | ||
202 | } | ||
203 | |||
175 | static void sja1000_start(struct net_device *dev) | 204 | static void sja1000_start(struct net_device *dev) |
176 | { | 205 | { |
177 | struct sja1000_priv *priv = netdev_priv(dev); | 206 | struct sja1000_priv *priv = netdev_priv(dev); |
@@ -180,6 +209,10 @@ static void sja1000_start(struct net_device *dev) | |||
180 | if (priv->can.state != CAN_STATE_STOPPED) | 209 | if (priv->can.state != CAN_STATE_STOPPED) |
181 | set_reset_mode(dev); | 210 | set_reset_mode(dev); |
182 | 211 | ||
212 | /* Initialize chip if uninitialized at this stage */ | ||
213 | if (!(priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN)) | ||
214 | chipset_init(dev); | ||
215 | |||
183 | /* Clear error counters and error code capture */ | 216 | /* Clear error counters and error code capture */ |
184 | priv->write_reg(priv, SJA1000_TXERR, 0x0); | 217 | priv->write_reg(priv, SJA1000_TXERR, 0x0); |
185 | priv->write_reg(priv, SJA1000_RXERR, 0x0); | 218 | priv->write_reg(priv, SJA1000_RXERR, 0x0); |
@@ -237,35 +270,6 @@ static int sja1000_get_berr_counter(const struct net_device *dev, | |||
237 | } | 270 | } |
238 | 271 | ||
239 | /* | 272 | /* |
240 | * initialize SJA1000 chip: | ||
241 | * - reset chip | ||
242 | * - set output mode | ||
243 | * - set baudrate | ||
244 | * - enable interrupts | ||
245 | * - start operating mode | ||
246 | */ | ||
247 | static void chipset_init(struct net_device *dev) | ||
248 | { | ||
249 | struct sja1000_priv *priv = netdev_priv(dev); | ||
250 | |||
251 | /* set clock divider and output control register */ | ||
252 | priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN); | ||
253 | |||
254 | /* set acceptance filter (accept all) */ | ||
255 | priv->write_reg(priv, SJA1000_ACCC0, 0x00); | ||
256 | priv->write_reg(priv, SJA1000_ACCC1, 0x00); | ||
257 | priv->write_reg(priv, SJA1000_ACCC2, 0x00); | ||
258 | priv->write_reg(priv, SJA1000_ACCC3, 0x00); | ||
259 | |||
260 | priv->write_reg(priv, SJA1000_ACCM0, 0xFF); | ||
261 | priv->write_reg(priv, SJA1000_ACCM1, 0xFF); | ||
262 | priv->write_reg(priv, SJA1000_ACCM2, 0xFF); | ||
263 | priv->write_reg(priv, SJA1000_ACCM3, 0xFF); | ||
264 | |||
265 | priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL); | ||
266 | } | ||
267 | |||
268 | /* | ||
269 | * transmit a CAN message | 273 | * transmit a CAN message |
270 | * message layout in the sk_buff should be like this: | 274 | * message layout in the sk_buff should be like this: |
271 | * xx xx xx xx ff ll 00 11 22 33 44 55 66 77 | 275 | * xx xx xx xx ff ll 00 11 22 33 44 55 66 77 |
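Moving chipset_init() above sja1000_start() lets the start path re-run the one-time setup when the CDR_PELICAN bit reads back clear, i.e. the controller lost its configuration. A generic sketch of that "probe a sticky config bit, reinitialize if it is gone" pattern, with hypothetical register names and a hypothetical struct ctrl (not SJA1000 code):

#include <linux/types.h>

#define CFG_REG		0x00
#define CFG_STICKY_BIT	0x80

struct ctrl {
	u8 (*read_reg)(struct ctrl *c, int reg);
	void (*write_reg)(struct ctrl *c, int reg, u8 val);
};

static void ctrl_full_init(struct ctrl *c)
{
	/* one-time register setup, ends by setting the sticky bit */
	c->write_reg(c, CFG_REG, CFG_STICKY_BIT);
}

static void ctrl_start(struct ctrl *c)
{
	if (!(c->read_reg(c, CFG_REG) & CFG_STICKY_BIT))
		ctrl_full_init(c);	/* chip was reset behind our back */
	/* ...normal start sequence... */
}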
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index e1a8f4e19983..e4222af2baa6 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -563,15 +563,21 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata) | |||
563 | struct xgene_enet_desc_ring *ring; | 563 | struct xgene_enet_desc_ring *ring; |
564 | 564 | ||
565 | ring = pdata->tx_ring; | 565 | ring = pdata->tx_ring; |
566 | if (ring && ring->cp_ring && ring->cp_ring->cp_skb) | 566 | if (ring) { |
567 | devm_kfree(dev, ring->cp_ring->cp_skb); | 567 | if (ring->cp_ring && ring->cp_ring->cp_skb) |
568 | xgene_enet_free_desc_ring(ring); | 568 | devm_kfree(dev, ring->cp_ring->cp_skb); |
569 | xgene_enet_free_desc_ring(ring); | ||
570 | } | ||
569 | 571 | ||
570 | ring = pdata->rx_ring; | 572 | ring = pdata->rx_ring; |
571 | if (ring && ring->buf_pool && ring->buf_pool->rx_skb) | 573 | if (ring) { |
572 | devm_kfree(dev, ring->buf_pool->rx_skb); | 574 | if (ring->buf_pool) { |
573 | xgene_enet_free_desc_ring(ring->buf_pool); | 575 | if (ring->buf_pool->rx_skb) |
574 | xgene_enet_free_desc_ring(ring); | 576 | devm_kfree(dev, ring->buf_pool->rx_skb); |
577 | xgene_enet_free_desc_ring(ring->buf_pool); | ||
578 | } | ||
579 | xgene_enet_free_desc_ring(ring); | ||
580 | } | ||
575 | } | 581 | } |
576 | 582 | ||
577 | static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( | 583 | static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4e6c82e20224..4ccc806b1150 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -483,11 +483,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, | |||
483 | 483 | ||
484 | #ifdef BNX2X_STOP_ON_ERROR | 484 | #ifdef BNX2X_STOP_ON_ERROR |
485 | fp->tpa_queue_used |= (1 << queue); | 485 | fp->tpa_queue_used |= (1 << queue); |
486 | #ifdef _ASM_GENERIC_INT_L64_H | ||
487 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", | ||
488 | #else | ||
489 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", | 486 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", |
490 | #endif | ||
491 | fp->tpa_queue_used); | 487 | fp->tpa_queue_used); |
492 | #endif | 488 | #endif |
493 | } | 489 | } |
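The removed #ifdef dates from when some 64-bit configurations typedef'd u64 as unsigned long; with every architecture now on the unsigned long long definition, one "%llx" format string suffices. A tiny illustrative sketch (plain printk example, not bnx2x code):

#include <linux/printk.h>
#include <linux/types.h>

static void print_mask(u64 mask)
{
	/* u64 is unsigned long long on all architectures, so %llx is safe */
	pr_info("mask = 0x%llx\n", mask);
}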
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c13364b6cc19..900cab420810 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10052,6 +10052,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, | |||
10052 | } | 10052 | } |
10053 | 10053 | ||
10054 | #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) | 10054 | #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) |
10055 | #define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \ | ||
10056 | 0x1848 + ((f) << 4)) | ||
10055 | #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) | 10057 | #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) |
10056 | #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) | 10058 | #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) |
10057 | #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) | 10059 | #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) |
@@ -10059,8 +10061,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, | |||
10059 | #define BCM_5710_UNDI_FW_MF_MAJOR (0x07) | 10061 | #define BCM_5710_UNDI_FW_MF_MAJOR (0x07) |
10060 | #define BCM_5710_UNDI_FW_MF_MINOR (0x08) | 10062 | #define BCM_5710_UNDI_FW_MF_MINOR (0x08) |
10061 | #define BCM_5710_UNDI_FW_MF_VERS (0x05) | 10063 | #define BCM_5710_UNDI_FW_MF_VERS (0x05) |
10062 | #define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4)) | ||
10063 | #define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4)) | ||
10064 | 10064 | ||
10065 | static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) | 10065 | static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) |
10066 | { | 10066 | { |
@@ -10079,72 +10079,25 @@ static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) | |||
10079 | return false; | 10079 | return false; |
10080 | } | 10080 | } |
10081 | 10081 | ||
10082 | static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) | 10082 | static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc) |
10083 | { | ||
10084 | u8 major, minor, version; | ||
10085 | u32 fw; | ||
10086 | |||
10087 | /* Must check that FW is loaded */ | ||
10088 | if (!(REG_RD(bp, MISC_REG_RESET_REG_1) & | ||
10089 | MISC_REGISTERS_RESET_REG_1_RST_XSEM)) { | ||
10090 | BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n"); | ||
10091 | return false; | ||
10092 | } | ||
10093 | |||
10094 | /* Read Currently loaded FW version */ | ||
10095 | fw = REG_RD(bp, XSEM_REG_PRAM); | ||
10096 | major = fw & 0xff; | ||
10097 | minor = (fw >> 0x8) & 0xff; | ||
10098 | version = (fw >> 0x10) & 0xff; | ||
10099 | BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n", | ||
10100 | fw, major, minor, version); | ||
10101 | |||
10102 | if (major > BCM_5710_UNDI_FW_MF_MAJOR) | ||
10103 | return true; | ||
10104 | |||
10105 | if ((major == BCM_5710_UNDI_FW_MF_MAJOR) && | ||
10106 | (minor > BCM_5710_UNDI_FW_MF_MINOR)) | ||
10107 | return true; | ||
10108 | |||
10109 | if ((major == BCM_5710_UNDI_FW_MF_MAJOR) && | ||
10110 | (minor == BCM_5710_UNDI_FW_MF_MINOR) && | ||
10111 | (version >= BCM_5710_UNDI_FW_MF_VERS)) | ||
10112 | return true; | ||
10113 | |||
10114 | return false; | ||
10115 | } | ||
10116 | |||
10117 | static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp) | ||
10118 | { | ||
10119 | int i; | ||
10120 | |||
10121 | /* Due to legacy (FW) code, the first function on each engine has a | ||
10122 | * different offset macro from the rest of the functions. | ||
10123 | * Setting this for all 8 functions is harmless regardless of whether | ||
10124 | * this is actually a multi-function device. | ||
10125 | */ | ||
10126 | for (i = 0; i < 2; i++) | ||
10127 | REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1); | ||
10128 | |||
10129 | for (i = 2; i < 8; i++) | ||
10130 | REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1); | ||
10131 | |||
10132 | BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n"); | ||
10133 | } | ||
10134 | |||
10135 | static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc) | ||
10136 | { | 10083 | { |
10137 | u16 rcq, bd; | 10084 | u16 rcq, bd; |
10138 | u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port)); | 10085 | u32 addr, tmp_reg; |
10139 | 10086 | ||
10087 | if (BP_FUNC(bp) < 2) | ||
10088 | addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp)); | ||
10089 | else | ||
10090 | addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2); | ||
10091 | |||
10092 | tmp_reg = REG_RD(bp, addr); | ||
10140 | rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; | 10093 | rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; |
10141 | bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; | 10094 | bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; |
10142 | 10095 | ||
10143 | tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); | 10096 | tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); |
10144 | REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg); | 10097 | REG_WR(bp, addr, tmp_reg); |
10145 | 10098 | ||
10146 | BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", | 10099 | BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n", |
10147 | port, bd, rcq); | 10100 | BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq); |
10148 | } | 10101 | } |
10149 | 10102 | ||
10150 | static int bnx2x_prev_mcp_done(struct bnx2x *bp) | 10103 | static int bnx2x_prev_mcp_done(struct bnx2x *bp) |
@@ -10383,7 +10336,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
10383 | /* Reset should be performed after BRB is emptied */ | 10336 | /* Reset should be performed after BRB is emptied */ |
10384 | if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { | 10337 | if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { |
10385 | u32 timer_count = 1000; | 10338 | u32 timer_count = 1000; |
10386 | bool need_write = true; | ||
10387 | 10339 | ||
10388 | /* Close the MAC Rx to prevent BRB from filling up */ | 10340 | /* Close the MAC Rx to prevent BRB from filling up */ |
10389 | bnx2x_prev_unload_close_mac(bp, &mac_vals); | 10341 | bnx2x_prev_unload_close_mac(bp, &mac_vals); |
@@ -10420,20 +10372,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
10420 | else | 10372 | else |
10421 | timer_count--; | 10373 | timer_count--; |
10422 | 10374 | ||
10423 | /* New UNDI FW supports MF and contains better | 10375 | /* If UNDI resides in memory, manually increment it */ |
10424 | * cleaning methods - might be redundant but harmless. | 10376 | if (prev_undi) |
10425 | */ | 10377 | bnx2x_prev_unload_undi_inc(bp, 1); |
10426 | if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { | 10378 | |
10427 | if (need_write) { | ||
10428 | bnx2x_prev_unload_undi_mf(bp); | ||
10429 | need_write = false; | ||
10430 | } | ||
10431 | } else if (prev_undi) { | ||
10432 | /* If UNDI resides in memory, | ||
10433 | * manually increment it | ||
10434 | */ | ||
10435 | bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1); | ||
10436 | } | ||
10437 | udelay(10); | 10379 | udelay(10); |
10438 | } | 10380 | } |
10439 | 10381 | ||
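bnx2x_prev_unload_undi_inc() now selects one of two TSTORM addresses from the function number and then bumps the two 16-bit producers packed into that 32-bit word. A self-contained sketch of the pack/unpack arithmetic performed by the BNX2X_PREV_UNDI_RCQ/BD/PROD macros (hypothetical helper name):

#include <linux/types.h>

/* Hypothetical helper: a 32-bit word holds two 16-bit producers,
 * RCQ in the low half and BD in the high half; advance both by 'inc'.
 */
static u32 undi_prod_add(u32 word, u8 inc)
{
	u16 rcq = (word & 0xffff) + inc;
	u16 bd  = ((word >> 16) & 0xffff) + inc;

	return ((u32)bd << 16) | rcq;
}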
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d57282172ea5..c067b7888ac4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -652,6 +652,7 @@ struct adapter { | |||
652 | struct tid_info tids; | 652 | struct tid_info tids; |
653 | void **tid_release_head; | 653 | void **tid_release_head; |
654 | spinlock_t tid_release_lock; | 654 | spinlock_t tid_release_lock; |
655 | struct workqueue_struct *workq; | ||
655 | struct work_struct tid_release_task; | 656 | struct work_struct tid_release_task; |
656 | struct work_struct db_full_task; | 657 | struct work_struct db_full_task; |
657 | struct work_struct db_drop_task; | 658 | struct work_struct db_drop_task; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 1afee70ce856..18fb9c61d7ba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -643,8 +643,6 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) | |||
643 | return ret; | 643 | return ret; |
644 | } | 644 | } |
645 | 645 | ||
646 | static struct workqueue_struct *workq; | ||
647 | |||
648 | /** | 646 | /** |
649 | * link_start - enable a port | 647 | * link_start - enable a port |
650 | * @dev: the port to enable | 648 | * @dev: the port to enable |
@@ -3340,7 +3338,7 @@ static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, | |||
3340 | adap->tid_release_head = (void **)((uintptr_t)p | chan); | 3338 | adap->tid_release_head = (void **)((uintptr_t)p | chan); |
3341 | if (!adap->tid_release_task_busy) { | 3339 | if (!adap->tid_release_task_busy) { |
3342 | adap->tid_release_task_busy = true; | 3340 | adap->tid_release_task_busy = true; |
3343 | queue_work(workq, &adap->tid_release_task); | 3341 | queue_work(adap->workq, &adap->tid_release_task); |
3344 | } | 3342 | } |
3345 | spin_unlock_bh(&adap->tid_release_lock); | 3343 | spin_unlock_bh(&adap->tid_release_lock); |
3346 | } | 3344 | } |
@@ -4140,7 +4138,7 @@ void t4_db_full(struct adapter *adap) | |||
4140 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); | 4138 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); |
4141 | t4_set_reg_field(adap, SGE_INT_ENABLE3, | 4139 | t4_set_reg_field(adap, SGE_INT_ENABLE3, |
4142 | DBFIFO_HP_INT | DBFIFO_LP_INT, 0); | 4140 | DBFIFO_HP_INT | DBFIFO_LP_INT, 0); |
4143 | queue_work(workq, &adap->db_full_task); | 4141 | queue_work(adap->workq, &adap->db_full_task); |
4144 | } | 4142 | } |
4145 | } | 4143 | } |
4146 | 4144 | ||
@@ -4150,7 +4148,7 @@ void t4_db_dropped(struct adapter *adap) | |||
4150 | disable_dbs(adap); | 4148 | disable_dbs(adap); |
4151 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); | 4149 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); |
4152 | } | 4150 | } |
4153 | queue_work(workq, &adap->db_drop_task); | 4151 | queue_work(adap->workq, &adap->db_drop_task); |
4154 | } | 4152 | } |
4155 | 4153 | ||
4156 | static void uld_attach(struct adapter *adap, unsigned int uld) | 4154 | static void uld_attach(struct adapter *adap, unsigned int uld) |
@@ -6517,6 +6515,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6517 | goto out_disable_device; | 6515 | goto out_disable_device; |
6518 | } | 6516 | } |
6519 | 6517 | ||
6518 | adapter->workq = create_singlethread_workqueue("cxgb4"); | ||
6519 | if (!adapter->workq) { | ||
6520 | err = -ENOMEM; | ||
6521 | goto out_free_adapter; | ||
6522 | } | ||
6523 | |||
6520 | /* PCI device has been enabled */ | 6524 | /* PCI device has been enabled */ |
6521 | adapter->flags |= DEV_ENABLED; | 6525 | adapter->flags |= DEV_ENABLED; |
6522 | 6526 | ||
@@ -6715,6 +6719,9 @@ sriov: | |||
6715 | out_unmap_bar0: | 6719 | out_unmap_bar0: |
6716 | iounmap(adapter->regs); | 6720 | iounmap(adapter->regs); |
6717 | out_free_adapter: | 6721 | out_free_adapter: |
6722 | if (adapter->workq) | ||
6723 | destroy_workqueue(adapter->workq); | ||
6724 | |||
6718 | kfree(adapter); | 6725 | kfree(adapter); |
6719 | out_disable_device: | 6726 | out_disable_device: |
6720 | pci_disable_pcie_error_reporting(pdev); | 6727 | pci_disable_pcie_error_reporting(pdev); |
@@ -6736,6 +6743,11 @@ static void remove_one(struct pci_dev *pdev) | |||
6736 | if (adapter) { | 6743 | if (adapter) { |
6737 | int i; | 6744 | int i; |
6738 | 6745 | ||
6746 | /* Tear down per-adapter Work Queue first since it can contain | ||
6747 | * references to our adapter data structure. | ||
6748 | */ | ||
6749 | destroy_workqueue(adapter->workq); | ||
6750 | |||
6739 | if (is_offload(adapter)) | 6751 | if (is_offload(adapter)) |
6740 | detach_ulds(adapter); | 6752 | detach_ulds(adapter); |
6741 | 6753 | ||
@@ -6788,20 +6800,14 @@ static int __init cxgb4_init_module(void) | |||
6788 | { | 6800 | { |
6789 | int ret; | 6801 | int ret; |
6790 | 6802 | ||
6791 | workq = create_singlethread_workqueue("cxgb4"); | ||
6792 | if (!workq) | ||
6793 | return -ENOMEM; | ||
6794 | |||
6795 | /* Debugfs support is optional, just warn if this fails */ | 6803 | /* Debugfs support is optional, just warn if this fails */ |
6796 | cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); | 6804 | cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); |
6797 | if (!cxgb4_debugfs_root) | 6805 | if (!cxgb4_debugfs_root) |
6798 | pr_warn("could not create debugfs entry, continuing\n"); | 6806 | pr_warn("could not create debugfs entry, continuing\n"); |
6799 | 6807 | ||
6800 | ret = pci_register_driver(&cxgb4_driver); | 6808 | ret = pci_register_driver(&cxgb4_driver); |
6801 | if (ret < 0) { | 6809 | if (ret < 0) |
6802 | debugfs_remove(cxgb4_debugfs_root); | 6810 | debugfs_remove(cxgb4_debugfs_root); |
6803 | destroy_workqueue(workq); | ||
6804 | } | ||
6805 | 6811 | ||
6806 | register_inet6addr_notifier(&cxgb4_inet6addr_notifier); | 6812 | register_inet6addr_notifier(&cxgb4_inet6addr_notifier); |
6807 | 6813 | ||
@@ -6813,8 +6819,6 @@ static void __exit cxgb4_cleanup_module(void) | |||
6813 | unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier); | 6819 | unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier); |
6814 | pci_unregister_driver(&cxgb4_driver); | 6820 | pci_unregister_driver(&cxgb4_driver); |
6815 | debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ | 6821 | debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ |
6816 | flush_workqueue(workq); | ||
6817 | destroy_workqueue(workq); | ||
6818 | } | 6822 | } |
6819 | 6823 | ||
6820 | module_init(cxgb4_init_module); | 6824 | module_init(cxgb4_init_module); |
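This change replaces the module-global workqueue with one owned by each adapter: created in init_one(), destroyed first in remove_one() so no queued work can dereference a freed adapter. A minimal sketch of that ownership pattern with a hypothetical struct mydev, assuming only the standard workqueue API (not cxgb4's actual code):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct mydev {
	struct workqueue_struct *wq;
	struct work_struct task;
};

static void mydev_task(struct work_struct *work)
{
	struct mydev *d = container_of(work, struct mydev, task);
	/* ... per-device work; may dereference 'd' freely ... */
	(void)d;
}

static struct mydev *mydev_create(void)
{
	struct mydev *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	d->wq = create_singlethread_workqueue("mydev");
	if (!d->wq) {
		kfree(d);
		return NULL;
	}
	INIT_WORK(&d->task, mydev_task);
	return d;
}

static void mydev_destroy(struct mydev *d)
{
	/* Drain and tear down the queue before freeing the context it uses */
	destroy_workqueue(d->wq);
	kfree(d);
}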
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b0bba32d69d5..d22d728d4e5c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2303,7 +2303,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, | |||
2303 | FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0)); | 2303 | FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0)); |
2304 | c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | | 2304 | c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | |
2305 | FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); | 2305 | FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); |
2306 | c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid)); | 2306 | c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE | |
2307 | FW_EQ_ETH_CMD_VIID(pi->viid)); | ||
2307 | c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | | 2308 | c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | |
2308 | FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | | 2309 | FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | |
2309 | FW_EQ_ETH_CMD_FETCHRO(1) | | 2310 | FW_EQ_ETH_CMD_FETCHRO(1) | |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 0549170d7e2e..5f2729ebadbe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1227,6 +1227,7 @@ struct fw_eq_eth_cmd { | |||
1227 | #define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16) | 1227 | #define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16) |
1228 | #define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0) | 1228 | #define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0) |
1229 | 1229 | ||
1230 | #define FW_EQ_ETH_CMD_AUTOEQUEQE (1U << 30) | ||
1230 | #define FW_EQ_ETH_CMD_VIID(x) ((x) << 16) | 1231 | #define FW_EQ_ETH_CMD_VIID(x) ((x) << 16) |
1231 | 1232 | ||
1232 | struct fw_eq_ctrl_cmd { | 1233 | struct fw_eq_ctrl_cmd { |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index bdfa80ca5e31..a5fb9493dee8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2250,7 +2250,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, | |||
2250 | cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC | | 2250 | cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC | |
2251 | FW_EQ_ETH_CMD_EQSTART | | 2251 | FW_EQ_ETH_CMD_EQSTART | |
2252 | FW_LEN16(cmd)); | 2252 | FW_LEN16(cmd)); |
2253 | cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid)); | 2253 | cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE | |
2254 | FW_EQ_ETH_CMD_VIID(pi->viid)); | ||
2254 | cmd.fetchszm_to_iqid = | 2255 | cmd.fetchszm_to_iqid = |
2255 | cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) | | 2256 | cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) | |
2256 | FW_EQ_ETH_CMD_PCIECHN(pi->port_id) | | 2257 | FW_EQ_ETH_CMD_PCIECHN(pi->port_id) | |
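Both the PF (cxgb4) and VF (cxgb4vf) egress-queue allocation commands now OR in the new FW_EQ_ETH_CMD_AUTOEQUEQE bit before the word is converted to big-endian for the firmware. A small generic sketch of composing such a command word (hypothetical flag names, not the real firmware layout):

#include <linux/types.h>
#include <asm/byteorder.h>

#define CMD_FLAG_AUTO	(1U << 30)	/* single-bit option */
#define CMD_VIID(x)	((x) << 16)	/* multi-bit field */

/* Hypothetical: build the word in CPU order, convert once when storing. */
static __be32 build_viid_word(u16 viid)
{
	u32 v = CMD_FLAG_AUTO | CMD_VIID(viid);

	return cpu_to_be32(v);
}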
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 9f7fa644a397..ee41d98b44b6 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -275,6 +275,9 @@ struct fec_enet_private { | |||
275 | struct clk *clk_enet_out; | 275 | struct clk *clk_enet_out; |
276 | struct clk *clk_ptp; | 276 | struct clk *clk_ptp; |
277 | 277 | ||
278 | bool ptp_clk_on; | ||
279 | struct mutex ptp_clk_mutex; | ||
280 | |||
278 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ | 281 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ |
279 | unsigned char *tx_bounce[TX_RING_SIZE]; | 282 | unsigned char *tx_bounce[TX_RING_SIZE]; |
280 | struct sk_buff *tx_skbuff[TX_RING_SIZE]; | 283 | struct sk_buff *tx_skbuff[TX_RING_SIZE]; |
@@ -335,7 +338,7 @@ struct fec_enet_private { | |||
335 | u32 cycle_speed; | 338 | u32 cycle_speed; |
336 | int hwts_rx_en; | 339 | int hwts_rx_en; |
337 | int hwts_tx_en; | 340 | int hwts_tx_en; |
338 | struct timer_list time_keep; | 341 | struct delayed_work time_keep; |
339 | struct regulator *reg_phy; | 342 | struct regulator *reg_phy; |
340 | }; | 343 | }; |
341 | 344 | ||
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 4f87dffcb9b2..89355a719625 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1611,17 +1611,27 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) | |||
1611 | goto failed_clk_enet_out; | 1611 | goto failed_clk_enet_out; |
1612 | } | 1612 | } |
1613 | if (fep->clk_ptp) { | 1613 | if (fep->clk_ptp) { |
1614 | mutex_lock(&fep->ptp_clk_mutex); | ||
1614 | ret = clk_prepare_enable(fep->clk_ptp); | 1615 | ret = clk_prepare_enable(fep->clk_ptp); |
1615 | if (ret) | 1616 | if (ret) { |
1617 | mutex_unlock(&fep->ptp_clk_mutex); | ||
1616 | goto failed_clk_ptp; | 1618 | goto failed_clk_ptp; |
1619 | } else { | ||
1620 | fep->ptp_clk_on = true; | ||
1621 | } | ||
1622 | mutex_unlock(&fep->ptp_clk_mutex); | ||
1617 | } | 1623 | } |
1618 | } else { | 1624 | } else { |
1619 | clk_disable_unprepare(fep->clk_ahb); | 1625 | clk_disable_unprepare(fep->clk_ahb); |
1620 | clk_disable_unprepare(fep->clk_ipg); | 1626 | clk_disable_unprepare(fep->clk_ipg); |
1621 | if (fep->clk_enet_out) | 1627 | if (fep->clk_enet_out) |
1622 | clk_disable_unprepare(fep->clk_enet_out); | 1628 | clk_disable_unprepare(fep->clk_enet_out); |
1623 | if (fep->clk_ptp) | 1629 | if (fep->clk_ptp) { |
1630 | mutex_lock(&fep->ptp_clk_mutex); | ||
1624 | clk_disable_unprepare(fep->clk_ptp); | 1631 | clk_disable_unprepare(fep->clk_ptp); |
1632 | fep->ptp_clk_on = false; | ||
1633 | mutex_unlock(&fep->ptp_clk_mutex); | ||
1634 | } | ||
1625 | } | 1635 | } |
1626 | 1636 | ||
1627 | return 0; | 1637 | return 0; |
@@ -2625,6 +2635,8 @@ fec_probe(struct platform_device *pdev) | |||
2625 | if (IS_ERR(fep->clk_enet_out)) | 2635 | if (IS_ERR(fep->clk_enet_out)) |
2626 | fep->clk_enet_out = NULL; | 2636 | fep->clk_enet_out = NULL; |
2627 | 2637 | ||
2638 | fep->ptp_clk_on = false; | ||
2639 | mutex_init(&fep->ptp_clk_mutex); | ||
2628 | fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); | 2640 | fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); |
2629 | fep->bufdesc_ex = | 2641 | fep->bufdesc_ex = |
2630 | pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; | 2642 | pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; |
@@ -2715,10 +2727,10 @@ fec_drv_remove(struct platform_device *pdev) | |||
2715 | struct net_device *ndev = platform_get_drvdata(pdev); | 2727 | struct net_device *ndev = platform_get_drvdata(pdev); |
2716 | struct fec_enet_private *fep = netdev_priv(ndev); | 2728 | struct fec_enet_private *fep = netdev_priv(ndev); |
2717 | 2729 | ||
2730 | cancel_delayed_work_sync(&fep->time_keep); | ||
2718 | cancel_work_sync(&fep->tx_timeout_work); | 2731 | cancel_work_sync(&fep->tx_timeout_work); |
2719 | unregister_netdev(ndev); | 2732 | unregister_netdev(ndev); |
2720 | fec_enet_mii_remove(fep); | 2733 | fec_enet_mii_remove(fep); |
2721 | del_timer_sync(&fep->time_keep); | ||
2722 | if (fep->reg_phy) | 2734 | if (fep->reg_phy) |
2723 | regulator_disable(fep->reg_phy); | 2735 | regulator_disable(fep->reg_phy); |
2724 | if (fep->ptp_clock) | 2736 | if (fep->ptp_clock) |
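fec_enet_clk_enable() now takes ptp_clk_mutex around enabling or disabling the optional PTP clock and records the state in ptp_clk_on, so other paths (the PTP ops) can check the flag under the same mutex before touching the hardware. A condensed sketch of that guard, with a hypothetical struct pdev_priv modelled on the driver's fields:

#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct pdev_priv {
	struct clk *clk_ptp;		/* optional clock, may be NULL */
	struct mutex ptp_clk_mutex;
	bool ptp_clk_on;
};

static int priv_ptp_clk_set(struct pdev_priv *p, bool enable)
{
	int ret = 0;

	if (!p->clk_ptp)
		return 0;

	mutex_lock(&p->ptp_clk_mutex);
	if (enable) {
		ret = clk_prepare_enable(p->clk_ptp);
		if (!ret)
			p->ptp_clk_on = true;
	} else {
		clk_disable_unprepare(p->clk_ptp);
		p->ptp_clk_on = false;
	}
	mutex_unlock(&p->ptp_clk_mutex);

	return ret;
}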
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 82386b29914a..cca3617a2321 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -245,12 +245,20 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp, | |||
245 | u64 ns; | 245 | u64 ns; |
246 | unsigned long flags; | 246 | unsigned long flags; |
247 | 247 | ||
248 | mutex_lock(&fep->ptp_clk_mutex); | ||
249 | /* Check the ptp clock */ | ||
250 | if (!fep->ptp_clk_on) { | ||
251 | mutex_unlock(&fep->ptp_clk_mutex); | ||
252 | return -EINVAL; | ||
253 | } | ||
254 | |||
248 | ns = ts->tv_sec * 1000000000ULL; | 255 | ns = ts->tv_sec * 1000000000ULL; |
249 | ns += ts->tv_nsec; | 256 | ns += ts->tv_nsec; |
250 | 257 | ||
251 | spin_lock_irqsave(&fep->tmreg_lock, flags); | 258 | spin_lock_irqsave(&fep->tmreg_lock, flags); |
252 | timecounter_init(&fep->tc, &fep->cc, ns); | 259 | timecounter_init(&fep->tc, &fep->cc, ns); |
253 | spin_unlock_irqrestore(&fep->tmreg_lock, flags); | 260 | spin_unlock_irqrestore(&fep->tmreg_lock, flags); |
261 | mutex_unlock(&fep->ptp_clk_mutex); | ||
254 | return 0; | 262 | return 0; |
255 | } | 263 | } |
256 | 264 | ||
@@ -338,17 +346,22 @@ int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr) | |||
338 | * fec_time_keep - call timecounter_read every second to avoid timer overrun | 346 | * fec_time_keep - call timecounter_read every second to avoid timer overrun |
339 | * because ENET just support 32bit counter, will timeout in 4s | 347 | * because ENET just support 32bit counter, will timeout in 4s |
340 | */ | 348 | */ |
341 | static void fec_time_keep(unsigned long _data) | 349 | static void fec_time_keep(struct work_struct *work) |
342 | { | 350 | { |
343 | struct fec_enet_private *fep = (struct fec_enet_private *)_data; | 351 | struct delayed_work *dwork = to_delayed_work(work); |
352 | struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep); | ||
344 | u64 ns; | 353 | u64 ns; |
345 | unsigned long flags; | 354 | unsigned long flags; |
346 | 355 | ||
347 | spin_lock_irqsave(&fep->tmreg_lock, flags); | 356 | mutex_lock(&fep->ptp_clk_mutex); |
348 | ns = timecounter_read(&fep->tc); | 357 | if (fep->ptp_clk_on) { |
349 | spin_unlock_irqrestore(&fep->tmreg_lock, flags); | 358 | spin_lock_irqsave(&fep->tmreg_lock, flags); |
359 | ns = timecounter_read(&fep->tc); | ||
360 | spin_unlock_irqrestore(&fep->tmreg_lock, flags); | ||
361 | } | ||
362 | mutex_unlock(&fep->ptp_clk_mutex); | ||
350 | 363 | ||
351 | mod_timer(&fep->time_keep, jiffies + HZ); | 364 | schedule_delayed_work(&fep->time_keep, HZ); |
352 | } | 365 | } |
353 | 366 | ||
354 | /** | 367 | /** |
@@ -386,15 +399,13 @@ void fec_ptp_init(struct platform_device *pdev) | |||
386 | 399 | ||
387 | fec_ptp_start_cyclecounter(ndev); | 400 | fec_ptp_start_cyclecounter(ndev); |
388 | 401 | ||
389 | init_timer(&fep->time_keep); | 402 | INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); |
390 | fep->time_keep.data = (unsigned long)fep; | ||
391 | fep->time_keep.function = fec_time_keep; | ||
392 | fep->time_keep.expires = jiffies + HZ; | ||
393 | add_timer(&fep->time_keep); | ||
394 | 403 | ||
395 | fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev); | 404 | fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev); |
396 | if (IS_ERR(fep->ptp_clock)) { | 405 | if (IS_ERR(fep->ptp_clock)) { |
397 | fep->ptp_clock = NULL; | 406 | fep->ptp_clock = NULL; |
398 | pr_err("ptp_clock_register failed\n"); | 407 | pr_err("ptp_clock_register failed\n"); |
399 | } | 408 | } |
409 | |||
410 | schedule_delayed_work(&fep->time_keep, HZ); | ||
400 | } | 411 | } |
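The timer-based fec_time_keep() becomes a delayed work item: the handler recovers its private data with to_delayed_work() plus container_of(), re-arms itself with schedule_delayed_work(), and teardown uses cancel_delayed_work_sync(). A standalone sketch of that conversion pattern with hypothetical names (struct keeper, keeper_tick):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct keeper {
	struct delayed_work tick;
	/* ... state refreshed once a second ... */
};

static void keeper_tick(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct keeper *k = container_of(dwork, struct keeper, tick);

	/* ... periodic housekeeping using 'k' ... */

	schedule_delayed_work(&k->tick, HZ);	/* re-arm, one second */
}

static void keeper_start(struct keeper *k)
{
	INIT_DELAYED_WORK(&k->tick, keeper_tick);
	schedule_delayed_work(&k->tick, HZ);
}

static void keeper_stop(struct keeper *k)
{
	cancel_delayed_work_sync(&k->tick);
}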
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c9127562bd22..21978cc019e7 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -292,6 +292,18 @@ failure: | |||
292 | atomic_add(buffers_added, &(pool->available)); | 292 | atomic_add(buffers_added, &(pool->available)); |
293 | } | 293 | } |
294 | 294 | ||
295 | /* | ||
296 | * The final 8 bytes of the buffer list is a counter of frames dropped | ||
297 | * because there was not a buffer in the buffer list capable of holding | ||
298 | * the frame. | ||
299 | */ | ||
300 | static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter) | ||
301 | { | ||
302 | __be64 *p = adapter->buffer_list_addr + 4096 - 8; | ||
303 | |||
304 | adapter->rx_no_buffer = be64_to_cpup(p); | ||
305 | } | ||
306 | |||
295 | /* replenish routine */ | 307 | /* replenish routine */ |
296 | static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) | 308 | static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) |
297 | { | 309 | { |
@@ -307,8 +319,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) | |||
307 | ibmveth_replenish_buffer_pool(adapter, pool); | 319 | ibmveth_replenish_buffer_pool(adapter, pool); |
308 | } | 320 | } |
309 | 321 | ||
310 | adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) + | 322 | ibmveth_update_rx_no_buffer(adapter); |
311 | 4096 - 8); | ||
312 | } | 323 | } |
313 | 324 | ||
314 | /* empty and free ana buffer pool - also used to do cleanup in error paths */ | 325 | /* empty and free ana buffer pool - also used to do cleanup in error paths */ |
@@ -698,8 +709,7 @@ static int ibmveth_close(struct net_device *netdev) | |||
698 | 709 | ||
699 | free_irq(netdev->irq, netdev); | 710 | free_irq(netdev->irq, netdev); |
700 | 711 | ||
701 | adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) + | 712 | ibmveth_update_rx_no_buffer(adapter); |
702 | 4096 - 8); | ||
703 | 713 | ||
704 | ibmveth_cleanup(adapter); | 714 | ibmveth_cleanup(adapter); |
705 | 715 | ||
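The new ibmveth_update_rx_no_buffer() helper documents that the final 8 bytes of the 4 KB buffer list hold a big-endian drop counter and converts it with be64_to_cpup() instead of a raw u64 dereference. A tiny sketch of reading such a field, assuming a hypothetical control page of the same layout:

#include <asm/byteorder.h>
#include <linux/types.h>

/* Hypothetical: a 4 KB control page whose final 8 bytes are a
 * big-endian counter maintained by firmware.
 */
static u64 read_trailing_be64(void *page4k)
{
	__be64 *p = (__be64 *)((char *)page4k + 4096 - 8);

	return be64_to_cpup(p);
}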
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index bb7fe98b3a6c..537b6216971d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -247,7 +247,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi) | |||
247 | u32 prttsyn_stat; | 247 | u32 prttsyn_stat; |
248 | int n; | 248 | int n; |
249 | 249 | ||
250 | if (pf->flags & I40E_FLAG_PTP) | 250 | if (!(pf->flags & I40E_FLAG_PTP)) |
251 | return; | 251 | return; |
252 | 252 | ||
253 | prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); | 253 | prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 89672551dce9..3ac6a0d2f143 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1003,11 +1003,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) | |||
1003 | static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, | 1003 | static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, |
1004 | u32 v_retval, u8 *msg, u16 msglen) | 1004 | u32 v_retval, u8 *msg, u16 msglen) |
1005 | { | 1005 | { |
1006 | struct i40e_pf *pf = vf->pf; | 1006 | struct i40e_pf *pf; |
1007 | struct i40e_hw *hw = &pf->hw; | 1007 | struct i40e_hw *hw; |
1008 | int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | 1008 | int abs_vf_id; |
1009 | i40e_status aq_ret; | 1009 | i40e_status aq_ret; |
1010 | 1010 | ||
1011 | /* validate the request */ | ||
1012 | if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) | ||
1013 | return -EINVAL; | ||
1014 | |||
1015 | pf = vf->pf; | ||
1016 | hw = &pf->hw; | ||
1017 | abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | ||
1018 | |||
1011 | /* single place to detect unsuccessful return values */ | 1019 | /* single place to detect unsuccessful return values */ |
1012 | if (v_retval) { | 1020 | if (v_retval) { |
1013 | vf->num_invalid_msgs++; | 1021 | vf->num_invalid_msgs++; |
@@ -1928,17 +1936,20 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, | |||
1928 | { | 1936 | { |
1929 | struct i40e_hw *hw = &pf->hw; | 1937 | struct i40e_hw *hw = &pf->hw; |
1930 | struct i40e_vf *vf = pf->vf; | 1938 | struct i40e_vf *vf = pf->vf; |
1931 | int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | ||
1932 | int i; | 1939 | int i; |
1933 | 1940 | ||
1934 | for (i = 0; i < pf->num_alloc_vfs; i++) { | 1941 | for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { |
1942 | int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | ||
1943 | /* Not all vfs are enabled so skip the ones that are not */ | ||
1944 | if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && | ||
1945 | !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) | ||
1946 | continue; | ||
1947 | |||
1935 | /* Ignore return value on purpose - a given VF may fail, but | 1948 | /* Ignore return value on purpose - a given VF may fail, but |
1936 | * we need to keep going and send to all of them | 1949 | * we need to keep going and send to all of them |
1937 | */ | 1950 | */ |
1938 | i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, | 1951 | i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, |
1939 | msg, msglen, NULL); | 1952 | msg, msglen, NULL); |
1940 | vf++; | ||
1941 | abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | ||
1942 | } | 1953 | } |
1943 | } | 1954 | } |
1944 | 1955 | ||
@@ -1954,12 +1965,12 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf) | |||
1954 | struct i40e_hw *hw = &pf->hw; | 1965 | struct i40e_hw *hw = &pf->hw; |
1955 | struct i40e_vf *vf = pf->vf; | 1966 | struct i40e_vf *vf = pf->vf; |
1956 | struct i40e_link_status *ls = &pf->hw.phy.link_info; | 1967 | struct i40e_link_status *ls = &pf->hw.phy.link_info; |
1957 | int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | ||
1958 | int i; | 1968 | int i; |
1959 | 1969 | ||
1960 | pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; | 1970 | pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; |
1961 | pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; | 1971 | pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; |
1962 | for (i = 0; i < pf->num_alloc_vfs; i++) { | 1972 | for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { |
1973 | int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | ||
1963 | if (vf->link_forced) { | 1974 | if (vf->link_forced) { |
1964 | pfe.event_data.link_event.link_status = vf->link_up; | 1975 | pfe.event_data.link_event.link_status = vf->link_up; |
1965 | pfe.event_data.link_event.link_speed = | 1976 | pfe.event_data.link_event.link_speed = |
@@ -1972,8 +1983,6 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf) | |||
1972 | i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, | 1983 | i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, |
1973 | 0, (u8 *)&pfe, sizeof(pfe), | 1984 | 0, (u8 *)&pfe, sizeof(pfe), |
1974 | NULL); | 1985 | NULL); |
1975 | vf++; | ||
1976 | abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | ||
1977 | } | 1986 | } |
1978 | } | 1987 | } |
1979 | 1988 | ||
@@ -2002,7 +2011,18 @@ void i40e_vc_notify_reset(struct i40e_pf *pf) | |||
2002 | void i40e_vc_notify_vf_reset(struct i40e_vf *vf) | 2011 | void i40e_vc_notify_vf_reset(struct i40e_vf *vf) |
2003 | { | 2012 | { |
2004 | struct i40e_virtchnl_pf_event pfe; | 2013 | struct i40e_virtchnl_pf_event pfe; |
2005 | int abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; | 2014 | int abs_vf_id; |
2015 | |||
2016 | /* validate the request */ | ||
2017 | if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) | ||
2018 | return; | ||
2019 | |||
2020 | /* verify if the VF is in either init or active before proceeding */ | ||
2021 | if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && | ||
2022 | !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) | ||
2023 | return; | ||
2024 | |||
2025 | abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; | ||
2006 | 2026 | ||
2007 | pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; | 2027 | pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; |
2008 | pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; | 2028 | pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 16039d1497b8..b84f5ea3d659 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -268,7 +268,7 @@ struct qlcnic_fdt { | |||
268 | u16 cksum; | 268 | u16 cksum; |
269 | u16 unused; | 269 | u16 unused; |
270 | u8 model[16]; | 270 | u8 model[16]; |
271 | u16 mfg_id; | 271 | u8 mfg_id; |
272 | u16 id; | 272 | u16 id; |
273 | u8 flag; | 273 | u8 flag; |
274 | u8 erase_cmd; | 274 | u8 erase_cmd; |
@@ -2362,6 +2362,19 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter) | |||
2362 | return QLC_DEFAULT_VNIC_COUNT; | 2362 | return QLC_DEFAULT_VNIC_COUNT; |
2363 | } | 2363 | } |
2364 | 2364 | ||
2365 | static inline void qlcnic_swap32_buffer(u32 *buffer, int count) | ||
2366 | { | ||
2367 | #if defined(__BIG_ENDIAN) | ||
2368 | u32 *tmp = buffer; | ||
2369 | int i; | ||
2370 | |||
2371 | for (i = 0; i < count; i++) { | ||
2372 | *tmp = swab32(*tmp); | ||
2373 | tmp++; | ||
2374 | } | ||
2375 | #endif | ||
2376 | } | ||
2377 | |||
2365 | #ifdef CONFIG_QLCNIC_HWMON | 2378 | #ifdef CONFIG_QLCNIC_HWMON |
2366 | void qlcnic_register_hwmon_dev(struct qlcnic_adapter *); | 2379 | void qlcnic_register_hwmon_dev(struct qlcnic_adapter *); |
2367 | void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *); | 2380 | void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index a4a4ec0b68f8..476e4998ef99 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2603,7 +2603,7 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter, | |||
2603 | } | 2603 | } |
2604 | 2604 | ||
2605 | qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW, | 2605 | qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW, |
2606 | (addr)); | 2606 | (addr & 0xFFFF0000)); |
2607 | 2607 | ||
2608 | range = flash_offset + (count * sizeof(u32)); | 2608 | range = flash_offset + (count * sizeof(u32)); |
2609 | /* Check if data is spread across multiple sectors */ | 2609 | /* Check if data is spread across multiple sectors */ |
@@ -2753,7 +2753,7 @@ int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter) | |||
2753 | ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION, | 2753 | ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION, |
2754 | (u8 *)&adapter->ahw->fdt, | 2754 | (u8 *)&adapter->ahw->fdt, |
2755 | count); | 2755 | count); |
2756 | | 2756 | qlcnic_swap32_buffer((u32 *)&adapter->ahw->fdt, count); |
2757 | qlcnic_83xx_unlock_flash(adapter); | 2757 | qlcnic_83xx_unlock_flash(adapter); |
2758 | return ret; | 2758 | return ret; |
2759 | } | 2759 | } |
@@ -2788,7 +2788,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter, | |||
2788 | 2788 | ||
2789 | addr1 = (sector_start_addr & 0xFF) << 16; | 2789 | addr1 = (sector_start_addr & 0xFF) << 16; |
2790 | addr2 = (sector_start_addr & 0xFF0000) >> 16; | 2790 | addr2 = (sector_start_addr & 0xFF0000) >> 16; |
2791 | reversed_addr = addr1 | addr2; | 2791 | reversed_addr = addr1 | addr2 | (sector_start_addr & 0xFF00); |
2792 | 2792 | ||
2793 | qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, | 2793 | qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, |
2794 | reversed_addr); | 2794 | reversed_addr); |
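The erase fix keeps the middle byte of the 24-bit sector address when building the byte-reversed value written to the flash controller; previously only bytes 0 and 2 were swapped and byte 1 was dropped. A small sketch of that reversal as a hypothetical standalone helper:

#include <linux/types.h>

/* Hypothetical: reverse the byte order of a 24-bit address,
 * i.e. 0x00AABBCC -> 0x00CCBBAA, keeping the middle byte.
 */
static u32 reverse_addr24(u32 addr)
{
	return ((addr & 0xFF) << 16) |
	       ((addr & 0xFF0000) >> 16) |
	       (addr & 0xFF00);
}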
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index f33559b72528..86783e1afcf7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1378,31 +1378,45 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) | |||
1378 | { | 1378 | { |
1379 | struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; | 1379 | struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; |
1380 | const struct firmware *fw = fw_info->fw; | 1380 | const struct firmware *fw = fw_info->fw; |
1381 | u32 dest, *p_cache; | 1381 | u32 dest, *p_cache, *temp; |
1382 | int i, ret = -EIO; | 1382 | int i, ret = -EIO; |
1383 | __le32 *temp_le; | ||
1383 | u8 data[16]; | 1384 | u8 data[16]; |
1384 | size_t size; | 1385 | size_t size; |
1385 | u64 addr; | 1386 | u64 addr; |
1386 | 1387 | ||
1388 | temp = kzalloc(fw->size, GFP_KERNEL); | ||
1389 | if (!temp) { | ||
1390 | release_firmware(fw); | ||
1391 | fw_info->fw = NULL; | ||
1392 | return -ENOMEM; | ||
1393 | } | ||
1394 | |||
1395 | temp_le = (__le32 *)fw->data; | ||
1396 | |||
1397 | /* FW image in file is in little endian, swap the data to nullify | ||
1398 | * the effect of writel() operation on big endian platform. | ||
1399 | */ | ||
1400 | for (i = 0; i < fw->size / sizeof(u32); i++) | ||
1401 | temp[i] = __le32_to_cpu(temp_le[i]); | ||
1402 | |||
1387 | dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR); | 1403 | dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR); |
1388 | size = (fw->size & ~0xF); | 1404 | size = (fw->size & ~0xF); |
1389 | p_cache = (u32 *)fw->data; | 1405 | p_cache = temp; |
1390 | addr = (u64)dest; | 1406 | addr = (u64)dest; |
1391 | 1407 | ||
1392 | ret = qlcnic_ms_mem_write128(adapter, addr, | 1408 | ret = qlcnic_ms_mem_write128(adapter, addr, |
1393 | p_cache, size / 16); | 1409 | p_cache, size / 16); |
1394 | if (ret) { | 1410 | if (ret) { |
1395 | dev_err(&adapter->pdev->dev, "MS memory write failed\n"); | 1411 | dev_err(&adapter->pdev->dev, "MS memory write failed\n"); |
1396 | release_firmware(fw); | 1412 | goto exit; |
1397 | fw_info->fw = NULL; | ||
1398 | return -EIO; | ||
1399 | } | 1413 | } |
1400 | 1414 | ||
1401 | /* alignment check */ | 1415 | /* alignment check */ |
1402 | if (fw->size & 0xF) { | 1416 | if (fw->size & 0xF) { |
1403 | addr = dest + size; | 1417 | addr = dest + size; |
1404 | for (i = 0; i < (fw->size & 0xF); i++) | 1418 | for (i = 0; i < (fw->size & 0xF); i++) |
1405 | data[i] = fw->data[size + i]; | 1419 | data[i] = temp[size + i]; |
1406 | for (; i < 16; i++) | 1420 | for (; i < 16; i++) |
1407 | data[i] = 0; | 1421 | data[i] = 0; |
1408 | ret = qlcnic_ms_mem_write128(adapter, addr, | 1422 | ret = qlcnic_ms_mem_write128(adapter, addr, |
@@ -1410,15 +1424,16 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) | |||
1410 | if (ret) { | 1424 | if (ret) { |
1411 | dev_err(&adapter->pdev->dev, | 1425 | dev_err(&adapter->pdev->dev, |
1412 | "MS memory write failed\n"); | 1426 | "MS memory write failed\n"); |
1413 | release_firmware(fw); | 1427 | goto exit; |
1414 | fw_info->fw = NULL; | ||
1415 | return -EIO; | ||
1416 | } | 1428 | } |
1417 | } | 1429 | } |
1430 | |||
1431 | exit: | ||
1418 | release_firmware(fw); | 1432 | release_firmware(fw); |
1419 | fw_info->fw = NULL; | 1433 | fw_info->fw = NULL; |
1434 | kfree(temp); | ||
1420 | 1435 | ||
1421 | return 0; | 1436 | return ret; |
1422 | } | 1437 | } |
1423 | 1438 | ||
1424 | static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter) | 1439 | static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter) |
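qlcnic_83xx_copy_fw_file() now copies the firmware image into a temporary buffer converted from little-endian file order to CPU order, so the later writel()-based copy produces the same bytes on big-endian hosts; the single exit label releases the firmware and frees the buffer on every path and propagates ret instead of always returning 0. A reduced sketch of just the conversion step, as a hypothetical helper using the real kernel byte-order macros:

#include <asm/byteorder.h>
#include <linux/firmware.h>
#include <linux/slab.h>

/* Hypothetical: return a kzalloc'ed CPU-order copy of a little-endian
 * firmware image (whole 32-bit words only); caller kfree()s it.
 */
static u32 *fw_image_to_cpu_order(const struct firmware *fw)
{
	const __le32 *src = (const __le32 *)fw->data;
	size_t i, words = fw->size / sizeof(u32);
	u32 *dst = kzalloc(fw->size, GFP_KERNEL);

	if (!dst)
		return NULL;

	for (i = 0; i < words; i++)
		dst[i] = le32_to_cpu(src[i]);

	return dst;
}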
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index e46fc39d425d..c9f57fb84b9e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -47,15 +47,26 @@ struct qlcnic_common_entry_hdr { | |||
47 | u32 type; | 47 | u32 type; |
48 | u32 offset; | 48 | u32 offset; |
49 | u32 cap_size; | 49 | u32 cap_size; |
50 | #if defined(__LITTLE_ENDIAN) | ||
50 | u8 mask; | 51 | u8 mask; |
51 | u8 rsvd[2]; | 52 | u8 rsvd[2]; |
52 | u8 flags; | 53 | u8 flags; |
54 | #else | ||
55 | u8 flags; | ||
56 | u8 rsvd[2]; | ||
57 | u8 mask; | ||
58 | #endif | ||
53 | } __packed; | 59 | } __packed; |
54 | 60 | ||
55 | struct __crb { | 61 | struct __crb { |
56 | u32 addr; | 62 | u32 addr; |
63 | #if defined(__LITTLE_ENDIAN) | ||
57 | u8 stride; | 64 | u8 stride; |
58 | u8 rsvd1[3]; | 65 | u8 rsvd1[3]; |
66 | #else | ||
67 | u8 rsvd1[3]; | ||
68 | u8 stride; | ||
69 | #endif | ||
59 | u32 data_size; | 70 | u32 data_size; |
60 | u32 no_ops; | 71 | u32 no_ops; |
61 | u32 rsvd2[4]; | 72 | u32 rsvd2[4]; |
@@ -63,15 +74,28 @@ struct __crb { | |||
63 | 74 | ||
64 | struct __ctrl { | 75 | struct __ctrl { |
65 | u32 addr; | 76 | u32 addr; |
77 | #if defined(__LITTLE_ENDIAN) | ||
66 | u8 stride; | 78 | u8 stride; |
67 | u8 index_a; | 79 | u8 index_a; |
68 | u16 timeout; | 80 | u16 timeout; |
81 | #else | ||
82 | u16 timeout; | ||
83 | u8 index_a; | ||
84 | u8 stride; | ||
85 | #endif | ||
69 | u32 data_size; | 86 | u32 data_size; |
70 | u32 no_ops; | 87 | u32 no_ops; |
88 | #if defined(__LITTLE_ENDIAN) | ||
71 | u8 opcode; | 89 | u8 opcode; |
72 | u8 index_v; | 90 | u8 index_v; |
73 | u8 shl_val; | 91 | u8 shl_val; |
74 | u8 shr_val; | 92 | u8 shr_val; |
93 | #else | ||
94 | u8 shr_val; | ||
95 | u8 shl_val; | ||
96 | u8 index_v; | ||
97 | u8 opcode; | ||
98 | #endif | ||
75 | u32 val1; | 99 | u32 val1; |
76 | u32 val2; | 100 | u32 val2; |
77 | u32 val3; | 101 | u32 val3; |
@@ -79,16 +103,27 @@ struct __ctrl { | |||
79 | 103 | ||
80 | struct __cache { | 104 | struct __cache { |
81 | u32 addr; | 105 | u32 addr; |
106 | #if defined(__LITTLE_ENDIAN) | ||
82 | u16 stride; | 107 | u16 stride; |
83 | u16 init_tag_val; | 108 | u16 init_tag_val; |
109 | #else | ||
110 | u16 init_tag_val; | ||
111 | u16 stride; | ||
112 | #endif | ||
84 | u32 size; | 113 | u32 size; |
85 | u32 no_ops; | 114 | u32 no_ops; |
86 | u32 ctrl_addr; | 115 | u32 ctrl_addr; |
87 | u32 ctrl_val; | 116 | u32 ctrl_val; |
88 | u32 read_addr; | 117 | u32 read_addr; |
118 | #if defined(__LITTLE_ENDIAN) | ||
89 | u8 read_addr_stride; | 119 | u8 read_addr_stride; |
90 | u8 read_addr_num; | 120 | u8 read_addr_num; |
91 | u8 rsvd1[2]; | 121 | u8 rsvd1[2]; |
122 | #else | ||
123 | u8 rsvd1[2]; | ||
124 | u8 read_addr_num; | ||
125 | u8 read_addr_stride; | ||
126 | #endif | ||
92 | } __packed; | 127 | } __packed; |
93 | 128 | ||
94 | struct __ocm { | 129 | struct __ocm { |
@@ -122,23 +157,39 @@ struct __mux { | |||
122 | 157 | ||
123 | struct __queue { | 158 | struct __queue { |
124 | u32 sel_addr; | 159 | u32 sel_addr; |
160 | #if defined(__LITTLE_ENDIAN) | ||
125 | u16 stride; | 161 | u16 stride; |
126 | u8 rsvd[2]; | 162 | u8 rsvd[2]; |
163 | #else | ||
164 | u8 rsvd[2]; | ||
165 | u16 stride; | ||
166 | #endif | ||
127 | u32 size; | 167 | u32 size; |
128 | u32 no_ops; | 168 | u32 no_ops; |
129 | u8 rsvd2[8]; | 169 | u8 rsvd2[8]; |
130 | u32 read_addr; | 170 | u32 read_addr; |
171 | #if defined(__LITTLE_ENDIAN) | ||
131 | u8 read_addr_stride; | 172 | u8 read_addr_stride; |
132 | u8 read_addr_cnt; | 173 | u8 read_addr_cnt; |
133 | u8 rsvd3[2]; | 174 | u8 rsvd3[2]; |
175 | #else | ||
176 | u8 rsvd3[2]; | ||
177 | u8 read_addr_cnt; | ||
178 | u8 read_addr_stride; | ||
179 | #endif | ||
134 | } __packed; | 180 | } __packed; |
135 | 181 | ||
136 | struct __pollrd { | 182 | struct __pollrd { |
137 | u32 sel_addr; | 183 | u32 sel_addr; |
138 | u32 read_addr; | 184 | u32 read_addr; |
139 | u32 sel_val; | 185 | u32 sel_val; |
186 | #if defined(__LITTLE_ENDIAN) | ||
140 | u16 sel_val_stride; | 187 | u16 sel_val_stride; |
141 | u16 no_ops; | 188 | u16 no_ops; |
189 | #else | ||
190 | u16 no_ops; | ||
191 | u16 sel_val_stride; | ||
192 | #endif | ||
142 | u32 poll_wait; | 193 | u32 poll_wait; |
143 | u32 poll_mask; | 194 | u32 poll_mask; |
144 | u32 data_size; | 195 | u32 data_size; |
@@ -153,9 +204,15 @@ struct __mux2 { | |||
153 | u32 no_ops; | 204 | u32 no_ops; |
154 | u32 sel_val_mask; | 205 | u32 sel_val_mask; |
155 | u32 read_addr; | 206 | u32 read_addr; |
207 | #if defined(__LITTLE_ENDIAN) | ||
156 | u8 sel_val_stride; | 208 | u8 sel_val_stride; |
157 | u8 data_size; | 209 | u8 data_size; |
158 | u8 rsvd[2]; | 210 | u8 rsvd[2]; |
211 | #else | ||
212 | u8 rsvd[2]; | ||
213 | u8 data_size; | ||
214 | u8 sel_val_stride; | ||
215 | #endif | ||
159 | } __packed; | 216 | } __packed; |
160 | 217 | ||
161 | struct __pollrdmwr { | 218 | struct __pollrdmwr { |
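These minidump template structures mirror what the firmware writes, so on big-endian hosts the sub-u32 members are declared in reverse order to keep each field at the right byte offset after the driver's 32-bit endian conversion. A compact sketch of the idiom with a hypothetical structure and a size check:

#include <asm/byteorder.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/types.h>

/* Hypothetical template entry: one u32 split into byte-sized fields,
 * declared in reverse order under big endian so the byte positions
 * match the (word-swapped) firmware image.
 */
struct template_entry {
	u32 addr;
#if defined(__LITTLE_ENDIAN)
	u8 stride;
	u8 rsvd[2];
	u8 flags;
#else
	u8 flags;
	u8 rsvd[2];
	u8 stride;
#endif
	u32 size;
} __packed;

static inline void template_entry_check(void)
{
	BUILD_BUG_ON(sizeof(struct template_entry) != 12);
}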
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index f5786d5792df..59a721fba018 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -280,6 +280,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj, | |||
280 | if (ret != 0) | 280 | if (ret != 0) |
281 | return ret; | 281 | return ret; |
282 | qlcnic_read_crb(adapter, buf, offset, size); | 282 | qlcnic_read_crb(adapter, buf, offset, size); |
283 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
283 | 284 | ||
284 | return size; | 285 | return size; |
285 | } | 286 | } |
@@ -296,6 +297,7 @@ static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj, | |||
296 | if (ret != 0) | 297 | if (ret != 0) |
297 | return ret; | 298 | return ret; |
298 | 299 | ||
300 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
299 | qlcnic_write_crb(adapter, buf, offset, size); | 301 | qlcnic_write_crb(adapter, buf, offset, size); |
300 | return size; | 302 | return size; |
301 | } | 303 | } |
@@ -329,6 +331,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj, | |||
329 | return -EIO; | 331 | return -EIO; |
330 | 332 | ||
331 | memcpy(buf, &data, size); | 333 | memcpy(buf, &data, size); |
334 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
332 | 335 | ||
333 | return size; | 336 | return size; |
334 | } | 337 | } |
@@ -346,6 +349,7 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj, | |||
346 | if (ret != 0) | 349 | if (ret != 0) |
347 | return ret; | 350 | return ret; |
348 | 351 | ||
352 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
349 | memcpy(&data, buf, size); | 353 | memcpy(&data, buf, size); |
350 | 354 | ||
351 | if (qlcnic_pci_mem_write_2M(adapter, offset, data)) | 355 | if (qlcnic_pci_mem_write_2M(adapter, offset, data)) |
@@ -412,6 +416,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, | |||
412 | if (rem) | 416 | if (rem) |
413 | return QL_STATUS_INVALID_PARAM; | 417 | return QL_STATUS_INVALID_PARAM; |
414 | 418 | ||
419 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
415 | pm_cfg = (struct qlcnic_pm_func_cfg *)buf; | 420 | pm_cfg = (struct qlcnic_pm_func_cfg *)buf; |
416 | ret = validate_pm_config(adapter, pm_cfg, count); | 421 | ret = validate_pm_config(adapter, pm_cfg, count); |
417 | 422 | ||
@@ -474,6 +479,7 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp, | |||
474 | pm_cfg[pci_func].dest_npar = 0; | 479 | pm_cfg[pci_func].dest_npar = 0; |
475 | pm_cfg[pci_func].pci_func = i; | 480 | pm_cfg[pci_func].pci_func = i; |
476 | } | 481 | } |
482 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
477 | return size; | 483 | return size; |
478 | } | 484 | } |
479 | 485 | ||
@@ -555,6 +561,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, | |||
555 | if (rem) | 561 | if (rem) |
556 | return QL_STATUS_INVALID_PARAM; | 562 | return QL_STATUS_INVALID_PARAM; |
557 | 563 | ||
564 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
558 | esw_cfg = (struct qlcnic_esw_func_cfg *)buf; | 565 | esw_cfg = (struct qlcnic_esw_func_cfg *)buf; |
559 | ret = validate_esw_config(adapter, esw_cfg, count); | 566 | ret = validate_esw_config(adapter, esw_cfg, count); |
560 | if (ret) | 567 | if (ret) |
@@ -649,6 +656,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file, | |||
649 | if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func])) | 656 | if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func])) |
650 | return QL_STATUS_INVALID_PARAM; | 657 | return QL_STATUS_INVALID_PARAM; |
651 | } | 658 | } |
659 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
652 | return size; | 660 | return size; |
653 | } | 661 | } |
654 | 662 | ||
@@ -688,6 +696,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file, | |||
688 | if (rem) | 696 | if (rem) |
689 | return QL_STATUS_INVALID_PARAM; | 697 | return QL_STATUS_INVALID_PARAM; |
690 | 698 | ||
699 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
691 | np_cfg = (struct qlcnic_npar_func_cfg *)buf; | 700 | np_cfg = (struct qlcnic_npar_func_cfg *)buf; |
692 | ret = validate_npar_config(adapter, np_cfg, count); | 701 | ret = validate_npar_config(adapter, np_cfg, count); |
693 | if (ret) | 702 | if (ret) |
@@ -759,6 +768,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file, | |||
759 | np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques; | 768 | np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques; |
760 | np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques; | 769 | np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques; |
761 | } | 770 | } |
771 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
762 | return size; | 772 | return size; |
763 | } | 773 | } |
764 | 774 | ||
@@ -916,6 +926,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file, | |||
916 | 926 | ||
917 | pci_cfg = (struct qlcnic_pci_func_cfg *)buf; | 927 | pci_cfg = (struct qlcnic_pci_func_cfg *)buf; |
918 | count = size / sizeof(struct qlcnic_pci_func_cfg); | 928 | count = size / sizeof(struct qlcnic_pci_func_cfg); |
929 | qlcnic_swap32_buffer((u32 *)pci_info, size / sizeof(u32)); | ||
919 | for (i = 0; i < count; i++) { | 930 | for (i = 0; i < count; i++) { |
920 | pci_cfg[i].pci_func = pci_info[i].id; | 931 | pci_cfg[i].pci_func = pci_info[i].id; |
921 | pci_cfg[i].func_type = pci_info[i].type; | 932 | pci_cfg[i].func_type = pci_info[i].type; |
@@ -969,6 +980,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp, | |||
969 | } | 980 | } |
970 | 981 | ||
971 | qlcnic_83xx_unlock_flash(adapter); | 982 | qlcnic_83xx_unlock_flash(adapter); |
983 | qlcnic_swap32_buffer((u32 *)p_read_buf, count); | ||
972 | memcpy(buf, p_read_buf, size); | 984 | memcpy(buf, p_read_buf, size); |
973 | kfree(p_read_buf); | 985 | kfree(p_read_buf); |
974 | 986 | ||
@@ -986,9 +998,10 @@ static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter, | |||
986 | if (!p_cache) | 998 | if (!p_cache) |
987 | return -ENOMEM; | 999 | return -ENOMEM; |
988 | 1000 | ||
1001 | count = size / sizeof(u32); | ||
1002 | qlcnic_swap32_buffer((u32 *)buf, count); | ||
989 | memcpy(p_cache, buf, size); | 1003 | memcpy(p_cache, buf, size); |
990 | p_src = p_cache; | 1004 | p_src = p_cache; |
991 | count = size / sizeof(u32); | ||
992 | 1005 | ||
993 | if (qlcnic_83xx_lock_flash(adapter) != 0) { | 1006 | if (qlcnic_83xx_lock_flash(adapter) != 0) { |
994 | kfree(p_cache); | 1007 | kfree(p_cache); |
@@ -1053,6 +1066,7 @@ static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter, | |||
1053 | if (!p_cache) | 1066 | if (!p_cache) |
1054 | return -ENOMEM; | 1067 | return -ENOMEM; |
1055 | 1068 | ||
1069 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
1056 | memcpy(p_cache, buf, size); | 1070 | memcpy(p_cache, buf, size); |
1057 | p_src = p_cache; | 1071 | p_src = p_cache; |
1058 | count = size / sizeof(u32); | 1072 | count = size / sizeof(u32); |
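The sysfs hunks above bracket every raw buffer crossing the user/firmware boundary with qlcnic_swap32_buffer(). Its body is not part of this diff; the call sites only require "swap every 32-bit word on big-endian, no-op on little-endian", which a helper along these lines would provide (a sketch, not the driver's verbatim definition):

    #include <asm/byteorder.h>
    #include <linux/swab.h>
    #include <linux/types.h>

    /* Word-swap a buffer of 32-bit values on big-endian hosts; compiles
     * away entirely on little-endian builds.
     */
    static inline void qlcnic_swap32_buffer(u32 *buffer, int count)
    {
    #if defined(__BIG_ENDIAN)
            int i;

            for (i = 0; i < count; i++)
                    buffer[i] = swab32(buffer[i]);
    #endif
    }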
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 60e4ca01ccbb..a96955597755 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -739,7 +739,10 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], | |||
739 | struct macvlan_dev *vlan = netdev_priv(dev); | 739 | struct macvlan_dev *vlan = netdev_priv(dev); |
740 | int err = -EINVAL; | 740 | int err = -EINVAL; |
741 | 741 | ||
742 | if (!vlan->port->passthru) | 742 | /* Support unicast filter only on passthru devices. |
743 | * Multicast filter should be allowed on all devices. | ||
744 | */ | ||
745 | if (!vlan->port->passthru && is_unicast_ether_addr(addr)) | ||
743 | return -EOPNOTSUPP; | 746 | return -EOPNOTSUPP; |
744 | 747 | ||
745 | if (flags & NLM_F_REPLACE) | 748 | if (flags & NLM_F_REPLACE) |
@@ -760,7 +763,10 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], | |||
760 | struct macvlan_dev *vlan = netdev_priv(dev); | 763 | struct macvlan_dev *vlan = netdev_priv(dev); |
761 | int err = -EINVAL; | 764 | int err = -EINVAL; |
762 | 765 | ||
763 | if (!vlan->port->passthru) | 766 | /* Support unicast filter only on passthru devices. |
767 | * Multicast filter should be allowed on all devices. | ||
768 | */ | ||
769 | if (!vlan->port->passthru && is_unicast_ether_addr(addr)) | ||
764 | return -EOPNOTSUPP; | 770 | return -EOPNOTSUPP; |
765 | 771 | ||
766 | if (is_unicast_ether_addr(addr)) | 772 | if (is_unicast_ether_addr(addr)) |
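Both FDB hunks above narrow the passthru restriction to unicast addresses. Stated as a standalone predicate (hypothetical helper name, a plain bool in place of the driver's port structure):

    #include <linux/etherdevice.h>

    /* Multicast FDB entries are accepted on every macvlan; unicast entries
     * are accepted only on passthru devices.
     */
    static bool fdb_addr_allowed(bool passthru, const unsigned char *addr)
    {
            if (is_multicast_ether_addr(addr))
                    return true;

            return passthru;
    }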
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 526b94cea569..fdce1ea28790 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c | |||
@@ -157,6 +157,23 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev) | |||
157 | return bcm7xxx_28nm_afe_config_init(phydev); | 157 | return bcm7xxx_28nm_afe_config_init(phydev); |
158 | } | 158 | } |
159 | 159 | ||
160 | static int bcm7xxx_28nm_resume(struct phy_device *phydev) | ||
161 | { | ||
162 | int ret; | ||
163 | |||
164 | /* Re-apply workarounds coming out of suspend/resume */ | ||
165 | ret = bcm7xxx_28nm_config_init(phydev); | ||
166 | if (ret) | ||
167 | return ret; | ||
168 | |||
169 | /* 28nm Gigabit PHYs come out of reset without any half-duplex | ||
170 | * or "hub" compliant advertised mode, fix that. This does not | ||
171 | * cause any problems with the PHY library since genphy_config_aneg() | ||
172 | * gracefully handles auto-negotiated and forced modes. | ||
173 | */ | ||
174 | return genphy_config_aneg(phydev); | ||
175 | } | ||
176 | |||
160 | static int phy_set_clr_bits(struct phy_device *dev, int location, | 177 | static int phy_set_clr_bits(struct phy_device *dev, int location, |
161 | int set_mask, int clr_mask) | 178 | int set_mask, int clr_mask) |
162 | { | 179 | { |
@@ -212,7 +229,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev) | |||
212 | } | 229 | } |
213 | 230 | ||
214 | /* Workaround for putting the PHY in IDDQ mode, required | 231 | /* Workaround for putting the PHY in IDDQ mode, required |
215 | * for all BCM7XXX PHYs | 232 | * for all BCM7XXX 40nm and 65nm PHYs |
216 | */ | 233 | */ |
217 | static int bcm7xxx_suspend(struct phy_device *phydev) | 234 | static int bcm7xxx_suspend(struct phy_device *phydev) |
218 | { | 235 | { |
@@ -257,8 +274,7 @@ static struct phy_driver bcm7xxx_driver[] = { | |||
257 | .config_init = bcm7xxx_28nm_afe_config_init, | 274 | .config_init = bcm7xxx_28nm_afe_config_init, |
258 | .config_aneg = genphy_config_aneg, | 275 | .config_aneg = genphy_config_aneg, |
259 | .read_status = genphy_read_status, | 276 | .read_status = genphy_read_status, |
260 | .suspend = bcm7xxx_suspend, | 277 | .resume = bcm7xxx_28nm_resume, |
261 | .resume = bcm7xxx_28nm_afe_config_init, | ||
262 | .driver = { .owner = THIS_MODULE }, | 278 | .driver = { .owner = THIS_MODULE }, |
263 | }, { | 279 | }, { |
264 | .phy_id = PHY_ID_BCM7439, | 280 | .phy_id = PHY_ID_BCM7439, |
@@ -270,8 +286,7 @@ static struct phy_driver bcm7xxx_driver[] = { | |||
270 | .config_init = bcm7xxx_28nm_afe_config_init, | 286 | .config_init = bcm7xxx_28nm_afe_config_init, |
271 | .config_aneg = genphy_config_aneg, | 287 | .config_aneg = genphy_config_aneg, |
272 | .read_status = genphy_read_status, | 288 | .read_status = genphy_read_status, |
273 | .suspend = bcm7xxx_suspend, | 289 | .resume = bcm7xxx_28nm_resume, |
274 | .resume = bcm7xxx_28nm_afe_config_init, | ||
275 | .driver = { .owner = THIS_MODULE }, | 290 | .driver = { .owner = THIS_MODULE }, |
276 | }, { | 291 | }, { |
277 | .phy_id = PHY_ID_BCM7445, | 292 | .phy_id = PHY_ID_BCM7445, |
@@ -283,21 +298,7 @@ static struct phy_driver bcm7xxx_driver[] = { | |||
283 | .config_init = bcm7xxx_28nm_config_init, | 298 | .config_init = bcm7xxx_28nm_config_init, |
284 | .config_aneg = genphy_config_aneg, | 299 | .config_aneg = genphy_config_aneg, |
285 | .read_status = genphy_read_status, | 300 | .read_status = genphy_read_status, |
286 | .suspend = bcm7xxx_suspend, | 301 | .resume = bcm7xxx_28nm_afe_config_init, |
287 | .resume = bcm7xxx_28nm_config_init, | ||
288 | .driver = { .owner = THIS_MODULE }, | ||
289 | }, { | ||
290 | .name = "Broadcom BCM7XXX 28nm", | ||
291 | .phy_id = PHY_ID_BCM7XXX_28, | ||
292 | .phy_id_mask = PHY_BCM_OUI_MASK, | ||
293 | .features = PHY_GBIT_FEATURES | | ||
294 | SUPPORTED_Pause | SUPPORTED_Asym_Pause, | ||
295 | .flags = PHY_IS_INTERNAL, | ||
296 | .config_init = bcm7xxx_28nm_config_init, | ||
297 | .config_aneg = genphy_config_aneg, | ||
298 | .read_status = genphy_read_status, | ||
299 | .suspend = bcm7xxx_suspend, | ||
300 | .resume = bcm7xxx_28nm_config_init, | ||
301 | .driver = { .owner = THIS_MODULE }, | 302 | .driver = { .owner = THIS_MODULE }, |
302 | }, { | 303 | }, { |
303 | .phy_id = PHY_BCM_OUI_4, | 304 | .phy_id = PHY_BCM_OUI_4, |
@@ -331,7 +332,6 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { | |||
331 | { PHY_ID_BCM7366, 0xfffffff0, }, | 332 | { PHY_ID_BCM7366, 0xfffffff0, }, |
332 | { PHY_ID_BCM7439, 0xfffffff0, }, | 333 | { PHY_ID_BCM7439, 0xfffffff0, }, |
333 | { PHY_ID_BCM7445, 0xfffffff0, }, | 334 | { PHY_ID_BCM7445, 0xfffffff0, }, |
334 | { PHY_ID_BCM7XXX_28, 0xfffffc00 }, | ||
335 | { PHY_BCM_OUI_4, 0xffff0000 }, | 335 | { PHY_BCM_OUI_4, 0xffff0000 }, |
336 | { PHY_BCM_OUI_5, 0xffffff00 }, | 336 | { PHY_BCM_OUI_5, 0xffffff00 }, |
337 | { } | 337 | { } |
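One way to read the removal of the 0x600d8400/0xfffffc00 wildcard entry: phylib matches a driver when (phy_id & mask) == (driver_id & mask), so the wide OUI-level mask also claims the BCM7366/7439/7445 IDs that now carry dedicated entries with their own resume handling. Illustrative user-space check, IDs copied from brcmphy.h:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint32_t ids[] = { 0x600d8490, 0x600d8480, 0x600d8510 };
            const uint32_t wildcard_id = 0x600d8400, mask = 0xfffffc00;
            int i;

            for (i = 0; i < 3; i++)
                    printf("%#x matched by wildcard: %s\n", ids[i],
                           ((ids[i] & mask) == (wildcard_id & mask)) ? "yes" : "no");
            return 0;
    }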
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 180c49479c42..a4b08198fb9f 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c | |||
@@ -43,6 +43,22 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev) | |||
43 | 43 | ||
44 | static int smsc_phy_config_init(struct phy_device *phydev) | 44 | static int smsc_phy_config_init(struct phy_device *phydev) |
45 | { | 45 | { |
46 | int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); | ||
47 | |||
48 | if (rc < 0) | ||
49 | return rc; | ||
50 | |||
51 | /* Enable energy detect mode for this SMSC Transceivers */ | ||
52 | rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, | ||
53 | rc | MII_LAN83C185_EDPWRDOWN); | ||
54 | if (rc < 0) | ||
55 | return rc; | ||
56 | |||
57 | return smsc_phy_ack_interrupt(phydev); | ||
58 | } | ||
59 | |||
60 | static int smsc_phy_reset(struct phy_device *phydev) | ||
61 | { | ||
46 | int rc = phy_read(phydev, MII_LAN83C185_SPECIAL_MODES); | 62 | int rc = phy_read(phydev, MII_LAN83C185_SPECIAL_MODES); |
47 | if (rc < 0) | 63 | if (rc < 0) |
48 | return rc; | 64 | return rc; |
@@ -66,18 +82,7 @@ static int smsc_phy_config_init(struct phy_device *phydev) | |||
66 | rc = phy_read(phydev, MII_BMCR); | 82 | rc = phy_read(phydev, MII_BMCR); |
67 | } while (rc & BMCR_RESET); | 83 | } while (rc & BMCR_RESET); |
68 | } | 84 | } |
69 | 85 | return 0; | |
70 | rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); | ||
71 | if (rc < 0) | ||
72 | return rc; | ||
73 | |||
74 | /* Enable energy detect mode for this SMSC Transceivers */ | ||
75 | rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, | ||
76 | rc | MII_LAN83C185_EDPWRDOWN); | ||
77 | if (rc < 0) | ||
78 | return rc; | ||
79 | |||
80 | return smsc_phy_ack_interrupt (phydev); | ||
81 | } | 86 | } |
82 | 87 | ||
83 | static int lan911x_config_init(struct phy_device *phydev) | 88 | static int lan911x_config_init(struct phy_device *phydev) |
@@ -142,6 +147,7 @@ static struct phy_driver smsc_phy_driver[] = { | |||
142 | .config_aneg = genphy_config_aneg, | 147 | .config_aneg = genphy_config_aneg, |
143 | .read_status = genphy_read_status, | 148 | .read_status = genphy_read_status, |
144 | .config_init = smsc_phy_config_init, | 149 | .config_init = smsc_phy_config_init, |
150 | .soft_reset = smsc_phy_reset, | ||
145 | 151 | ||
146 | /* IRQ related */ | 152 | /* IRQ related */ |
147 | .ack_interrupt = smsc_phy_ack_interrupt, | 153 | .ack_interrupt = smsc_phy_ack_interrupt, |
@@ -164,6 +170,7 @@ static struct phy_driver smsc_phy_driver[] = { | |||
164 | .config_aneg = genphy_config_aneg, | 170 | .config_aneg = genphy_config_aneg, |
165 | .read_status = genphy_read_status, | 171 | .read_status = genphy_read_status, |
166 | .config_init = smsc_phy_config_init, | 172 | .config_init = smsc_phy_config_init, |
173 | .soft_reset = smsc_phy_reset, | ||
167 | 174 | ||
168 | /* IRQ related */ | 175 | /* IRQ related */ |
169 | .ack_interrupt = smsc_phy_ack_interrupt, | 176 | .ack_interrupt = smsc_phy_ack_interrupt, |
@@ -186,6 +193,7 @@ static struct phy_driver smsc_phy_driver[] = { | |||
186 | .config_aneg = genphy_config_aneg, | 193 | .config_aneg = genphy_config_aneg, |
187 | .read_status = genphy_read_status, | 194 | .read_status = genphy_read_status, |
188 | .config_init = smsc_phy_config_init, | 195 | .config_init = smsc_phy_config_init, |
196 | .soft_reset = smsc_phy_reset, | ||
189 | 197 | ||
190 | /* IRQ related */ | 198 | /* IRQ related */ |
191 | .ack_interrupt = smsc_phy_ack_interrupt, | 199 | .ack_interrupt = smsc_phy_ack_interrupt, |
@@ -230,6 +238,7 @@ static struct phy_driver smsc_phy_driver[] = { | |||
230 | .config_aneg = genphy_config_aneg, | 238 | .config_aneg = genphy_config_aneg, |
231 | .read_status = lan87xx_read_status, | 239 | .read_status = lan87xx_read_status, |
232 | .config_init = smsc_phy_config_init, | 240 | .config_init = smsc_phy_config_init, |
241 | .soft_reset = smsc_phy_reset, | ||
233 | 242 | ||
234 | /* IRQ related */ | 243 | /* IRQ related */ |
235 | .ack_interrupt = smsc_phy_ack_interrupt, | 244 | .ack_interrupt = smsc_phy_ack_interrupt, |
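The smsc split relies on the PHY core invoking the new soft_reset hook before config_init during initialization, so the BMCR reset/poll loop moves into smsc_phy_reset() while the energy-detect enable stays in smsc_phy_config_init(). Roughly (a simplified sketch of the core's init ordering, not the exact phy_init_hw() body):

    #include <linux/phy.h>

    static int phy_init_hw_sketch(struct phy_device *phydev)
    {
            int ret = 0;

            if (phydev->drv->soft_reset)
                    ret = phydev->drv->soft_reset(phydev);  /* smsc_phy_reset() */
            if (ret < 0)
                    return ret;

            if (phydev->drv->config_init)
                    ret = phydev->drv->config_init(phydev); /* smsc_phy_config_init() */

            return ret;
    }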
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 6f76277baf39..61219b9b3445 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h | |||
@@ -16,7 +16,6 @@ | |||
16 | #define PHY_ID_BCM7366 0x600d8490 | 16 | #define PHY_ID_BCM7366 0x600d8490 |
17 | #define PHY_ID_BCM7439 0x600d8480 | 17 | #define PHY_ID_BCM7439 0x600d8480 |
18 | #define PHY_ID_BCM7445 0x600d8510 | 18 | #define PHY_ID_BCM7445 0x600d8510 |
19 | #define PHY_ID_BCM7XXX_28 0x600d8400 | ||
20 | 19 | ||
21 | #define PHY_BCM_OUI_MASK 0xfffffc00 | 20 | #define PHY_BCM_OUI_MASK 0xfffffc00 |
22 | #define PHY_BCM_OUI_1 0x00206000 | 21 | #define PHY_BCM_OUI_1 0x00206000 |
diff --git a/net/atm/lec.c b/net/atm/lec.c index e4853b50cf40..4b98f897044a 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c | |||
@@ -410,9 +410,11 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) | |||
410 | priv->lane2_ops = NULL; | 410 | priv->lane2_ops = NULL; |
411 | if (priv->lane_version > 1) | 411 | if (priv->lane_version > 1) |
412 | priv->lane2_ops = &lane2_ops; | 412 | priv->lane2_ops = &lane2_ops; |
413 | rtnl_lock(); | ||
413 | if (dev_set_mtu(dev, mesg->content.config.mtu)) | 414 | if (dev_set_mtu(dev, mesg->content.config.mtu)) |
414 | pr_info("%s: change_mtu to %d failed\n", | 415 | pr_info("%s: change_mtu to %d failed\n", |
415 | dev->name, mesg->content.config.mtu); | 416 | dev->name, mesg->content.config.mtu); |
417 | rtnl_unlock(); | ||
416 | priv->is_proxy = mesg->content.config.is_proxy; | 418 | priv->is_proxy = mesg->content.config.is_proxy; |
417 | break; | 419 | break; |
418 | case l_flush_tran_id: | 420 | case l_flush_tran_id: |
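The lec.c hunk wraps dev_set_mtu() in the RTNL mutex, which that helper (like most device-mutating core calls that fire netdev notifiers) expects its caller to hold. The pattern in isolation (sketch only):

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    static void set_mtu_under_rtnl(struct net_device *dev, int mtu)
    {
            rtnl_lock();
            if (dev_set_mtu(dev, mtu))
                    netdev_info(dev, "MTU change to %d failed\n", mtu);
            rtnl_unlock();
    }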
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 52c43f904220..fc1835c6bb40 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c | |||
@@ -188,7 +188,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node, | |||
188 | 188 | ||
189 | /* Reached the end of the list, so insert after 'frag_entry_last'. */ | 189 | /* Reached the end of the list, so insert after 'frag_entry_last'. */ |
190 | if (likely(frag_entry_last)) { | 190 | if (likely(frag_entry_last)) { |
191 | hlist_add_behind(&frag_entry_last->list, &frag_entry_new->list); | 191 | hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list); |
192 | chain->size += skb->len - hdr_size; | 192 | chain->size += skb->len - hdr_size; |
193 | chain->timestamp = jiffies; | 193 | chain->timestamp = jiffies; |
194 | ret = true; | 194 | ret = true; |
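The batman-adv one-liner swaps the arguments to match hlist_add_behind()'s prototype, which takes the node being inserted first and the existing anchor second (the 3.17-era helper that replaced hlist_add_after() with the argument order reversed). Sketch with hypothetical fragment types:

    #include <linux/list.h>
    #include <linux/types.h>

    struct frag_item {
            struct hlist_node list;
            u16 seqno;
    };

    /* Insert new_entry directly behind last_entry in the chain; passing
     * the arguments the other way around corrupts the list instead.
     */
    static void frag_insert_behind(struct frag_item *new_entry,
                                   struct frag_item *last_entry)
    {
            hlist_add_behind(&new_entry->list, &last_entry->list);
    }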
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index cb4459bd1d29..76b7f5ee8f4c 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -643,7 +643,7 @@ static int fib6_commit_metrics(struct dst_entry *dst, | |||
643 | if (dst->flags & DST_HOST) { | 643 | if (dst->flags & DST_HOST) { |
644 | mp = dst_metrics_write_ptr(dst); | 644 | mp = dst_metrics_write_ptr(dst); |
645 | } else { | 645 | } else { |
646 | mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); | 646 | mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC); |
647 | if (!mp) | 647 | if (!mp) |
648 | return -ENOMEM; | 648 | return -ENOMEM; |
649 | dst_init_metrics(dst, mp, 0); | 649 | dst_init_metrics(dst, mp, 0); |
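The ip6_fib one-liner is a sleeping-context fix: fib6_commit_metrics() runs with the FIB table write lock held, a non-sleeping context, so the allocation must be GFP_ATOMIC rather than GFP_KERNEL. The shape of the rule, assuming that calling context:

    #include <linux/rtnetlink.h>   /* RTAX_MAX */
    #include <linux/slab.h>

    /* May be called with a spinlock or write lock held: never sleep here. */
    static u32 *alloc_route_metrics_atomic(void)
    {
            return kzalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
    }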
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index fe5cda0deb39..5231652a95d9 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
@@ -42,6 +42,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, | |||
42 | 42 | ||
43 | static int make_writable(struct sk_buff *skb, int write_len) | 43 | static int make_writable(struct sk_buff *skb, int write_len) |
44 | { | 44 | { |
45 | if (!pskb_may_pull(skb, write_len)) | ||
46 | return -ENOMEM; | ||
47 | |||
45 | if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) | 48 | if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) |
46 | return 0; | 49 | return 0; |
47 | 50 | ||
@@ -70,6 +73,8 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) | |||
70 | 73 | ||
71 | vlan_set_encap_proto(skb, vhdr); | 74 | vlan_set_encap_proto(skb, vhdr); |
72 | skb->mac_header += VLAN_HLEN; | 75 | skb->mac_header += VLAN_HLEN; |
76 | if (skb_network_offset(skb) < ETH_HLEN) | ||
77 | skb_set_network_header(skb, ETH_HLEN); | ||
73 | skb_reset_mac_len(skb); | 78 | skb_reset_mac_len(skb); |
74 | 79 | ||
75 | return 0; | 80 | return 0; |
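The actions.c hunks harden make_writable(), so the bytes about to be rewritten are actually pulled into the linear area before being made writable, and keep the network header sane after a VLAN tag is popped. A hedged sketch of how a header-rewriting action depends on the first guarantee (hypothetical helper in the same file, checksum handling omitted):

    #include <linux/ip.h>
    #include <linux/skbuff.h>

    static int set_ipv4_tos_sketch(struct sk_buff *skb, u8 tos)
    {
            int err;

            /* Pull the IP header into the linear area and make it writable
             * before touching it; make_writable() now does both.
             */
            err = make_writable(skb, skb_network_offset(skb) + sizeof(struct iphdr));
            if (unlikely(err))
                    return err;

            ip_hdr(skb)->tos = tos;  /* real code must also update the checksum */
            return 0;
    }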
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 8d9f8042705a..93896d2092f6 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -632,6 +632,7 @@ static void init_prb_bdqc(struct packet_sock *po, | |||
632 | p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); | 632 | p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); |
633 | p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; | 633 | p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; |
634 | 634 | ||
635 | p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); | ||
635 | prb_init_ft_ops(p1, req_u); | 636 | prb_init_ft_ops(p1, req_u); |
636 | prb_setup_retire_blk_timer(po, tx_ring); | 637 | prb_setup_retire_blk_timer(po, tx_ring); |
637 | prb_open_block(p1, pbd); | 638 | prb_open_block(p1, pbd); |
@@ -1942,6 +1943,18 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
1942 | if ((int)snaplen < 0) | 1943 | if ((int)snaplen < 0) |
1943 | snaplen = 0; | 1944 | snaplen = 0; |
1944 | } | 1945 | } |
1946 | } else if (unlikely(macoff + snaplen > | ||
1947 | GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { | ||
1948 | u32 nval; | ||
1949 | |||
1950 | nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; | ||
1951 | pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", | ||
1952 | snaplen, nval, macoff); | ||
1953 | snaplen = nval; | ||
1954 | if (unlikely((int)snaplen < 0)) { | ||
1955 | snaplen = 0; | ||
1956 | macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; | ||
1957 | } | ||
1945 | } | 1958 | } |
1946 | spin_lock(&sk->sk_receive_queue.lock); | 1959 | spin_lock(&sk->sk_receive_queue.lock); |
1947 | h.raw = packet_current_rx_frame(po, skb, | 1960 | h.raw = packet_current_rx_frame(po, skb, |
@@ -3783,6 +3796,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
3783 | goto out; | 3796 | goto out; |
3784 | if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) | 3797 | if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) |
3785 | goto out; | 3798 | goto out; |
3799 | if (po->tp_version >= TPACKET_V3 && | ||
3800 | (int)(req->tp_block_size - | ||
3801 | BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) | ||
3802 | goto out; | ||
3786 | if (unlikely(req->tp_frame_size < po->tp_hdrlen + | 3803 | if (unlikely(req->tp_frame_size < po->tp_hdrlen + |
3787 | po->tp_reserve)) | 3804 | po->tp_reserve)) |
3788 | goto out; | 3805 | goto out; |
diff --git a/net/packet/internal.h b/net/packet/internal.h index eb9580a6b25f..cdddf6a30399 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h | |||
@@ -29,6 +29,7 @@ struct tpacket_kbdq_core { | |||
29 | char *pkblk_start; | 29 | char *pkblk_start; |
30 | char *pkblk_end; | 30 | char *pkblk_end; |
31 | int kblk_size; | 31 | int kblk_size; |
32 | unsigned int max_frame_len; | ||
32 | unsigned int knum_blocks; | 33 | unsigned int knum_blocks; |
33 | uint64_t knxt_seq_num; | 34 | uint64_t knxt_seq_num; |
34 | char *prev; | 35 | char *prev; |
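The af_packet changes record, per TPACKET_V3 ring, the largest frame a block can hold (block size minus the block header and tp_sizeof_priv area), clamp snaplen against it in tpacket_rcv(), and reject ring setups where that overhead already exceeds the block. Worked example with illustrative numbers (the real overhead comes from BLK_PLUS_PRIV(), not the constant used here):

    #include <stdio.h>

    int main(void)
    {
            unsigned int blk_size = 4096;       /* tp_block_size                 */
            unsigned int blk_plus_priv = 256;   /* hdr + tp_sizeof_priv, assumed */
            unsigned int max_frame_len = blk_size - blk_plus_priv;   /* 3840 */
            unsigned int macoff = 96;           /* offset of the MAC header      */
            unsigned int snaplen = 9000;        /* jumbo capture                 */

            if (macoff + snaplen > max_frame_len)
                    snaplen = max_frame_len - macoff;   /* clamped to 3744 */

            printf("snaplen clamped to %u\n", snaplen);
            return 0;
    }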
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index ead526467cca..762a04bb8f6d 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -159,7 +159,6 @@ struct cbq_sched_data { | |||
159 | struct cbq_class *tx_borrowed; | 159 | struct cbq_class *tx_borrowed; |
160 | int tx_len; | 160 | int tx_len; |
161 | psched_time_t now; /* Cached timestamp */ | 161 | psched_time_t now; /* Cached timestamp */ |
162 | psched_time_t now_rt; /* Cached real time */ | ||
163 | unsigned int pmask; | 162 | unsigned int pmask; |
164 | 163 | ||
165 | struct hrtimer delay_timer; | 164 | struct hrtimer delay_timer; |
@@ -353,12 +352,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) | |||
353 | int toplevel = q->toplevel; | 352 | int toplevel = q->toplevel; |
354 | 353 | ||
355 | if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) { | 354 | if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) { |
356 | psched_time_t now; | 355 | psched_time_t now = psched_get_time(); |
357 | psched_tdiff_t incr; | ||
358 | |||
359 | now = psched_get_time(); | ||
360 | incr = now - q->now_rt; | ||
361 | now = q->now + incr; | ||
362 | 356 | ||
363 | do { | 357 | do { |
364 | if (cl->undertime < now) { | 358 | if (cl->undertime < now) { |
@@ -700,8 +694,13 @@ cbq_update(struct cbq_sched_data *q) | |||
700 | struct cbq_class *this = q->tx_class; | 694 | struct cbq_class *this = q->tx_class; |
701 | struct cbq_class *cl = this; | 695 | struct cbq_class *cl = this; |
702 | int len = q->tx_len; | 696 | int len = q->tx_len; |
697 | psched_time_t now; | ||
703 | 698 | ||
704 | q->tx_class = NULL; | 699 | q->tx_class = NULL; |
700 | /* Time integrator. We calculate EOS time | ||
701 | * by adding expected packet transmission time. | ||
702 | */ | ||
703 | now = q->now + L2T(&q->link, len); | ||
705 | 704 | ||
706 | for ( ; cl; cl = cl->share) { | 705 | for ( ; cl; cl = cl->share) { |
707 | long avgidle = cl->avgidle; | 706 | long avgidle = cl->avgidle; |
@@ -717,7 +716,7 @@ cbq_update(struct cbq_sched_data *q) | |||
717 | * idle = (now - last) - last_pktlen/rate | 716 | * idle = (now - last) - last_pktlen/rate |
718 | */ | 717 | */ |
719 | 718 | ||
720 | idle = q->now - cl->last; | 719 | idle = now - cl->last; |
721 | if ((unsigned long)idle > 128*1024*1024) { | 720 | if ((unsigned long)idle > 128*1024*1024) { |
722 | avgidle = cl->maxidle; | 721 | avgidle = cl->maxidle; |
723 | } else { | 722 | } else { |
@@ -761,7 +760,7 @@ cbq_update(struct cbq_sched_data *q) | |||
761 | idle -= L2T(&q->link, len); | 760 | idle -= L2T(&q->link, len); |
762 | idle += L2T(cl, len); | 761 | idle += L2T(cl, len); |
763 | 762 | ||
764 | cl->undertime = q->now + idle; | 763 | cl->undertime = now + idle; |
765 | } else { | 764 | } else { |
766 | /* Underlimit */ | 765 | /* Underlimit */ |
767 | 766 | ||
@@ -771,7 +770,8 @@ cbq_update(struct cbq_sched_data *q) | |||
771 | else | 770 | else |
772 | cl->avgidle = avgidle; | 771 | cl->avgidle = avgidle; |
773 | } | 772 | } |
774 | cl->last = q->now; | 773 | if ((s64)(now - cl->last) > 0) |
774 | cl->last = now; | ||
775 | } | 775 | } |
776 | 776 | ||
777 | cbq_update_toplevel(q, this, q->tx_borrowed); | 777 | cbq_update_toplevel(q, this, q->tx_borrowed); |
@@ -943,31 +943,13 @@ cbq_dequeue(struct Qdisc *sch) | |||
943 | struct sk_buff *skb; | 943 | struct sk_buff *skb; |
944 | struct cbq_sched_data *q = qdisc_priv(sch); | 944 | struct cbq_sched_data *q = qdisc_priv(sch); |
945 | psched_time_t now; | 945 | psched_time_t now; |
946 | psched_tdiff_t incr; | ||
947 | 946 | ||
948 | now = psched_get_time(); | 947 | now = psched_get_time(); |
949 | incr = now - q->now_rt; | 948 | |
950 | 949 | if (q->tx_class) | |
951 | if (q->tx_class) { | ||
952 | psched_tdiff_t incr2; | ||
953 | /* Time integrator. We calculate EOS time | ||
954 | * by adding expected packet transmission time. | ||
955 | * If real time is greater, we warp artificial clock, | ||
956 | * so that: | ||
957 | * | ||
958 | * cbq_time = max(real_time, work); | ||
959 | */ | ||
960 | incr2 = L2T(&q->link, q->tx_len); | ||
961 | q->now += incr2; | ||
962 | cbq_update(q); | 950 | cbq_update(q); |
963 | if ((incr -= incr2) < 0) | 951 | |
964 | incr = 0; | 952 | q->now = now; |
965 | q->now += incr; | ||
966 | } else { | ||
967 | if (now > q->now) | ||
968 | q->now = now; | ||
969 | } | ||
970 | q->now_rt = now; | ||
971 | 953 | ||
972 | for (;;) { | 954 | for (;;) { |
973 | q->wd_expires = 0; | 955 | q->wd_expires = 0; |
@@ -1223,7 +1205,6 @@ cbq_reset(struct Qdisc *sch) | |||
1223 | hrtimer_cancel(&q->delay_timer); | 1205 | hrtimer_cancel(&q->delay_timer); |
1224 | q->toplevel = TC_CBQ_MAXLEVEL; | 1206 | q->toplevel = TC_CBQ_MAXLEVEL; |
1225 | q->now = psched_get_time(); | 1207 | q->now = psched_get_time(); |
1226 | q->now_rt = q->now; | ||
1227 | 1208 | ||
1228 | for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++) | 1209 | for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++) |
1229 | q->active[prio] = NULL; | 1210 | q->active[prio] = NULL; |
@@ -1407,7 +1388,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) | |||
1407 | q->delay_timer.function = cbq_undelay; | 1388 | q->delay_timer.function = cbq_undelay; |
1408 | q->toplevel = TC_CBQ_MAXLEVEL; | 1389 | q->toplevel = TC_CBQ_MAXLEVEL; |
1409 | q->now = psched_get_time(); | 1390 | q->now = psched_get_time(); |
1410 | q->now_rt = q->now; | ||
1411 | 1391 | ||
1412 | cbq_link_class(&q->link); | 1392 | cbq_link_class(&q->link); |
1413 | 1393 | ||
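The sch_cbq rework drops the separate artificial clock (now_rt) and computes end-of-send directly as q->now plus the link-level transmission time of the packet just sent; L2T() is, in effect, "how long do len bytes take to serialize at the configured rate". Back-of-the-envelope version with illustrative numbers (the scheduler really uses a pre-computed rate table, not this division):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long rate_bps = 10 * 1000 * 1000ULL;  /* 10 Mbit/s  */
            unsigned int len = 1500;                            /* bytes sent */
            unsigned long long now_us = 1000000;                /* "q->now"   */

            unsigned long long tx_us = (unsigned long long)len * 8 * 1000000 / rate_bps;
            unsigned long long eos_us = now_us + tx_us;

            /* 1500 bytes at 10 Mbit/s occupy the link for 1200 us */
            printf("EOS = %llu us (tx time %llu us)\n", eos_us, tx_us);
            return 0;
    }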
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 06a9ee6b2d3a..a88b8524846e 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -813,6 +813,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, | |||
813 | else { | 813 | else { |
814 | dst_release(transport->dst); | 814 | dst_release(transport->dst); |
815 | transport->dst = NULL; | 815 | transport->dst = NULL; |
816 | ulp_notify = false; | ||
816 | } | 817 | } |
817 | 818 | ||
818 | spc_state = SCTP_ADDR_UNREACHABLE; | 819 | spc_state = SCTP_ADDR_UNREACHABLE; |
@@ -1244,7 +1245,7 @@ static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr, | |||
1244 | { | 1245 | { |
1245 | u8 score_curr, score_best; | 1246 | u8 score_curr, score_best; |
1246 | 1247 | ||
1247 | if (best == NULL) | 1248 | if (best == NULL || curr == best) |
1248 | return curr; | 1249 | return curr; |
1249 | 1250 | ||
1250 | score_curr = sctp_trans_score(curr); | 1251 | score_curr = sctp_trans_score(curr); |
@@ -1355,14 +1356,11 @@ static void sctp_select_active_and_retran_path(struct sctp_association *asoc) | |||
1355 | trans_sec = trans_pri; | 1356 | trans_sec = trans_pri; |
1356 | 1357 | ||
1357 | /* If we failed to find a usable transport, just camp on the | 1358 | /* If we failed to find a usable transport, just camp on the |
1358 | * primary or retran, even if they are inactive, if possible | 1359 | * active or pick a PF iff it's the better choice. |
1359 | * pick a PF iff it's the better choice. | ||
1360 | */ | 1360 | */ |
1361 | if (trans_pri == NULL) { | 1361 | if (trans_pri == NULL) { |
1362 | trans_pri = sctp_trans_elect_best(asoc->peer.primary_path, | 1362 | trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf); |
1363 | asoc->peer.retran_path); | 1363 | trans_sec = trans_pri; |
1364 | trans_pri = sctp_trans_elect_best(trans_pri, trans_pf); | ||
1365 | trans_sec = asoc->peer.primary_path; | ||
1366 | } | 1364 | } |
1367 | 1365 | ||
1368 | /* Set the active and retran transports. */ | 1366 | /* Set the active and retran transports. */ |
diff --git a/net/tipc/port.h b/net/tipc/port.h index 3f93454592b6..3087da39ee47 100644 --- a/net/tipc/port.h +++ b/net/tipc/port.h | |||
@@ -179,9 +179,12 @@ static inline int tipc_port_importance(struct tipc_port *port) | |||
179 | return msg_importance(&port->phdr); | 179 | return msg_importance(&port->phdr); |
180 | } | 180 | } |
181 | 181 | ||
182 | static inline void tipc_port_set_importance(struct tipc_port *port, int imp) | 182 | static inline int tipc_port_set_importance(struct tipc_port *port, int imp) |
183 | { | 183 | { |
184 | if (imp > TIPC_CRITICAL_IMPORTANCE) | ||
185 | return -EINVAL; | ||
184 | msg_set_importance(&port->phdr, (u32)imp); | 186 | msg_set_importance(&port->phdr, (u32)imp); |
187 | return 0; | ||
185 | } | 188 | } |
186 | 189 | ||
187 | #endif | 190 | #endif |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 7d423ee10897..ff8c8118d56e 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1973,7 +1973,7 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt, | |||
1973 | 1973 | ||
1974 | switch (opt) { | 1974 | switch (opt) { |
1975 | case TIPC_IMPORTANCE: | 1975 | case TIPC_IMPORTANCE: |
1976 | tipc_port_set_importance(port, value); | 1976 | res = tipc_port_set_importance(port, value); |
1977 | break; | 1977 | break; |
1978 | case TIPC_SRC_DROPPABLE: | 1978 | case TIPC_SRC_DROPPABLE: |
1979 | if (sock->type != SOCK_STREAM) | 1979 | if (sock->type != SOCK_STREAM) |
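With tipc_port_set_importance() now returning -EINVAL for values above TIPC_CRITICAL_IMPORTANCE and tipc_setsockopt() propagating it, an out-of-range request becomes visible to user space instead of being written into the message header unchecked. Hedged user-space illustration (assumes a TIPC-capable kernel; error handling trimmed):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <linux/tipc.h>

    #ifndef SOL_TIPC
    #define SOL_TIPC 271   /* value from the kernel's include/linux/socket.h */
    #endif

    int main(void)
    {
            int sk = socket(AF_TIPC, SOCK_RDM, 0);
            int too_high = TIPC_CRITICAL_IMPORTANCE + 1;

            if (sk < 0)
                    return 1;

            if (setsockopt(sk, SOL_TIPC, TIPC_IMPORTANCE,
                           &too_high, sizeof(too_high)) < 0)
                    perror("setsockopt(TIPC_IMPORTANCE)");   /* expects EINVAL */

            return 0;
    }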