-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c |  4
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c    |  5
-rw-r--r--  drivers/net/ethernet/micrel/ks8842.c        |  5
-rw-r--r--  drivers/net/phy/phy.c                       | 16
-rw-r--r--  drivers/net/phy/smsc.c                      | 31
-rw-r--r--  drivers/net/ppp/ppp_generic.c               | 78
-rw-r--r--  drivers/net/usb/qmi_wwan.c                  |  1
-rw-r--r--  net/batman-adv/translation-table.c          |  5
-rw-r--r--  net/bridge/br_multicast.c                   |  4
-rw-r--r--  net/core/skbuff.c                           | 37
-rw-r--r--  net/ipv4/fib_trie.c                         |  2
-rw-r--r--  net/ipv4/igmp.c                             | 33
-rw-r--r--  net/ipv4/inet_connection_sock.c             |  2
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c                  | 10
-rw-r--r--  net/ipv6/ip6_fib.c                          |  2
-rw-r--r--  net/ipv6/mcast_snoop.c                      | 33
-rw-r--r--  net/ipv6/route.c                            | 79
-rw-r--r--  net/mac80211/rc80211_minstrel.c             | 11
18 files changed, 212 insertions(+), 146 deletions(-)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c28e3bfdccd7..6ca693b03f33 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5174,7 +5174,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
 	struct device *dev = &adapter->pdev->dev;
 	int status;
 
-	if (lancer_chip(adapter) || BEx_chip(adapter))
+	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
 		return;
 
 	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -5221,7 +5221,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
-	if (lancer_chip(adapter) || BEx_chip(adapter))
+	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
 		return;
 
 	if (adapter->vxlan_port != port)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2b7610f341b0..10b3bbbbac8e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2102,6 +2102,11 @@ int startup_gfar(struct net_device *ndev)
 	/* Start Rx/Tx DMA and enable the interrupts */
 	gfar_start(priv);
 
+	/* force link state update after mac reset */
+	priv->oldlink = 0;
+	priv->oldspeed = 0;
+	priv->oldduplex = -1;
+
 	phy_start(priv->phydev);
 
 	enable_napi(priv);
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f78909a00f15..09d2e16fd6b0 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
 
 	sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
 		tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
-	err = dma_mapping_error(adapter->dev,
-		sg_dma_address(&tx_ctl->sg));
-	if (err) {
+	if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
+		err = -ENOMEM;
 		sg_dma_address(&tx_ctl->sg) = 0;
 		goto err;
 	}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b2197b506acb..1e1fbb049ec6 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -811,6 +811,7 @@ void phy_state_machine(struct work_struct *work)
 	bool needs_aneg = false, do_suspend = false;
 	enum phy_state old_state;
 	int err = 0;
+	int old_link;
 
 	mutex_lock(&phydev->lock);
 
@@ -896,11 +897,18 @@ void phy_state_machine(struct work_struct *work)
 		phydev->adjust_link(phydev->attached_dev);
 		break;
 	case PHY_RUNNING:
-		/* Only register a CHANGE if we are
-		 * polling or ignoring interrupts
-		 */
-		if (!phy_interrupt_is_valid(phydev))
-			phydev->state = PHY_CHANGELINK;
+		/* Only register a CHANGE if we are polling or ignoring
+		 * interrupts and link changed since latest checking.
+		 */
+		if (!phy_interrupt_is_valid(phydev)) {
+			old_link = phydev->link;
+			err = phy_read_status(phydev);
+			if (err)
+				break;
+
+			if (old_link != phydev->link)
+				phydev->state = PHY_CHANGELINK;
+		}
 		break;
 	case PHY_CHANGELINK:
 		err = phy_read_status(phydev);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c0f6479e19d4..70b08958763a 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -91,19 +91,18 @@ static int lan911x_config_init(struct phy_device *phydev)
 }
 
 /*
- * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
- * other in order to set the ENERGYON bit and exit EDPD mode. If a link partner
- * does send the pulses within this interval, the PHY will remained powered
- * down.
- *
- * This workaround will manually toggle the PHY on/off upon calls to read_status
- * in order to generate link test pulses if the link is down. If a link partner
- * is present, it will respond to the pulses, which will cause the ENERGYON bit
- * to be set and will cause the EDPD mode to be exited.
+ * The LAN87xx suffers from rare absence of the ENERGYON-bit when Ethernet cable
+ * plugs in while LAN87xx is in Energy Detect Power-Down mode. This leads to
+ * unstable detection of plugging in Ethernet cable.
+ * This workaround disables Energy Detect Power-Down mode and waiting for
+ * response on link pulses to detect presence of plugged Ethernet cable.
+ * The Energy Detect Power-Down mode is enabled again in the end of procedure to
+ * save approximately 220 mW of power if cable is unplugged.
  */
 static int lan87xx_read_status(struct phy_device *phydev)
 {
 	int err = genphy_read_status(phydev);
+	int i;
 
 	if (!phydev->link) {
 		/* Disable EDPD to wake up PHY */
@@ -116,8 +115,16 @@ static int lan87xx_read_status(struct phy_device *phydev)
 		if (rc < 0)
 			return rc;
 
-		/* Sleep 64 ms to allow ~5 link test pulses to be sent */
-		msleep(64);
+		/* Wait max 640 ms to detect energy */
+		for (i = 0; i < 64; i++) {
+			/* Sleep to allow link test pulses to be sent */
+			msleep(10);
+			rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+			if (rc < 0)
+				return rc;
+			if (rc & MII_LAN83C185_ENERGYON)
+				break;
+		}
 
 		/* Re-enable EDPD */
 		rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -191,7 +198,7 @@ static struct phy_driver smsc_phy_driver[] = {
 
 	/* basic functions */
 	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
+	.read_status	= lan87xx_read_status,
 	.config_init	= smsc_phy_config_init,
 	.soft_reset	= smsc_phy_reset,
 
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 9d15566521a7..fa8f5046afe9 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -269,9 +269,9 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
 static void ppp_ccp_closed(struct ppp *ppp);
 static struct compressor *find_compressor(int type);
 static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+					struct file *file, int *retp);
 static void init_ppp_file(struct ppp_file *pf, int kind);
-static void ppp_shutdown_interface(struct ppp *ppp);
 static void ppp_destroy_interface(struct ppp *ppp);
 static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
 static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
@@ -392,8 +392,10 @@ static int ppp_release(struct inode *unused, struct file *file)
 	file->private_data = NULL;
 	if (pf->kind == INTERFACE) {
 		ppp = PF_TO_PPP(pf);
+		rtnl_lock();
 		if (file == ppp->owner)
-			ppp_shutdown_interface(ppp);
+			unregister_netdevice(ppp->dev);
+		rtnl_unlock();
 	}
 	if (atomic_dec_and_test(&pf->refcnt)) {
 		switch (pf->kind) {
@@ -593,8 +595,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	mutex_lock(&ppp_mutex);
 	if (pf->kind == INTERFACE) {
 		ppp = PF_TO_PPP(pf);
+		rtnl_lock();
 		if (file == ppp->owner)
-			ppp_shutdown_interface(ppp);
+			unregister_netdevice(ppp->dev);
+		rtnl_unlock();
 	}
 	if (atomic_long_read(&file->f_count) < 2) {
 		ppp_release(NULL, file);
@@ -838,11 +842,10 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
 		/* Create a new ppp unit */
 		if (get_user(unit, p))
 			break;
-		ppp = ppp_create_interface(net, unit, &err);
+		ppp = ppp_create_interface(net, unit, file, &err);
 		if (!ppp)
 			break;
 		file->private_data = &ppp->file;
-		ppp->owner = file;
 		err = -EFAULT;
 		if (put_user(ppp->file.index, p))
 			break;
@@ -916,6 +919,16 @@ static __net_init int ppp_init_net(struct net *net)
 static __net_exit void ppp_exit_net(struct net *net)
 {
 	struct ppp_net *pn = net_generic(net, ppp_net_id);
+	struct ppp *ppp;
+	LIST_HEAD(list);
+	int id;
+
+	rtnl_lock();
+	idr_for_each_entry(&pn->units_idr, ppp, id)
+		unregister_netdevice_queue(ppp->dev, &list);
+
+	unregister_netdevice_many(&list);
+	rtnl_unlock();
 
 	idr_destroy(&pn->units_idr);
 }
@@ -1088,8 +1101,28 @@ static int ppp_dev_init(struct net_device *dev)
 	return 0;
 }
 
+static void ppp_dev_uninit(struct net_device *dev)
+{
+	struct ppp *ppp = netdev_priv(dev);
+	struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+
+	ppp_lock(ppp);
+	ppp->closing = 1;
+	ppp_unlock(ppp);
+
+	mutex_lock(&pn->all_ppp_mutex);
+	unit_put(&pn->units_idr, ppp->file.index);
+	mutex_unlock(&pn->all_ppp_mutex);
+
+	ppp->owner = NULL;
+
+	ppp->file.dead = 1;
+	wake_up_interruptible(&ppp->file.rwait);
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
 	.ndo_init	 = ppp_dev_init,
+	.ndo_uninit	 = ppp_dev_uninit,
 	.ndo_start_xmit  = ppp_start_xmit,
 	.ndo_do_ioctl    = ppp_net_ioctl,
 	.ndo_get_stats64 = ppp_get_stats64,
@@ -2667,8 +2700,8 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
  * or if there is already a unit with the requested number.
  * unit == -1 means allocate a new number.
  */
-static struct ppp *
-ppp_create_interface(struct net *net, int unit, int *retp)
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+					struct file *file, int *retp)
 {
 	struct ppp *ppp;
 	struct ppp_net *pn;
@@ -2688,6 +2721,7 @@ ppp_create_interface(struct net *net, int unit, int *retp)
 	ppp->mru = PPP_MRU;
 	init_ppp_file(&ppp->file, INTERFACE);
 	ppp->file.hdrlen = PPP_HDRLEN - 2;	/* don't count proto bytes */
+	ppp->owner = file;
 	for (i = 0; i < NUM_NP; ++i)
 		ppp->npmode[i] = NPMODE_PASS;
 	INIT_LIST_HEAD(&ppp->channels);
@@ -2776,34 +2810,6 @@ init_ppp_file(struct ppp_file *pf, int kind)
 }
 
 /*
- * Take down a ppp interface unit - called when the owning file
- * (the one that created the unit) is closed or detached.
- */
-static void ppp_shutdown_interface(struct ppp *ppp)
-{
-	struct ppp_net *pn;
-
-	pn = ppp_pernet(ppp->ppp_net);
-	mutex_lock(&pn->all_ppp_mutex);
-
-	/* This will call dev_close() for us. */
-	ppp_lock(ppp);
-	if (!ppp->closing) {
-		ppp->closing = 1;
-		ppp_unlock(ppp);
-		unregister_netdev(ppp->dev);
-		unit_put(&pn->units_idr, ppp->file.index);
-	} else
-		ppp_unlock(ppp);
-
-	ppp->file.dead = 1;
-	ppp->owner = NULL;
-	wake_up_interruptible(&ppp->file.rwait);
-
-	mutex_unlock(&pn->all_ppp_mutex);
-}
-
-/*
  * Free the memory used by a ppp unit. This is only called once
  * there are no channels connected to the unit and no file structs
  * that reference the unit.
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 9d43460ce3c7..64a60afbe50c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -785,6 +785,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x413c, 0x81a4, 8)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81a8, 8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81a9, 8)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 	{QMI_FIXED_INTF(0x03f0, 0x581d, 4)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
 
 	/* 4. Gobi 1000 devices */
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 5e953297d3b2..5809b39c1922 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -595,8 +595,11 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 	/* increase the refcounter of the related vlan */
 	vlan = batadv_softif_vlan_get(bat_priv, vid);
 	if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
-		 addr, BATADV_PRINT_VID(vid)))
+		 addr, BATADV_PRINT_VID(vid))) {
+		kfree(tt_local);
+		tt_local = NULL;
 		goto out;
+	}
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 0b39dcc65b94..1285eaf5dc22 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1591,7 +1591,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
 		break;
 	}
 
-	if (skb_trimmed)
+	if (skb_trimmed && skb_trimmed != skb)
 		kfree_skb(skb_trimmed);
 
 	return err;
@@ -1636,7 +1636,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 		break;
 	}
 
-	if (skb_trimmed)
+	if (skb_trimmed && skb_trimmed != skb)
 		kfree_skb(skb_trimmed);
 
 	return err;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b6a19ca0f99e..bf9a5d93c2d1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4022,8 +4022,8 @@ EXPORT_SYMBOL(skb_checksum_setup);
  * Otherwise returns the provided skb. Returns NULL in error cases
  * (e.g. transport_len exceeds skb length or out-of-memory).
  *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
  */
 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
 					       unsigned int transport_len)
@@ -4032,16 +4032,12 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
 	unsigned int len = skb_transport_offset(skb) + transport_len;
 	int ret;
 
-	if (skb->len < len) {
-		kfree_skb(skb);
+	if (skb->len < len)
 		return NULL;
-	} else if (skb->len == len) {
+	else if (skb->len == len)
 		return skb;
-	}
 
 	skb_chk = skb_clone(skb, GFP_ATOMIC);
-	kfree_skb(skb);
-
 	if (!skb_chk)
 		return NULL;
 
@@ -4066,8 +4062,8 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
  * If the skb has data beyond the given transport length, then a
  * trimmed & cloned skb is checked and returned.
  *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
  */
 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
 				     unsigned int transport_len,
@@ -4079,23 +4075,26 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
 
 	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
 	if (!skb_chk)
-		return NULL;
+		goto err;
 
-	if (!pskb_may_pull(skb_chk, offset)) {
-		kfree_skb(skb_chk);
-		return NULL;
-	}
+	if (!pskb_may_pull(skb_chk, offset))
+		goto err;
 
 	__skb_pull(skb_chk, offset);
 	ret = skb_chkf(skb_chk);
 	__skb_push(skb_chk, offset);
 
-	if (ret) {
-		kfree_skb(skb_chk);
-		return NULL;
-	}
+	if (ret)
+		goto err;
 
 	return skb_chk;
+
+err:
+	if (skb_chk && skb_chk != skb)
+		kfree_skb(skb_chk);
+
+	return NULL;
+
 }
 EXPORT_SYMBOL(skb_checksum_trimmed);
 
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 37c4bb89a708..b0c6258ffb79 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2465,7 +2465,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
 		key = l->key + 1;
 		iter->pos++;
 
-		if (pos-- <= 0)
+		if (--pos <= 0)
 			break;
 
 		l = NULL;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 651cdf648ec4..9fdfd9deac11 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1435,33 +1435,35 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 	struct sk_buff *skb_chk;
 	unsigned int transport_len;
 	unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
-	int ret;
+	int ret = -EINVAL;
 
 	transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
 
-	skb_get(skb);
 	skb_chk = skb_checksum_trimmed(skb, transport_len,
 				       ip_mc_validate_checksum);
 	if (!skb_chk)
-		return -EINVAL;
+		goto err;
 
-	if (!pskb_may_pull(skb_chk, len)) {
-		kfree_skb(skb_chk);
-		return -EINVAL;
-	}
+	if (!pskb_may_pull(skb_chk, len))
+		goto err;
 
 	ret = ip_mc_check_igmp_msg(skb_chk);
-	if (ret) {
-		kfree_skb(skb_chk);
-		return ret;
-	}
+	if (ret)
+		goto err;
 
 	if (skb_trimmed)
 		*skb_trimmed = skb_chk;
-	else
+	/* free now unneeded clone */
+	else if (skb_chk != skb)
 		kfree_skb(skb_chk);
 
-	return 0;
+	ret = 0;
+
+err:
+	if (ret && skb_chk && skb_chk != skb)
+		kfree_skb(skb_chk);
+
+	return ret;
 }
 
 /**
@@ -1470,7 +1472,7 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
  * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
  *
  * Checks whether an IPv4 packet is a valid IGMP packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
  *
  * -EINVAL: A broken packet was detected, i.e. it violates some internet
  * standard
@@ -1485,7 +1487,8 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
  * to leave the original skb and its full frame unchanged (which might be
  * desirable for layer 2 frame jugglers).
  *
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
  */
 int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 05e3145f7dc3..134957159c27 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
 	}
 
 	spin_unlock(&queue->syn_wait_lock);
-	if (del_timer_sync(&req->rsk_timer))
+	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
 		reqsk_put(req);
 	return found;
 }
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 433231ccfb17..0330ab2e2b63 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -41,8 +41,6 @@ static int tcp_syn_retries_min = 1;
 static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
-static int min_sndbuf = SOCK_MIN_SNDBUF;
-static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
@@ -530,7 +528,7 @@ static struct ctl_table ipv4_table[] = {
 		.maxlen		= sizeof(sysctl_tcp_wmem),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &min_sndbuf,
+		.extra1		= &one,
 	},
 	{
 		.procname	= "tcp_notsent_lowat",
@@ -545,7 +543,7 @@ static struct ctl_table ipv4_table[] = {
 		.maxlen		= sizeof(sysctl_tcp_rmem),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &min_rcvbuf,
+		.extra1		= &one,
 	},
 	{
 		.procname	= "tcp_app_win",
@@ -758,7 +756,7 @@ static struct ctl_table ipv4_table[] = {
 		.maxlen		= sizeof(sysctl_udp_rmem_min),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &min_rcvbuf,
+		.extra1		= &one
 	},
 	{
 		.procname	= "udp_wmem_min",
@@ -766,7 +764,7 @@ static struct ctl_table ipv4_table[] = {
 		.maxlen		= sizeof(sysctl_udp_wmem_min),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &min_sndbuf,
+		.extra1		= &one
 	},
 	{ }
 };
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 55d19861ab20..548c6237b1e7 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -172,6 +172,8 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 			*ppcpu_rt = NULL;
 		}
 	}
+
+	non_pcpu_rt->rt6i_pcpu = NULL;
 }
 
 static void rt6_release(struct rt6_info *rt)
diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
index df8afe5ab31e..9405b04eecc6 100644
--- a/net/ipv6/mcast_snoop.c
+++ b/net/ipv6/mcast_snoop.c
@@ -143,34 +143,36 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
 	struct sk_buff *skb_chk = NULL;
 	unsigned int transport_len;
 	unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
-	int ret;
+	int ret = -EINVAL;
 
 	transport_len = ntohs(ipv6_hdr(skb)->payload_len);
 	transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
 
-	skb_get(skb);
 	skb_chk = skb_checksum_trimmed(skb, transport_len,
 				       ipv6_mc_validate_checksum);
 	if (!skb_chk)
-		return -EINVAL;
+		goto err;
 
-	if (!pskb_may_pull(skb_chk, len)) {
-		kfree_skb(skb_chk);
-		return -EINVAL;
-	}
+	if (!pskb_may_pull(skb_chk, len))
+		goto err;
 
 	ret = ipv6_mc_check_mld_msg(skb_chk);
-	if (ret) {
-		kfree_skb(skb_chk);
-		return ret;
-	}
+	if (ret)
+		goto err;
 
 	if (skb_trimmed)
 		*skb_trimmed = skb_chk;
-	else
+	/* free now unneeded clone */
+	else if (skb_chk != skb)
 		kfree_skb(skb_chk);
 
-	return 0;
+	ret = 0;
+
+err:
+	if (ret && skb_chk && skb_chk != skb)
+		kfree_skb(skb_chk);
+
+	return ret;
 }
 
 /**
@@ -179,7 +181,7 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
  * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
  *
  * Checks whether an IPv6 packet is a valid MLD packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
  *
  * -EINVAL: A broken packet was detected, i.e. it violates some internet
  * standard
@@ -194,7 +196,8 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
  * to leave the original skb and its full frame unchanged (which might be
  * desirable for layer 2 frame jugglers).
  *
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
  */
 int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9de4d2bcd916..d15586490cec 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -318,8 +318,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
 /* allocate dst with ip6_dst_ops */
 static struct rt6_info *__ip6_dst_alloc(struct net *net,
 					struct net_device *dev,
-					int flags,
-					struct fib6_table *table)
+					int flags)
 {
 	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
 					0, DST_OBSOLETE_FORCE_CHK, flags);
@@ -336,10 +335,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
 
 static struct rt6_info *ip6_dst_alloc(struct net *net,
 				      struct net_device *dev,
-				      int flags,
-				      struct fib6_table *table)
+				      int flags)
 {
-	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table);
+	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
 
 	if (rt) {
 		rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
@@ -950,8 +948,7 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
 	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
 		ort = (struct rt6_info *)ort->dst.from;
 
-	rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev,
-			     0, ort->rt6i_table);
+	rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
 
 	if (!rt)
 		return NULL;
@@ -983,8 +980,7 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
 	struct rt6_info *pcpu_rt;
 
 	pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
-				  rt->dst.dev, rt->dst.flags,
-				  rt->rt6i_table);
+				  rt->dst.dev, rt->dst.flags);
 
 	if (!pcpu_rt)
 		return NULL;
@@ -997,32 +993,53 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
 /* It should be called with read_lock_bh(&tb6_lock) acquired */
 static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
 {
-	struct rt6_info *pcpu_rt, *prev, **p;
+	struct rt6_info *pcpu_rt, **p;
 
 	p = this_cpu_ptr(rt->rt6i_pcpu);
 	pcpu_rt = *p;
 
-	if (pcpu_rt)
-		goto done;
+	if (pcpu_rt) {
+		dst_hold(&pcpu_rt->dst);
+		rt6_dst_from_metrics_check(pcpu_rt);
+	}
+	return pcpu_rt;
+}
+
+static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
+{
+	struct fib6_table *table = rt->rt6i_table;
+	struct rt6_info *pcpu_rt, *prev, **p;
 
 	pcpu_rt = ip6_rt_pcpu_alloc(rt);
 	if (!pcpu_rt) {
 		struct net *net = dev_net(rt->dst.dev);
 
-		pcpu_rt = net->ipv6.ip6_null_entry;
-		goto done;
+		dst_hold(&net->ipv6.ip6_null_entry->dst);
+		return net->ipv6.ip6_null_entry;
 	}
 
-	prev = cmpxchg(p, NULL, pcpu_rt);
-	if (prev) {
-		/* If someone did it before us, return prev instead */
+	read_lock_bh(&table->tb6_lock);
+	if (rt->rt6i_pcpu) {
+		p = this_cpu_ptr(rt->rt6i_pcpu);
+		prev = cmpxchg(p, NULL, pcpu_rt);
+		if (prev) {
+			/* If someone did it before us, return prev instead */
+			dst_destroy(&pcpu_rt->dst);
+			pcpu_rt = prev;
+		}
+	} else {
+		/* rt has been removed from the fib6 tree
+		 * before we have a chance to acquire the read_lock.
+		 * In this case, don't brother to create a pcpu rt
+		 * since rt is going away anyway. The next
+		 * dst_check() will trigger a re-lookup.
+		 */
 		dst_destroy(&pcpu_rt->dst);
-		pcpu_rt = prev;
+		pcpu_rt = rt;
 	}
-
-done:
 	dst_hold(&pcpu_rt->dst);
 	rt6_dst_from_metrics_check(pcpu_rt);
+	read_unlock_bh(&table->tb6_lock);
 	return pcpu_rt;
 }
 
@@ -1097,9 +1114,22 @@ redo_rt6_select:
 		rt->dst.lastuse = jiffies;
 		rt->dst.__use++;
 		pcpu_rt = rt6_get_pcpu_route(rt);
-		read_unlock_bh(&table->tb6_lock);
+
+		if (pcpu_rt) {
+			read_unlock_bh(&table->tb6_lock);
+		} else {
+			/* We have to do the read_unlock first
+			 * because rt6_make_pcpu_route() may trigger
+			 * ip6_dst_gc() which will take the write_lock.
+			 */
+			dst_hold(&rt->dst);
+			read_unlock_bh(&table->tb6_lock);
+			pcpu_rt = rt6_make_pcpu_route(rt);
+			dst_release(&rt->dst);
+		}
 
 		return pcpu_rt;
+
 	}
 }
 
@@ -1555,7 +1585,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	if (unlikely(!idev))
 		return ERR_PTR(-ENODEV);
 
-	rt = ip6_dst_alloc(net, dev, 0, NULL);
+	rt = ip6_dst_alloc(net, dev, 0);
 	if (unlikely(!rt)) {
 		in6_dev_put(idev);
 		dst = ERR_PTR(-ENOMEM);
@@ -1742,7 +1772,8 @@ int ip6_route_add(struct fib6_config *cfg)
 	if (!table)
 		goto out;
 
-	rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
+	rt = ip6_dst_alloc(net, NULL,
+			   (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
 
 	if (!rt) {
 		err = -ENOMEM;
@@ -2399,7 +2430,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 {
 	struct net *net = dev_net(idev->dev);
 	struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
-					    DST_NOCOUNT, NULL);
+					    DST_NOCOUNT);
 	if (!rt)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 247552a7f6c2..3ece7d1034c8 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -92,14 +92,15 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
 static inline void
 minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
 {
-	int j = MAX_THR_RATES;
-	struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats;
+	int j;
+	struct minstrel_rate_stats *tmp_mrs;
 	struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
 
-	while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) >
-	       minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) {
-		j--;
+	for (j = MAX_THR_RATES; j > 0; --j) {
 		tmp_mrs = &mi->r[tp_list[j - 1]].stats;
+		if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
+		    minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
+			break;
 	}
 
 	if (j < MAX_THR_RATES - 1)