author	David S. Miller <davem@davemloft.net>	2012-02-26 21:55:51 -0500
committer	David S. Miller <davem@davemloft.net>	2012-02-26 21:55:51 -0500
commit	ff4783ce78c08d2990126ce1874250ae8e72bbd2 (patch)
tree	5c95885a4ab768101dd72942b57c238d452a7565 /net
parent	622121719934f60378279eb440d3cec2fc3176d2 (diff)
parent	203738e548cefc3fc3c2f73a9063176c9f3583d5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/sfc/rx.c

Overlapping changes in drivers/net/ethernet/sfc/rx.c, one to change
the rx_buf->is_page boolean into a set of u16 flags, and another to
adjust how ->ip_summed is initialized.

Signed-off-by: David S. Miller <davem@davemloft.net>
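The conflict note above describes a common refactor: a lone boolean field
replaced by a u16 flag word, so independent patches can add per-buffer state
without colliding. A minimal sketch of that pattern, with hypothetical names
(the actual sfc definitions are not part of this diff):

#include <linux/types.h>

#define RX_BUF_PAGE	0x0001	/* hypothetical flag: buffer is page-backed */

struct rx_buffer {
	u16 flags;		/* was: bool is_page */
};

static inline bool rx_buf_is_page(const struct rx_buffer *buf)
{
	return buf->flags & RX_BUF_PAGE;
}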
Diffstat (limited to 'net')
-rw-r--r--	net/atm/clip.c	10
-rw-r--r--	net/core/neighbour.c	2
-rw-r--r--	net/core/rtnetlink.c	78
-rw-r--r--	net/ipv4/ip_gre.c	10
-rw-r--r--	net/ipv4/ping.c	1
-rw-r--r--	net/ipv4/tcp.c	5
-rw-r--r--	net/ipv4/xfrm4_mode_beet.c	5
-rw-r--r--	net/ipv4/xfrm4_mode_tunnel.c	6
-rw-r--r--	net/ipv6/ip6mr.c	4
-rw-r--r--	net/ipv6/ndisc.c	5
-rw-r--r--	net/ipv6/xfrm6_mode_beet.c	6
-rw-r--r--	net/ipv6/xfrm6_mode_tunnel.c	6
-rw-r--r--	net/netfilter/ipvs/ip_vs_core.c	2
-rw-r--r--	net/netfilter/nf_conntrack_core.c	38
-rw-r--r--	net/netfilter/nf_conntrack_netlink.c	46
-rw-r--r--	net/netfilter/nf_queue.c	40
-rw-r--r--	net/netfilter/xt_TEE.c	5
-rw-r--r--	net/sched/sch_netem.c	6
18 files changed, 174 insertions(+), 101 deletions(-)
diff --git a/net/atm/clip.c b/net/atm/clip.c
index ef95a30306fa..5de42ea309bc 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -46,8 +46,8 @@
 
 static struct net_device *clip_devs;
 static struct atm_vcc *atmarpd;
-static struct neigh_table clip_tbl;
 static struct timer_list idle_timer;
+static const struct neigh_ops clip_neigh_ops;
 
 static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
 {
@@ -123,6 +123,8 @@ static int neigh_check_cb(struct neighbour *n)
 	struct atmarp_entry *entry = neighbour_priv(n);
 	struct clip_vcc *cv;
 
+	if (n->ops != &clip_neigh_ops)
+		return 0;
 	for (cv = entry->vccs; cv; cv = cv->next) {
 		unsigned long exp = cv->last_use + cv->idle_timeout;
 
@@ -154,10 +156,10 @@ static int neigh_check_cb(struct neighbour *n)
 
 static void idle_timer_check(unsigned long dummy)
 {
-	write_lock(&clip_tbl.lock);
-	__neigh_for_each_release(&clip_tbl, neigh_check_cb);
+	write_lock(&arp_tbl.lock);
+	__neigh_for_each_release(&arp_tbl, neigh_check_cb);
 	mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
-	write_unlock(&clip_tbl.lock);
+	write_unlock(&arp_tbl.lock);
 }
 
 static int clip_arp_rcv(struct sk_buff *skb)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f98ec444133a..0a68045782d1 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -826,6 +826,8 @@ next_elt:
 		write_unlock_bh(&tbl->lock);
 		cond_resched();
 		write_lock_bh(&tbl->lock);
+		nht = rcu_dereference_protected(tbl->nht,
+						lockdep_is_held(&tbl->lock));
 	}
 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 7aef62e53113..5cf39cd7da85 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -60,7 +60,6 @@ struct rtnl_link {
 };
 
 static DEFINE_MUTEX(rtnl_mutex);
-static u16 min_ifinfo_dump_size;
 
 void rtnl_lock(void)
 {
@@ -724,10 +723,11 @@ static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
 }
 
 /* All VF info */
-static inline int rtnl_vfinfo_size(const struct net_device *dev)
+static inline int rtnl_vfinfo_size(const struct net_device *dev,
+				   u32 ext_filter_mask)
 {
-	if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
-
+	if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
+	    (ext_filter_mask & RTEXT_FILTER_VF)) {
 		int num_vfs = dev_num_vf(dev->dev.parent);
 		size_t size = nla_total_size(sizeof(struct nlattr));
 		size += nla_total_size(num_vfs * sizeof(struct nlattr));
@@ -766,7 +766,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
 	return port_self_size;
 }
 
-static noinline size_t if_nlmsg_size(const struct net_device *dev)
+static noinline size_t if_nlmsg_size(const struct net_device *dev,
+				     u32 ext_filter_mask)
 {
 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
 	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
@@ -784,8 +785,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev)
 	       + nla_total_size(4) /* IFLA_MASTER */
 	       + nla_total_size(1) /* IFLA_OPERSTATE */
 	       + nla_total_size(1) /* IFLA_LINKMODE */
-	       + nla_total_size(4) /* IFLA_NUM_VF */
-	       + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
+	       + nla_total_size(ext_filter_mask
+			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
 	       + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
 	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
 	       + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
@@ -868,7 +870,7 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
 
 static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 			    int type, u32 pid, u32 seq, u32 change,
-			    unsigned int flags)
+			    unsigned int flags, u32 ext_filter_mask)
 {
 	struct ifinfomsg *ifm;
 	struct nlmsghdr *nlh;
@@ -941,10 +943,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 		goto nla_put_failure;
 	copy_rtnl_link_stats64(nla_data(attr), stats);
 
-	if (dev->dev.parent)
+	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF))
 		NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
 
-	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
+	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
+	    && (ext_filter_mask & RTEXT_FILTER_VF)) {
 		int i;
 
 		struct nlattr *vfinfo, *vf;
@@ -1048,6 +1051,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	struct net_device *dev;
 	struct hlist_head *head;
 	struct hlist_node *node;
+	struct nlattr *tb[IFLA_MAX+1];
+	u32 ext_filter_mask = 0;
 
 	s_h = cb->args[0];
 	s_idx = cb->args[1];
@@ -1055,6 +1060,12 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	rcu_read_lock();
 	cb->seq = net->dev_base_seq;
 
+	nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+		    ifla_policy);
+
+	if (tb[IFLA_EXT_MASK])
+		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+
 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 		idx = 0;
 		head = &net->dev_index_head[h];
@@ -1064,7 +1075,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 			if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
 					     NETLINK_CB(cb->skb).pid,
 					     cb->nlh->nlmsg_seq, 0,
-					     NLM_F_MULTI) <= 0)
+					     NLM_F_MULTI,
+					     ext_filter_mask) <= 0)
 				goto out;
 
 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -1100,6 +1112,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
 	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
 	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
+	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
 };
 EXPORT_SYMBOL(ifla_policy);
 
@@ -1509,8 +1522,6 @@ errout:
 
 	if (send_addr_notify)
 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
-	min_ifinfo_dump_size = max_t(u16, if_nlmsg_size(dev),
-				     min_ifinfo_dump_size);
 
 	return err;
 }
@@ -1842,6 +1853,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	struct net_device *dev = NULL;
 	struct sk_buff *nskb;
 	int err;
+	u32 ext_filter_mask = 0;
 
 	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
 	if (err < 0)
@@ -1850,6 +1862,9 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	if (tb[IFLA_IFNAME])
 		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
 
+	if (tb[IFLA_EXT_MASK])
+		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+
 	ifm = nlmsg_data(nlh);
 	if (ifm->ifi_index > 0)
 		dev = __dev_get_by_index(net, ifm->ifi_index);
@@ -1861,12 +1876,12 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	if (dev == NULL)
 		return -ENODEV;
 
-	nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
+	nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
 	if (nskb == NULL)
 		return -ENOBUFS;
 
 	err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
-			       nlh->nlmsg_seq, 0, 0);
+			       nlh->nlmsg_seq, 0, 0, ext_filter_mask);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in if_nlmsg_size */
 		WARN_ON(err == -EMSGSIZE);
@@ -1877,8 +1892,31 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	return err;
 }
 
-static u16 rtnl_calcit(struct sk_buff *skb)
+static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
+	struct net *net = sock_net(skb->sk);
+	struct net_device *dev;
+	struct nlattr *tb[IFLA_MAX+1];
+	u32 ext_filter_mask = 0;
+	u16 min_ifinfo_dump_size = 0;
+
+	nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, ifla_policy);
+
+	if (tb[IFLA_EXT_MASK])
+		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+
+	if (!ext_filter_mask)
+		return NLMSG_GOODSIZE;
+	/*
+	 * traverse the list of net devices and compute the minimum
+	 * buffer size based upon the filter mask.
+	 */
+	list_for_each_entry(dev, &net->dev_base_head, dev_list) {
+		min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
+					     if_nlmsg_size(dev,
+							   ext_filter_mask));
+	}
+
 	return min_ifinfo_dump_size;
 }
 
@@ -1913,13 +1951,11 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
 	int err = -ENOBUFS;
 	size_t if_info_size;
 
-	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev)), GFP_KERNEL);
+	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
 	if (skb == NULL)
 		goto errout;
 
-	min_ifinfo_dump_size = max_t(u16, if_info_size, min_ifinfo_dump_size);
-
-	err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0);
+	err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
 		WARN_ON(err == -EMSGSIZE);
@@ -1977,7 +2013,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 			return -EOPNOTSUPP;
 		calcit = rtnl_get_calcit(family, type);
 		if (calcit)
-			min_dump_alloc = calcit(skb);
+			min_dump_alloc = calcit(skb, nlh);
 
 		__rtnl_unlock();
 		rtnl = net->rtnl;
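Taken together, the rtnetlink hunks make VF attributes opt-in: link dumps and
RTM_GETLINK replies only size and emit IFLA_NUM_VF and IFLA_VFINFO_LIST when
userspace passes RTEXT_FILTER_VF in the new IFLA_EXT_MASK attribute. A hedged
sketch of such a request, laid out by hand (real code would normally build the
attribute with libnl or iproute2's addattr helpers):

struct {
	struct nlmsghdr  nlh;
	struct ifinfomsg ifm;
	struct nlattr    ext;	/* IFLA_EXT_MASK header */
	__u32            mask;	/* its u32 payload */
} req = {
	.nlh.nlmsg_len   = sizeof(req),
	.nlh.nlmsg_type  = RTM_GETLINK,
	.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
	.ifm.ifi_family  = AF_UNSPEC,
	.ext.nla_type    = IFLA_EXT_MASK,
	.ext.nla_len     = NLA_HDRLEN + sizeof(__u32),
	.mask            = RTEXT_FILTER_VF,	/* ask for VF info */
};
/* then send(fd, &req, sizeof(req), 0) on a NETLINK_ROUTE socket */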
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index b59414a0c1ee..6ef66af12291 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -65,7 +65,7 @@
    it is infeasible task. The most general solutions would be
    to keep skb->encapsulation counter (sort of local ttl),
    and silently drop packet when it expires. It is a good
-   solution, but it supposes maintaing new variable in ALL
+   solution, but it supposes maintaining new variable in ALL
    skb, even if no tunneling is used.
 
    Current solution: xmit_recursion breaks dead loops. This is a percpu
@@ -91,14 +91,14 @@
 
    One of them is to parse packet trying to detect inner encapsulation
    made by our node. It is difficult or even impossible, especially,
-   taking into account fragmentation. TO be short, tt is not solution at all.
+   taking into account fragmentation. TO be short, ttl is not solution at all.
 
    Current solution: The solution was UNEXPECTEDLY SIMPLE.
    We force DF flag on tunnels with preconfigured hop limit,
    that is ALL. :-) Well, it does not remove the problem completely,
    but exponential growth of network traffic is changed to linear
    (branches, that exceed pmtu are pruned) and tunnel mtu
-   fastly degrades to value <68, where looping stops.
+   rapidly degrades to value <68, where looping stops.
    Yes, it is not good if there exists a router in the loop,
    which does not force DF, even when encapsulating packets have DF set.
    But it is not our problem! Nobody could accuse us, we made
@@ -457,8 +457,8 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
    GRE tunnels with enabled checksum. Tell them "thank you".
 
    Well, I wonder, rfc1812 was written by Cisco employee,
-   what the hell these idiots break standrads established
-   by themself???
+   what the hell these idiots break standards established
+   by themselves???
    */
 
 	const struct iphdr *iph = (const struct iphdr *)skb->data;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index cfc82cf339f6..4398a45a9600 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -631,6 +631,7 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 	pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num);
 
+	err = -EOPNOTSUPP;
 	if (flags & MSG_OOB)
 		goto out;
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 37755ccc0e96..22ef5f9fd2ff 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3240,7 +3240,8 @@ void __init tcp_init(void)
 {
 	struct sk_buff *skb = NULL;
 	unsigned long limit;
-	int i, max_share, cnt;
+	int max_share, cnt;
+	unsigned int i;
 	unsigned long jiffy = jiffies;
 
 	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3283,7 +3284,7 @@ void __init tcp_init(void)
 					  &tcp_hashinfo.bhash_size,
 					  NULL,
 					  64 * 1024);
-	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
+	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
 	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
 		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
 		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c
index 63418185f524..e3db3f915114 100644
--- a/net/ipv4/xfrm4_mode_beet.c
+++ b/net/ipv4/xfrm4_mode_beet.c
@@ -110,10 +110,7 @@ static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	skb_push(skb, sizeof(*iph));
 	skb_reset_network_header(skb);
-
-	memmove(skb->data - skb->mac_len, skb_mac_header(skb),
-		skb->mac_len);
-	skb_set_mac_header(skb, -skb->mac_len);
+	skb_mac_header_rebuild(skb);
 
 	xfrm4_beet_make_header(skb);
 
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 534972e114ac..ed4bf11ef9f4 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -66,7 +66,6 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 
 static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-	const unsigned char *old_mac;
 	int err = -EINVAL;
 
 	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
@@ -84,10 +83,9 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 	if (!(x->props.flags & XFRM_STATE_NOECN))
 		ipip_ecn_decapsulate(skb);
 
-	old_mac = skb_mac_header(skb);
-	skb_set_mac_header(skb, -skb->mac_len);
-	memmove(skb_mac_header(skb), old_mac, skb->mac_len);
 	skb_reset_network_header(skb);
+	skb_mac_header_rebuild(skb);
+
 	err = 0;
 
 out:
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index c7e95c8c579f..5aa3981a3922 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1926,8 +1926,10 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
 	};
 
 	dst = ip6_route_output(net, NULL, &fl6);
-	if (!dst)
+	if (dst->error) {
+		dst_release(dst);
 		goto out_free;
+	}
 
 	skb_dst_drop(skb);
 	skb_dst_set(skb, dst);
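This hunk, and the matching ones in net/ipv6/ndisc.c and net/netfilter/xt_TEE.c
further down, fix the same misuse: ip6_route_output() never returns NULL; on
failure it returns a dst whose ->error field is set, so the old NULL checks were
dead code and a failed lookup leaked a dst reference. The corrected idiom, as a
fragment reusing the identifiers from the hunk above:

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {		/* failure is an error dst, never NULL */
		dst_release(dst);	/* drop the reference before bailing */
		goto out_free;		/* caller's error path */
	}
	skb_dst_drop(skb);
	skb_dst_set(skb, dst);		/* route is valid; attach it */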
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 8d817018c188..3dcdb81ec3e8 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1550,9 +1550,10 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
 			   &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex);
 
 	dst = ip6_route_output(net, NULL, &fl6);
-	if (dst == NULL)
+	if (dst->error) {
+		dst_release(dst);
 		return;
-
+	}
 	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
 	if (IS_ERR(dst))
 		return;
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
index a81ce9450750..9949a356d62c 100644
--- a/net/ipv6/xfrm6_mode_beet.c
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -80,7 +80,6 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
 {
 	struct ipv6hdr *ip6h;
-	const unsigned char *old_mac;
 	int size = sizeof(struct ipv6hdr);
 	int err;
 
@@ -90,10 +89,7 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	__skb_push(skb, size);
 	skb_reset_network_header(skb);
-
-	old_mac = skb_mac_header(skb);
-	skb_set_mac_header(skb, -skb->mac_len);
-	memmove(skb_mac_header(skb), old_mac, skb->mac_len);
+	skb_mac_header_rebuild(skb);
 
 	xfrm6_beet_make_header(skb);
 
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 261e6e6f487e..9f2095b19ad0 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -63,7 +63,6 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err = -EINVAL;
-	const unsigned char *old_mac;
 
 	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
 		goto out;
@@ -80,10 +79,9 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 	if (!(x->props.flags & XFRM_STATE_NOECN))
 		ipip6_ecn_decapsulate(skb);
 
-	old_mac = skb_mac_header(skb);
-	skb_set_mac_header(skb, -skb->mac_len);
-	memmove(skb_mac_header(skb), old_mac, skb->mac_len);
 	skb_reset_network_header(skb);
+	skb_mac_header_rebuild(skb);
+
 	err = 0;
 
 out:
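All four xfrm hunks above collapse the same open-coded three-line sequence into
a single skb_mac_header_rebuild() helper. Its definition is not part of this
diff; reconstructed purely from the lines deleted here, it presumably amounts to
the following sketch (the in-tree helper may differ in detail, e.g. by guarding
on whether a MAC header was ever set):

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	const unsigned char *old_mac = skb_mac_header(skb);

	/* place the MAC header just before the restored network header */
	skb_set_mac_header(skb, -skb->mac_len);
	/* copy the saved MAC bytes into their new position */
	memmove(skb_mac_header(skb), old_mac, skb->mac_len);
}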
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 611c3359b94d..2555816e7788 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -232,6 +232,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 	__be16 dport = 0;		/* destination port to forward */
 	unsigned int flags;
 	struct ip_vs_conn_param param;
+	const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
 	union nf_inet_addr snet;	/* source network of the client,
 					   after masking */
 
@@ -267,7 +268,6 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 	{
 		int protocol = iph.protocol;
 		const union nf_inet_addr *vaddr = &iph.daddr;
-		const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
 		__be16 vport = 0;
 
 		if (dst_port == svc->port) {
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 76613f5a55c0..ed86a3be678e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -404,19 +404,49 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 			   &net->ct.hash[repl_hash]);
 }
 
-void nf_conntrack_hash_insert(struct nf_conn *ct)
+int
+nf_conntrack_hash_check_insert(struct nf_conn *ct)
 {
 	struct net *net = nf_ct_net(ct);
 	unsigned int hash, repl_hash;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
 	u16 zone;
 
 	zone = nf_ct_zone(ct);
-	hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-	repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+	hash = hash_conntrack(net, zone,
+			      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+	repl_hash = hash_conntrack(net, zone,
+				   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+
+	spin_lock_bh(&nf_conntrack_lock);
 
+	/* See if there's one in the list already, including reverse */
+	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
+		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				      &h->tuple) &&
+		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+			goto out;
+	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
+		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+				      &h->tuple) &&
+		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+			goto out;
+
+	add_timer(&ct->timeout);
+	nf_conntrack_get(&ct->ct_general);
 	__nf_conntrack_hash_insert(ct, hash, repl_hash);
+	NF_CT_STAT_INC(net, insert);
+	spin_unlock_bh(&nf_conntrack_lock);
+
+	return 0;
+
+out:
+	NF_CT_STAT_INC(net, insert_failed);
+	spin_unlock_bh(&nf_conntrack_lock);
+	return -EEXIST;
 }
-EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);
+EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 
 /* Confirm a connection given skb; places it in hash table */
 int
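The rename also changes the contract: instead of inserting unconditionally, the
helper takes nf_conntrack_lock, refuses any entry whose original or reply tuple
is already hashed, and only then arms the timer and inserts. Callers must now
check the result; the ctnetlink hunk in the next section does exactly that, in
this shape:

	/* new calling convention: 0 on success, -EEXIST on a tuple clash
	 * (mirrors the ctnetlink_create_conntrack() hunk below)
	 */
	err = nf_conntrack_hash_check_insert(ct);
	if (err < 0)
		goto err2;	/* error label in the caller below */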
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 28d0312d890a..04fb409623d2 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1404,15 +1404,12 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 					    nf_ct_protonum(ct));
 		if (helper == NULL) {
 			rcu_read_unlock();
-			spin_unlock_bh(&nf_conntrack_lock);
 #ifdef CONFIG_MODULES
 			if (request_module("nfct-helper-%s", helpname) < 0) {
-				spin_lock_bh(&nf_conntrack_lock);
 				err = -EOPNOTSUPP;
 				goto err1;
 			}
 
-			spin_lock_bh(&nf_conntrack_lock);
 			rcu_read_lock();
 			helper = __nf_conntrack_helper_find(helpname,
 							    nf_ct_l3num(ct),
@@ -1505,8 +1502,10 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 	if (tstamp)
 		tstamp->start = ktime_to_ns(ktime_get_real());
 
-	add_timer(&ct->timeout);
-	nf_conntrack_hash_insert(ct);
+	err = nf_conntrack_hash_check_insert(ct);
+	if (err < 0)
+		goto err2;
+
 	rcu_read_unlock();
 
 	return ct;
@@ -1527,6 +1526,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 	struct nf_conntrack_tuple otuple, rtuple;
 	struct nf_conntrack_tuple_hash *h = NULL;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	struct nf_conn *ct;
 	u_int8_t u3 = nfmsg->nfgen_family;
 	u16 zone;
 	int err;
@@ -1547,27 +1547,22 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 		return err;
 	}
 
-	spin_lock_bh(&nf_conntrack_lock);
 	if (cda[CTA_TUPLE_ORIG])
-		h = __nf_conntrack_find(net, zone, &otuple);
+		h = nf_conntrack_find_get(net, zone, &otuple);
 	else if (cda[CTA_TUPLE_REPLY])
-		h = __nf_conntrack_find(net, zone, &rtuple);
+		h = nf_conntrack_find_get(net, zone, &rtuple);
 
 	if (h == NULL) {
 		err = -ENOENT;
 		if (nlh->nlmsg_flags & NLM_F_CREATE) {
-			struct nf_conn *ct;
 			enum ip_conntrack_events events;
 
 			ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
 							&rtuple, u3);
-			if (IS_ERR(ct)) {
-				err = PTR_ERR(ct);
-				goto out_unlock;
-			}
+			if (IS_ERR(ct))
+				return PTR_ERR(ct);
+
 			err = 0;
-			nf_conntrack_get(&ct->ct_general);
-			spin_unlock_bh(&nf_conntrack_lock);
 			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
 				events = IPCT_RELATED;
 			else
@@ -1582,23 +1577,19 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 					      ct, NETLINK_CB(skb).pid,
 					      nlmsg_report(nlh));
 			nf_ct_put(ct);
-		} else
-			spin_unlock_bh(&nf_conntrack_lock);
+		}
 
 		return err;
 	}
 	/* implicit 'else' */
 
-	/* We manipulate the conntrack inside the global conntrack table lock,
-	 * so there's no need to increase the refcount */
 	err = -EEXIST;
+	ct = nf_ct_tuplehash_to_ctrack(h);
 	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
-		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
-
+		spin_lock_bh(&nf_conntrack_lock);
 		err = ctnetlink_change_conntrack(ct, cda);
+		spin_unlock_bh(&nf_conntrack_lock);
 		if (err == 0) {
-			nf_conntrack_get(&ct->ct_general);
-			spin_unlock_bh(&nf_conntrack_lock);
 			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
 						      (1 << IPCT_ASSURED) |
 						      (1 << IPCT_HELPER) |
@@ -1607,15 +1598,10 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 						      (1 << IPCT_MARK),
 						      ct, NETLINK_CB(skb).pid,
 						      nlmsg_report(nlh));
-			nf_ct_put(ct);
-		} else
-			spin_unlock_bh(&nf_conntrack_lock);
-
-		return err;
+		}
 	}
 
-out_unlock:
-	spin_unlock_bh(&nf_conntrack_lock);
+	nf_ct_put(ct);
 	return err;
 }
 
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index b3a7db678b8d..ce60cf0f6c11 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -203,6 +203,27 @@ err:
 	return status;
 }
 
+#ifdef CONFIG_BRIDGE_NETFILTER
+/* When called from bridge netfilter, skb->data must point to MAC header
+ * before calling skb_gso_segment(). Else, original MAC header is lost
+ * and segmented skbs will be sent to wrong destination.
+ */
+static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
+{
+	if (skb->nf_bridge)
+		__skb_push(skb, skb->network_header - skb->mac_header);
+}
+
+static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
+{
+	if (skb->nf_bridge)
+		__skb_pull(skb, skb->network_header - skb->mac_header);
+}
+#else
+#define nf_bridge_adjust_skb_data(s) do {} while (0)
+#define nf_bridge_adjust_segmented_data(s) do {} while (0)
+#endif
+
 int nf_queue(struct sk_buff *skb,
 	     struct list_head *elem,
 	     u_int8_t pf, unsigned int hook,
@@ -212,7 +233,7 @@ int nf_queue(struct sk_buff *skb,
 	     unsigned int queuenum)
 {
 	struct sk_buff *segs;
-	int err;
+	int err = -EINVAL;
 	unsigned int queued;
 
 	if (!skb_is_gso(skb))
@@ -228,23 +249,25 @@ int nf_queue(struct sk_buff *skb,
 		break;
 	}
 
+	nf_bridge_adjust_skb_data(skb);
 	segs = skb_gso_segment(skb, 0);
 	/* Does not use PTR_ERR to limit the number of error codes that can be
 	 * returned by nf_queue. For instance, callers rely on -ECANCELED to mean
 	 * 'ignore this hook'.
 	 */
 	if (IS_ERR(segs))
-		return -EINVAL;
-
+		goto out_err;
 	queued = 0;
 	err = 0;
 	do {
 		struct sk_buff *nskb = segs->next;
 
 		segs->next = NULL;
-		if (err == 0)
+		if (err == 0) {
+			nf_bridge_adjust_segmented_data(segs);
 			err = __nf_queue(segs, elem, pf, hook, indev,
 					 outdev, okfn, queuenum);
+		}
 		if (err == 0)
 			queued++;
 		else
@@ -252,11 +275,12 @@ int nf_queue(struct sk_buff *skb,
 		segs = nskb;
 	} while (segs);
 
-	/* also free orig skb if only some segments were queued */
-	if (unlikely(err && queued))
-		err = 0;
-	if (err == 0)
+	if (queued) {
 		kfree_skb(skb);
+		return 0;
+	}
+out_err:
+	nf_bridge_adjust_segmented_data(skb);
 	return err;
 }
 
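The push/pull distance used by the two bridge helpers above is just the gap
between the network and MAC header offsets. Worked through for the common case,
assuming an untagged Ethernet frame (ETH_HLEN == 14):

	/*
	 *   skb->mac_header     = X
	 *   skb->network_header = X + 14
	 *
	 * nf_bridge_adjust_skb_data():       __skb_push(skb, 14), so
	 *   skb->data rewinds to the MAC header before skb_gso_segment()
	 *   copies it into every segment;
	 * nf_bridge_adjust_segmented_data(): __skb_pull(skb, 14), so
	 *   skb->data returns to the network header for queueing.
	 */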
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 3aae66facf9f..4d5057902839 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -152,9 +152,10 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 	fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
 			(iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
 	dst = ip6_route_output(net, NULL, &fl6);
-	if (dst == NULL)
+	if (dst->error) {
+		dst_release(dst);
 		return false;
-
+	}
 	skb_dst_drop(skb);
 	skb_dst_set(skb, dst);
 	skb->dev      = dst->dev;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e83d61ca78ca..5da548fa7ae9 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -501,9 +501,8 @@ tfifo_dequeue:
 
 	/* if more time remaining? */
 	if (cb->time_to_send <= psched_get_time()) {
-		skb = qdisc_dequeue_tail(sch);
-		if (unlikely(!skb))
-			goto qdisc_dequeue;
+		__skb_unlink(skb, &sch->q);
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
 
 #ifdef CONFIG_NET_CLS_ACT
 		/*
@@ -539,7 +538,6 @@ deliver:
 			qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
 	}
 
-qdisc_dequeue:
 	if (q->qdisc) {
 		skb = q->qdisc->ops->dequeue(q->qdisc);
 		if (skb)