author		David S. Miller <davem@davemloft.net>	2014-07-16 17:09:34 -0400
committer	David S. Miller <davem@davemloft.net>	2014-07-16 17:09:34 -0400
commit		1a98c69af1ecd97bfd1f4e4539924a9192434e36 (patch)
tree		a243defcf921ea174f8e43fce11d06830a6a9c36 /net
parent		7a575f6b907ea5d207d2b5010293c189616eae34 (diff)
parent		b6603fe574af289dbe9eb9fb4c540bca04f5a053 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--	net/8021q/vlan_dev.c		13
-rw-r--r--	net/appletalk/ddp.c		3
-rw-r--r--	net/core/dev.c			30
-rw-r--r--	net/core/iovec.c		55
-rw-r--r--	net/core/neighbour.c		9
-rw-r--r--	net/ipv4/gre_demux.c		1
-rw-r--r--	net/ipv4/icmp.c			2
-rw-r--r--	net/ipv4/igmp.c			10
-rw-r--r--	net/ipv4/ip_tunnel.c		12
-rw-r--r--	net/ipv4/route.c		15
-rw-r--r--	net/ipv4/tcp.c			3
-rw-r--r--	net/ipv4/tcp_input.c		8
-rw-r--r--	net/ipv4/tcp_output.c		6
-rw-r--r--	net/ipv4/udp.c			5
-rw-r--r--	net/ipv6/mcast.c		13
-rw-r--r--	net/ipv6/udp.c			6
-rw-r--r--	net/l2tp/l2tp_ppp.c		4
-rw-r--r--	net/netlink/af_netlink.c	4
-rw-r--r--	net/openvswitch/actions.c	2
-rw-r--r--	net/openvswitch/datapath.c	27
-rw-r--r--	net/openvswitch/flow.c		4
-rw-r--r--	net/openvswitch/flow.h		5
-rw-r--r--	net/openvswitch/flow_table.c	16
-rw-r--r--	net/openvswitch/flow_table.h	3
-rw-r--r--	net/openvswitch/vport-gre.c	17
-rw-r--r--	net/sctp/ulpevent.c		122
-rw-r--r--	net/tipc/bcast.c		1
-rw-r--r--	net/tipc/msg.c			11
28 files changed, 169 insertions, 238 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9d0223b16b46..35a6b6b15e8a 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -629,8 +629,6 @@ static void vlan_dev_uninit(struct net_device *dev)
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	int i;
 
-	free_percpu(vlan->vlan_pcpu_stats);
-	vlan->vlan_pcpu_stats = NULL;
 	for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
 		while ((pm = vlan->egress_priority_map[i]) != NULL) {
 			vlan->egress_priority_map[i] = pm->next;
@@ -787,6 +785,15 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
 };
 
+static void vlan_dev_free(struct net_device *dev)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+	free_percpu(vlan->vlan_pcpu_stats);
+	vlan->vlan_pcpu_stats = NULL;
+	free_netdev(dev);
+}
+
 void vlan_setup(struct net_device *dev)
 {
 	ether_setup(dev);
@@ -796,7 +803,7 @@ void vlan_setup(struct net_device *dev)
 	dev->tx_queue_len	= 0;
 
 	dev->netdev_ops		= &vlan_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->destructor		= vlan_dev_free;
 	dev->ethtool_ops	= &vlan_ethtool_ops;
 
 	memset(dev->broadcast, 0, ETH_ALEN);
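
The vlan_dev.c change above is a use-after-free fix worth spelling out: freeing vlan_pcpu_stats in ndo_uninit leaves a window in which a late reader that still holds a reference to the device can dereference the freed per-CPU stats, so the free moves into the destructor callback, the last thing run before the net_device memory itself is released. A minimal userspace sketch of the same ordering rule, with hypothetical names (obj_unregister, obj_destroy), not the kernel API:

#include <stdlib.h>

/* Hypothetical object with a separately allocated stats block. */
struct obj {
	long *stats;
};

/* Unregister only tears down visibility; readers that already hold
 * a reference may still touch o->stats, so it must stay allocated. */
static void obj_unregister(struct obj *o)
{
	(void)o;	/* nothing freed here, mirroring vlan_dev_uninit() */
}

/* The destructor runs once no reader can reach the object, so this
 * is the one safe place for the final frees (cf. vlan_dev_free()). */
static void obj_destroy(struct obj *o)
{
	free(o->stats);
	o->stats = NULL;
	free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->stats = calloc(16, sizeof(*o->stats));
	obj_unregister(o);	/* stats still valid for late readers */
	obj_destroy(o);		/* last free, like dev->destructor */
	return 0;
}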
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 8ceabc073658..c00897f65a31 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
 		goto drop;
 
 	/* Queue packet (standard) */
-	skb->sk = sock;
-
 	if (sock_queue_rcv_skb(sock, skb) < 0)
 		goto drop;
 
@@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
 	if (!skb)
 		goto out;
 
-	skb->sk = sk;
 	skb_reserve(skb, ddp_dl->header_length);
 	skb_reserve(skb, dev->hard_header_len);
 	skb->dev = dev;
diff --git a/net/core/dev.c b/net/core/dev.c
index 2c98f10ee62a..138ab897de7d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly;	/* Taps */
 static struct list_head offload_base __read_mostly;
 
 static int netif_rx_internal(struct sk_buff *skb);
+static int call_netdevice_notifiers_info(unsigned long val,
+					 struct net_device *dev,
+					 struct netdev_notifier_info *info);
 
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -1214,7 +1217,11 @@ EXPORT_SYMBOL(netdev_features_change);
 void netdev_state_change(struct net_device *dev)
 {
 	if (dev->flags & IFF_UP) {
-		call_netdevice_notifiers(NETDEV_CHANGE, dev);
+		struct netdev_notifier_change_info change_info;
+
+		change_info.flags_changed = 0;
+		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
+					      &change_info.info);
 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
 	}
 }
@@ -4234,9 +4241,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
 #endif
 	napi->weight = weight_p;
 	local_irq_disable();
-	while (work < quota) {
+	while (1) {
 		struct sk_buff *skb;
-		unsigned int qlen;
 
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
 			local_irq_enable();
@@ -4250,24 +4256,24 @@ static int process_backlog(struct napi_struct *napi, int quota)
 		}
 
 		rps_lock(sd);
-		qlen = skb_queue_len(&sd->input_pkt_queue);
-		if (qlen)
-			skb_queue_splice_tail_init(&sd->input_pkt_queue,
-						   &sd->process_queue);
-
-		if (qlen < quota - work) {
+		if (skb_queue_empty(&sd->input_pkt_queue)) {
 			/*
 			 * Inline a custom version of __napi_complete().
 			 * only current cpu owns and manipulates this napi,
-			 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
-			 * we can use a plain write instead of clear_bit(),
+			 * and NAPI_STATE_SCHED is the only possible flag set
+			 * on backlog.
+			 * We can use a plain write instead of clear_bit(),
 			 * and we dont need an smp_mb() memory barrier.
 			 */
			list_del(&napi->poll_list);
 			napi->state = 0;
+			rps_unlock(sd);
 
-			quota = work + qlen;
+			break;
 		}
+
+		skb_queue_splice_tail_init(&sd->input_pkt_queue,
+					   &sd->process_queue);
 		rps_unlock(sd);
 	}
 	local_irq_enable();
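
The process_backlog rework above replaces the qlen arithmetic with a simpler invariant: drain the private process_queue, then, under rps_lock, either complete (input queue empty) or splice all pending input and loop again, which closes the race between reading the queue length and splicing. A rough userspace sketch of that splice-or-break shape, ignoring the NAPI quota bookkeeping and using hypothetical queue helpers rather than sk_buff_head:

#include <stdbool.h>

struct queue { int len; };	/* stand-in for sk_buff_head */

static bool queue_empty(const struct queue *q) { return q->len == 0; }

static void queue_splice(struct queue *from, struct queue *to)
{
	to->len += from->len;
	from->len = 0;
}

static void backlog_poll(struct queue *input, struct queue *process)
{
	while (1) {
		while (!queue_empty(process))
			process->len--;		/* per-packet work */

		/* rps_lock(sd) would be taken here */
		if (queue_empty(input))
			break;			/* complete: nothing raced in */
		queue_splice(input, process);	/* take everything, go again */
		/* rps_unlock(sd) */
	}
}

int main(void)
{
	struct queue in = { 3 }, proc = { 2 };

	backlog_poll(&in, &proc);
	return in.len + proc.len;	/* 0: both queues drained */
}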
diff --git a/net/core/iovec.c b/net/core/iovec.c
index b61869429f4c..827dd6beb49c 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -75,61 +75,6 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
 }
 
 /*
- * Copy kernel to iovec. Returns -EFAULT on error.
- */
-
-int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
-		      int offset, int len)
-{
-	int copy;
-	for (; len > 0; ++iov) {
-		/* Skip over the finished iovecs */
-		if (unlikely(offset >= iov->iov_len)) {
-			offset -= iov->iov_len;
-			continue;
-		}
-		copy = min_t(unsigned int, iov->iov_len - offset, len);
-		if (copy_to_user(iov->iov_base + offset, kdata, copy))
-			return -EFAULT;
-		offset = 0;
-		kdata += copy;
-		len -= copy;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovecend);
-
-/*
- * Copy iovec to kernel. Returns -EFAULT on error.
- */
-
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-			int offset, int len)
-{
-	/* Skip over the finished iovecs */
-	while (offset >= iov->iov_len) {
-		offset -= iov->iov_len;
-		iov++;
-	}
-
-	while (len > 0) {
-		u8 __user *base = iov->iov_base + offset;
-		int copy = min_t(unsigned int, len, iov->iov_len - offset);
-
-		offset = 0;
-		if (copy_from_user(kdata, base, copy))
-			return -EFAULT;
-		len -= copy;
-		kdata += copy;
-		iov++;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovecend);
-
-/*
  * And now for the all-in-one: copy and checksum from a user iovec
  * directly to a datagram
  * Calls to csum_partial but the last must be in 32 bit chunks
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 32d872eec7f5..559890b0f0a2 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -3059,11 +3059,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
 	} else {
+		struct neigh_table *tbl = p->tbl;
 		dev_name_source = "default";
-		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
-		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
-		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
-		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
+		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
+		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
+		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
+		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
 	}
 
 	if (handler) {
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 4e9619bca732..0485bf7f8f03 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -68,6 +68,7 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 
 	skb_push(skb, hdr_len);
 
+	skb_reset_transport_header(skb);
 	greh = (struct gre_base_hdr *)skb->data;
 	greh->flags = tnl_flags_to_gre_flags(tpi->flags);
 	greh->protocol = tpi->proto;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 79c3d947a481..42b7bcf8045b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -739,8 +739,6 @@ static void icmp_unreach(struct sk_buff *skb)
 			/* fall through */
 		case 0:
 			info = ntohs(icmph->un.frag.mtu);
-			if (!info)
-				goto out;
 		}
 		break;
 	case ICMP_SR_FAILED:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6748d420f714..db710b059bab 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1944,6 +1944,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
 	rtnl_lock();
 	in_dev = ip_mc_find_dev(net, imr);
+	if (!in_dev) {
+		ret = -ENODEV;
+		goto out;
+	}
 	ifindex = imr->imr_ifindex;
 	for (imlp = &inet->mc_list;
 	     (iml = rtnl_dereference(*imlp)) != NULL;
@@ -1961,16 +1965,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
 		*imlp = iml->next_rcu;
 
-		if (in_dev)
-			ip_mc_dec_group(in_dev, group);
+		ip_mc_dec_group(in_dev, group);
 		rtnl_unlock();
 		/* decrease mem now to avoid the memleak warning */
 		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
 		kfree_rcu(iml, rcu);
 		return 0;
 	}
-	if (!in_dev)
-		ret = -ENODEV;
+out:
 	rtnl_unlock();
 	return ret;
 }
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 0157a7af20a8..dd8c8c765799 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -169,6 +169,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 
 	hlist_for_each_entry_rcu(t, head, hash_node) {
 		if (remote != t->parms.iph.daddr ||
+		    t->parms.iph.saddr != 0 ||
 		    !(t->dev->flags & IFF_UP))
 			continue;
 
@@ -185,10 +186,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 	head = &itn->tunnels[hash];
 
 	hlist_for_each_entry_rcu(t, head, hash_node) {
-		if ((local != t->parms.iph.saddr &&
-		     (local != t->parms.iph.daddr ||
-		      !ipv4_is_multicast(local))) ||
-		    !(t->dev->flags & IFF_UP))
+		if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
+		    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
+			continue;
+
+		if (!(t->dev->flags & IFF_UP))
 			continue;
 
 		if (!ip_tunnel_key_match(&t->parms, flags, key))
@@ -205,6 +207,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 
 	hlist_for_each_entry_rcu(t, head, hash_node) {
 		if (t->parms.i_key != key ||
+		    t->parms.iph.saddr != 0 ||
+		    t->parms.iph.daddr != 0 ||
 		    !(t->dev->flags & IFF_UP))
 			continue;
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 082239ffe34a..3162ea923ded 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1010,7 +1010,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 	const struct iphdr *iph = (const struct iphdr *) skb->data;
 	struct flowi4 fl4;
 	struct rtable *rt;
-	struct dst_entry *dst;
+	struct dst_entry *odst = NULL;
 	bool new = false;
 
 	bh_lock_sock(sk);
@@ -1018,16 +1018,17 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 	if (!ip_sk_accept_pmtu(sk))
 		goto out;
 
-	rt = (struct rtable *) __sk_dst_get(sk);
+	odst = sk_dst_get(sk);
 
-	if (sock_owned_by_user(sk) || !rt) {
+	if (sock_owned_by_user(sk) || !odst) {
 		__ipv4_sk_update_pmtu(skb, sk, mtu);
 		goto out;
 	}
 
 	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
 
-	if (!__sk_dst_check(sk, 0)) {
+	rt = (struct rtable *)odst;
+	if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
 		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
 		if (IS_ERR(rt))
 			goto out;
@@ -1037,8 +1038,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 
 	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
 
-	dst = dst_check(&rt->dst, 0);
-	if (!dst) {
+	if (!dst_check(&rt->dst, 0)) {
 		if (new)
 			dst_release(&rt->dst);
 
@@ -1050,10 +1050,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 	}
 
 	if (new)
-		__sk_dst_set(sk, &rt->dst);
+		sk_dst_set(sk, &rt->dst);
 
 out:
 	bh_unlock_sock(sk);
+	dst_release(odst);
 }
 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index eb1dde37e678..9d2118e5fbc7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	if (unlikely(tp->repair)) {
 		if (tp->repair_queue == TCP_RECV_QUEUE) {
 			copied = tcp_send_rcvq(sk, msg, size);
-			goto out;
+			goto out_nopush;
 		}
 
 		err = -EINVAL;
@@ -1282,6 +1282,7 @@ wait_for_memory:
 out:
 	if (copied)
 		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
+out_nopush:
 	release_sock(sk);
 	return copied + copied_syn;
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2e16afba182c..7832d941dbcd 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1106,7 +1106,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 	}
 
 	/* D-SACK for already forgotten data... Do dumb counting. */
-	if (dup_sack && tp->undo_marker && tp->undo_retrans &&
+	if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
 	    !after(end_seq_0, prior_snd_una) &&
 	    after(end_seq_0, tp->undo_marker))
 		tp->undo_retrans--;
@@ -1187,7 +1187,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
 
 	/* Account D-SACK for retransmitted packet. */
 	if (dup_sack && (sacked & TCPCB_RETRANS)) {
-		if (tp->undo_marker && tp->undo_retrans &&
+		if (tp->undo_marker && tp->undo_retrans > 0 &&
 		    after(end_seq, tp->undo_marker))
 			tp->undo_retrans--;
 		if (sacked & TCPCB_SACKED_ACKED)
@@ -1893,7 +1893,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
 	tp->lost_out = 0;
 
 	tp->undo_marker = 0;
-	tp->undo_retrans = 0;
+	tp->undo_retrans = -1;
 }
 
 void tcp_clear_retrans(struct tcp_sock *tp)
@@ -2664,7 +2664,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 
 	tp->prior_ssthresh = 0;
 	tp->undo_marker = tp->snd_una;
-	tp->undo_retrans = tp->retrans_out;
+	tp->undo_retrans = tp->retrans_out ? : -1;
 
 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
 		if (!ece_ack)
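
The tcp_input.c hunks turn undo_retrans into a signed quantity with -1 as a "no retransmission seen yet" sentinel, which is why the tests become > 0. The initializer tp->retrans_out ? : -1 uses the GNU C conditional-with-omitted-operand extension: a ? : b evaluates to a when a is nonzero (evaluating a only once) and to b otherwise. A two-case illustration:

#include <assert.h>

int main(void)
{
	int retrans_out = 0;
	/* GNU C extension: x ? : y  ==  x ? x : y, with x evaluated once */
	int undo_retrans = retrans_out ? : -1;

	assert(undo_retrans == -1);	/* zero falls through to the sentinel */

	retrans_out = 3;
	undo_retrans = retrans_out ? : -1;
	assert(undo_retrans == 3);	/* nonzero is passed through */
	return 0;
}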
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 306dd5dead7d..8fcfc91964ec 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2526,8 +2526,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 		if (!tp->retrans_stamp)
 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
 
-		tp->undo_retrans += tcp_skb_pcount(skb);
-
 		/* snd_nxt is stored to detect loss of retransmitted segment,
 		 * see tcp_input.c tcp_sacktag_write_queue().
 		 */
@@ -2535,6 +2533,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	} else if (err != -EBUSY) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
 	}
+
+	if (tp->undo_retrans < 0)
+		tp->undo_retrans = 0;
+	tp->undo_retrans += tcp_skb_pcount(skb);
 	return err;
 }
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f6dfe525584f..668af516f094 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1587,8 +1587,11 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 			goto csum_error;
 
 
-	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+				 is_udplite);
 		goto drop;
+	}
 
 	rc = 0;
 
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 08b367c6b9cf..617f0958e164 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1301,8 +1301,17 @@ int igmp6_event_query(struct sk_buff *skb)
 	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
 	len -= skb_network_header_len(skb);
 
-	/* Drop queries with not link local source */
-	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
+	/* RFC3810 6.2
+	 * Upon reception of an MLD message that contains a Query, the node
+	 * checks if the source address of the message is a valid link-local
+	 * address, if the Hop Limit is set to 1, and if the Router Alert
+	 * option is present in the Hop-By-Hop Options header of the IPv6
+	 * packet.  If any of these checks fails, the packet is dropped.
+	 */
+	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
+	    ipv6_hdr(skb)->hop_limit != 1 ||
+	    !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
+	    IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
 		return -EINVAL;
 
 	idev = __in6_dev_get(skb->dev);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index c2bd28fd43e4..b4481df3d5fa 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -673,8 +673,11 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 			goto csum_error;
 	}
 
-	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+		UDP6_INC_STATS_BH(sock_net(sk),
+				  UDP_MIB_RCVBUFERRORS, is_udplite);
 		goto drop;
+	}
 
 	skb_dst_drop(skb);
 
@@ -689,6 +692,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	bh_unlock_sock(sk);
 
 	return rc;
+
 csum_error:
 	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 950909f04ee6..13752d96275e 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1365,7 +1365,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
 	int err;
 
 	if (level != SOL_PPPOL2TP)
-		return udp_prot.setsockopt(sk, level, optname, optval, optlen);
+		return -EINVAL;
 
 	if (optlen < sizeof(int))
 		return -EINVAL;
@@ -1491,7 +1491,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
 	struct pppol2tp_session *ps;
 
 	if (level != SOL_PPPOL2TP)
-		return udp_prot.getsockopt(sk, level, optname, optval, optlen);
+		return -EINVAL;
 
 	if (get_user(len, optlen))
 		return -EFAULT;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index e8c9f9704216..837ff9a57503 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -636,7 +636,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
 	while (nlk->cb_running && netlink_dump_space(nlk)) {
 		err = netlink_dump(sk);
 		if (err < 0) {
-			sk->sk_err = err;
+			sk->sk_err = -err;
 			sk->sk_error_report(sk);
 			break;
 		}
@@ -2480,7 +2480,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
 		ret = netlink_dump(sk);
 		if (ret) {
-			sk->sk_err = ret;
+			sk->sk_err = -ret;
 			sk->sk_error_report(sk);
 		}
 	}
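
Both af_netlink.c hunks fix the same sign bug: in-kernel functions such as netlink_dump() return a negative errno, while sk->sk_err holds a positive errno (it is what getsockopt(SO_ERROR) later reports), so the return value must be negated before being stored. A compact illustration of the convention, with a hypothetical report_socket_error() standing in for the socket plumbing:

#include <assert.h>
#include <errno.h>

static int sk_err;	/* like sk->sk_err: 0 or a positive errno */

/* Hypothetical stand-in for the sk_err/sk_error_report() pattern. */
static void report_socket_error(int ret)
{
	assert(ret < 0);	/* kernel-style negative errno return */
	sk_err = -ret;		/* stored positive, as user space expects */
}

int main(void)
{
	report_socket_error(-ENOBUFS);	/* e.g. a failed dump */
	assert(sk_err == ENOBUFS);
	return 0;
}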
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index c36856a457ca..e70d8b18e962 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -551,6 +551,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 
 		case OVS_ACTION_ATTR_SAMPLE:
 			err = sample(dp, skb, a);
+			if (unlikely(err)) /* skb already freed. */
+				return err;
 			break;
 		}
 
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index fe95b6c224a7..493b5141a618 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
@@ -276,7 +276,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
 	OVS_CB(skb)->flow = flow;
 	OVS_CB(skb)->pkt_key = &key;
 
-	ovs_flow_stats_update(OVS_CB(skb)->flow, skb);
+	ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb);
 	ovs_execute_actions(dp, skb);
 	stats_counter = &stats->n_hit;
 
@@ -889,8 +889,11 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
 		}
 		/* The unmasked key has to be the same for flow updates. */
 		if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
-			error = -EEXIST;
-			goto err_unlock_ovs;
+			flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+			if (!flow) {
+				error = -ENOENT;
+				goto err_unlock_ovs;
+			}
 		}
 		/* Update actions. */
 		old_acts = ovsl_dereference(flow->sf_acts);
@@ -981,16 +984,12 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
 		goto err_unlock_ovs;
 	}
 	/* Check that the flow exists. */
-	flow = ovs_flow_tbl_lookup(&dp->table, &key);
+	flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
 	if (unlikely(!flow)) {
 		error = -ENOENT;
 		goto err_unlock_ovs;
 	}
-	/* The unmasked key has to be the same for flow updates. */
-	if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
-		error = -EEXIST;
-		goto err_unlock_ovs;
-	}
+
 	/* Update actions, if present. */
 	if (likely(acts)) {
 		old_acts = ovsl_dereference(flow->sf_acts);
@@ -1063,8 +1062,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	flow = ovs_flow_tbl_lookup(&dp->table, &key);
-	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
+	flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+	if (!flow) {
 		err = -ENOENT;
 		goto unlock;
 	}
@@ -1113,8 +1112,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	flow = ovs_flow_tbl_lookup(&dp->table, &key);
-	if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
+	flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+	if (unlikely(!flow)) {
 		err = -ENOENT;
 		goto unlock;
 	}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 334751cb1528..d07ab538fc9d 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -61,10 +61,10 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 
 #define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
 
-void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
+void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
+			   struct sk_buff *skb)
 {
 	struct flow_stats *stats;
-	__be16 tcp_flags = flow->key.tp.flags;
 	int node = numa_node_id();
 
 	stats = rcu_dereference(flow->stats[node]);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index ac395d2cd821..5e5aaed3a85b 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
@@ -180,7 +180,8 @@ struct arp_eth_header {
 	unsigned char       ar_tip[4];	/* target IP address */
 } __packed;
 
-void ovs_flow_stats_update(struct sw_flow *, struct sk_buff *);
+void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
+			   struct sk_buff *);
 void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
 			unsigned long *used, __be16 *tcp_flags);
 void ovs_flow_stats_clear(struct sw_flow *);
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 574c3abc9b30..cf2d853646f0 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -456,6 +456,22 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
 }
 
+struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+					  struct sw_flow_match *match)
+{
+	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+	struct sw_flow_mask *mask;
+	struct sw_flow *flow;
+
+	/* Always called under ovs-mutex. */
+	list_for_each_entry(mask, &tbl->mask_list, list) {
+		flow = masked_flow_lookup(ti, match->key, mask);
+		if (flow && ovs_flow_cmp_unmasked_key(flow, match)) /* Found */
+			return flow;
+	}
+	return NULL;
+}
+
 int ovs_flow_tbl_num_masks(const struct flow_table *table)
 {
 	struct sw_flow_mask *mask;
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index ca8a5820f615..5918bff7f3f6 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -76,7 +76,8 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
 				    u32 *n_mask_hit);
 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
 				    const struct sw_flow_key *);
-
+struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+					  struct sw_flow_match *match);
 bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
 			       struct sw_flow_match *match);
 
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 35ec4fed09e2..f49148a07da2 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -110,6 +110,22 @@ static int gre_rcv(struct sk_buff *skb,
 	return PACKET_RCVD;
 }
 
+/* Called with rcu_read_lock and BH disabled. */
+static int gre_err(struct sk_buff *skb, u32 info,
+		   const struct tnl_ptk_info *tpi)
+{
+	struct ovs_net *ovs_net;
+	struct vport *vport;
+
+	ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
+	vport = rcu_dereference(ovs_net->vport_net.gre_vport);
+
+	if (unlikely(!vport))
+		return PACKET_REJECT;
+	else
+		return PACKET_RCVD;
+}
+
 static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
 	struct net *net = ovs_dp_get_net(vport->dp);
@@ -186,6 +202,7 @@ error:
 
 static struct gre_cisco_protocol gre_protocol = {
 	.handler        = gre_rcv,
+	.err_handler    = gre_err,
 	.priority       = 1,
 };
 
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 85c64658bd0b..b6842fdb53d4 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -366,9 +366,10 @@ fail:
 * specification [SCTP] and any extensions for a list of possible
 * error formats.
 */
-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
-	const struct sctp_association *asoc, struct sctp_chunk *chunk,
-	__u16 flags, gfp_t gfp)
+struct sctp_ulpevent *
+sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
+				struct sctp_chunk *chunk, __u16 flags,
+				gfp_t gfp)
 {
 	struct sctp_ulpevent *event;
 	struct sctp_remote_error *sre;
@@ -387,8 +388,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
 	/* Copy the skb to a new skb with room for us to prepend
 	 * notification with.
 	 */
-	skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
-			      0, gfp);
+	skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
 
 	/* Pull off the rest of the cause TLV from the chunk. */
 	skb_pull(chunk->skb, elen);
@@ -399,62 +399,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
 	event = sctp_skb2event(skb);
 	sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
 
-	sre = (struct sctp_remote_error *)
-		skb_push(skb, sizeof(struct sctp_remote_error));
+	sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
 
 	/* Trim the buffer to the right length. */
-	skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
+	skb_trim(skb, sizeof(*sre) + elen);
 
-	/* Socket Extensions for SCTP
-	 * 5.3.1.3 SCTP_REMOTE_ERROR
-	 *
-	 * sre_type:
-	 *   It should be SCTP_REMOTE_ERROR.
-	 */
+	/* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
+	memset(sre, 0, sizeof(*sre));
 	sre->sre_type = SCTP_REMOTE_ERROR;
-
-	/*
-	 * Socket Extensions for SCTP
-	 * 5.3.1.3 SCTP_REMOTE_ERROR
-	 *
-	 * sre_flags: 16 bits (unsigned integer)
-	 *   Currently unused.
-	 */
 	sre->sre_flags = 0;
-
-	/* Socket Extensions for SCTP
-	 * 5.3.1.3 SCTP_REMOTE_ERROR
-	 *
-	 * sre_length: sizeof (__u32)
-	 *
-	 * This field is the total length of the notification data,
-	 * including the notification header.
-	 */
 	sre->sre_length = skb->len;
-
-	/* Socket Extensions for SCTP
-	 * 5.3.1.3 SCTP_REMOTE_ERROR
-	 *
-	 * sre_error: 16 bits (unsigned integer)
-	 * This value represents one of the Operational Error causes defined in
-	 * the SCTP specification, in network byte order.
-	 */
 	sre->sre_error = cause;
-
-	/* Socket Extensions for SCTP
-	 * 5.3.1.3 SCTP_REMOTE_ERROR
-	 *
-	 * sre_assoc_id: sizeof (sctp_assoc_t)
-	 *
-	 * The association id field, holds the identifier for the association.
-	 * All notifications for a given association have the same association
-	 * identifier.  For TCP style socket, this field is ignored.
-	 */
 	sctp_ulpevent_set_owner(event, asoc);
 	sre->sre_assoc_id = sctp_assoc2id(asoc);
 
 	return event;
-
 fail:
 	return NULL;
 }
@@ -899,7 +858,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
 	return notification->sn_header.sn_type;
 }
 
-/* Copy out the sndrcvinfo into a msghdr. */
+/* RFC6458, Section 5.3.2. SCTP Header Information Structure
+ * (SCTP_SNDRCV, DEPRECATED)
+ */
 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
 				   struct msghdr *msghdr)
 {
@@ -908,74 +869,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
 	if (sctp_ulpevent_is_notification(event))
 		return;
 
-	/* Sockets API Extensions for SCTP
-	 * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
-	 *
-	 * sinfo_stream: 16 bits (unsigned integer)
-	 *
-	 * For recvmsg() the SCTP stack places the message's stream number in
-	 * this value.
-	 */
+	memset(&sinfo, 0, sizeof(sinfo));
 	sinfo.sinfo_stream = event->stream;
-	/* sinfo_ssn: 16 bits (unsigned integer)
-	 *
-	 * For recvmsg() this value contains the stream sequence number that
-	 * the remote endpoint placed in the DATA chunk.  For fragmented
-	 * messages this is the same number for all deliveries of the message
-	 * (if more than one recvmsg() is needed to read the message).
-	 */
 	sinfo.sinfo_ssn = event->ssn;
-	/* sinfo_ppid: 32 bits (unsigned integer)
-	 *
-	 * In recvmsg() this value is
-	 * the same information that was passed by the upper layer in the peer
-	 * application.  Please note that byte order issues are NOT accounted
-	 * for and this information is passed opaquely by the SCTP stack from
-	 * one end to the other.
-	 */
 	sinfo.sinfo_ppid = event->ppid;
-	/* sinfo_flags: 16 bits (unsigned integer)
-	 *
-	 * This field may contain any of the following flags and is composed of
-	 * a bitwise OR of these values.
-	 *
-	 * recvmsg() flags:
-	 *
-	 * SCTP_UNORDERED - This flag is present when the message was sent
-	 *                  non-ordered.
-	 */
 	sinfo.sinfo_flags = event->flags;
-	/* sinfo_tsn: 32 bit (unsigned integer)
-	 *
-	 * For the receiving side, this field holds a TSN that was
-	 * assigned to one of the SCTP Data Chunks.
-	 */
 	sinfo.sinfo_tsn = event->tsn;
-	/* sinfo_cumtsn: 32 bit (unsigned integer)
-	 *
-	 * This field will hold the current cumulative TSN as
-	 * known by the underlying SCTP layer.  Note this field is
-	 * ignored when sending and only valid for a receive
-	 * operation when sinfo_flags are set to SCTP_UNORDERED.
-	 */
 	sinfo.sinfo_cumtsn = event->cumtsn;
-	/* sinfo_assoc_id: sizeof (sctp_assoc_t)
-	 *
-	 * The association handle field, sinfo_assoc_id, holds the identifier
-	 * for the association announced in the COMMUNICATION_UP notification.
-	 * All notifications for a given association have the same identifier.
-	 * Ignored for one-to-one style sockets.
-	 */
 	sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
-
-	/* context value that is set via SCTP_CONTEXT socket option. */
+	/* Context value that is set via SCTP_CONTEXT socket option. */
 	sinfo.sinfo_context = event->asoc->default_rcv_context;
-
 	/* These fields are not used while receiving. */
 	sinfo.sinfo_timetolive = 0;
 
 	put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
-		 sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
+		 sizeof(sinfo), &sinfo);
 }
 
 /* Do accounting for bytes received and hold a reference to the association
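
Besides dropping the quoted spec text, the ulpevent.c rewrite adds a memset() before filling sre and switches the size arguments to sizeof(*sre)/sizeof(sinfo). Zeroing a structure that is about to be copied to user space (here via put_cmsg()) ensures padding bytes and any fields the code does not set never carry stale kernel memory. A minimal sketch of that hygiene rule, using a hypothetical notification struct, not the sctp_remote_error layout:

#include <string.h>

/* Hypothetical notification with implicit padding after 'type'. */
struct note {
	unsigned short type;
	/* 2 padding bytes on common ABIs live here */
	unsigned int length;
};

/* Zero first, then assign: padding and unset fields are never
 * whatever happened to be on the stack. */
static void fill_note(struct note *n, unsigned int len)
{
	memset(n, 0, sizeof(*n));
	n->type = 1;
	n->length = len;
}

int main(void)
{
	struct note n;

	fill_note(&n, sizeof(n));	/* safe to copy out as-is now */
	return (int)n.type - 1;
}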
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 26631679a1fa..55c6c9d3e1ce 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -559,6 +559,7 @@ receive:
 
 			buf = node->bclink.deferred_head;
 			node->bclink.deferred_head = buf->next;
+			buf->next = NULL;
 			node->bclink.deferred_size--;
 			goto receive;
 		}
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 6ec958401f78..ce6d929d66d2 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -96,9 +96,11 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
 }
 
 /* tipc_buf_append(): Append a buffer to the fragment list of another buffer
- * Let first buffer become head buffer
- * Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0
- * Leaves headbuf pointer at NULL if failure
+ * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
+ *            out: set when successful non-complete reassembly, otherwise NULL
+ * @*buf:     in:  the buffer to append. Always defined
+ *            out: head buf after successful complete reassembly, otherwise NULL
+ * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
@@ -117,6 +119,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 			goto out_free;
 		head = *headbuf = frag;
 		skb_frag_list_init(head);
+		*buf = NULL;
 		return 0;
 	}
 	if (!head)
@@ -145,6 +148,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 out_free:
 	pr_warn_ratelimited("Unable to build fragment list\n");
 	kfree_skb(*buf);
+	kfree_skb(*headbuf);
+	*buf = *headbuf = NULL;
 	return 0;
 }
 
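
The new comment block pins down tipc_buf_append()'s in/out contract: the caller keeps a headbuf cursor owned by the function between calls, and on both completion and failure every pointer is reset so nothing can be freed twice. A sketch of how a caller drives such an API; tipc_buf_append_stub() below is a toy two-fragment model of the documented contract, not TIPC's implementation:

#include <stddef.h>

struct sk_buff { struct sk_buff *next; };

/* Toy model of the contract: first call buffers the fragment and
 * returns 0; second call returns 1 with *buf pointing at the head. */
static int tipc_buf_append_stub(struct sk_buff **headbuf,
				struct sk_buff **buf)
{
	if (*headbuf == NULL) {		/* first fragment becomes head */
		*headbuf = *buf;
		*buf = NULL;
		return 0;
	}
	*buf = *headbuf;		/* reassembly complete */
	*headbuf = NULL;
	return 1;
}

int main(void)
{
	struct sk_buff a = { NULL }, b = { NULL };
	struct sk_buff *head = NULL;	/* NULL before the first fragment */
	struct sk_buff *frag = &a;

	if (tipc_buf_append_stub(&head, &frag))
		return 1;		/* not expected on the first frag */
	frag = &b;
	if (!tipc_buf_append_stub(&head, &frag))
		return 1;
	return frag == &a ? 0 : 1;	/* frag now names the head buffer */
}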