Diffstat (limited to 'net')
-rw-r--r--  net/batman-adv/fragmentation.c   |   4
-rw-r--r--  net/batman-adv/gateway_client.c  |   2
-rw-r--r--  net/bluetooth/6lowpan.c          |   1
-rw-r--r--  net/bluetooth/bnep/core.c        |   3
-rw-r--r--  net/bluetooth/cmtp/core.c        |   3
-rw-r--r--  net/bluetooth/hci_event.c        |  16
-rw-r--r--  net/bluetooth/hidp/core.c        |   3
-rw-r--r--  net/core/dev.c                   | 175
-rw-r--r--  net/core/skbuff.c                |   1
-rw-r--r--  net/ipv4/geneve.c                |   6
-rw-r--r--  net/ipv6/tcp_ipv6.c              |  45
-rw-r--r--  net/mpls/mpls_gso.c              |   5
-rw-r--r--  net/netfilter/nfnetlink.c        |   2
-rw-r--r--  net/netlink/af_netlink.c         |  38
-rw-r--r--  net/netlink/af_netlink.h         |   8
-rw-r--r--  net/netlink/genetlink.c          |  56
-rw-r--r--  net/openvswitch/actions.c        |   3
-rw-r--r--  net/openvswitch/datapath.c       |   3
-rw-r--r--  net/openvswitch/flow_netlink.c   |  13
-rw-r--r--  net/openvswitch/vport-geneve.c   |   3
-rw-r--r--  net/openvswitch/vport-gre.c      |  18
-rw-r--r--  net/openvswitch/vport-vxlan.c    |   2
-rw-r--r--  net/openvswitch/vport.c          |   5
-rw-r--r--  net/packet/af_packet.c           |  11
24 files changed, 272 insertions(+), 154 deletions(-)
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index fc1835c6bb40..00f9e144cc97 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -251,7 +251,7 @@ batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
 	kfree(entry);
 
 	/* Make room for the rest of the fragments. */
-	if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
+	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
 		kfree_skb(skb_out);
 		skb_out = NULL;
 		goto free;
@@ -434,7 +434,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
 	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
 	 */
 	mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
-	max_fragment_size = (mtu - header_size - ETH_HLEN);
+	max_fragment_size = mtu - header_size;
 	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
 
 	/* Don't even try to fragment, if we need more than 16 fragments */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 90cff585b37d..e0bcf9e84273 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -810,7 +810,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
 		goto out;
 
 	gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
-	if (!gw_node->bandwidth_down == 0)
+	if (!gw_node)
 		goto out;
 
 	switch (atomic_read(&bat_priv->gw_mode)) {
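
The removed test above, !gw_node->bandwidth_down == 0, has two problems: it dereferences gw_node before checking it for NULL, and ! binds tighter than ==, so it actually means gw_node->bandwidth_down != 0. A minimal userspace sketch of the precedence pitfall (hypothetical variable, not batman-adv code):

#include <stdio.h>

int main(void)
{
	unsigned int bandwidth_down = 0;

	/* "!x == 0" parses as "(!x) == 0", i.e. "x != 0"; almost
	 * certainly not what was meant, and newer gcc flags it with
	 * -Wlogical-not-parentheses.
	 */
	if (!bandwidth_down == 0)
		printf("branch taken when bandwidth_down != 0\n");
	else
		printf("branch taken when bandwidth_down == 0\n");
	return 0;
}

With bandwidth_down == 0 this prints the else branch, the opposite of what a naive reading suggests; the patch sidesteps the whole question by testing the pointer itself.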
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 76617be1e797..c989253737f0 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -390,7 +390,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
 
 drop:
 	dev->stats.rx_dropped++;
-	kfree_skb(skb);
 	return NET_RX_DROP;
 }
 
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 85bcc21e84d2..ce82722d049b 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -533,6 +533,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
 
 	BT_DBG("");
 
+	if (!l2cap_is_socket(sock))
+		return -EBADFD;
+
 	baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
 	baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
 
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 67fe5e84e68f..278a194e6af4 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -334,6 +334,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
 
 	BT_DBG("");
 
+	if (!l2cap_is_socket(sock))
+		return -EBADFD;
+
 	session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
 	if (!session)
 		return -ENOMEM;
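
The bnep and cmtp hunks add the same guard: the ioctl handler receives an arbitrary socket from userspace and previously treated it as an L2CAP socket unconditionally. The defensive shape of the fix, sketched in standalone C with a sockaddr family check playing the role of l2cap_is_socket() (an assumed analogy, not Bluetooth code):

#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Reject early instead of downcasting blindly and reading garbage. */
static int describe_ipv4(const struct sockaddr *sa, char *buf, size_t len)
{
	const struct sockaddr_in *sin;

	if (sa->sa_family != AF_INET)	/* analogous to !l2cap_is_socket() */
		return -1;		/* analogous to returning -EBADFD */

	sin = (const struct sockaddr_in *)sa;
	return inet_ntop(AF_INET, &sin->sin_addr, buf, len) ? 0 : -1;
}

int main(void)
{
	struct sockaddr_in sin = { .sin_family = AF_INET };
	char buf[INET_ADDRSTRLEN];

	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	if (describe_ipv4((struct sockaddr *)&sin, buf, sizeof(buf)) == 0)
		printf("%s\n", buf);
	return 0;
}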
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 39a5c8a01726..3f2e8b830cbd 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -242,7 +242,8 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
 	if (rp->status)
 		return;
 
-	if (test_bit(HCI_SETUP, &hdev->dev_flags))
+	if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+	    test_bit(HCI_CONFIG, &hdev->dev_flags))
 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
 }
 
@@ -509,7 +510,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
 	if (rp->status)
 		return;
 
-	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+	if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+	    test_bit(HCI_CONFIG, &hdev->dev_flags)) {
 		hdev->hci_ver = rp->hci_ver;
 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
 		hdev->lmp_ver = rp->lmp_ver;
@@ -528,7 +530,8 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev,
 	if (rp->status)
 		return;
 
-	if (test_bit(HCI_SETUP, &hdev->dev_flags))
+	if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+	    test_bit(HCI_CONFIG, &hdev->dev_flags))
 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
 }
 
@@ -2194,7 +2197,12 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		return;
 	}
 
-	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
+	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
+	 * connection. These features are only touched through mgmt so
+	 * only do the checks if HCI_MGMT is set.
+	 */
+	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+	    !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
 	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
 				    BDADDR_BREDR)) {
 		hci_reject_conn(hdev, &ev->bdaddr);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index cc25d0b74b36..07348e142f16 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -1314,13 +1314,14 @@ int hidp_connection_add(struct hidp_connadd_req *req,
 {
 	struct hidp_session *session;
 	struct l2cap_conn *conn;
-	struct l2cap_chan *chan = l2cap_pi(ctrl_sock->sk)->chan;
+	struct l2cap_chan *chan;
 	int ret;
 
 	ret = hidp_verify_sockets(ctrl_sock, intr_sock);
 	if (ret)
 		return ret;
 
+	chan = l2cap_pi(ctrl_sock->sk)->chan;
 	conn = NULL;
 	l2cap_chan_lock(chan);
 	if (chan->conn)
diff --git a/net/core/dev.c b/net/core/dev.c
index f411c28d0a66..683d493aa1bf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1694,6 +1694,7 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 
 	skb_scrub_packet(skb, true);
 	skb->protocol = eth_type_trans(skb, dev);
+	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
 	return 0;
 }
@@ -2522,7 +2523,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 /* If MPLS offload request, verify we are testing hardware MPLS features
  * instead of standard features for the netdev.
  */
-#ifdef CONFIG_NET_MPLS_GSO
+#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
 static netdev_features_t net_mpls_features(struct sk_buff *skb,
 					   netdev_features_t features,
 					   __be16 type)
@@ -2562,7 +2563,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 
 netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
-	const struct net_device *dev = skb->dev;
+	struct net_device *dev = skb->dev;
 	netdev_features_t features = dev->features;
 	u16 gso_segs = skb_shinfo(skb)->gso_segs;
 	__be16 protocol = skb->protocol;
@@ -2570,11 +2571,21 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 	if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
 		features &= ~NETIF_F_GSO_MASK;
 
-	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
-		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-		protocol = veh->h_vlan_encapsulated_proto;
-	} else if (!vlan_tx_tag_present(skb)) {
-		return harmonize_features(skb, features);
+	/* If encapsulation offload request, verify we are testing
+	 * hardware encapsulation features instead of standard
+	 * features for the netdev
+	 */
+	if (skb->encapsulation)
+		features &= dev->hw_enc_features;
+
+	if (!vlan_tx_tag_present(skb)) {
+		if (unlikely(protocol == htons(ETH_P_8021Q) ||
+			     protocol == htons(ETH_P_8021AD))) {
+			struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+			protocol = veh->h_vlan_encapsulated_proto;
+		} else {
+			goto finalize;
+		}
 	}
 
 	features = netdev_intersect_features(features,
@@ -2591,6 +2602,11 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 					 NETIF_F_HW_VLAN_CTAG_TX |
 					 NETIF_F_HW_VLAN_STAG_TX);
 
+finalize:
+	if (dev->netdev_ops->ndo_features_check)
+		features &= dev->netdev_ops->ndo_features_check(skb, dev,
+								features);
+
 	return harmonize_features(skb, features);
 }
 EXPORT_SYMBOL(netif_skb_features);
@@ -2661,19 +2677,12 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 	if (unlikely(!skb))
 		goto out_null;
 
-	/* If encapsulation offload request, verify we are testing
-	 * hardware encapsulation features instead of standard
-	 * features for the netdev
-	 */
-	if (skb->encapsulation)
-		features &= dev->hw_enc_features;
-
 	if (netif_needs_gso(dev, skb, features)) {
 		struct sk_buff *segs;
 
 		segs = skb_gso_segment(skb, features);
 		if (IS_ERR(segs)) {
-			segs = NULL;
+			goto out_kfree_skb;
 		} else if (segs) {
 			consume_skb(skb);
 			skb = segs;
@@ -4557,6 +4566,68 @@ void netif_napi_del(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(netif_napi_del);
 
+static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+{
+	void *have;
+	int work, weight;
+
+	list_del_init(&n->poll_list);
+
+	have = netpoll_poll_lock(n);
+
+	weight = n->weight;
+
+	/* This NAPI_STATE_SCHED test is for avoiding a race
+	 * with netpoll's poll_napi(). Only the entity which
+	 * obtains the lock and sees NAPI_STATE_SCHED set will
+	 * actually make the ->poll() call. Therefore we avoid
+	 * accidentally calling ->poll() when NAPI is not scheduled.
+	 */
+	work = 0;
+	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
+		work = n->poll(n, weight);
+		trace_napi_poll(n);
+	}
+
+	WARN_ON_ONCE(work > weight);
+
+	if (likely(work < weight))
+		goto out_unlock;
+
+	/* Drivers must not modify the NAPI state if they
+	 * consume the entire weight. In such cases this code
+	 * still "owns" the NAPI instance and therefore can
+	 * move the instance around on the list at-will.
+	 */
+	if (unlikely(napi_disable_pending(n))) {
+		napi_complete(n);
+		goto out_unlock;
+	}
+
+	if (n->gro_list) {
+		/* flush too old packets
+		 * If HZ < 1000, flush all packets.
+		 */
+		napi_gro_flush(n, HZ >= 1000);
+	}
+
+	/* Some drivers may have called napi_schedule
+	 * prior to exhausting their budget.
+	 */
+	if (unlikely(!list_empty(&n->poll_list))) {
+		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
+			     n->dev ? n->dev->name : "backlog");
+		goto out_unlock;
+	}
+
+	list_add_tail(&n->poll_list, repoll);
+
+out_unlock:
+	netpoll_poll_unlock(have);
+
+	return work;
+}
+
 static void net_rx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -4564,74 +4635,34 @@ static void net_rx_action(struct softirq_action *h)
 	int budget = netdev_budget;
 	LIST_HEAD(list);
 	LIST_HEAD(repoll);
-	void *have;
 
 	local_irq_disable();
 	list_splice_init(&sd->poll_list, &list);
 	local_irq_enable();
 
-	while (!list_empty(&list)) {
+	for (;;) {
 		struct napi_struct *n;
-		int work, weight;
-
-		/* If softirq window is exhausted then punt.
-		 * Allow this to run for 2 jiffies since which will allow
-		 * an average latency of 1.5/HZ.
-		 */
-		if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
-			goto softnet_break;
-
-
-		n = list_first_entry(&list, struct napi_struct, poll_list);
-		list_del_init(&n->poll_list);
 
-		have = netpoll_poll_lock(n);
-
-		weight = n->weight;
-
-		/* This NAPI_STATE_SCHED test is for avoiding a race
-		 * with netpoll's poll_napi(). Only the entity which
-		 * obtains the lock and sees NAPI_STATE_SCHED set will
-		 * actually make the ->poll() call. Therefore we avoid
-		 * accidentally calling ->poll() when NAPI is not scheduled.
-		 */
-		work = 0;
-		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
-			work = n->poll(n, weight);
-			trace_napi_poll(n);
+		if (list_empty(&list)) {
+			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
+				return;
+			break;
 		}
 
-		WARN_ON_ONCE(work > weight);
-
-		budget -= work;
+		n = list_first_entry(&list, struct napi_struct, poll_list);
+		budget -= napi_poll(n, &repoll);
 
-		/* Drivers must not modify the NAPI state if they
-		 * consume the entire weight. In such cases this code
-		 * still "owns" the NAPI instance and therefore can
-		 * move the instance around on the list at-will.
+		/* If softirq window is exhausted then punt.
+		 * Allow this to run for 2 jiffies since which will allow
+		 * an average latency of 1.5/HZ.
 		 */
-		if (unlikely(work == weight)) {
-			if (unlikely(napi_disable_pending(n))) {
-				napi_complete(n);
-			} else {
-				if (n->gro_list) {
-					/* flush too old packets
-					 * If HZ < 1000, flush all packets.
-					 */
-					napi_gro_flush(n, HZ >= 1000);
-				}
-				list_add_tail(&n->poll_list, &repoll);
-			}
+		if (unlikely(budget <= 0 ||
+			     time_after_eq(jiffies, time_limit))) {
+			sd->time_squeeze++;
+			break;
 		}
-
-		netpoll_poll_unlock(have);
 	}
 
-	if (!sd_has_rps_ipi_waiting(sd) &&
-	    list_empty(&list) &&
-	    list_empty(&repoll))
-		return;
-out:
 	local_irq_disable();
 
 	list_splice_tail_init(&sd->poll_list, &list);
@@ -4641,12 +4672,6 @@ out:
 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 
 	net_rps_action_and_irq_enable(sd);
-
-	return;
-
-softnet_break:
-	sd->time_squeeze++;
-	goto out;
 }
 
 struct netdev_adjacent {
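
The net_rx_action() rewrite above is mostly code motion: the per-NAPI work moves into napi_poll(), leaving the softirq handler with only list management and the two global limits, the packet budget and the 2-jiffy time window. A self-contained sketch of that loop structure, with invented names standing in for the NAPI machinery:

#include <stdio.h>

#define NSRC	3
#define WEIGHT	4	/* most items one source may handle per poll */

struct source {
	int pending;		/* items still queued on this source */
};

/* Analogue of napi_poll(): do at most 'weight' units of work. */
static int poll_one(struct source *s, int weight)
{
	int work = s->pending < weight ? s->pending : weight;

	s->pending -= work;
	return work;
}

int main(void)
{
	struct source src[NSRC] = { { 5 }, { 2 }, { 9 } };
	int budget = 10;	/* analogue of netdev_budget */
	int rounds = 0;

	/* Analogue of the new net_rx_action() loop: keep polling until
	 * every source is idle or the shared budget runs out; leftover
	 * work would be rescheduled (the kernel re-raises the softirq).
	 */
	while (budget > 0) {
		int idle = 1, i;

		for (i = 0; i < NSRC && budget > 0; i++) {
			if (!src[i].pending)
				continue;
			idle = 0;
			budget -= poll_one(&src[i], WEIGHT);
		}
		rounds++;
		if (idle)
			break;
	}
	printf("rounds=%d budget_left=%d still_pending=%d\n", rounds,
	       budget, src[0].pending + src[1].pending + src[2].pending);
	return 0;
}

Keeping the fairness cap (WEIGHT) inside the helper and the global caps (budget, deadline) in the caller is the same split the patch makes; it also lets the kernel drop the interrupt masking that the old single-loop version needed.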
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ae13ef6b3ea7..395c15b82087 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4148,6 +4148,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
 	skb->ignore_df = 0;
 	skb_dst_drop(skb);
 	skb->mark = 0;
+	skb_init_secmark(skb);
 	secpath_reset(skb);
 	nf_reset(skb);
 	nf_reset_trace(skb);
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 95e47c97585e..394a200f93c1 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -122,14 +122,18 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
 	int err;
 
 	skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
 
 	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
 			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
 			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
 
 	err = skb_cow_head(skb, min_headroom);
-	if (unlikely(err))
+	if (unlikely(err)) {
+		kfree_skb(skb);
 		return err;
+	}
 
 	skb = vlan_hwaccel_push_inside(skb);
 	if (unlikely(!skb))
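
The geneve fix checks udp_tunnel_handle_offloads() for an error before using its return value; like many kernel helpers it returns either a usable pointer or an errno encoded in the pointer itself. A userspace re-creation of that ERR_PTR convention (same helper names as the kernel's, but a standalone sketch, not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>

/* Errors live in the top (never-valid) address range, so one return
 * value can carry either a real pointer or a negative errno.
 */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *make_buffer(size_t len)
{
	void *p;

	if (len == 0)
		return ERR_PTR(-EINVAL);
	p = malloc(len);
	if (!p)
		return ERR_PTR(-ENOMEM);
	return p;
}

int main(void)
{
	void *buf = make_buffer(0);

	if (IS_ERR(buf)) {		/* mirrors "if (IS_ERR(skb))" */
		printf("error: %ld\n", PTR_ERR(buf));
		return 1;
	}
	free(buf);
	return 0;
}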
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5ff87805258e..9c0b54e87b47 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1387,6 +1387,28 @@ ipv6_pktoptions:
 	return 0;
 }
 
+static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
+			   const struct tcphdr *th)
+{
+	/* This is tricky: we move IP6CB at its correct location into
+	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
+	 * _decode_session6() uses IP6CB().
+	 * barrier() makes sure compiler won't play aliasing games.
+	 */
+	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
+		sizeof(struct inet6_skb_parm));
+	barrier();
+
+	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
+	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
+				    skb->len - th->doff*4);
+	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
+	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
+	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
+	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
+	TCP_SKB_CB(skb)->sacked = 0;
+}
+
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
 	const struct tcphdr *th;
@@ -1418,24 +1440,9 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 
 	th = tcp_hdr(skb);
 	hdr = ipv6_hdr(skb);
-	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
-	 * barrier() makes sure compiler wont play fool^Waliasing games.
-	 */
-	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
-		sizeof(struct inet6_skb_parm));
-	barrier();
-
-	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
-	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
-				    skb->len - th->doff*4);
-	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
-	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
-	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
-	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
-	TCP_SKB_CB(skb)->sacked = 0;
 
 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
-				tcp_v6_iif(skb));
+				inet6_iif(skb));
 	if (!sk)
 		goto no_tcp_socket;
 
@@ -1451,6 +1458,8 @@ process:
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
 
+	tcp_v6_fill_cb(skb, hdr, th);
+
 #ifdef CONFIG_TCP_MD5SIG
 	if (tcp_v6_inbound_md5_hash(sk, skb))
 		goto discard_and_relse;
@@ -1482,6 +1491,8 @@ no_tcp_socket:
 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
 		goto discard_it;
 
+	tcp_v6_fill_cb(skb, hdr, th);
+
 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
 csum_error:
 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
@@ -1505,6 +1516,8 @@ do_time_wait:
 		goto discard_it;
 	}
 
+	tcp_v6_fill_cb(skb, hdr, th);
+
 	if (skb->len < (th->doff<<2)) {
 		inet_twsk_put(inet_twsk(sk));
 		goto bad_packet;
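
tcp_v6_fill_cb() exists because TCP's control block overlays the same skb->cb[] bytes that IP6CB() points at, and xfrm6_policy_check() still reads the IPv6 view; filling the TCP view first corrupts what the policy check sees. The hunks therefore call the helper on every receive path, but only after the check. A toy demonstration of that aliasing hazard (generic structs, not the kernel layouts):

#include <stdio.h>

/* Like skb->cb[]: one scratch area reused by consecutive layers. */
static union {
	struct { int ifindex; } l3;		/* the IP6CB()-style view */
	struct { int seq, end_seq; } l4;	/* the TCP_SKB_CB()-style view */
} cb;

/* Analogue of xfrm6_policy_check(): still reads the L3 view. */
static int policy_check(void)
{
	return cb.l3.ifindex == 7;
}

/* Analogue of tcp_v6_fill_cb(): installs the L4 view, clobbering L3. */
static void fill_l4_cb(int seq)
{
	cb.l4.seq = seq;
	cb.l4.end_seq = seq + 1;
}

int main(void)
{
	/* Wrong order (the bug this patch fixes): fill, then check. */
	cb.l3.ifindex = 7;
	fill_l4_cb(1000);
	printf("check after fill:  %s\n", policy_check() ? "pass" : "FAIL");

	/* Right order (what the new call sites do): check, then fill. */
	cb.l3.ifindex = 7;
	printf("check before fill: %s\n", policy_check() ? "pass" : "FAIL");
	fill_l4_cb(1000);
	return 0;
}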
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index ca27837974fe..349295d21946 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -31,10 +31,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
 				  SKB_GSO_TCPV6 |
 				  SKB_GSO_UDP |
 				  SKB_GSO_DODGY |
-				  SKB_GSO_TCP_ECN |
-				  SKB_GSO_GRE |
-				  SKB_GSO_GRE_CSUM |
-				  SKB_GSO_IPIP)))
+				  SKB_GSO_TCP_ECN)))
 		goto out;
 
 	/* Setup inner SKB. */
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 13c2e17bbe27..cde4a6702fa3 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -463,7 +463,7 @@ static void nfnetlink_rcv(struct sk_buff *skb)
 }
 
 #ifdef CONFIG_MODULES
-static int nfnetlink_bind(int group)
+static int nfnetlink_bind(struct net *net, int group)
 {
 	const struct nfnetlink_subsystem *ss;
 	int type;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 074cf3e91c6f..84ea76ca3f1f 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1091,8 +1091,10 @@ static void netlink_remove(struct sock *sk)
 	mutex_unlock(&nl_sk_hash_lock);
 
 	netlink_table_grab();
-	if (nlk_sk(sk)->subscriptions)
+	if (nlk_sk(sk)->subscriptions) {
 		__sk_del_bind_node(sk);
+		netlink_update_listeners(sk);
+	}
 	netlink_table_ungrab();
 }
 
@@ -1139,8 +1141,8 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
 	struct module *module = NULL;
 	struct mutex *cb_mutex;
 	struct netlink_sock *nlk;
-	int (*bind)(int group);
-	void (*unbind)(int group);
+	int (*bind)(struct net *net, int group);
+	void (*unbind)(struct net *net, int group);
 	int err = 0;
 
 	sock->state = SS_UNCONNECTED;
@@ -1226,8 +1228,8 @@ static int netlink_release(struct socket *sock)
 
 	module_put(nlk->module);
 
-	netlink_table_grab();
 	if (netlink_is_kernel(sk)) {
+		netlink_table_grab();
 		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
 		if (--nl_table[sk->sk_protocol].registered == 0) {
 			struct listeners *old;
@@ -1241,11 +1243,16 @@ static int netlink_release(struct socket *sock)
 			nl_table[sk->sk_protocol].flags = 0;
 			nl_table[sk->sk_protocol].registered = 0;
 		}
-	} else if (nlk->subscriptions) {
-		netlink_update_listeners(sk);
+		netlink_table_ungrab();
 	}
-	netlink_table_ungrab();
 
+	if (nlk->netlink_unbind) {
+		int i;
+
+		for (i = 0; i < nlk->ngroups; i++)
+			if (test_bit(i, nlk->groups))
+				nlk->netlink_unbind(sock_net(sk), i + 1);
+	}
 	kfree(nlk->groups);
 	nlk->groups = NULL;
 
@@ -1410,9 +1417,10 @@ static int netlink_realloc_groups(struct sock *sk)
 	return err;
 }
 
-static void netlink_unbind(int group, long unsigned int groups,
-			   struct netlink_sock *nlk)
+static void netlink_undo_bind(int group, long unsigned int groups,
+			      struct sock *sk)
 {
+	struct netlink_sock *nlk = nlk_sk(sk);
 	int undo;
 
 	if (!nlk->netlink_unbind)
@@ -1420,7 +1428,7 @@ static void netlink_unbind(int group, long unsigned int groups,
 
 	for (undo = 0; undo < group; undo++)
 		if (test_bit(undo, &groups))
-			nlk->netlink_unbind(undo);
+			nlk->netlink_unbind(sock_net(sk), undo);
 }
 
 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
@@ -1458,10 +1466,10 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 		for (group = 0; group < nlk->ngroups; group++) {
 			if (!test_bit(group, &groups))
 				continue;
-			err = nlk->netlink_bind(group);
+			err = nlk->netlink_bind(net, group);
 			if (!err)
 				continue;
-			netlink_unbind(group, groups, nlk);
+			netlink_undo_bind(group, groups, sk);
 			return err;
 		}
 	}
@@ -1471,7 +1479,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 			netlink_insert(sk, net, nladdr->nl_pid) :
 			netlink_autobind(sock);
 		if (err) {
-			netlink_unbind(nlk->ngroups, groups, nlk);
+			netlink_undo_bind(nlk->ngroups, groups, sk);
 			return err;
 		}
 	}
@@ -2122,7 +2130,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 		if (!val || val - 1 >= nlk->ngroups)
 			return -EINVAL;
 		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
-			err = nlk->netlink_bind(val);
+			err = nlk->netlink_bind(sock_net(sk), val);
 			if (err)
 				return err;
 		}
@@ -2131,7 +2139,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 					optname == NETLINK_ADD_MEMBERSHIP);
 		netlink_table_ungrab();
 		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
-			nlk->netlink_unbind(sock_net(sk), val);
 
 		err = 0;
 		break;
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index b20a1731759b..f123a88496f8 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -39,8 +39,8 @@ struct netlink_sock {
 	struct mutex		*cb_mutex;
 	struct mutex		cb_def_mutex;
 	void			(*netlink_rcv)(struct sk_buff *skb);
-	int			(*netlink_bind)(int group);
-	void			(*netlink_unbind)(int group);
+	int			(*netlink_bind)(struct net *net, int group);
+	void			(*netlink_unbind)(struct net *net, int group);
 	struct module		*module;
 #ifdef CONFIG_NETLINK_MMAP
 	struct mutex		pg_vec_lock;
@@ -65,8 +65,8 @@ struct netlink_table {
 	unsigned int		groups;
 	struct mutex		*cb_mutex;
 	struct module		*module;
-	int			(*bind)(int group);
-	void			(*unbind)(int group);
+	int			(*bind)(struct net *net, int group);
+	void			(*unbind)(struct net *net, int group);
 	bool			(*compare)(struct net *net, struct sock *sock);
 	int			registered;
 };
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 76393f2f4b22..2e11061ef885 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -983,11 +983,67 @@ static struct genl_multicast_group genl_ctrl_groups[] = {
 	{ .name = "notify", },
 };
 
+static int genl_bind(struct net *net, int group)
+{
+	int i, err = 0;
+
+	down_read(&cb_lock);
+	for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
+		struct genl_family *f;
+
+		list_for_each_entry(f, genl_family_chain(i), family_list) {
+			if (group >= f->mcgrp_offset &&
+			    group < f->mcgrp_offset + f->n_mcgrps) {
+				int fam_grp = group - f->mcgrp_offset;
+
+				if (!f->netnsok && net != &init_net)
+					err = -ENOENT;
+				else if (f->mcast_bind)
+					err = f->mcast_bind(net, fam_grp);
+				else
+					err = 0;
+				break;
+			}
+		}
+	}
+	up_read(&cb_lock);
+
+	return err;
+}
+
+static void genl_unbind(struct net *net, int group)
+{
+	int i;
+	bool found = false;
+
+	down_read(&cb_lock);
+	for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
+		struct genl_family *f;
+
+		list_for_each_entry(f, genl_family_chain(i), family_list) {
+			if (group >= f->mcgrp_offset &&
+			    group < f->mcgrp_offset + f->n_mcgrps) {
+				int fam_grp = group - f->mcgrp_offset;
+
+				if (f->mcast_unbind)
+					f->mcast_unbind(net, fam_grp);
+				found = true;
+				break;
+			}
+		}
+	}
+	up_read(&cb_lock);
+
+	WARN_ON(!found);
+}
+
 static int __net_init genl_pernet_init(struct net *net)
 {
 	struct netlink_kernel_cfg cfg = {
 		.input		= genl_rcv,
 		.flags		= NL_CFG_F_NONROOT_RECV,
+		.bind		= genl_bind,
+		.unbind		= genl_unbind,
 	};
 
 	/* we'll bump the group number right afterwards */
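
genl_bind() and genl_unbind() map a global netlink group number onto the owning family by testing it against each family's [mcgrp_offset, mcgrp_offset + n_mcgrps) window, then hand the family a zero-based index. The lookup arithmetic in isolation (hypothetical families and offsets, not the kernel's tables):

#include <stdio.h>

struct family {
	const char *name;
	int mcgrp_offset;	/* first global group id owned */
	int n_mcgrps;		/* how many consecutive ids */
};

static struct family families[] = {
	{ "ctrl", 1, 1 },
	{ "foo",  2, 3 },	/* owns global groups 2..4 */
	{ "bar",  5, 2 },	/* owns global groups 5..6 */
};

/* Analogue of the scan inside genl_bind(): find the owning family
 * and the family-relative group index.
 */
static struct family *find_family(int group, int *fam_grp)
{
	size_t i;

	for (i = 0; i < sizeof(families) / sizeof(families[0]); i++) {
		struct family *f = &families[i];

		if (group >= f->mcgrp_offset &&
		    group < f->mcgrp_offset + f->n_mcgrps) {
			*fam_grp = group - f->mcgrp_offset;
			return f;
		}
	}
	return NULL;
}

int main(void)
{
	int fam_grp;
	struct family *f = find_family(4, &fam_grp);

	if (f)
		printf("group 4 -> family %s, local group %d\n",
		       f->name, fam_grp);
	return 0;
}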
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 764fdc39c63b..770064c83711 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -147,7 +147,8 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 	hdr = eth_hdr(skb);
 	hdr->h_proto = mpls->mpls_ethertype;
 
-	skb_set_inner_protocol(skb, skb->protocol);
+	if (!skb->inner_protocol)
+		skb_set_inner_protocol(skb, skb->protocol);
 	skb->protocol = mpls->mpls_ethertype;
 
 	invalidate_flow_key(key);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 332b5a031739..4e9a5f035cbc 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -83,8 +83,7 @@ static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
 			    unsigned int group)
 {
 	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
-	       genl_has_listeners(family, genl_info_net(info)->genl_sock,
-				  group);
+	       genl_has_listeners(family, genl_info_net(info), group);
 }
 
 static void ovs_notify(struct genl_family *family,
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 9645a21d9eaa..d1eecf707613 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1753,7 +1753,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
 			  __be16 eth_type, __be16 vlan_tci, bool log)
 {
 	const struct nlattr *a;
-	bool out_tnl_port = false;
 	int rem, err;
 
 	if (depth >= SAMPLE_ACTION_DEPTH)
@@ -1796,8 +1795,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
 		case OVS_ACTION_ATTR_OUTPUT:
 			if (nla_get_u32(a) >= DP_MAX_PORTS)
 				return -EINVAL;
-			out_tnl_port = false;
-
 			break;
 
 		case OVS_ACTION_ATTR_HASH: {
@@ -1832,12 +1829,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
 		case OVS_ACTION_ATTR_PUSH_MPLS: {
 			const struct ovs_action_push_mpls *mpls = nla_data(a);
 
-			/* Networking stack do not allow simultaneous Tunnel
-			 * and MPLS GSO.
-			 */
-			if (out_tnl_port)
-				return -EINVAL;
-
 			if (!eth_p_mpls(mpls->mpls_ethertype))
 				return -EINVAL;
 			/* Prohibit push MPLS other than to a white list
@@ -1873,11 +1864,9 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
 
 		case OVS_ACTION_ATTR_SET:
 			err = validate_set(a, key, sfa,
-					   &out_tnl_port, eth_type, log);
+					   &skip_copy, eth_type, log);
 			if (err)
 				return err;
-
-			skip_copy = out_tnl_port;
 			break;
 
 		case OVS_ACTION_ATTR_SAMPLE:
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 347fa2325b22..484864dd0e68 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -219,7 +219,10 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
 			      false);
 	if (err < 0)
 		ip_rt_put(rt);
+	return err;
+
 error:
+	kfree_skb(skb);
 	return err;
 }
 
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 6b69df545b1d..d4168c442db5 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -73,7 +73,7 @@ static struct sk_buff *__build_header(struct sk_buff *skb,
 
 	skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
 	if (IS_ERR(skb))
-		return NULL;
+		return skb;
 
 	tpi.flags = filter_tnl_flags(tun_key->tun_flags);
 	tpi.proto = htons(ETH_P_TEB);
@@ -144,7 +144,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 
 	if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
 		err = -EINVAL;
-		goto error;
+		goto err_free_skb;
 	}
 
 	tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
@@ -157,8 +157,10 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 	fl.flowi4_proto = IPPROTO_GRE;
 
 	rt = ip_route_output_key(net, &fl);
-	if (IS_ERR(rt))
-		return PTR_ERR(rt);
+	if (IS_ERR(rt)) {
+		err = PTR_ERR(rt);
+		goto err_free_skb;
+	}
 
 	tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);
 
@@ -183,8 +185,9 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 
 	/* Push Tunnel header. */
 	skb = __build_header(skb, tunnel_hlen);
-	if (unlikely(!skb)) {
-		err = 0;
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		skb = NULL;
 		goto err_free_rt;
 	}
 
@@ -198,7 +201,8 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 			     tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false);
 err_free_rt:
 	ip_rt_put(rt);
-error:
+err_free_skb:
+	kfree_skb(skb);
 	return err;
 }
 
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 38f95a52241b..d7c46b301024 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -187,7 +187,9 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
 			     false);
 	if (err < 0)
 		ip_rt_put(rt);
+	return err;
 error:
+	kfree_skb(skb);
 	return err;
 }
 
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 9584526c0778..53f3ebbfceab 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -519,10 +519,9 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
 		u64_stats_update_end(&stats->syncp);
 	} else if (sent < 0) {
 		ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
-		kfree_skb(skb);
-	} else
+	} else {
 		ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
-
+	}
 	return sent;
 }
 
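
The openvswitch hunks above converge on one ownership rule: the transmit path must free the skb exactly once on failure, at a common label. Since kfree_skb(NULL) is a no-op, gre_tnl_send() can set skb = NULL after __build_header() fails and still share that label safely. The same discipline in plain C, where free(NULL) is equally harmless (a generic sketch, not the vport code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int send_packet(const char *payload)
{
	char *skb = NULL;	/* free(NULL) is a no-op, like kfree_skb(NULL) */
	char *hdr = NULL;
	int err;

	skb = strdup(payload);
	if (!skb) {
		err = -1;
		goto out;
	}

	hdr = malloc(16);
	if (!hdr) {
		err = -2;
		goto out;	/* skb still freed below, exactly once */
	}
	snprintf(hdr, 16, "hdr:%zu", strlen(skb));

	printf("%s %s\n", hdr, skb);
	err = 0;

out:
	/* One exit owns all cleanup, mirroring the err_free_skb label. */
	free(hdr);
	free(skb);
	return err;
}

int main(void)
{
	return send_packet("payload") ? 1 : 0;
}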
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e52a44785681..6880f34a529a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -785,6 +785,7 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 
 	struct tpacket3_hdr *last_pkt;
 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
+	struct sock *sk = &po->sk;
 
 	if (po->stats.stats3.tp_drops)
 		status |= TP_STATUS_LOSING;
@@ -809,6 +810,8 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 	/* Flush the block */
 	prb_flush_block(pkc1, pbd1, status);
 
+	sk->sk_data_ready(sk);
+
 	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
 }
 
@@ -2052,12 +2055,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 	smp_wmb();
 #endif
 
-	if (po->tp_version <= TPACKET_V2)
+	if (po->tp_version <= TPACKET_V2) {
 		__packet_set_status(po, h.raw, status);
-	else
+		sk->sk_data_ready(sk);
+	} else {
 		prb_clear_blk_fill_status(&po->rx_ring);
-
-	sk->sk_data_ready(sk);
+	}
 
 drop_n_restore:
 	if (skb_head != skb->data && skb_shared(skb)) {