author		Linus Torvalds <torvalds@linux-foundation.org>	2018-01-23 11:52:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-01-23 11:52:55 -0500
commit		a84a8ab94ed5cb65a1355fe9e8d1d55283375808 (patch)
tree		648f8a8fa9ab76ba8c7d22cd1ebed66517374ab5
parent		1995266727fa8143897e89b55f5d3c79aa828420 (diff)
parent		7a8c4dd9be91a7e8f8f0e0419a560663adc694a3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix divide by zero in mlx5, from Talat Batheesh.

 2) Guard against invalid GSO packets coming from untrusted guests and
    arriving in qdisc_pkt_len_init(), from Eric Dumazet.

 3) Similarly add such protection to the various protocol GSO handlers.
    From Willem de Bruijn.

 4) Fix regression added to IGMP source address checking for IGMPv3
    reports, from Felix Fietkau.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  tls: Correct length of scatterlist in tls_sw_sendpage
  be2net: restore properly promisc mode after queues reconfiguration
  net: igmp: fix source address check for IGMPv3 reports
  gso: validate gso_type in GSO handlers
  net: qdisc_pkt_len_init() should be more robust
  ibmvnic: Allocate and request vpd in init_resources
  ibmvnic: Revert to previous mtu when unsupported value requested
  ibmvnic: Modify buffer size and number of queues on failover
  rds: tcp: compute m_ack_seq as offset from ->write_seq
  usbnet: silence an unnecessary warning
  cxgb4: fix endianness for vlan value in cxgb4_tc_flower
  cxgb4: set filter type to 1 for ETH_P_IPV6
  net/mlx5e: Fix fixpoint divide exception in mlx5e_am_stats_compare
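Most of the GSO hardening in this pull (tcp4/tcp6, udp4/udp6, esp4/esp6, sctp) applies the same two-step check. A condensed sketch of the pattern follows; foo, SKB_GSO_FOO and struct foohdr are placeholders, not any real handler:

static struct sk_buff *foo_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	/* An untrusted source (e.g. a guest NIC) may label the skb with
	 * a gso_type that does not match the headers it actually built,
	 * so validate the type before trusting any header field.
	 */
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_FOO))
		return ERR_PTR(-EINVAL);

	/* Only then verify the claimed transport header is present. */
	if (!pskb_may_pull(skb, sizeof(struct foohdr)))
		return ERR_PTR(-EINVAL);

	/* ... protocol-specific segmentation continues here ... */
}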
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c	7
-rw-r--r--	drivers/net/ethernet/emulex/benet/be_main.c	9
-rw-r--r--	drivers/net/ethernet/ibm/ibmvnic.c	73
-rw-r--r--	drivers/net/ethernet/ibm/ibmvnic.h	2
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c	6
-rw-r--r--	drivers/net/usb/usbnet.c	8
-rw-r--r--	net/core/dev.c	19
-rw-r--r--	net/ipv4/esp4_offload.c	3
-rw-r--r--	net/ipv4/igmp.c	2
-rw-r--r--	net/ipv4/tcp_offload.c	3
-rw-r--r--	net/ipv4/udp_offload.c	3
-rw-r--r--	net/ipv6/esp6_offload.c	3
-rw-r--r--	net/ipv6/tcpv6_offload.c	3
-rw-r--r--	net/ipv6/udp_offload.c	3
-rw-r--r--	net/rds/tcp.c	5
-rw-r--r--	net/rds/tcp.h	2
-rw-r--r--	net/rds/tcp_send.c	4
-rw-r--r--	net/sctp/offload.c	3
-rw-r--r--	net/tls/tls_sw.c	2
19 files changed, 128 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index d4a548a6a55c..a452d5a1b0f3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -111,6 +111,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
 		ethtype_mask = 0;
 	}
 
+	if (ethtype_key == ETH_P_IPV6)
+		fs->type = 1;
+
 	fs->val.ethtype = ethtype_key;
 	fs->mask.ethtype = ethtype_mask;
 	fs->val.proto = key->ip_proto;
@@ -205,8 +208,8 @@ static void cxgb4_process_flow_match(struct net_device *dev,
 					       VLAN_PRIO_SHIFT);
 		vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
 						 VLAN_PRIO_SHIFT);
-		fs->val.ivlan = cpu_to_be16(vlan_tci);
-		fs->mask.ivlan = cpu_to_be16(vlan_tci_mask);
+		fs->val.ivlan = vlan_tci;
+		fs->mask.ivlan = vlan_tci_mask;
 
 		/* Chelsio adapters use ivlan_vld bit to match vlan packets
 		 * as 802.1Q. Also, when vlan tag is present in packets,
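Note on the ivlan change: vlan_tci is assembled in host order above, and, judging by the fix, the filter-spec field is consumed in host order too, so the extra cpu_to_be16() byte-swapped the VLAN ID on little-endian hosts. A minimal illustration (values assumed):

	u16 vlan_tci = 0x0123;			/* VLAN ID 291, host order */
	__be16 swapped = cpu_to_be16(vlan_tci);	/* raw bytes 0x01 0x23 */

	/* On a little-endian CPU those bytes read back as 0x2301, so the
	 * hardware was programmed to match VLAN 8961 instead of 291.
	 */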
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c6e859a27ee6..e180657a02ef 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4634,6 +4634,15 @@ int be_update_queues(struct be_adapter *adapter)
 
 	be_schedule_worker(adapter);
 
+	/*
+	 * The IF was destroyed and re-created. We need to clear
+	 * all promiscuous flags valid for the destroyed IF.
+	 * Without this promisc mode is not restored during
+	 * be_open() because the driver thinks that it is
+	 * already enabled in HW.
+	 */
+	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
+
 	if (netif_running(netdev))
 		status = be_open(netdev);
 
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index ab2e1917cd04..b65f5f3ac034 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -410,6 +410,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 	struct ibmvnic_rx_pool *rx_pool;
 	int rx_scrqs;
 	int i, j, rc;
+	u64 *size_array;
+
+	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
 
 	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 	for (i = 0; i < rx_scrqs; i++) {
@@ -417,7 +421,17 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 
 		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
 
-		rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
+		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
+			free_long_term_buff(adapter, &rx_pool->long_term_buff);
+			rx_pool->buff_size = be64_to_cpu(size_array[i]);
+			alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+					     rx_pool->size *
+					     rx_pool->buff_size);
+		} else {
+			rc = reset_long_term_buff(adapter,
+						  &rx_pool->long_term_buff);
+		}
+
 		if (rc)
 			return rc;
 
@@ -439,14 +453,12 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 static void release_rx_pools(struct ibmvnic_adapter *adapter)
 {
 	struct ibmvnic_rx_pool *rx_pool;
-	int rx_scrqs;
 	int i, j;
 
 	if (!adapter->rx_pool)
 		return;
 
-	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
-	for (i = 0; i < rx_scrqs; i++) {
+	for (i = 0; i < adapter->num_active_rx_pools; i++) {
 		rx_pool = &adapter->rx_pool[i];
 
 		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -469,6 +481,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
 
 	kfree(adapter->rx_pool);
 	adapter->rx_pool = NULL;
+	adapter->num_active_rx_pools = 0;
 }
 
 static int init_rx_pools(struct net_device *netdev)
@@ -493,6 +506,8 @@ static int init_rx_pools(struct net_device *netdev)
 		return -1;
 	}
 
+	adapter->num_active_rx_pools = 0;
+
 	for (i = 0; i < rxadd_subcrqs; i++) {
 		rx_pool = &adapter->rx_pool[i];
 
@@ -536,6 +551,8 @@ static int init_rx_pools(struct net_device *netdev)
 		rx_pool->next_free = 0;
 	}
 
+	adapter->num_active_rx_pools = rxadd_subcrqs;
+
 	return 0;
 }
 
@@ -586,13 +603,12 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
 static void release_tx_pools(struct ibmvnic_adapter *adapter)
 {
 	struct ibmvnic_tx_pool *tx_pool;
-	int i, tx_scrqs;
+	int i;
 
 	if (!adapter->tx_pool)
 		return;
 
-	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
-	for (i = 0; i < tx_scrqs; i++) {
+	for (i = 0; i < adapter->num_active_tx_pools; i++) {
 		netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
 		tx_pool = &adapter->tx_pool[i];
 		kfree(tx_pool->tx_buff);
@@ -603,6 +619,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
 
 	kfree(adapter->tx_pool);
 	adapter->tx_pool = NULL;
+	adapter->num_active_tx_pools = 0;
 }
 
 static int init_tx_pools(struct net_device *netdev)
@@ -619,6 +636,8 @@ static int init_tx_pools(struct net_device *netdev)
 	if (!adapter->tx_pool)
 		return -1;
 
+	adapter->num_active_tx_pools = 0;
+
 	for (i = 0; i < tx_subcrqs; i++) {
 		tx_pool = &adapter->tx_pool[i];
 
@@ -666,6 +685,8 @@ static int init_tx_pools(struct net_device *netdev)
 		tx_pool->producer_index = 0;
 	}
 
+	adapter->num_active_tx_pools = tx_subcrqs;
+
 	return 0;
 }
 
@@ -860,7 +881,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
 	if (adapter->vpd->buff)
 		len = adapter->vpd->len;
 
-	reinit_completion(&adapter->fw_done);
+	init_completion(&adapter->fw_done);
 	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
 	crq.get_vpd_size.cmd = GET_VPD_SIZE;
 	ibmvnic_send_crq(adapter, &crq);
@@ -922,6 +943,13 @@ static int init_resources(struct ibmvnic_adapter *adapter)
 	if (!adapter->vpd)
 		return -ENOMEM;
 
+	/* Vital Product Data (VPD) */
+	rc = ibmvnic_get_vpd(adapter);
+	if (rc) {
+		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
+		return rc;
+	}
+
 	adapter->map_id = 1;
 	adapter->napi = kcalloc(adapter->req_rx_queues,
 				sizeof(struct napi_struct), GFP_KERNEL);
@@ -995,7 +1023,7 @@ static int __ibmvnic_open(struct net_device *netdev)
 static int ibmvnic_open(struct net_device *netdev)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-	int rc, vpd;
+	int rc;
 
 	mutex_lock(&adapter->reset_lock);
 
@@ -1018,11 +1046,6 @@ static int ibmvnic_open(struct net_device *netdev)
 	rc = __ibmvnic_open(netdev);
 	netif_carrier_on(netdev);
 
-	/* Vital Product Data (VPD) */
-	vpd = ibmvnic_get_vpd(adapter);
-	if (vpd)
-		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
-
 	mutex_unlock(&adapter->reset_lock);
 
 	return rc;
@@ -1548,6 +1571,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
 static int do_reset(struct ibmvnic_adapter *adapter,
 		    struct ibmvnic_rwi *rwi, u32 reset_state)
 {
+	u64 old_num_rx_queues, old_num_tx_queues;
 	struct net_device *netdev = adapter->netdev;
 	int i, rc;
 
@@ -1557,6 +1581,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 	netif_carrier_off(netdev);
 	adapter->reset_reason = rwi->reset_reason;
 
+	old_num_rx_queues = adapter->req_rx_queues;
+	old_num_tx_queues = adapter->req_tx_queues;
+
 	if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
 		rc = ibmvnic_reenable_crq_queue(adapter);
 		if (rc)
@@ -1601,6 +1628,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 			rc = init_resources(adapter);
 			if (rc)
 				return rc;
+		} else if (adapter->req_rx_queues != old_num_rx_queues ||
+			   adapter->req_tx_queues != old_num_tx_queues) {
+			release_rx_pools(adapter);
+			release_tx_pools(adapter);
+			init_rx_pools(netdev);
+			init_tx_pools(netdev);
 		} else {
 			rc = reset_tx_pools(adapter);
 			if (rc)
@@ -3592,7 +3625,17 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
 			   *req_value,
 			   (long int)be64_to_cpu(crq->request_capability_rsp.
 						 number), name);
-		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
+
+		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
+		    REQ_MTU) {
+			pr_err("mtu of %llu is not supported. Reverting.\n",
+			       *req_value);
+			*req_value = adapter->fallback.mtu;
+		} else {
+			*req_value =
+				be64_to_cpu(crq->request_capability_rsp.number);
+		}
+
 		ibmvnic_send_req_caps(adapter, 1);
 		return;
 	default:
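The common thread in the ibmvnic pool changes above: teardown used to size its loops from login_rsp_buf, which the hypervisor rewrites on failover, so release could walk a different count than init allocated. A minimal sketch of the ownership pattern, with pool_set and free_pool() as assumed simplifications of the driver's real structures:

	struct pool_set {
		struct pool *pools;
		u64 num_active;		/* set by init, consumed by release */
	};

	static void release_pools(struct pool_set *ps)
	{
		u64 i;

		/* The bound comes from our own counter, not from a login
		 * buffer the firmware may have rewritten meanwhile.
		 */
		for (i = 0; i < ps->num_active; i++)
			free_pool(&ps->pools[i]);	/* hypothetical helper */
		kfree(ps->pools);
		ps->pools = NULL;
		ps->num_active = 0;
	}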
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4487f1e2c266..3aec42118db2 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1091,6 +1091,8 @@ struct ibmvnic_adapter {
 	u64 opt_rxba_entries_per_subcrq;
 	__be64 tx_rx_desc_req;
 	u8 map_id;
+	u64 num_active_rx_pools;
+	u64 num_active_tx_pools;
 
 	struct tasklet_struct tasklet;
 	enum vnic_state state;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index e401d9d245f3..b69a705fd787 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -201,9 +201,15 @@ static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
 		return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
 						   MLX5E_AM_STATS_WORSE;
 
+	if (!prev->ppms)
+		return curr->ppms ? MLX5E_AM_STATS_BETTER :
+				    MLX5E_AM_STATS_SAME;
+
 	if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
 		return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
 						   MLX5E_AM_STATS_WORSE;
+	if (!prev->epms)
+		return MLX5E_AM_STATS_SAME;
 
 	if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
 		return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
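IS_SIGNIFICANT_DIFF() divides by its reference sample, which is what raised the fixpoint divide exception when a prev sample was zero; the new guards return before the macro ever runs. A paraphrase of the comparison it performs (see the macro's definition earlier in this file):

	/* "significant" = more than a 10% relative change; the division
	 * by (ref) is the trap when prev->ppms or prev->epms is 0.
	 */
	#define IS_SIGNIFICANT_DIFF(val, ref) \
		(((100 * abs((val) - (ref))) / (ref)) > 10)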
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index d56fe32bf48d..8a22ff67b026 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -457,12 +457,10 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
 void usbnet_defer_kevent (struct usbnet *dev, int work)
 {
 	set_bit (work, &dev->flags);
-	if (!schedule_work (&dev->kevent)) {
-		if (net_ratelimit())
-			netdev_err(dev->net, "kevent %d may have been dropped\n", work);
-	} else {
+	if (!schedule_work (&dev->kevent))
+		netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
+	else
 		netdev_dbg(dev->net, "kevent %d scheduled\n", work);
-	}
 }
 EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 0e0ba36eeac9..613fb4066be7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3151,10 +3151,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
 	hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
 
 	/* + transport layer */
-	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
-		hdr_len += tcp_hdrlen(skb);
-	else
-		hdr_len += sizeof(struct udphdr);
+	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
+		const struct tcphdr *th;
+		struct tcphdr _tcphdr;
+
+		th = skb_header_pointer(skb, skb_transport_offset(skb),
+					sizeof(_tcphdr), &_tcphdr);
+		if (likely(th))
+			hdr_len += __tcp_hdrlen(th);
+	} else {
+		struct udphdr _udphdr;
+
+		if (skb_header_pointer(skb, skb_transport_offset(skb),
+				       sizeof(_udphdr), &_udphdr))
+			hdr_len += sizeof(struct udphdr);
+	}
 
 	if (shinfo->gso_type & SKB_GSO_DODGY)
 		gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
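The fix leans on skb_header_pointer(), a core skbuff helper: it returns a pointer into the linear data when the requested bytes are already there, copies them into the caller's stack buffer when they live in fragments, and returns NULL when the packet is simply too short — the malformed-GSO case this hunk must now survive. The usage shape:

	struct tcphdr _tcphdr;
	const struct tcphdr *th;

	th = skb_header_pointer(skb, skb_transport_offset(skb),
				sizeof(_tcphdr), &_tcphdr);
	if (!th)
		/* truncated or bogus packet: add no transport bytes */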
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index b1338e576d00..29b333a62ab0 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -122,6 +122,9 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
 	if (!xo)
 		goto out;
 
+	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
+		goto out;
+
 	seq = xo->seq.low;
 
 	x = skb->sp->xvec[skb->sp->len - 1];
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 726f6b608274..2d49717a7421 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -332,7 +332,7 @@ static __be32 igmpv3_get_srcaddr(struct net_device *dev,
 		return htonl(INADDR_ANY);
 
 	for_ifa(in_dev) {
-		if (inet_ifa_match(fl4->saddr, ifa))
+		if (fl4->saddr == ifa->ifa_local)
 			return fl4->saddr;
 	} endfor_ifa(in_dev);
 
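For contrast with the exact match now used, inet_ifa_match() is a subnet test, as defined in include/linux/inetdevice.h around this kernel:

	static inline bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
	{
		return !((addr ^ ifa->ifa_address) & ifa->ifa_mask);
	}

So any saddr inside an interface's subnet passed the old check, and an IGMPv3 report could go out with a source address the device does not actually own; comparing against ifa->ifa_local requires the address to be assigned to the interface.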
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index b6a2aa1dcf56..4d58e2ce0b5b 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -32,6 +32,9 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
 static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
 					netdev_features_t features)
 {
+	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
+		return ERR_PTR(-EINVAL);
+
 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 		return ERR_PTR(-EINVAL);
 
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 01801b77bd0d..ea6e6e7df0ee 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -203,6 +203,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 		goto out;
 	}
 
+	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
+		goto out;
+
 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 		goto out;
 
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index dd9627490c7c..f52c314d4c97 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -149,6 +149,9 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
 	if (!xo)
 		goto out;
 
+	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
+		goto out;
+
 	seq = xo->seq.low;
 
 	x = skb->sp->xvec[skb->sp->len - 1];
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index d883c9204c01..278e49cd67d4 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -46,6 +46,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
 {
 	struct tcphdr *th;
 
+	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
+		return ERR_PTR(-EINVAL);
+
 	if (!pskb_may_pull(skb, sizeof(*th)))
 		return ERR_PTR(-EINVAL);
 
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index a0f89ad76f9d..2a04dc9c781b 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -42,6 +42,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 	const struct ipv6hdr *ipv6h;
 	struct udphdr *uh;
 
+	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
+		goto out;
+
 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 		goto out;
 
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 6b7ee71f40c6..ab7356e0ba83 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -90,9 +90,10 @@ void rds_tcp_nonagle(struct socket *sock)
 			      sizeof(val));
 }
 
-u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
+u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
 {
-	return tcp_sk(tc->t_sock->sk)->snd_nxt;
+	/* seq# of the last byte of data in tcp send buffer */
+	return tcp_sk(tc->t_sock->sk)->write_seq;
 }
 
 u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 1aafbf7c3011..864ca7d8f019 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -54,7 +54,7 @@ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp);
 void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp);
 void rds_tcp_restore_callbacks(struct socket *sock,
 			       struct rds_tcp_connection *tc);
-u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
+u32 rds_tcp_write_seq(struct rds_tcp_connection *tc);
 u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
 u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
 extern struct rds_transport rds_tcp_transport;
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index dc860d1bb608..9b76e0fa1722 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -86,7 +86,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
 	 * m_ack_seq is set to the sequence number of the last byte of
 	 * header and data.  see rds_tcp_is_acked().
 	 */
-	tc->t_last_sent_nxt = rds_tcp_snd_nxt(tc);
+	tc->t_last_sent_nxt = rds_tcp_write_seq(tc);
 	rm->m_ack_seq = tc->t_last_sent_nxt +
 			sizeof(struct rds_header) +
 			be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
@@ -98,7 +98,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
 		rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
 
 	rdsdebug("rm %p tcp nxt %u ack_seq %llu\n",
-		 rm, rds_tcp_snd_nxt(tc),
+		 rm, rds_tcp_write_seq(tc),
 		 (unsigned long long)rm->m_ack_seq);
 }
 
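Why write_seq rather than snd_nxt: in struct tcp_sock the send-side markers satisfy snd_una <= snd_nxt <= write_seq, where write_seq is the tail of everything queued via sendmsg()/sendpage() and snd_nxt only advances as data actually hits the wire. m_ack_seq names the last byte of a message as queued into TCP, so deriving it from snd_nxt under-counted whenever TCP had accepted data it had not yet transmitted. A sketch of the acking test this feeds — a hypothetical helper, using the before() sequence comparator from net/tcp.h:

	static bool rds_msg_fully_acked(const struct sock *sk, u32 m_ack_seq)
	{
		/* acked once snd_una has advanced to or past m_ack_seq */
		return !before(tcp_sk(sk)->snd_una, m_ack_seq);
	}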
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 275925b93b29..35bc7106d182 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -45,6 +45,9 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	struct sctphdr *sh;
 
+	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP))
+		goto out;
+
 	sh = sctp_hdr(skb);
 	if (!pskb_may_pull(skb, sizeof(*sh)))
 		goto out;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 61f394d369bf..0a9b72fbd761 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -577,6 +577,8 @@ alloc_payload:
 		get_page(page);
 		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
 		sg_set_page(sg, page, copy, offset);
+		sg_unmark_end(sg);
+
 		ctx->sg_plaintext_num_elem++;
 
 		sk_mem_charge(sk, copy);
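Context for the one-line fix: in the scatterlist API (linux/scatterlist.h), sg_mark_end() sets a termination bit in the entry's page_link, and iterators such as sg_next() stop there. tls_sw_sendpage() reuses slots of a fixed array whose previous tail had been end-marked, so without clearing that bit the plaintext scatterlist looked shorter than sg_plaintext_num_elem claimed and part of the payload never reached the cipher. The reuse pattern, condensed:

	sg_set_page(sg, page, copy, offset);	/* (re)fill the slot */
	sg_unmark_end(sg);			/* clear a stale end marker */
	ctx->sg_plaintext_num_elem++;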