diff options
| author | David S. Miller <davem@davemloft.net> | 2017-08-09 19:28:45 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2017-08-09 19:28:45 -0400 |
| commit | 3118e6e19da7b8d76b2456b880c74a9aa3a2268b (patch) | |
| tree | 3060d11297c1195ef2d1f120d9c2247b4b1de4ae /net | |
| parent | feca7d8c135bc1527b244fe817b8b6498066ccec (diff) | |
| parent | 48fb6f4db940e92cfb16cd878cddd59ea6120d06 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The UDP offload conflict is dealt with by simply taking what is
in net-next where we have removed all of the UFO handling code
entirely.
The TCP conflict was a case of local variables in a function
being removed from both net and net-next.
In netvsc we had an assignment right next to where a missing
set of u64 stats sync object inits was added.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
| -rw-r--r-- | net/batman-adv/translation-table.c | 60 | ||||
| -rw-r--r-- | net/batman-adv/types.h | 2 | ||||
| -rw-r--r-- | net/core/dev.c | 2 | ||||
| -rw-r--r-- | net/ipv4/cipso_ipv4.c | 12 | ||||
| -rw-r--r-- | net/ipv4/fou.c | 1 | ||||
| -rw-r--r-- | net/ipv4/tcp_input.c | 34 | ||||
| -rw-r--r-- | net/ipv4/tcp_output.c | 27 | ||||
| -rw-r--r-- | net/ipv4/tcp_timer.c | 3 | ||||
| -rw-r--r-- | net/ipv6/route.c | 11 | ||||
| -rw-r--r-- | net/rds/ib_recv.c | 5 | ||||
| -rw-r--r-- | net/sched/act_ipt.c | 20 |
11 files changed, 112 insertions, 65 deletions
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index e1133bc634b5..8a3ce79b1307 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
| @@ -1549,9 +1549,41 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry, | |||
| 1549 | return found; | 1549 | return found; |
| 1550 | } | 1550 | } |
| 1551 | 1551 | ||
| 1552 | /** | ||
| 1553 | * batadv_tt_global_sync_flags - update TT sync flags | ||
| 1554 | * @tt_global: the TT global entry to update sync flags in | ||
| 1555 | * | ||
| 1556 | * Updates the sync flag bits in the tt_global flag attribute with a logical | ||
| 1557 | * OR of all sync flags from any of its TT orig entries. | ||
| 1558 | */ | ||
| 1559 | static void | ||
| 1560 | batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global) | ||
| 1561 | { | ||
| 1562 | struct batadv_tt_orig_list_entry *orig_entry; | ||
| 1563 | const struct hlist_head *head; | ||
| 1564 | u16 flags = BATADV_NO_FLAGS; | ||
| 1565 | |||
| 1566 | rcu_read_lock(); | ||
| 1567 | head = &tt_global->orig_list; | ||
| 1568 | hlist_for_each_entry_rcu(orig_entry, head, list) | ||
| 1569 | flags |= orig_entry->flags; | ||
| 1570 | rcu_read_unlock(); | ||
| 1571 | |||
| 1572 | flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK); | ||
| 1573 | tt_global->common.flags = flags; | ||
| 1574 | } | ||
| 1575 | |||
| 1576 | /** | ||
| 1577 | * batadv_tt_global_orig_entry_add - add or update a TT orig entry | ||
| 1578 | * @tt_global: the TT global entry to add an orig entry in | ||
| 1579 | * @orig_node: the originator to add an orig entry for | ||
| 1580 | * @ttvn: translation table version number of this changeset | ||
| 1581 | * @flags: TT sync flags | ||
| 1582 | */ | ||
| 1552 | static void | 1583 | static void |
| 1553 | batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, | 1584 | batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, |
| 1554 | struct batadv_orig_node *orig_node, int ttvn) | 1585 | struct batadv_orig_node *orig_node, int ttvn, |
| 1586 | u8 flags) | ||
| 1555 | { | 1587 | { |
| 1556 | struct batadv_tt_orig_list_entry *orig_entry; | 1588 | struct batadv_tt_orig_list_entry *orig_entry; |
| 1557 | 1589 | ||
| @@ -1561,7 +1593,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, | |||
| 1561 | * was added during a "temporary client detection" | 1593 | * was added during a "temporary client detection" |
| 1562 | */ | 1594 | */ |
| 1563 | orig_entry->ttvn = ttvn; | 1595 | orig_entry->ttvn = ttvn; |
| 1564 | goto out; | 1596 | orig_entry->flags = flags; |
| 1597 | goto sync_flags; | ||
| 1565 | } | 1598 | } |
| 1566 | 1599 | ||
| 1567 | orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC); | 1600 | orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC); |
| @@ -1573,6 +1606,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, | |||
| 1573 | batadv_tt_global_size_inc(orig_node, tt_global->common.vid); | 1606 | batadv_tt_global_size_inc(orig_node, tt_global->common.vid); |
| 1574 | orig_entry->orig_node = orig_node; | 1607 | orig_entry->orig_node = orig_node; |
| 1575 | orig_entry->ttvn = ttvn; | 1608 | orig_entry->ttvn = ttvn; |
| 1609 | orig_entry->flags = flags; | ||
| 1576 | kref_init(&orig_entry->refcount); | 1610 | kref_init(&orig_entry->refcount); |
| 1577 | 1611 | ||
| 1578 | spin_lock_bh(&tt_global->list_lock); | 1612 | spin_lock_bh(&tt_global->list_lock); |
| @@ -1582,6 +1616,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, | |||
| 1582 | spin_unlock_bh(&tt_global->list_lock); | 1616 | spin_unlock_bh(&tt_global->list_lock); |
| 1583 | atomic_inc(&tt_global->orig_list_count); | 1617 | atomic_inc(&tt_global->orig_list_count); |
| 1584 | 1618 | ||
| 1619 | sync_flags: | ||
| 1620 | batadv_tt_global_sync_flags(tt_global); | ||
| 1585 | out: | 1621 | out: |
| 1586 | if (orig_entry) | 1622 | if (orig_entry) |
| 1587 | batadv_tt_orig_list_entry_put(orig_entry); | 1623 | batadv_tt_orig_list_entry_put(orig_entry); |
| @@ -1703,10 +1739,10 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, | |||
| 1703 | } | 1739 | } |
| 1704 | 1740 | ||
| 1705 | /* the change can carry possible "attribute" flags like the | 1741 | /* the change can carry possible "attribute" flags like the |
| 1706 | * TT_CLIENT_WIFI, therefore they have to be copied in the | 1742 | * TT_CLIENT_TEMP, therefore they have to be copied in the |
| 1707 | * client entry | 1743 | * client entry |
| 1708 | */ | 1744 | */ |
| 1709 | common->flags |= flags; | 1745 | common->flags |= flags & (~BATADV_TT_SYNC_MASK); |
| 1710 | 1746 | ||
| 1711 | /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only | 1747 | /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only |
| 1712 | * one originator left in the list and we previously received a | 1748 | * one originator left in the list and we previously received a |
| @@ -1723,7 +1759,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, | |||
| 1723 | } | 1759 | } |
| 1724 | add_orig_entry: | 1760 | add_orig_entry: |
| 1725 | /* add the new orig_entry (if needed) or update it */ | 1761 | /* add the new orig_entry (if needed) or update it */ |
| 1726 | batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn); | 1762 | batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn, |
| 1763 | flags & BATADV_TT_SYNC_MASK); | ||
| 1727 | 1764 | ||
| 1728 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 1765 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
| 1729 | "Creating new global tt entry: %pM (vid: %d, via %pM)\n", | 1766 | "Creating new global tt entry: %pM (vid: %d, via %pM)\n", |
| @@ -1946,6 +1983,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq, | |||
| 1946 | struct batadv_tt_orig_list_entry *orig, | 1983 | struct batadv_tt_orig_list_entry *orig, |
| 1947 | bool best) | 1984 | bool best) |
| 1948 | { | 1985 | { |
| 1986 | u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags; | ||
| 1949 | void *hdr; | 1987 | void *hdr; |
| 1950 | struct batadv_orig_node_vlan *vlan; | 1988 | struct batadv_orig_node_vlan *vlan; |
| 1951 | u8 last_ttvn; | 1989 | u8 last_ttvn; |
| @@ -1975,7 +2013,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq, | |||
| 1975 | nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) || | 2013 | nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) || |
| 1976 | nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) || | 2014 | nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) || |
| 1977 | nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) || | 2015 | nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) || |
| 1978 | nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags)) | 2016 | nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags)) |
| 1979 | goto nla_put_failure; | 2017 | goto nla_put_failure; |
| 1980 | 2018 | ||
| 1981 | if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) | 2019 | if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) |
| @@ -2589,6 +2627,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv, | |||
| 2589 | unsigned short vid) | 2627 | unsigned short vid) |
| 2590 | { | 2628 | { |
| 2591 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; | 2629 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; |
| 2630 | struct batadv_tt_orig_list_entry *tt_orig; | ||
| 2592 | struct batadv_tt_common_entry *tt_common; | 2631 | struct batadv_tt_common_entry *tt_common; |
| 2593 | struct batadv_tt_global_entry *tt_global; | 2632 | struct batadv_tt_global_entry *tt_global; |
| 2594 | struct hlist_head *head; | 2633 | struct hlist_head *head; |
| @@ -2627,8 +2666,9 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv, | |||
| 2627 | /* find out if this global entry is announced by this | 2666 | /* find out if this global entry is announced by this |
| 2628 | * originator | 2667 | * originator |
| 2629 | */ | 2668 | */ |
| 2630 | if (!batadv_tt_global_entry_has_orig(tt_global, | 2669 | tt_orig = batadv_tt_global_orig_entry_find(tt_global, |
| 2631 | orig_node)) | 2670 | orig_node); |
| 2671 | if (!tt_orig) | ||
| 2632 | continue; | 2672 | continue; |
| 2633 | 2673 | ||
| 2634 | /* use network order to read the VID: this ensures that | 2674 | /* use network order to read the VID: this ensures that |
| @@ -2640,10 +2680,12 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv, | |||
| 2640 | /* compute the CRC on flags that have to be kept in sync | 2680 | /* compute the CRC on flags that have to be kept in sync |
| 2641 | * among nodes | 2681 | * among nodes |
| 2642 | */ | 2682 | */ |
| 2643 | flags = tt_common->flags & BATADV_TT_SYNC_MASK; | 2683 | flags = tt_orig->flags; |
| 2644 | crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags)); | 2684 | crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags)); |
| 2645 | 2685 | ||
| 2646 | crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN); | 2686 | crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN); |
| 2687 | |||
| 2688 | batadv_tt_orig_list_entry_put(tt_orig); | ||
| 2647 | } | 2689 | } |
| 2648 | rcu_read_unlock(); | 2690 | rcu_read_unlock(); |
| 2649 | } | 2691 | } |
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index ea43a6449247..a62795868794 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h | |||
| @@ -1260,6 +1260,7 @@ struct batadv_tt_global_entry { | |||
| 1260 | * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client | 1260 | * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client |
| 1261 | * @orig_node: pointer to orig node announcing this non-mesh client | 1261 | * @orig_node: pointer to orig node announcing this non-mesh client |
| 1262 | * @ttvn: translation table version number which added the non-mesh client | 1262 | * @ttvn: translation table version number which added the non-mesh client |
| 1263 | * @flags: per orig entry TT sync flags | ||
| 1263 | * @list: list node for batadv_tt_global_entry::orig_list | 1264 | * @list: list node for batadv_tt_global_entry::orig_list |
| 1264 | * @refcount: number of contexts the object is used | 1265 | * @refcount: number of contexts the object is used |
| 1265 | * @rcu: struct used for freeing in an RCU-safe manner | 1266 | * @rcu: struct used for freeing in an RCU-safe manner |
| @@ -1267,6 +1268,7 @@ struct batadv_tt_global_entry { | |||
| 1267 | struct batadv_tt_orig_list_entry { | 1268 | struct batadv_tt_orig_list_entry { |
| 1268 | struct batadv_orig_node *orig_node; | 1269 | struct batadv_orig_node *orig_node; |
| 1269 | u8 ttvn; | 1270 | u8 ttvn; |
| 1271 | u8 flags; | ||
| 1270 | struct hlist_node list; | 1272 | struct hlist_node list; |
| 1271 | struct kref refcount; | 1273 | struct kref refcount; |
| 1272 | struct rcu_head rcu; | 1274 | struct rcu_head rcu; |
diff --git a/net/core/dev.c b/net/core/dev.c index 1d75499add72..3f69f6e71824 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -2732,7 +2732,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) | |||
| 2732 | { | 2732 | { |
| 2733 | if (tx_path) | 2733 | if (tx_path) |
| 2734 | return skb->ip_summed != CHECKSUM_PARTIAL && | 2734 | return skb->ip_summed != CHECKSUM_PARTIAL && |
| 2735 | skb->ip_summed != CHECKSUM_NONE; | 2735 | skb->ip_summed != CHECKSUM_UNNECESSARY; |
| 2736 | 2736 | ||
| 2737 | return skb->ip_summed == CHECKSUM_NONE; | 2737 | return skb->ip_summed == CHECKSUM_NONE; |
| 2738 | } | 2738 | } |
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index c4c6e1969ed0..2ae8f54cb321 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c | |||
| @@ -1523,9 +1523,17 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb) | |||
| 1523 | int taglen; | 1523 | int taglen; |
| 1524 | 1524 | ||
| 1525 | for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) { | 1525 | for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) { |
| 1526 | if (optptr[0] == IPOPT_CIPSO) | 1526 | switch (optptr[0]) { |
| 1527 | case IPOPT_CIPSO: | ||
| 1527 | return optptr; | 1528 | return optptr; |
| 1528 | taglen = optptr[1]; | 1529 | case IPOPT_END: |
| 1530 | return NULL; | ||
| 1531 | case IPOPT_NOOP: | ||
| 1532 | taglen = 1; | ||
| 1533 | break; | ||
| 1534 | default: | ||
| 1535 | taglen = optptr[1]; | ||
| 1536 | } | ||
| 1529 | optlen -= taglen; | 1537 | optlen -= taglen; |
| 1530 | optptr += taglen; | 1538 | optptr += taglen; |
| 1531 | } | 1539 | } |
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 8e0257d01200..1540db65241a 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c | |||
| @@ -450,6 +450,7 @@ out_unlock: | |||
| 450 | out: | 450 | out: |
| 451 | NAPI_GRO_CB(skb)->flush |= flush; | 451 | NAPI_GRO_CB(skb)->flush |= flush; |
| 452 | skb_gro_remcsum_cleanup(skb, &grc); | 452 | skb_gro_remcsum_cleanup(skb, &grc); |
| 453 | skb->remcsum_offload = 0; | ||
| 453 | 454 | ||
| 454 | return pp; | 455 | return pp; |
| 455 | } | 456 | } |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 842ed75ccb25..d73903fe8c83 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -106,6 +106,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2; | |||
| 106 | #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ | 106 | #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ |
| 107 | #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ | 107 | #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ |
| 108 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ | 108 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ |
| 109 | #define FLAG_SET_XMIT_TIMER 0x1000 /* Set TLP or RTO timer */ | ||
| 109 | #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ | 110 | #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ |
| 110 | #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ | 111 | #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ |
| 111 | #define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */ | 112 | #define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */ |
| @@ -2520,8 +2521,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk) | |||
| 2520 | return; | 2521 | return; |
| 2521 | 2522 | ||
| 2522 | /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ | 2523 | /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ |
| 2523 | if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || | 2524 | if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && |
| 2524 | (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { | 2525 | (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { |
| 2525 | tp->snd_cwnd = tp->snd_ssthresh; | 2526 | tp->snd_cwnd = tp->snd_ssthresh; |
| 2526 | tp->snd_cwnd_stamp = tcp_jiffies32; | 2527 | tp->snd_cwnd_stamp = tcp_jiffies32; |
| 2527 | } | 2528 | } |
| @@ -3004,10 +3005,7 @@ void tcp_rearm_rto(struct sock *sk) | |||
| 3004 | /* Offset the time elapsed after installing regular RTO */ | 3005 | /* Offset the time elapsed after installing regular RTO */ |
| 3005 | if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || | 3006 | if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || |
| 3006 | icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { | 3007 | icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { |
| 3007 | struct sk_buff *skb = tcp_write_queue_head(sk); | 3008 | s64 delta_us = tcp_rto_delta_us(sk); |
| 3008 | u64 rto_time_stamp = skb->skb_mstamp + | ||
| 3009 | jiffies_to_usecs(rto); | ||
| 3010 | s64 delta_us = rto_time_stamp - tp->tcp_mstamp; | ||
| 3011 | /* delta_us may not be positive if the socket is locked | 3009 | /* delta_us may not be positive if the socket is locked |
| 3012 | * when the retrans timer fires and is rescheduled. | 3010 | * when the retrans timer fires and is rescheduled. |
| 3013 | */ | 3011 | */ |
| @@ -3019,6 +3017,13 @@ void tcp_rearm_rto(struct sock *sk) | |||
| 3019 | } | 3017 | } |
| 3020 | } | 3018 | } |
| 3021 | 3019 | ||
| 3020 | /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */ | ||
| 3021 | static void tcp_set_xmit_timer(struct sock *sk) | ||
| 3022 | { | ||
| 3023 | if (!tcp_schedule_loss_probe(sk)) | ||
| 3024 | tcp_rearm_rto(sk); | ||
| 3025 | } | ||
| 3026 | |||
| 3022 | /* If we get here, the whole TSO packet has not been acked. */ | 3027 | /* If we get here, the whole TSO packet has not been acked. */ |
| 3023 | static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) | 3028 | static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) |
| 3024 | { | 3029 | { |
| @@ -3180,7 +3185,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, | |||
| 3180 | ca_rtt_us, sack->rate); | 3185 | ca_rtt_us, sack->rate); |
| 3181 | 3186 | ||
| 3182 | if (flag & FLAG_ACKED) { | 3187 | if (flag & FLAG_ACKED) { |
| 3183 | tcp_rearm_rto(sk); | 3188 | flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ |
| 3184 | if (unlikely(icsk->icsk_mtup.probe_size && | 3189 | if (unlikely(icsk->icsk_mtup.probe_size && |
| 3185 | !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { | 3190 | !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { |
| 3186 | tcp_mtup_probe_success(sk); | 3191 | tcp_mtup_probe_success(sk); |
| @@ -3208,7 +3213,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, | |||
| 3208 | * after when the head was last (re)transmitted. Otherwise the | 3213 | * after when the head was last (re)transmitted. Otherwise the |
| 3209 | * timeout may continue to extend in loss recovery. | 3214 | * timeout may continue to extend in loss recovery. |
| 3210 | */ | 3215 | */ |
| 3211 | tcp_rearm_rto(sk); | 3216 | flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ |
| 3212 | } | 3217 | } |
| 3213 | 3218 | ||
| 3214 | if (icsk->icsk_ca_ops->pkts_acked) { | 3219 | if (icsk->icsk_ca_ops->pkts_acked) { |
| @@ -3575,9 +3580,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
| 3575 | if (after(ack, tp->snd_nxt)) | 3580 | if (after(ack, tp->snd_nxt)) |
| 3576 | goto invalid_ack; | 3581 | goto invalid_ack; |
| 3577 | 3582 | ||
| 3578 | if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) | ||
| 3579 | tcp_rearm_rto(sk); | ||
| 3580 | |||
| 3581 | if (after(ack, prior_snd_una)) { | 3583 | if (after(ack, prior_snd_una)) { |
| 3582 | flag |= FLAG_SND_UNA_ADVANCED; | 3584 | flag |= FLAG_SND_UNA_ADVANCED; |
| 3583 | icsk->icsk_retransmits = 0; | 3585 | icsk->icsk_retransmits = 0; |
| @@ -3626,18 +3628,20 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
| 3626 | flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked, | 3628 | flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked, |
| 3627 | &sack_state); | 3629 | &sack_state); |
| 3628 | 3630 | ||
| 3631 | if (tp->tlp_high_seq) | ||
| 3632 | tcp_process_tlp_ack(sk, ack, flag); | ||
| 3633 | /* If needed, reset TLP/RTO timer; RACK may later override this. */ | ||
| 3634 | if (flag & FLAG_SET_XMIT_TIMER) | ||
| 3635 | tcp_set_xmit_timer(sk); | ||
| 3636 | |||
| 3629 | if (tcp_ack_is_dubious(sk, flag)) { | 3637 | if (tcp_ack_is_dubious(sk, flag)) { |
| 3630 | is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); | 3638 | is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); |
| 3631 | tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); | 3639 | tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); |
| 3632 | } | 3640 | } |
| 3633 | if (tp->tlp_high_seq) | ||
| 3634 | tcp_process_tlp_ack(sk, ack, flag); | ||
| 3635 | 3641 | ||
| 3636 | if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) | 3642 | if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) |
| 3637 | sk_dst_confirm(sk); | 3643 | sk_dst_confirm(sk); |
| 3638 | 3644 | ||
| 3639 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) | ||
| 3640 | tcp_schedule_loss_probe(sk); | ||
| 3641 | delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */ | 3645 | delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */ |
| 3642 | lost = tp->lost - lost; /* freshly marked lost */ | 3646 | lost = tp->lost - lost; /* freshly marked lost */ |
| 3643 | tcp_rate_gen(sk, delivered, lost, sack_state.rate); | 3647 | tcp_rate_gen(sk, delivered, lost, sack_state.rate); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index d49bff51bdb7..3e0d19631534 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -2375,23 +2375,14 @@ bool tcp_schedule_loss_probe(struct sock *sk) | |||
| 2375 | { | 2375 | { |
| 2376 | struct inet_connection_sock *icsk = inet_csk(sk); | 2376 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 2377 | struct tcp_sock *tp = tcp_sk(sk); | 2377 | struct tcp_sock *tp = tcp_sk(sk); |
| 2378 | u32 timeout, tlp_time_stamp, rto_time_stamp; | 2378 | u32 timeout, rto_delta_us; |
| 2379 | 2379 | ||
| 2380 | /* No consecutive loss probes. */ | ||
| 2381 | if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { | ||
| 2382 | tcp_rearm_rto(sk); | ||
| 2383 | return false; | ||
| 2384 | } | ||
| 2385 | /* Don't do any loss probe on a Fast Open connection before 3WHS | 2380 | /* Don't do any loss probe on a Fast Open connection before 3WHS |
| 2386 | * finishes. | 2381 | * finishes. |
| 2387 | */ | 2382 | */ |
| 2388 | if (tp->fastopen_rsk) | 2383 | if (tp->fastopen_rsk) |
| 2389 | return false; | 2384 | return false; |
| 2390 | 2385 | ||
| 2391 | /* TLP is only scheduled when next timer event is RTO. */ | ||
| 2392 | if (icsk->icsk_pending != ICSK_TIME_RETRANS) | ||
| 2393 | return false; | ||
| 2394 | |||
| 2395 | /* Schedule a loss probe in 2*RTT for SACK capable connections | 2386 | /* Schedule a loss probe in 2*RTT for SACK capable connections |
| 2396 | * in Open state, that are either limited by cwnd or application. | 2387 | * in Open state, that are either limited by cwnd or application. |
| 2397 | */ | 2388 | */ |
| @@ -2418,14 +2409,10 @@ bool tcp_schedule_loss_probe(struct sock *sk) | |||
| 2418 | timeout = TCP_TIMEOUT_INIT; | 2409 | timeout = TCP_TIMEOUT_INIT; |
| 2419 | } | 2410 | } |
| 2420 | 2411 | ||
| 2421 | /* If RTO is shorter, just schedule TLP in its place. */ | 2412 | /* If the RTO formula yields an earlier time, then use that time. */ |
| 2422 | tlp_time_stamp = tcp_jiffies32 + timeout; | 2413 | rto_delta_us = tcp_rto_delta_us(sk); /* How far in future is RTO? */ |
| 2423 | rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout; | 2414 | if (rto_delta_us > 0) |
| 2424 | if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) { | 2415 | timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us)); |
| 2425 | s32 delta = rto_time_stamp - tcp_jiffies32; | ||
| 2426 | if (delta > 0) | ||
| 2427 | timeout = delta; | ||
| 2428 | } | ||
| 2429 | 2416 | ||
| 2430 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, | 2417 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, |
| 2431 | TCP_RTO_MAX); | 2418 | TCP_RTO_MAX); |
| @@ -3450,6 +3437,10 @@ int tcp_connect(struct sock *sk) | |||
| 3450 | int err; | 3437 | int err; |
| 3451 | 3438 | ||
| 3452 | tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB); | 3439 | tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB); |
| 3440 | |||
| 3441 | if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) | ||
| 3442 | return -EHOSTUNREACH; /* Routing failure or similar. */ | ||
| 3443 | |||
| 3453 | tcp_connect_init(sk); | 3444 | tcp_connect_init(sk); |
| 3454 | 3445 | ||
| 3455 | if (unlikely(tp->repair)) { | 3446 | if (unlikely(tp->repair)) { |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index f753f9d2fee3..655dd8d7f064 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
| @@ -640,7 +640,8 @@ static void tcp_keepalive_timer (unsigned long data) | |||
| 640 | goto death; | 640 | goto death; |
| 641 | } | 641 | } |
| 642 | 642 | ||
| 643 | if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE) | 643 | if (!sock_flag(sk, SOCK_KEEPOPEN) || |
| 644 | ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT))) | ||
| 644 | goto out; | 645 | goto out; |
| 645 | 646 | ||
| 646 | elapsed = keepalive_time_when(tp); | 647 | elapsed = keepalive_time_when(tp); |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 7ecbe5eb19f8..c73e61750642 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -2356,6 +2356,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu | |||
| 2356 | if (on_link) | 2356 | if (on_link) |
| 2357 | nrt->rt6i_flags &= ~RTF_GATEWAY; | 2357 | nrt->rt6i_flags &= ~RTF_GATEWAY; |
| 2358 | 2358 | ||
| 2359 | nrt->rt6i_protocol = RTPROT_REDIRECT; | ||
| 2359 | nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; | 2360 | nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; |
| 2360 | 2361 | ||
| 2361 | if (ip6_ins_rt(nrt)) | 2362 | if (ip6_ins_rt(nrt)) |
| @@ -2466,6 +2467,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net, | |||
| 2466 | .fc_dst_len = prefixlen, | 2467 | .fc_dst_len = prefixlen, |
| 2467 | .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | | 2468 | .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | |
| 2468 | RTF_UP | RTF_PREF(pref), | 2469 | RTF_UP | RTF_PREF(pref), |
| 2470 | .fc_protocol = RTPROT_RA, | ||
| 2469 | .fc_nlinfo.portid = 0, | 2471 | .fc_nlinfo.portid = 0, |
| 2470 | .fc_nlinfo.nlh = NULL, | 2472 | .fc_nlinfo.nlh = NULL, |
| 2471 | .fc_nlinfo.nl_net = net, | 2473 | .fc_nlinfo.nl_net = net, |
| @@ -2518,6 +2520,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr, | |||
| 2518 | .fc_ifindex = dev->ifindex, | 2520 | .fc_ifindex = dev->ifindex, |
| 2519 | .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | | 2521 | .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | |
| 2520 | RTF_UP | RTF_EXPIRES | RTF_PREF(pref), | 2522 | RTF_UP | RTF_EXPIRES | RTF_PREF(pref), |
| 2523 | .fc_protocol = RTPROT_RA, | ||
| 2521 | .fc_nlinfo.portid = 0, | 2524 | .fc_nlinfo.portid = 0, |
| 2522 | .fc_nlinfo.nlh = NULL, | 2525 | .fc_nlinfo.nlh = NULL, |
| 2523 | .fc_nlinfo.nl_net = dev_net(dev), | 2526 | .fc_nlinfo.nl_net = dev_net(dev), |
| @@ -3432,14 +3435,6 @@ static int rt6_fill_node(struct net *net, | |||
| 3432 | rtm->rtm_flags = 0; | 3435 | rtm->rtm_flags = 0; |
| 3433 | rtm->rtm_scope = RT_SCOPE_UNIVERSE; | 3436 | rtm->rtm_scope = RT_SCOPE_UNIVERSE; |
| 3434 | rtm->rtm_protocol = rt->rt6i_protocol; | 3437 | rtm->rtm_protocol = rt->rt6i_protocol; |
| 3435 | if (rt->rt6i_flags & RTF_DYNAMIC) | ||
| 3436 | rtm->rtm_protocol = RTPROT_REDIRECT; | ||
| 3437 | else if (rt->rt6i_flags & RTF_ADDRCONF) { | ||
| 3438 | if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO)) | ||
| 3439 | rtm->rtm_protocol = RTPROT_RA; | ||
| 3440 | else | ||
| 3441 | rtm->rtm_protocol = RTPROT_KERNEL; | ||
| 3442 | } | ||
| 3443 | 3438 | ||
| 3444 | if (rt->rt6i_flags & RTF_CACHE) | 3439 | if (rt->rt6i_flags & RTF_CACHE) |
| 3445 | rtm->rtm_flags |= RTM_F_CLONED; | 3440 | rtm->rtm_flags |= RTM_F_CLONED; |
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index e10624aa6959..9722bf839d9d 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c | |||
| @@ -1015,8 +1015,10 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, | |||
| 1015 | if (rds_ib_ring_empty(&ic->i_recv_ring)) | 1015 | if (rds_ib_ring_empty(&ic->i_recv_ring)) |
| 1016 | rds_ib_stats_inc(s_ib_rx_ring_empty); | 1016 | rds_ib_stats_inc(s_ib_rx_ring_empty); |
| 1017 | 1017 | ||
| 1018 | if (rds_ib_ring_low(&ic->i_recv_ring)) | 1018 | if (rds_ib_ring_low(&ic->i_recv_ring)) { |
| 1019 | rds_ib_recv_refill(conn, 0, GFP_NOWAIT); | 1019 | rds_ib_recv_refill(conn, 0, GFP_NOWAIT); |
| 1020 | rds_ib_stats_inc(s_ib_rx_refill_from_cq); | ||
| 1021 | } | ||
| 1020 | } | 1022 | } |
| 1021 | 1023 | ||
| 1022 | int rds_ib_recv_path(struct rds_conn_path *cp) | 1024 | int rds_ib_recv_path(struct rds_conn_path *cp) |
| @@ -1029,6 +1031,7 @@ int rds_ib_recv_path(struct rds_conn_path *cp) | |||
| 1029 | if (rds_conn_up(conn)) { | 1031 | if (rds_conn_up(conn)) { |
| 1030 | rds_ib_attempt_ack(ic); | 1032 | rds_ib_attempt_ack(ic); |
| 1031 | rds_ib_recv_refill(conn, 0, GFP_KERNEL); | 1033 | rds_ib_recv_refill(conn, 0, GFP_KERNEL); |
| 1034 | rds_ib_stats_inc(s_ib_rx_refill_from_thread); | ||
| 1032 | } | 1035 | } |
| 1033 | 1036 | ||
| 1034 | return ret; | 1037 | return ret; |
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 36f0ced9e60c..94ba5cfab860 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
| @@ -36,8 +36,8 @@ static struct tc_action_ops act_ipt_ops; | |||
| 36 | static unsigned int xt_net_id; | 36 | static unsigned int xt_net_id; |
| 37 | static struct tc_action_ops act_xt_ops; | 37 | static struct tc_action_ops act_xt_ops; |
| 38 | 38 | ||
| 39 | static int ipt_init_target(struct xt_entry_target *t, char *table, | 39 | static int ipt_init_target(struct net *net, struct xt_entry_target *t, |
| 40 | unsigned int hook) | 40 | char *table, unsigned int hook) |
| 41 | { | 41 | { |
| 42 | struct xt_tgchk_param par; | 42 | struct xt_tgchk_param par; |
| 43 | struct xt_target *target; | 43 | struct xt_target *target; |
| @@ -49,6 +49,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, | |||
| 49 | return PTR_ERR(target); | 49 | return PTR_ERR(target); |
| 50 | 50 | ||
| 51 | t->u.kernel.target = target; | 51 | t->u.kernel.target = target; |
| 52 | par.net = net; | ||
| 52 | par.table = table; | 53 | par.table = table; |
| 53 | par.entryinfo = NULL; | 54 | par.entryinfo = NULL; |
| 54 | par.target = target; | 55 | par.target = target; |
| @@ -91,10 +92,11 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = { | |||
| 91 | [TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) }, | 92 | [TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) }, |
| 92 | }; | 93 | }; |
| 93 | 94 | ||
| 94 | static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla, | 95 | static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, |
| 95 | struct nlattr *est, struct tc_action **a, | 96 | struct nlattr *est, struct tc_action **a, |
| 96 | const struct tc_action_ops *ops, int ovr, int bind) | 97 | const struct tc_action_ops *ops, int ovr, int bind) |
| 97 | { | 98 | { |
| 99 | struct tc_action_net *tn = net_generic(net, id); | ||
| 98 | struct nlattr *tb[TCA_IPT_MAX + 1]; | 100 | struct nlattr *tb[TCA_IPT_MAX + 1]; |
| 99 | struct tcf_ipt *ipt; | 101 | struct tcf_ipt *ipt; |
| 100 | struct xt_entry_target *td, *t; | 102 | struct xt_entry_target *td, *t; |
| @@ -159,7 +161,7 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla, | |||
| 159 | if (unlikely(!t)) | 161 | if (unlikely(!t)) |
| 160 | goto err2; | 162 | goto err2; |
| 161 | 163 | ||
| 162 | err = ipt_init_target(t, tname, hook); | 164 | err = ipt_init_target(net, t, tname, hook); |
| 163 | if (err < 0) | 165 | if (err < 0) |
| 164 | goto err3; | 166 | goto err3; |
| 165 | 167 | ||
| @@ -193,18 +195,16 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, | |||
| 193 | struct nlattr *est, struct tc_action **a, int ovr, | 195 | struct nlattr *est, struct tc_action **a, int ovr, |
| 194 | int bind) | 196 | int bind) |
| 195 | { | 197 | { |
| 196 | struct tc_action_net *tn = net_generic(net, ipt_net_id); | 198 | return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr, |
| 197 | 199 | bind); | |
| 198 | return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind); | ||
| 199 | } | 200 | } |
| 200 | 201 | ||
| 201 | static int tcf_xt_init(struct net *net, struct nlattr *nla, | 202 | static int tcf_xt_init(struct net *net, struct nlattr *nla, |
| 202 | struct nlattr *est, struct tc_action **a, int ovr, | 203 | struct nlattr *est, struct tc_action **a, int ovr, |
| 203 | int bind) | 204 | int bind) |
| 204 | { | 205 | { |
| 205 | struct tc_action_net *tn = net_generic(net, xt_net_id); | 206 | return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr, |
| 206 | 207 | bind); | |
| 207 | return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind); | ||
| 208 | } | 208 | } |
| 209 | 209 | ||
| 210 | static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a, | 210 | static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a, |
