| author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-26 11:43:05 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-26 11:43:05 -0400 |
| commit | ec3b67c11df42362ccda81261d62829042f223f0 (patch) | |
| tree | ab66bc43d98e38eda7162f76208993b2280f88f3 /net/ipv4 | |
| parent | e868171a94b637158a3930c9adfb448d0df163cd (diff) | |
| parent | 4be2700fb7b95f2a7cef9324879cafccab8774fc (diff) | |
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (32 commits)
[NetLabel]: correct usage of RCU locking
[TCP]: fix D-SACK cwnd handling
[NET] napi: use non-interruptible sleep in napi_disable
[SCTP] net/sctp/auth.c: make 3 functions static
[TCP]: Add missing I/O AT code to ipv6 side.
[SCTP]: #if 0 sctp_update_copy_cksum()
[INET]: Unexport icmpmsg_statistics
[NET]: Unexport sock_enable_timestamp().
[TCP]: Make tcp_match_skb_to_sack() static.
[IRDA]: Make ircomm_tty static.
[NET] fs/proc/proc_net.c: make a struct static
[NET] dev_change_name: ignore changes to same name
[NET]: Document some simple rules for actions
[NET_CLS_ACT]: Use skb_act_clone
[NET_CLS_ACT]: Introduce skb_act_clone
[TCP]: Fix scatterlist handling in MD5 signature support.
[IPSEC]: Fix scatterlist handling in skb_icv_walk().
[IPSEC]: Add missing sg_init_table() calls to ESP.
[CRYPTO]: Initialize TCRYPT on-stack scatterlist objects correctly.
[CRYPTO]: HMAC needs some more scatterlist fixups.
...
Diffstat (limited to 'net/ipv4')
| -rw-r--r-- | net/ipv4/cipso_ipv4.c | 39 |
| -rw-r--r-- | net/ipv4/esp4.c | 2 |
| -rw-r--r-- | net/ipv4/icmp.c | 1 |
| -rw-r--r-- | net/ipv4/proc.c | 8 |
| -rw-r--r-- | net/ipv4/tcp_input.c | 32 |
| -rw-r--r-- | net/ipv4/tcp_ipv4.c | 5 |
| -rw-r--r-- | net/ipv4/udp.c | 2 |
7 files changed, 33 insertions, 56 deletions
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 805a78e6ed55..f18e88bc86ec 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -504,22 +504,16 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def)
 	INIT_RCU_HEAD(&doi_def->rcu);
 	INIT_LIST_HEAD(&doi_def->dom_list);
 
-	rcu_read_lock();
-	if (cipso_v4_doi_search(doi_def->doi) != NULL)
-		goto doi_add_failure_rlock;
 	spin_lock(&cipso_v4_doi_list_lock);
 	if (cipso_v4_doi_search(doi_def->doi) != NULL)
-		goto doi_add_failure_slock;
+		goto doi_add_failure;
 	list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list);
 	spin_unlock(&cipso_v4_doi_list_lock);
-	rcu_read_unlock();
 
 	return 0;
 
-doi_add_failure_slock:
+doi_add_failure:
 	spin_unlock(&cipso_v4_doi_list_lock);
-doi_add_failure_rlock:
-	rcu_read_unlock();
 	return -EEXIST;
 }
 
@@ -543,29 +537,23 @@ int cipso_v4_doi_remove(u32 doi,
 	struct cipso_v4_doi *doi_def;
 	struct cipso_v4_domhsh_entry *dom_iter;
 
-	rcu_read_lock();
-	if (cipso_v4_doi_search(doi) != NULL) {
-		spin_lock(&cipso_v4_doi_list_lock);
-		doi_def = cipso_v4_doi_search(doi);
-		if (doi_def == NULL) {
-			spin_unlock(&cipso_v4_doi_list_lock);
-			rcu_read_unlock();
-			return -ENOENT;
-		}
+	spin_lock(&cipso_v4_doi_list_lock);
+	doi_def = cipso_v4_doi_search(doi);
+	if (doi_def != NULL) {
 		doi_def->valid = 0;
 		list_del_rcu(&doi_def->list);
 		spin_unlock(&cipso_v4_doi_list_lock);
+		rcu_read_lock();
 		list_for_each_entry_rcu(dom_iter, &doi_def->dom_list, list)
 			if (dom_iter->valid)
 				netlbl_domhsh_remove(dom_iter->domain,
 						     audit_info);
-		cipso_v4_cache_invalidate();
 		rcu_read_unlock();
-
+		cipso_v4_cache_invalidate();
 		call_rcu(&doi_def->rcu, callback);
 		return 0;
 	}
-	rcu_read_unlock();
+	spin_unlock(&cipso_v4_doi_list_lock);
 
 	return -ENOENT;
 }
@@ -653,22 +641,19 @@ int cipso_v4_doi_domhsh_add(struct cipso_v4_doi *doi_def, const char *domain)
 	new_dom->valid = 1;
 	INIT_RCU_HEAD(&new_dom->rcu);
 
-	rcu_read_lock();
 	spin_lock(&cipso_v4_doi_list_lock);
-	list_for_each_entry_rcu(iter, &doi_def->dom_list, list)
+	list_for_each_entry(iter, &doi_def->dom_list, list)
 		if (iter->valid &&
 		    ((domain != NULL && iter->domain != NULL &&
 		      strcmp(iter->domain, domain) == 0) ||
 		     (domain == NULL && iter->domain == NULL))) {
 			spin_unlock(&cipso_v4_doi_list_lock);
-			rcu_read_unlock();
 			kfree(new_dom->domain);
 			kfree(new_dom);
 			return -EEXIST;
 		}
 	list_add_tail_rcu(&new_dom->list, &doi_def->dom_list);
 	spin_unlock(&cipso_v4_doi_list_lock);
-	rcu_read_unlock();
 
 	return 0;
 }
@@ -689,9 +674,8 @@ int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def,
 {
 	struct cipso_v4_domhsh_entry *iter;
 
-	rcu_read_lock();
 	spin_lock(&cipso_v4_doi_list_lock);
-	list_for_each_entry_rcu(iter, &doi_def->dom_list, list)
+	list_for_each_entry(iter, &doi_def->dom_list, list)
 		if (iter->valid &&
 		    ((domain != NULL && iter->domain != NULL &&
 		      strcmp(iter->domain, domain) == 0) ||
@@ -699,13 +683,10 @@ int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def,
 			iter->valid = 0;
 			list_del_rcu(&iter->list);
 			spin_unlock(&cipso_v4_doi_list_lock);
-			rcu_read_unlock();
 			call_rcu(&iter->rcu, cipso_v4_doi_domhsh_free);
-
 			return 0;
 		}
 	spin_unlock(&cipso_v4_doi_list_lock);
-	rcu_read_unlock();
 
 	return -ENOENT;
 }
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 6b1a31a74cf2..ba9840195cf2 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -110,6 +110,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 		if (!sg)
 			goto unlock;
 	}
+	sg_init_table(sg, nfrags);
 	skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
 	err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
 	if (unlikely(sg != &esp->sgbuf[0]))
@@ -201,6 +202,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 		if (!sg)
 			goto out;
 	}
+	sg_init_table(sg, nfrags);
 	skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen);
 	err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
 	if (unlikely(sg != &esp->sgbuf[0]))
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 272c69e106e9..233de0634298 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1104,5 +1104,4 @@ void __init icmp_init(struct net_proto_family *ops)
 EXPORT_SYMBOL(icmp_err_convert);
 EXPORT_SYMBOL(icmp_send);
 EXPORT_SYMBOL(icmp_statistics);
-EXPORT_SYMBOL(icmpmsg_statistics);
 EXPORT_SYMBOL(xrlim_allow);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index fd16cb8f8abe..9be0daa9c0ec 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -121,14 +121,6 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
 	SNMP_MIB_SENTINEL
 };
 
-static const struct snmp_mib snmp4_icmp_list[] = {
-	SNMP_MIB_ITEM("InMsgs", ICMP_MIB_INMSGS),
-	SNMP_MIB_ITEM("InErrors", ICMP_MIB_INERRORS),
-	SNMP_MIB_ITEM("OutMsgs", ICMP_MIB_OUTMSGS),
-	SNMP_MIB_ITEM("OutErrors", ICMP_MIB_OUTERRORS),
-	SNMP_MIB_SENTINEL
-};
-
 static struct {
 	char *name;
 	int index;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3dbbb44b3e7d..69d8c38ccd39 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -103,7 +103,7 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */
 #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
-#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained DSACK info */
+#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
 #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */
 
 #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
@@ -866,7 +866,7 @@ static void tcp_disable_fack(struct tcp_sock *tp)
 	tp->rx_opt.sack_ok &= ~2;
 }
 
-/* Take a notice that peer is sending DSACKs */
+/* Take a notice that peer is sending D-SACKs */
 static void tcp_dsack_seen(struct tcp_sock *tp)
 {
 	tp->rx_opt.sack_ok |= 4;
@@ -1058,7 +1058,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
  *
  * With D-SACK the lower bound is extended to cover sequence space below
  * SND.UNA down to undo_marker, which is the last point of interest. Yet
- * again, DSACK block must not to go across snd_una (for the same reason as
+ * again, D-SACK block must not to go across snd_una (for the same reason as
  * for the normal SACK blocks, explained above). But there all simplicity
  * ends, TCP might receive valid D-SACKs below that. As long as they reside
  * fully below undo_marker they do not affect behavior in anyway and can
@@ -1080,7 +1080,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
 	if (!before(start_seq, tp->snd_nxt))
 		return 0;
 
-	/* In outstanding window? ...This is valid exit for DSACKs too.
+	/* In outstanding window? ...This is valid exit for D-SACKs too.
 	 * start_seq == snd_una is non-sensical (see comments above)
 	 */
 	if (after(start_seq, tp->snd_una))
@@ -1204,8 +1204,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
  * which may fail and creates some hassle (caller must handle error case
  * returns).
  */
-int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
+static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 			  u32 start_seq, u32 end_seq)
 {
 	int in_sack, err;
 	unsigned int pkt_len;
@@ -1248,6 +1248,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	int cached_fack_count;
 	int i;
 	int first_sack_index;
+	int force_one_sack;
 
 	if (!tp->sacked_out) {
 		if (WARN_ON(tp->fackets_out))
@@ -1272,18 +1273,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	 * if the only SACK change is the increase of the end_seq of
 	 * the first block then only apply that SACK block
 	 * and use retrans queue hinting otherwise slowpath */
-	flag = 1;
+	force_one_sack = 1;
 	for (i = 0; i < num_sacks; i++) {
 		__be32 start_seq = sp[i].start_seq;
 		__be32 end_seq = sp[i].end_seq;
 
 		if (i == 0) {
 			if (tp->recv_sack_cache[i].start_seq != start_seq)
-				flag = 0;
+				force_one_sack = 0;
 		} else {
 			if ((tp->recv_sack_cache[i].start_seq != start_seq) ||
 			    (tp->recv_sack_cache[i].end_seq != end_seq))
-				flag = 0;
+				force_one_sack = 0;
 		}
 		tp->recv_sack_cache[i].start_seq = start_seq;
 		tp->recv_sack_cache[i].end_seq = end_seq;
@@ -1295,7 +1296,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	}
 
 	first_sack_index = 0;
-	if (flag)
+	if (force_one_sack)
 		num_sacks = 1;
 	else {
 		int j;
@@ -1321,9 +1322,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 		}
 	}
 
-	/* clear flag as used for different purpose in following code */
-	flag = 0;
-
 	/* Use SACK fastpath hint if valid */
 	cached_skb = tp->fastpath_skb_hint;
 	cached_fack_count = tp->fastpath_cnt_hint;
@@ -1615,7 +1613,7 @@ void tcp_enter_frto(struct sock *sk)
 	    !icsk->icsk_retransmits)) {
 		tp->prior_ssthresh = tcp_current_ssthresh(sk);
 		/* Our state is too optimistic in ssthresh() call because cwnd
-		 * is not reduced until tcp_enter_frto_loss() when previous FRTO
+		 * is not reduced until tcp_enter_frto_loss() when previous F-RTO
 		 * recovery has not yet completed. Pattern would be this: RTO,
 		 * Cumulative ACK, RTO (2xRTO for the same segment does not end
 		 * up here twice).
@@ -1801,7 +1799,7 @@ void tcp_enter_loss(struct sock *sk, int how)
 	tcp_set_ca_state(sk, TCP_CA_Loss);
 	tp->high_seq = tp->snd_nxt;
 	TCP_ECN_queue_cwr(tp);
-	/* Abort FRTO algorithm if one is in progress */
+	/* Abort F-RTO algorithm if one is in progress */
 	tp->frto_counter = 0;
 }
 
@@ -1946,7 +1944,7 @@ static int tcp_time_to_recover(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 packets_out;
 
-	/* Do not perform any recovery during FRTO algorithm */
+	/* Do not perform any recovery during F-RTO algorithm */
 	if (tp->frto_counter)
 		return 0;
 
@@ -2962,7 +2960,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 	}
 
 	if (tp->frto_counter == 1) {
-		/* Sending of the next skb must be allowed or no FRTO */
+		/* Sending of the next skb must be allowed or no F-RTO */
 		if (!tcp_send_head(sk) ||
 		    after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
 			  tp->snd_una + tp->snd_wnd)) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 38cf73a56731..ad759f1c3777 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1055,6 +1055,9 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
 	bp->pad = 0;
 	bp->protocol = protocol;
 	bp->len = htons(tcplen);
+
+	sg_init_table(sg, 4);
+
 	sg_set_buf(&sg[block++], bp, sizeof(*bp));
 	nbytes += sizeof(*bp);
 
@@ -1080,6 +1083,8 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
 	sg_set_buf(&sg[block++], key->key, key->keylen);
 	nbytes += key->keylen;
 
+	sg_mark_end(sg, block);
+
 	/* Now store the Hash into the packet */
 	err = crypto_hash_init(desc);
 	if (err)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 35d2b0e9e10b..4bc25b46f33f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1152,7 +1152,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 		return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);
 
 	sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest,
-			       skb->dev->ifindex, udptable );
+			       inet_iif(skb), udptable);
 
 	if (sk != NULL) {
 		int ret = udp_queue_rcv_skb(sk, skb);