Diffstat (limited to 'net')
-rw-r--r--  net/decnet/af_decnet.c                 |  40
-rw-r--r--  net/decnet/dn_nsp_out.c                |  63
-rw-r--r--  net/ipv4/ah4.c                         |  18
-rw-r--r--  net/ipv4/esp4.c                        |  24
-rw-r--r--  net/ipv4/ipcomp.c                      |   3
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c     |   2
-rw-r--r--  net/ipv4/tcp.c                         |  17
-rw-r--r--  net/ipv4/tcp_input.c                   |  36
-rw-r--r--  net/ipv4/tcp_output.c                  |  55
-rw-r--r--  net/ipv6/addrconf.c                    |   6
-rw-r--r--  net/ipv6/ah6.c                         |  18
-rw-r--r--  net/ipv6/esp6.c                        |  24
-rw-r--r--  net/ipv6/icmp.c                        |   2
-rw-r--r--  net/ipv6/ipcomp6.c                     |   3
-rw-r--r--  net/ipv6/raw.c                         |   4
-rw-r--r--  net/sctp/endpointola.c                 |   3
-rw-r--r--  net/sctp/socket.c                      |   3
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c  |   5
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c    |   9
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_mech.c   |  12
20 files changed, 134 insertions, 213 deletions
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 621680f127af..348f36b529f7 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1876,8 +1876,27 @@ static inline unsigned int dn_current_mss(struct sock *sk, int flags)
 	return mss_now;
 }
 
+/*
+ * N.B. We get the timeout wrong here, but then we always did get it
+ * wrong before and this is another step along the road to correcting
+ * it. It ought to get updated each time we pass through the routine,
+ * but in practise it probably doesn't matter too much for now.
+ */
+static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
+			      unsigned long datalen, int noblock,
+			      int *errcode)
+{
+	struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
+						   noblock, errcode);
+	if (skb) {
+		skb->protocol = __constant_htons(ETH_P_DNA_RT);
+		skb->pkt_type = PACKET_OUTGOING;
+	}
+	return skb;
+}
+
 static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
 		      struct msghdr *msg, size_t size)
 {
 	struct sock *sk = sock->sk;
 	struct dn_scp *scp = DN_SK(sk);
@@ -1892,7 +1911,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
 	struct dn_skb_cb *cb;
 	size_t len;
 	unsigned char fctype;
-	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+	long timeo;
 
 	if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT))
 		return -EOPNOTSUPP;
@@ -1900,18 +1919,21 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
 	if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
 		return -EINVAL;
 
+	lock_sock(sk);
+	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 	/*
 	 * The only difference between stream sockets and sequenced packet
 	 * sockets is that the stream sockets always behave as if MSG_EOR
 	 * has been set.
 	 */
 	if (sock->type == SOCK_STREAM) {
-		if (flags & MSG_EOR)
-			return -EINVAL;
+		if (flags & MSG_EOR) {
+			err = -EINVAL;
+			goto out;
+		}
 		flags |= MSG_EOR;
 	}
 
-	lock_sock(sk);
 
 	err = dn_check_state(sk, addr, addr_len, &timeo, flags);
 	if (err)
@@ -1980,8 +2002,12 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 		/*
 		 * Get a suitably sized skb.
+		 * 64 is a bit of a hack really, but its larger than any
+		 * link-layer headers and has served us well as a good
+		 * guess as to their real length.
 		 */
-		skb = dn_alloc_send_skb(sk, &len, flags & MSG_DONTWAIT, timeo, &err);
+		skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
+					 flags & MSG_DONTWAIT, &err);
 
 		if (err)
 			break;
@@ -1991,7 +2017,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 		cb = DN_SKB_CB(skb);
 
-		skb_reserve(skb, DN_MAX_NSP_DATA_HEADER);
+		skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);
 
 		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
 			err = -EFAULT;
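
The allocation change above sizes the skb for the payload plus worst-case header room in one call and then reserves that room, instead of looping on the send buffer as dn_alloc_send_skb did. A standalone toy model of the reserve-then-fill pattern (illustrative only, not kernel code; the constants merely stand in for the diff's 64-byte link-layer guess and DN_MAX_NSP_DATA_HEADER):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy model of the allocate-reserve-fill pattern used by dn_alloc_send_pskb()
 * above: ask for payload + worst-case header room in one go, then leave that
 * room at the front so headers can be prepended later without reallocating.
 * The constants are placeholders, not the kernel's real values. */
#define LL_HEADER_GUESS  64   /* the diff's "64 is a bit of a hack" */
#define NSP_HEADER_MAX   16   /* stand-in for DN_MAX_NSP_DATA_HEADER */

int main(void)
{
	size_t len = 100;
	size_t headroom = LL_HEADER_GUESS + NSP_HEADER_MAX;
	unsigned char *buf = malloc(headroom + len);
	unsigned char *data;

	if (!buf)
		return 1;
	data = buf + headroom;          /* roughly what skb_reserve() does */
	memset(data, 0xAA, len);        /* roughly skb_put() plus the copy */
	printf("payload starts at offset %zu of %zu bytes\n",
	       headroom, headroom + len);
	free(buf);
	return 0;
}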
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index e0bebf4bbcad..53633d352868 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -137,69 +137,6 @@ struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri)
 }
 
 /*
- * Wrapper for the above, for allocs of data skbs. We try and get the
- * whole size thats been asked for (plus 11 bytes of header). If this
- * fails, then we try for any size over 16 bytes for SOCK_STREAMS.
- */
-struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err)
-{
-	int space;
-	int len;
-	struct sk_buff *skb = NULL;
-
-	*err = 0;
-
-	while(skb == NULL) {
-		if (signal_pending(current)) {
-			*err = sock_intr_errno(timeo);
-			break;
-		}
-
-		if (sk->sk_shutdown & SEND_SHUTDOWN) {
-			*err = EINVAL;
-			break;
-		}
-
-		if (sk->sk_err)
-			break;
-
-		len = *size + 11;
-		space = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
-
-		if (space < len) {
-			if ((sk->sk_socket->type == SOCK_STREAM) &&
-			    (space >= (16 + 11)))
-				len = space;
-		}
-
-		if (space < len) {
-			set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
-			if (noblock) {
-				*err = EWOULDBLOCK;
-				break;
-			}
-
-			clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-			SOCK_SLEEP_PRE(sk)
-
-			if ((sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc)) <
-			    len)
-				schedule();
-
-			SOCK_SLEEP_POST(sk)
-			continue;
-		}
-
-		if ((skb = dn_alloc_skb(sk, len, sk->sk_allocation)) == NULL)
-			continue;
-
-		*size = len - 11;
-	}
-
-	return skb;
-}
-
-/*
  * Calculate persist timer based upon the smoothed round
  * trip time and the variance. Backoff according to the
  * nsp_backoff[] array.
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 514c85b2631a..035ad2c9e1ba 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -263,10 +263,8 @@ static int ah_init_state(struct xfrm_state *x)
 
 error:
 	if (ahp) {
-		if (ahp->work_icv)
-			kfree(ahp->work_icv);
-		if (ahp->tfm)
-			crypto_free_tfm(ahp->tfm);
+		kfree(ahp->work_icv);
+		crypto_free_tfm(ahp->tfm);
 		kfree(ahp);
 	}
 	return -EINVAL;
@@ -279,14 +277,10 @@ static void ah_destroy(struct xfrm_state *x)
 	if (!ahp)
 		return;
 
-	if (ahp->work_icv) {
-		kfree(ahp->work_icv);
-		ahp->work_icv = NULL;
-	}
-	if (ahp->tfm) {
-		crypto_free_tfm(ahp->tfm);
-		ahp->tfm = NULL;
-	}
+	kfree(ahp->work_icv);
+	ahp->work_icv = NULL;
+	crypto_free_tfm(ahp->tfm);
+	ahp->tfm = NULL;
 	kfree(ahp);
 }
 
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index b31ffc5053d2..1b5a09d1b90b 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -343,22 +343,14 @@ static void esp_destroy(struct xfrm_state *x)
 	if (!esp)
 		return;
 
-	if (esp->conf.tfm) {
-		crypto_free_tfm(esp->conf.tfm);
-		esp->conf.tfm = NULL;
-	}
-	if (esp->conf.ivec) {
-		kfree(esp->conf.ivec);
-		esp->conf.ivec = NULL;
-	}
-	if (esp->auth.tfm) {
-		crypto_free_tfm(esp->auth.tfm);
-		esp->auth.tfm = NULL;
-	}
-	if (esp->auth.work_icv) {
-		kfree(esp->auth.work_icv);
-		esp->auth.work_icv = NULL;
-	}
+	crypto_free_tfm(esp->conf.tfm);
+	esp->conf.tfm = NULL;
+	kfree(esp->conf.ivec);
+	esp->conf.ivec = NULL;
+	crypto_free_tfm(esp->auth.tfm);
+	esp->auth.tfm = NULL;
+	kfree(esp->auth.work_icv);
+	esp->auth.work_icv = NULL;
 	kfree(esp);
 }
 
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index dcb7ee6c4858..fc718df17b40 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -345,8 +345,7 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms)
 
 	for_each_cpu(cpu) {
 		struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
-		if (tfm)
-			crypto_free_tfm(tfm);
+		crypto_free_tfm(tfm);
 	}
 	free_percpu(tfms);
 }
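
The cleanups in ah4.c, esp4.c and ipcomp.c above (and the matching IPv6, SCTP and SUNRPC hunks below) all rely on the same property: kfree() and crypto_free_tfm() are no-ops when handed NULL, so the surrounding if-guards add nothing. A userspace sketch of the same shape using free(), which standard C likewise defines as a no-op on NULL (illustrative only; the struct and field names are made up, not kernel code):

#include <stdlib.h>

/* The guards removed above are redundant because kfree(NULL) and
 * crypto_free_tfm(NULL) do nothing.  free(NULL) has the same guarantee,
 * so this toy destructor shows the shape of the cleaned-up code. */
struct toy_state {
	char *work_icv;
	char *ivec;
};

static void toy_destroy(struct toy_state *s)
{
	if (!s)
		return;
	free(s->work_icv);      /* fine even when work_icv was never set */
	s->work_icv = NULL;
	free(s->ivec);          /* likewise */
	s->ivec = NULL;
	free(s);
}

int main(void)
{
	toy_destroy(calloc(1, sizeof(struct toy_state)));
	return 0;
}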
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 2d05cafec221..7d38913754b1 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -144,7 +144,7 @@ clusterip_config_init(struct ipt_clusterip_tgt_info *i, u_int32_t ip,
 	memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
 	c->num_total_nodes = i->num_total_nodes;
 	c->num_local_nodes = i->num_local_nodes;
-	memcpy(&c->local_nodes, &i->local_nodes, sizeof(&c->local_nodes));
+	memcpy(&c->local_nodes, &i->local_nodes, sizeof(c->local_nodes));
 	c->hash_mode = i->hash_mode;
 	c->hash_initval = i->hash_initval;
 	atomic_set(&c->refcount, 1);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 02fdda68718d..cbcc9fc47783 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -552,8 +552,7 @@ new_segment:
 			tcp_mark_push(tp, skb);
 			goto new_segment;
 		}
-		if (sk->sk_forward_alloc < copy &&
-		    !sk_stream_mem_schedule(sk, copy, 0))
+		if (!sk_stream_wmem_schedule(sk, copy))
 			goto wait_for_memory;
 
 		if (can_coalesce) {
@@ -770,19 +769,23 @@ new_segment:
 			if (off == PAGE_SIZE) {
 				put_page(page);
 				TCP_PAGE(sk) = page = NULL;
+				TCP_OFF(sk) = off = 0;
 			}
-		}
+		} else
+			BUG_ON(off);
+
+		if (copy > PAGE_SIZE - off)
+			copy = PAGE_SIZE - off;
+
+		if (!sk_stream_wmem_schedule(sk, copy))
+			goto wait_for_memory;
 
 		if (!page) {
 			/* Allocate new cache page. */
 			if (!(page = sk_stream_alloc_page(sk)))
 				goto wait_for_memory;
-			off = 0;
 		}
 
-		if (copy > PAGE_SIZE - off)
-			copy = PAGE_SIZE - off;
-
 		/* Time to copy data. We are close to
 		 * the end! */
 		err = skb_copy_to_page(sk, from, skb, page,
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1afb080bdf0c..29222b964951 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -923,14 +923,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	int flag = 0;
 	int i;
 
-	/* So, SACKs for already sent large segments will be lost.
-	 * Not good, but alternative is to resegment the queue. */
-	if (sk->sk_route_caps & NETIF_F_TSO) {
-		sk->sk_route_caps &= ~NETIF_F_TSO;
-		sock_set_flag(sk, SOCK_NO_LARGESEND);
-		tp->mss_cache = tp->mss_cache;
-	}
-
 	if (!tp->sacked_out)
 		tp->fackets_out = 0;
 	prior_fackets = tp->fackets_out;
@@ -978,20 +970,40 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 			flag |= FLAG_DATA_LOST;
 
 		sk_stream_for_retrans_queue(skb, sk) {
-			u8 sacked = TCP_SKB_CB(skb)->sacked;
-			int in_sack;
+			int in_sack, pcount;
+			u8 sacked;
 
 			/* The retransmission queue is always in order, so
 			 * we can short-circuit the walk early.
 			 */
-			if(!before(TCP_SKB_CB(skb)->seq, end_seq))
+			if (!before(TCP_SKB_CB(skb)->seq, end_seq))
 				break;
 
-			fack_count += tcp_skb_pcount(skb);
+			pcount = tcp_skb_pcount(skb);
+
+			if (pcount > 1 &&
+			    (after(start_seq, TCP_SKB_CB(skb)->seq) ||
+			     before(end_seq, TCP_SKB_CB(skb)->end_seq))) {
+				unsigned int pkt_len;
+
+				if (after(start_seq, TCP_SKB_CB(skb)->seq))
+					pkt_len = (start_seq -
+						   TCP_SKB_CB(skb)->seq);
+				else
+					pkt_len = (end_seq -
+						   TCP_SKB_CB(skb)->seq);
+				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size))
+					break;
+				pcount = tcp_skb_pcount(skb);
+			}
+
+			fack_count += pcount;
 
 			in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
 				!before(end_seq, TCP_SKB_CB(skb)->end_seq);
 
+			sacked = TCP_SKB_CB(skb)->sacked;
+
 			/* Account D-SACK for retransmitted packet. */
 			if ((dup_sack && in_sack) &&
 			    (sacked & TCPCB_RETRANS) &&
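
The block added to tcp_sacktag_write_queue() splits a multi-segment (TSO) skb when a SACK block covers only part of it, so per-segment tagging stays exact instead of disabling TSO as the removed code did. A toy calculation of the split point, mirroring the pkt_len logic above (illustrative only; plain unsigned arithmetic, whereas the kernel's before()/after() also handle sequence-number wraparound):

#include <stdio.h>

/* Toy version of the split-point arithmetic: when a SACK block only
 * partially covers a multi-segment skb, the skb is cut at the edge of
 * the SACK block before tagging. */
int main(void)
{
	unsigned int seq = 1000, end_seq = 1000 + 4380;   /* the skb        */
	unsigned int sack_start = 2460, sack_end = 9000;  /* the SACK block */
	unsigned int pkt_len;

	if (sack_start > seq)			/* SACK starts inside the skb */
		pkt_len = sack_start - seq;	/* keep the un-SACKed head    */
	else					/* SACK ends inside the skb   */
		pkt_len = sack_end - seq;	/* keep the SACKed head       */

	printf("skb [%u,%u): fragment %u bytes in\n", seq, end_seq, pkt_len);
	return 0;
}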
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 75b68116682a..6094db5e11be 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -428,11 +428,11 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned
  * packet to the list. This won't be called frequently, I hope.
  * Remember, these are still headerless SKBs at this point.
  */
-static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
+int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *buff;
-	int nsize;
+	int nsize, old_factor;
 	u16 flags;
 
 	nsize = skb_headlen(skb) - len;
@@ -490,18 +490,29 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned
 		tp->left_out -= tcp_skb_pcount(skb);
 	}
 
+	old_factor = tcp_skb_pcount(skb);
+
 	/* Fix up tso_factor for both original and new SKB. */
 	tcp_set_skb_tso_segs(sk, skb, mss_now);
 	tcp_set_skb_tso_segs(sk, buff, mss_now);
 
-	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
-		tp->lost_out += tcp_skb_pcount(skb);
-		tp->left_out += tcp_skb_pcount(skb);
-	}
+	/* If this packet has been sent out already, we must
+	 * adjust the various packet counters.
+	 */
+	if (after(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
+		int diff = old_factor - tcp_skb_pcount(skb) -
+			tcp_skb_pcount(buff);
 
-	if (TCP_SKB_CB(buff)->sacked&TCPCB_LOST) {
-		tp->lost_out += tcp_skb_pcount(buff);
-		tp->left_out += tcp_skb_pcount(buff);
+		tp->packets_out -= diff;
+		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
+			tp->lost_out -= diff;
+			tp->left_out -= diff;
+		}
+		if (diff > 0) {
+			tp->fackets_out -= diff;
+			if ((int)tp->fackets_out < 0)
+				tp->fackets_out = 0;
+		}
 	}
 
 	/* Link BUFF into the send queue. */
@@ -1350,12 +1361,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
 			BUG();
-
-		if (sk->sk_route_caps & NETIF_F_TSO) {
-			sk->sk_route_caps &= ~NETIF_F_TSO;
-			sock_set_flag(sk, SOCK_NO_LARGESEND);
-		}
-
 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
 			return -ENOMEM;
 	}
@@ -1370,22 +1375,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 		return -EAGAIN;
 
 	if (skb->len > cur_mss) {
-		int old_factor = tcp_skb_pcount(skb);
-		int diff;
-
 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
 			return -ENOMEM; /* We'll try again later. */
-
-		/* New SKB created, account for it. */
-		diff = old_factor - tcp_skb_pcount(skb) -
-		       tcp_skb_pcount(skb->next);
-		tp->packets_out -= diff;
-
-		if (diff > 0) {
-			tp->fackets_out -= diff;
-			if ((int)tp->fackets_out < 0)
-				tp->fackets_out = 0;
-		}
 	}
 
 	/* Collapse two adjacent packets if worthwhile and we can. */
@@ -1993,12 +1984,6 @@ int tcp_write_wakeup(struct sock *sk)
 			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
 			if (tcp_fragment(sk, skb, seg_size, mss))
 				return -1;
-			/* SWS override triggered forced fragmentation.
-			 * Disable TSO, the connection is too sick. */
-			if (sk->sk_route_caps & NETIF_F_TSO) {
-				sock_set_flag(sk, SOCK_NO_LARGESEND);
-				sk->sk_route_caps &= ~NETIF_F_TSO;
-			}
 		} else if (!tcp_skb_pcount(skb))
 			tcp_set_skb_tso_segs(sk, skb, mss);
 
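
With the counter adjustment moved into tcp_fragment() itself, callers such as tcp_retransmit_skb() no longer repeat the old_factor bookkeeping, and the adjustment only applies when the fragmented data was already sent (tp->snd_nxt is past the new buff). A standalone sketch of the arithmetic (illustrative only; pcount() is a stand-in for tcp_skb_pcount(), not kernel code):

#include <stdio.h>

/* Toy arithmetic for the accounting now done inside tcp_fragment():
 * splitting an skb that counted as old_factor segments into two pieces
 * usually preserves the total segment count, but a cut off an MSS boundary
 * creates an extra segment, making diff negative so packets_out must grow. */
static int pcount(unsigned int bytes, unsigned int mss)
{
	return (int)((bytes + mss - 1) / mss);	/* ceil(bytes / mss) */
}

int main(void)
{
	unsigned int mss = 1460, skb_len = 4380;  /* exactly 3 segments   */
	unsigned int split = 2000;		  /* cut inside segment 2 */
	int old_factor = pcount(skb_len, mss);
	int diff = old_factor - pcount(split, mss) - pcount(skb_len - split, mss);

	printf("old_factor=%d diff=%d\n", old_factor, diff);	/* 3 and -1 */
	/* packets_out -= diff;  a negative diff increases the count */
	return 0;
}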
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 937ad32db77c..6d6fb74f3b52 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3593,10 +3593,8 @@ void __exit addrconf_cleanup(void)
 	rtnl_unlock();
 
 #ifdef CONFIG_IPV6_PRIVACY
-	if (likely(md5_tfm != NULL)) {
-		crypto_free_tfm(md5_tfm);
-		md5_tfm = NULL;
-	}
+	crypto_free_tfm(md5_tfm);
+	md5_tfm = NULL;
 #endif
 
 #ifdef CONFIG_PROC_FS
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 0ebfad907a03..f3629730eb15 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -401,10 +401,8 @@ static int ah6_init_state(struct xfrm_state *x)
 
 error:
 	if (ahp) {
-		if (ahp->work_icv)
-			kfree(ahp->work_icv);
-		if (ahp->tfm)
-			crypto_free_tfm(ahp->tfm);
+		kfree(ahp->work_icv);
+		crypto_free_tfm(ahp->tfm);
 		kfree(ahp);
 	}
 	return -EINVAL;
@@ -417,14 +415,10 @@ static void ah6_destroy(struct xfrm_state *x)
 	if (!ahp)
 		return;
 
-	if (ahp->work_icv) {
-		kfree(ahp->work_icv);
-		ahp->work_icv = NULL;
-	}
-	if (ahp->tfm) {
-		crypto_free_tfm(ahp->tfm);
-		ahp->tfm = NULL;
-	}
+	kfree(ahp->work_icv);
+	ahp->work_icv = NULL;
+	crypto_free_tfm(ahp->tfm);
+	ahp->tfm = NULL;
 	kfree(ahp);
 }
 
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index e8bff9d3d96c..9b27460f0cc7 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -276,22 +276,14 @@ static void esp6_destroy(struct xfrm_state *x)
 	if (!esp)
 		return;
 
-	if (esp->conf.tfm) {
-		crypto_free_tfm(esp->conf.tfm);
-		esp->conf.tfm = NULL;
-	}
-	if (esp->conf.ivec) {
-		kfree(esp->conf.ivec);
-		esp->conf.ivec = NULL;
-	}
-	if (esp->auth.tfm) {
-		crypto_free_tfm(esp->auth.tfm);
-		esp->auth.tfm = NULL;
-	}
-	if (esp->auth.work_icv) {
-		kfree(esp->auth.work_icv);
-		esp->auth.work_icv = NULL;
-	}
+	crypto_free_tfm(esp->conf.tfm);
+	esp->conf.tfm = NULL;
+	kfree(esp->conf.ivec);
+	esp->conf.ivec = NULL;
+	crypto_free_tfm(esp->auth.tfm);
+	esp->auth.tfm = NULL;
+	kfree(esp->auth.work_icv);
+	esp->auth.work_icv = NULL;
 	kfree(esp);
 }
 
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 5176fc655ea9..fa8f1bb0aa52 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -549,7 +549,7 @@ static void icmpv6_notify(struct sk_buff *skb, int type, int code, u32 info)
 	read_lock(&raw_v6_lock);
 	if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) {
 		while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr,
-					    skb->dev->ifindex))) {
+					    IP6CB(skb)->iif))) {
 			rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
 			sk = sk_next(sk);
 		}
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 135383ef538f..85bfbc69b2c3 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -341,8 +341,7 @@ static void ipcomp6_free_tfms(struct crypto_tfm **tfms)
 
 	for_each_cpu(cpu) {
 		struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
-		if (tfm)
-			crypto_free_tfm(tfm);
+		crypto_free_tfm(tfm);
 	}
 	free_percpu(tfms);
 }
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 7a5863298f3f..ed3a76b30fd9 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -166,7 +166,7 @@ int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 	if (sk == NULL)
 		goto out;
 
-	sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr, skb->dev->ifindex);
+	sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr, IP6CB(skb)->iif);
 
 	while (sk) {
 		delivered = 1;
@@ -178,7 +178,7 @@ int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 			rawv6_rcv(sk, clone);
 		}
 		sk = __raw_v6_lookup(sk_next(sk), nexthdr, daddr, saddr,
-				     skb->dev->ifindex);
+				     IP6CB(skb)->iif);
 	}
 out:
 	read_unlock(&raw_v6_lock);
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index e47ac0d1a6d6..e22ccd655965 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -193,8 +193,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 	sctp_unhash_endpoint(ep);
 
 	/* Free up the HMAC transform. */
-	if (sctp_sk(ep->base.sk)->hmac)
-		sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);
+	sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);
 
 	/* Cleanup. */
 	sctp_inq_free(&ep->base.inqueue);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 4454afe4727e..91ec8c936913 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4194,8 +4194,7 @@ out:
 	sctp_release_sock(sk);
 	return err;
 cleanup:
-	if (tfm)
-		sctp_crypto_free_tfm(tfm);
+	sctp_crypto_free_tfm(tfm);
 	goto out;
 }
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 5a7265aeaf83..ee6ae74cd1b2 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -160,7 +160,7 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
 				" unsupported checksum %d", cksumtype);
 		goto out;
 	}
-	if (!(tfm = crypto_alloc_tfm(cksumname, 0)))
+	if (!(tfm = crypto_alloc_tfm(cksumname, CRYPTO_TFM_REQ_MAY_SLEEP)))
 		goto out;
 	cksum->len = crypto_tfm_alg_digestsize(tfm);
 	if ((cksum->data = kmalloc(cksum->len, GFP_KERNEL)) == NULL)
@@ -199,8 +199,7 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
 	crypto_digest_final(tfm, cksum->data);
 	code = 0;
 out:
-	if (tfm)
-		crypto_free_tfm(tfm);
+	crypto_free_tfm(tfm);
 	return code;
 }
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index cf726510df8e..606a8a82cafb 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -185,12 +185,9 @@ static void
 gss_delete_sec_context_kerberos(void *internal_ctx) {
 	struct krb5_ctx *kctx = internal_ctx;
 
-	if (kctx->seq)
-		crypto_free_tfm(kctx->seq);
-	if (kctx->enc)
-		crypto_free_tfm(kctx->enc);
-	if (kctx->mech_used.data)
-		kfree(kctx->mech_used.data);
+	crypto_free_tfm(kctx->seq);
+	crypto_free_tfm(kctx->enc);
+	kfree(kctx->mech_used.data);
 	kfree(kctx);
 }
 
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index dad05994c3eb..6c97d61baa9b 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -214,14 +214,10 @@ static void
 gss_delete_sec_context_spkm3(void *internal_ctx) {
 	struct spkm3_ctx *sctx = internal_ctx;
 
-	if(sctx->derived_integ_key)
-		crypto_free_tfm(sctx->derived_integ_key);
-	if(sctx->derived_conf_key)
-		crypto_free_tfm(sctx->derived_conf_key);
-	if(sctx->share_key.data)
-		kfree(sctx->share_key.data);
-	if(sctx->mech_used.data)
-		kfree(sctx->mech_used.data);
+	crypto_free_tfm(sctx->derived_integ_key);
+	crypto_free_tfm(sctx->derived_conf_key);
+	kfree(sctx->share_key.data);
+	kfree(sctx->mech_used.data);
 	kfree(sctx);
 }
 