author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-26 11:43:05 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-26 11:43:05 -0400
commit		ec3b67c11df42362ccda81261d62829042f223f0 (patch)
tree		ab66bc43d98e38eda7162f76208993b2280f88f3 /net
parent		e868171a94b637158a3930c9adfb448d0df163cd (diff)
parent		4be2700fb7b95f2a7cef9324879cafccab8774fc (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (32 commits)
  [NetLabel]: correct usage of RCU locking
  [TCP]: fix D-SACK cwnd handling
  [NET] napi: use non-interruptible sleep in napi_disable
  [SCTP] net/sctp/auth.c: make 3 functions static
  [TCP]: Add missing I/O AT code to ipv6 side.
  [SCTP]: #if 0 sctp_update_copy_cksum()
  [INET]: Unexport icmpmsg_statistics
  [NET]: Unexport sock_enable_timestamp().
  [TCP]: Make tcp_match_skb_to_sack() static.
  [IRDA]: Make ircomm_tty static.
  [NET] fs/proc/proc_net.c: make a struct static
  [NET] dev_change_name: ignore changes to same name
  [NET]: Document some simple rules for actions
  [NET_CLS_ACT]: Use skb_act_clone
  [NET_CLS_ACT]: Introduce skb_act_clone
  [TCP]: Fix scatterlist handling in MD5 signature support.
  [IPSEC]: Fix scatterlist handling in skb_icv_walk().
  [IPSEC]: Add missing sg_init_table() calls to ESP.
  [CRYPTO]: Initialize TCRYPT on-stack scatterlist objects correctly.
  [CRYPTO]: HMAC needs some more scatterlist fixups.
  ...
Diffstat (limited to 'net')
-rw-r--r--	net/core/dev.c				|   3
-rw-r--r--	net/core/skbuff.c			|   7
-rw-r--r--	net/core/sock.c				|   1
-rw-r--r--	net/dccp/ccids/ccid2.c			|   4
-rw-r--r--	net/dccp/ccids/ccid3.c			|  15
-rw-r--r--	net/dccp/input.c			|  48
-rw-r--r--	net/dccp/ipv4.c				|   6
-rw-r--r--	net/dccp/ipv6.c				|   4
-rw-r--r--	net/dccp/options.c			|  33
-rw-r--r--	net/ipv4/cipso_ipv4.c			|  39
-rw-r--r--	net/ipv4/esp4.c				|   2
-rw-r--r--	net/ipv4/icmp.c				|   1
-rw-r--r--	net/ipv4/proc.c				|   8
-rw-r--r--	net/ipv4/tcp_input.c			|  32
-rw-r--r--	net/ipv4/tcp_ipv4.c			|   5
-rw-r--r--	net/ipv4/udp.c				|   2
-rw-r--r--	net/ipv6/esp6.c				|   2
-rw-r--r--	net/ipv6/tcp_ipv6.c			|   6
-rw-r--r--	net/irda/ircomm/ircomm_tty.c		|   2
-rw-r--r--	net/mac80211/ieee80211_sta.c		|   5
-rw-r--r--	net/netlabel/netlabel_domainhash.c	|  37
-rw-r--r--	net/netlabel/netlabel_mgmt.c		|   4
-rw-r--r--	net/netlabel/netlabel_unlabeled.c	|   4
-rw-r--r--	net/sched/act_mirred.c			|   2
-rw-r--r--	net/sctp/auth.c				|   6
-rw-r--r--	net/sctp/crc32c.c			|   2
-rw-r--r--	net/xfrm/xfrm_algo.c			|   5
27 files changed, 142 insertions, 143 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index f1647d7dd14b..ddfef3b45bab 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -883,6 +883,9 @@ int dev_change_name(struct net_device *dev, char *newname)
 	if (!dev_valid_name(newname))
 		return -EINVAL;
 
+	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
+		return 0;
+
 	memcpy(oldname, dev->name, IFNAMSIZ);
 
 	if (strchr(newname, '%')) {
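
The hunk above makes dev_change_name() bail out early when the requested name is byte-for-byte identical to the current one, instead of walking through the rename machinery. A minimal userspace sketch of the same guard, assuming an illustrative change_name() helper and a NAMESZ stand-in for IFNAMSIZ (neither is a kernel symbol):

#include <stdio.h>
#include <string.h>

#define NAMESZ 16	/* stand-in for IFNAMSIZ */

/* Return 0 and do nothing when the "new" name equals the current one. */
static int change_name(char *cur, const char *newname)
{
	if (strncmp(newname, cur, NAMESZ) == 0)
		return 0;			/* nothing to do */
	strncpy(cur, newname, NAMESZ - 1);
	cur[NAMESZ - 1] = '\0';
	return 1;				/* name actually changed */
}

int main(void)
{
	char name[NAMESZ] = "eth0";

	printf("%d\n", change_name(name, "eth0"));	/* prints 0: no-op */
	printf("%d\n", change_name(name, "lan0"));	/* prints 1: renamed */
	return 0;
}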
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7b7c6c44c2da..573e17240197 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -415,13 +415,6 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
 	n->nohdr = 0;
 	n->destructor = NULL;
-#ifdef CONFIG_NET_CLS_ACT
-	/* FIXME What is this and why don't we do it in copy_skb_header? */
-	n->tc_verd = SET_TC_VERD(n->tc_verd,0);
-	n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
-	n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
-	C(iif);
-#endif
 	C(truesize);
 	atomic_set(&n->users, 1);
 	C(head);
diff --git a/net/core/sock.c b/net/core/sock.c
index febbcbcf8022..bba9949681ff 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1649,7 +1649,6 @@ void sock_enable_timestamp(struct sock *sk)
 		net_enable_timestamp();
 	}
 }
-EXPORT_SYMBOL(sock_enable_timestamp);
 
 /*
  *	Get a socket option on an socket.
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 426008e3b7e3..d694656b8800 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -750,20 +750,16 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
 	 */
 	hctx->ccid2hctx_ssthresh = ~0;
 	hctx->ccid2hctx_numdupack = 3;
-	hctx->ccid2hctx_seqbufc = 0;
 
 	/* XXX init ~ to window size... */
 	if (ccid2_hc_tx_alloc_seq(hctx))
 		return -ENOMEM;
 
-	hctx->ccid2hctx_sent = 0;
 	hctx->ccid2hctx_rto = 3 * HZ;
 	ccid2_change_srtt(hctx, -1);
 	hctx->ccid2hctx_rttvar = -1;
-	hctx->ccid2hctx_lastrtt = 0;
 	hctx->ccid2hctx_rpdupack = -1;
 	hctx->ccid2hctx_last_cong = jiffies;
-	hctx->ccid2hctx_high_ack = 0;
 
 	hctx->ccid2hctx_rtotimer.function = &ccid2_hc_tx_rto_expire;
 	hctx->ccid2hctx_rtotimer.data = (unsigned long)sk;
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 25772c326172..19b33586333d 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -40,6 +40,8 @@
 #include "lib/tfrc.h"
 #include "ccid3.h"
 
+#include <asm/unaligned.h>
+
 #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
 static int ccid3_debug;
 #define ccid3_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid3_debug, format, ##a)
@@ -544,6 +546,7 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
 	const struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
 	struct ccid3_options_received *opt_recv;
+	__be32 opt_val;
 
 	opt_recv = &hctx->ccid3hctx_options_received;
 
@@ -563,8 +566,8 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
 				       dccp_role(sk), sk, len);
 			rc = -EINVAL;
 		} else {
-			opt_recv->ccid3or_loss_event_rate =
-						ntohl(*(__be32 *)value);
+			opt_val = get_unaligned((__be32 *)value);
+			opt_recv->ccid3or_loss_event_rate = ntohl(opt_val);
 			ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
 				       dccp_role(sk), sk,
 				       opt_recv->ccid3or_loss_event_rate);
@@ -585,8 +588,8 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
 				       dccp_role(sk), sk, len);
 			rc = -EINVAL;
 		} else {
-			opt_recv->ccid3or_receive_rate =
-						ntohl(*(__be32 *)value);
+			opt_val = get_unaligned((__be32 *)value);
+			opt_recv->ccid3or_receive_rate = ntohl(opt_val);
 			ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n",
 				       dccp_role(sk), sk,
 				       opt_recv->ccid3or_receive_rate);
@@ -601,8 +604,6 @@ static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
 {
 	struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid);
 
-	hctx->ccid3hctx_s = 0;
-	hctx->ccid3hctx_rtt = 0;
 	hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT;
 	INIT_LIST_HEAD(&hctx->ccid3hctx_hist);
 
@@ -963,8 +964,6 @@ static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
 	INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist);
 	hcrx->ccid3hcrx_tstamp_last_feedback =
 		hcrx->ccid3hcrx_tstamp_last_ack = ktime_get_real();
-	hcrx->ccid3hcrx_s = 0;
-	hcrx->ccid3hcrx_rtt = 0;
 	return 0;
 }
 
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 3560a2a875a0..1ce101062824 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -58,6 +58,42 @@ static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
 	dccp_send_close(sk, 0);
 }
 
+static u8 dccp_reset_code_convert(const u8 code)
+{
+	const u8 error_code[] = {
+	[DCCP_RESET_CODE_CLOSED]	     = 0,	/* normal termination */
+	[DCCP_RESET_CODE_UNSPECIFIED]	     = 0,	/* nothing known */
+	[DCCP_RESET_CODE_ABORTED]	     = ECONNRESET,
+
+	[DCCP_RESET_CODE_NO_CONNECTION]	     = ECONNREFUSED,
+	[DCCP_RESET_CODE_CONNECTION_REFUSED] = ECONNREFUSED,
+	[DCCP_RESET_CODE_TOO_BUSY]	     = EUSERS,
+	[DCCP_RESET_CODE_AGGRESSION_PENALTY] = EDQUOT,
+
+	[DCCP_RESET_CODE_PACKET_ERROR]	     = ENOMSG,
+	[DCCP_RESET_CODE_BAD_INIT_COOKIE]    = EBADR,
+	[DCCP_RESET_CODE_BAD_SERVICE_CODE]   = EBADRQC,
+	[DCCP_RESET_CODE_OPTION_ERROR]	     = EILSEQ,
+	[DCCP_RESET_CODE_MANDATORY_ERROR]    = EOPNOTSUPP,
+	};
+
+	return code >= DCCP_MAX_RESET_CODES ? 0 : error_code[code];
+}
+
+static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
+{
+	u8 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);
+
+	sk->sk_err = err;
+
+	/* Queue the equivalent of TCP fin so that dccp_recvmsg exits the loop */
+	dccp_fin(sk, skb);
+
+	if (err && !sock_flag(sk, SOCK_DEAD))
+		sk_wake_async(sk, 0, POLL_ERR);
+	dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
+}
+
 static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
@@ -191,9 +227,8 @@ static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
 		 *		S.state := TIMEWAIT
 		 *		Set TIMEWAIT timer
		 *		Drop packet and return
		 */
-		dccp_fin(sk, skb);
-		dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
+		dccp_rcv_reset(sk, skb);
 		return 0;
 	case DCCP_PKT_CLOSEREQ:
 		dccp_rcv_closereq(sk, skb);
@@ -521,12 +556,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
		 *		Drop packet and return
		 */
		if (dh->dccph_type == DCCP_PKT_RESET) {
-			/*
-			 * Queue the equivalent of TCP fin so that dccp_recvmsg
-			 * exits the loop
-			 */
-			dccp_fin(sk, skb);
-			dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
+			dccp_rcv_reset(sk, skb);
			return 0;
		/*
		 *   Step 7: Check for unexpected packet types
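
The new dccp_reset_code_convert() above maps a received reset code onto an errno value through a designated-initializer lookup table guarded by a bounds check, so unknown codes silently become 0. A small userspace sketch of the same table-with-bounds-check pattern; the enum values and errno choices here are illustrative, not the DCCP ones:

#include <errno.h>
#include <stdio.h>

enum reset_code {
	RESET_CLOSED,
	RESET_ABORTED,
	RESET_NO_CONNECTION,
	MAX_RESET_CODES		/* array bound, must stay last */
};

/* Out-of-range or "benign" codes map to 0, i.e. no error reported. */
static unsigned char reset_to_errno(unsigned char code)
{
	static const unsigned char tbl[] = {
		[RESET_CLOSED]        = 0,		/* normal termination */
		[RESET_ABORTED]       = ECONNRESET,
		[RESET_NO_CONNECTION] = ECONNREFUSED,
	};

	return code >= MAX_RESET_CODES ? 0 : tbl[code];
}

int main(void)
{
	printf("%d %d %d\n", reset_to_errno(RESET_CLOSED),
	       reset_to_errno(RESET_ABORTED), reset_to_errno(255));
	return 0;
}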
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 222549ab274a..01a6a808bdb7 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -241,8 +241,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 		goto out;
 
 	dp = dccp_sk(sk);
-	seq = dccp_hdr_seq(skb);
-	if (sk->sk_state != DCCP_LISTEN &&
+	seq = dccp_hdr_seq(dh);
+	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
 	    !between48(seq, dp->dccps_swl, dp->dccps_swh)) {
 		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
@@ -795,7 +795,7 @@ static int dccp_v4_rcv(struct sk_buff *skb)
 
 	dh = dccp_hdr(skb);
 
-	DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb);
+	DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
 	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
 
 	dccp_pr_debug("%8.8s "
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index bbadd6681b83..62428ff137dd 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -173,7 +173,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
 	icmpv6_err_convert(type, code, &err);
 
-	seq = DCCP_SKB_CB(skb)->dccpd_seq;
+	seq = dccp_hdr_seq(dh);
 	/* Might be for an request_sock */
 	switch (sk->sk_state) {
 		struct request_sock *req, **prev;
@@ -787,7 +787,7 @@ static int dccp_v6_rcv(struct sk_buff *skb)
 
 	dh = dccp_hdr(skb);
 
-	DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb);
+	DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
 	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
 
 	if (dccp_packet_without_ack(skb))
diff --git a/net/dccp/options.c b/net/dccp/options.c
index d361b5533309..d286cffe2c49 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -14,6 +14,7 @@
 #include <linux/dccp.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <asm/unaligned.h>
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
 
@@ -59,6 +60,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
 	unsigned char opt, len;
 	unsigned char *value;
 	u32 elapsed_time;
+	__be32 opt_val;
 	int rc;
 	int mandatory = 0;
 
@@ -145,7 +147,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
 			if (len != 4)
 				goto out_invalid_option;
 
-			opt_recv->dccpor_timestamp = ntohl(*(__be32 *)value);
+			opt_val = get_unaligned((__be32 *)value);
+			opt_recv->dccpor_timestamp = ntohl(opt_val);
 
 			dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp;
 			dp->dccps_timestamp_time = ktime_get_real();
@@ -159,7 +162,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
 			if (len != 4 && len != 6 && len != 8)
 				goto out_invalid_option;
 
-			opt_recv->dccpor_timestamp_echo = ntohl(*(__be32 *)value);
+			opt_val = get_unaligned((__be32 *)value);
+			opt_recv->dccpor_timestamp_echo = ntohl(opt_val);
 
 			dccp_pr_debug("%s rx opt: TIMESTAMP_ECHO=%u, len=%d, "
 				      "ackno=%llu", dccp_role(sk),
@@ -168,16 +172,20 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
 				      (unsigned long long)
 				      DCCP_SKB_CB(skb)->dccpd_ack_seq);
 
+			value += 4;
 
-			if (len == 4) {
+			if (len == 4) {		/* no elapsed time included */
 				dccp_pr_debug_cat("\n");
 				break;
 			}
 
-			if (len == 6)
-				elapsed_time = ntohs(*(__be16 *)(value + 4));
-			else
-				elapsed_time = ntohl(*(__be32 *)(value + 4));
+			if (len == 6) {		/* 2-byte elapsed time */
+				__be16 opt_val2 = get_unaligned((__be16 *)value);
+				elapsed_time = ntohs(opt_val2);
+			} else {		/* 4-byte elapsed time */
+				opt_val = get_unaligned((__be32 *)value);
+				elapsed_time = ntohl(opt_val);
+			}
 
 			dccp_pr_debug_cat(", ELAPSED_TIME=%u\n", elapsed_time);
 
@@ -192,10 +200,13 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
 			if (pkt_type == DCCP_PKT_DATA)
 				continue;
 
-			if (len == 2)
-				elapsed_time = ntohs(*(__be16 *)value);
-			else
-				elapsed_time = ntohl(*(__be32 *)value);
+			if (len == 2) {
+				__be16 opt_val2 = get_unaligned((__be16 *)value);
+				elapsed_time = ntohs(opt_val2);
+			} else {
+				opt_val = get_unaligned((__be32 *)value);
+				elapsed_time = ntohl(opt_val);
+			}
 
 			if (elapsed_time > opt_recv->dccpor_elapsed_time)
 				opt_recv->dccpor_elapsed_time = elapsed_time;
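
Both this file and ccid3.c now fetch 16- and 32-bit option values with get_unaligned(), because an option payload can start at any byte offset inside the DCCP header and a direct cast-and-dereference would be an unaligned load on strict-alignment architectures. A userspace analogue of the idea, assuming a hypothetical read_be32_unaligned() helper built on memcpy rather than the kernel's get_unaligned():

#include <arpa/inet.h>	/* ntohl */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Safely read a big-endian 32-bit value from a possibly unaligned pointer. */
static uint32_t read_be32_unaligned(const unsigned char *p)
{
	uint32_t be;

	memcpy(&be, p, sizeof(be));	/* avoids an unaligned load */
	return ntohl(be);
}

int main(void)
{
	/* Option value deliberately placed at an odd offset. */
	unsigned char pkt[] = { 0x29, 0x12, 0x34, 0x56, 0x78, 0x00 };

	printf("0x%x\n", (unsigned)read_be32_unaligned(pkt + 1));	/* 0x12345678 */
	return 0;
}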
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 805a78e6ed55..f18e88bc86ec 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -504,22 +504,16 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def)
 	INIT_RCU_HEAD(&doi_def->rcu);
 	INIT_LIST_HEAD(&doi_def->dom_list);
 
-	rcu_read_lock();
-	if (cipso_v4_doi_search(doi_def->doi) != NULL)
-		goto doi_add_failure_rlock;
 	spin_lock(&cipso_v4_doi_list_lock);
 	if (cipso_v4_doi_search(doi_def->doi) != NULL)
-		goto doi_add_failure_slock;
+		goto doi_add_failure;
 	list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list);
 	spin_unlock(&cipso_v4_doi_list_lock);
-	rcu_read_unlock();
 
 	return 0;
 
-doi_add_failure_slock:
+doi_add_failure:
 	spin_unlock(&cipso_v4_doi_list_lock);
-doi_add_failure_rlock:
-	rcu_read_unlock();
 	return -EEXIST;
 }
 
@@ -543,29 +537,23 @@ int cipso_v4_doi_remove(u32 doi,
 	struct cipso_v4_doi *doi_def;
 	struct cipso_v4_domhsh_entry *dom_iter;
 
-	rcu_read_lock();
-	if (cipso_v4_doi_search(doi) != NULL) {
-		spin_lock(&cipso_v4_doi_list_lock);
-		doi_def = cipso_v4_doi_search(doi);
-		if (doi_def == NULL) {
-			spin_unlock(&cipso_v4_doi_list_lock);
-			rcu_read_unlock();
-			return -ENOENT;
-		}
+	spin_lock(&cipso_v4_doi_list_lock);
+	doi_def = cipso_v4_doi_search(doi);
+	if (doi_def != NULL) {
 		doi_def->valid = 0;
 		list_del_rcu(&doi_def->list);
 		spin_unlock(&cipso_v4_doi_list_lock);
+		rcu_read_lock();
 		list_for_each_entry_rcu(dom_iter, &doi_def->dom_list, list)
 			if (dom_iter->valid)
 				netlbl_domhsh_remove(dom_iter->domain,
 						     audit_info);
-		cipso_v4_cache_invalidate();
 		rcu_read_unlock();
-
+		cipso_v4_cache_invalidate();
 		call_rcu(&doi_def->rcu, callback);
 		return 0;
 	}
-	rcu_read_unlock();
+	spin_unlock(&cipso_v4_doi_list_lock);
 
 	return -ENOENT;
 }
@@ -653,22 +641,19 @@ int cipso_v4_doi_domhsh_add(struct cipso_v4_doi *doi_def, const char *domain)
 	new_dom->valid = 1;
 	INIT_RCU_HEAD(&new_dom->rcu);
 
-	rcu_read_lock();
 	spin_lock(&cipso_v4_doi_list_lock);
-	list_for_each_entry_rcu(iter, &doi_def->dom_list, list)
+	list_for_each_entry(iter, &doi_def->dom_list, list)
 		if (iter->valid &&
 		    ((domain != NULL && iter->domain != NULL &&
 		      strcmp(iter->domain, domain) == 0) ||
 		     (domain == NULL && iter->domain == NULL))) {
 			spin_unlock(&cipso_v4_doi_list_lock);
-			rcu_read_unlock();
 			kfree(new_dom->domain);
 			kfree(new_dom);
 			return -EEXIST;
 		}
 	list_add_tail_rcu(&new_dom->list, &doi_def->dom_list);
 	spin_unlock(&cipso_v4_doi_list_lock);
-	rcu_read_unlock();
 
 	return 0;
 }
@@ -689,9 +674,8 @@ int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def,
 {
 	struct cipso_v4_domhsh_entry *iter;
 
-	rcu_read_lock();
 	spin_lock(&cipso_v4_doi_list_lock);
-	list_for_each_entry_rcu(iter, &doi_def->dom_list, list)
+	list_for_each_entry(iter, &doi_def->dom_list, list)
 		if (iter->valid &&
 		    ((domain != NULL && iter->domain != NULL &&
 		      strcmp(iter->domain, domain) == 0) ||
@@ -699,13 +683,10 @@ int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def,
 			iter->valid = 0;
 			list_del_rcu(&iter->list);
 			spin_unlock(&cipso_v4_doi_list_lock);
-			rcu_read_unlock();
 			call_rcu(&iter->rcu, cipso_v4_doi_domhsh_free);
-
 			return 0;
 		}
 	spin_unlock(&cipso_v4_doi_list_lock);
-	rcu_read_unlock();
 
 	return -ENOENT;
 }
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 6b1a31a74cf2..ba9840195cf2 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -110,6 +110,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 			if (!sg)
 				goto unlock;
 		}
+		sg_init_table(sg, nfrags);
 		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
 		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
 		if (unlikely(sg != &esp->sgbuf[0]))
@@ -201,6 +202,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 			if (!sg)
 				goto out;
 		}
+		sg_init_table(sg, nfrags);
 		skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen);
 		err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
 		if (unlikely(sg != &esp->sgbuf[0]))
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 272c69e106e9..233de0634298 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1104,5 +1104,4 @@ void __init icmp_init(struct net_proto_family *ops)
 EXPORT_SYMBOL(icmp_err_convert);
 EXPORT_SYMBOL(icmp_send);
 EXPORT_SYMBOL(icmp_statistics);
-EXPORT_SYMBOL(icmpmsg_statistics);
 EXPORT_SYMBOL(xrlim_allow);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index fd16cb8f8abe..9be0daa9c0ec 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -121,14 +121,6 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
 	SNMP_MIB_SENTINEL
 };
 
-static const struct snmp_mib snmp4_icmp_list[] = {
-	SNMP_MIB_ITEM("InMsgs", ICMP_MIB_INMSGS),
-	SNMP_MIB_ITEM("InErrors", ICMP_MIB_INERRORS),
-	SNMP_MIB_ITEM("OutMsgs", ICMP_MIB_OUTMSGS),
-	SNMP_MIB_ITEM("OutErrors", ICMP_MIB_OUTERRORS),
-	SNMP_MIB_SENTINEL
-};
-
 static struct {
 	char *name;
 	int index;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3dbbb44b3e7d..69d8c38ccd39 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -103,7 +103,7 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ONLY_ORIG_SACKED	0x200 /* SACKs only non-rexmit sent before RTO */
 #define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
-#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained DSACK info */
+#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
 #define FLAG_NONHEAD_RETRANS_ACKED	0x1000 /* Non-head rexmitted data was ACKed */
 
 #define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
@@ -866,7 +866,7 @@ static void tcp_disable_fack(struct tcp_sock *tp)
 	tp->rx_opt.sack_ok &= ~2;
 }
 
-/* Take a notice that peer is sending DSACKs */
+/* Take a notice that peer is sending D-SACKs */
 static void tcp_dsack_seen(struct tcp_sock *tp)
 {
 	tp->rx_opt.sack_ok |= 4;
@@ -1058,7 +1058,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 *
 * With D-SACK the lower bound is extended to cover sequence space below
 * SND.UNA down to undo_marker, which is the last point of interest. Yet
-* again, DSACK block must not to go across snd_una (for the same reason as
+* again, D-SACK block must not to go across snd_una (for the same reason as
 * for the normal SACK blocks, explained above). But there all simplicity
 * ends, TCP might receive valid D-SACKs below that. As long as they reside
 * fully below undo_marker they do not affect behavior in anyway and can
@@ -1080,7 +1080,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
 	if (!before(start_seq, tp->snd_nxt))
 		return 0;
 
-	/* In outstanding window? ...This is valid exit for DSACKs too.
+	/* In outstanding window? ...This is valid exit for D-SACKs too.
 	 * start_seq == snd_una is non-sensical (see comments above)
 	 */
 	if (after(start_seq, tp->snd_una))
@@ -1204,8 +1204,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
 * which may fail and creates some hassle (caller must handle error case
 * returns).
 */
-int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
-			  u32 start_seq, u32 end_seq)
+static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
+				 u32 start_seq, u32 end_seq)
 {
 	int in_sack, err;
 	unsigned int pkt_len;
@@ -1248,6 +1248,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	int cached_fack_count;
 	int i;
 	int first_sack_index;
+	int force_one_sack;
 
 	if (!tp->sacked_out) {
 		if (WARN_ON(tp->fackets_out))
@@ -1272,18 +1273,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	 * if the only SACK change is the increase of the end_seq of
 	 * the first block then only apply that SACK block
 	 * and use retrans queue hinting otherwise slowpath */
-	flag = 1;
+	force_one_sack = 1;
 	for (i = 0; i < num_sacks; i++) {
 		__be32 start_seq = sp[i].start_seq;
 		__be32 end_seq = sp[i].end_seq;
 
 		if (i == 0) {
 			if (tp->recv_sack_cache[i].start_seq != start_seq)
-				flag = 0;
+				force_one_sack = 0;
 		} else {
 			if ((tp->recv_sack_cache[i].start_seq != start_seq) ||
 			    (tp->recv_sack_cache[i].end_seq != end_seq))
-				flag = 0;
+				force_one_sack = 0;
 		}
 		tp->recv_sack_cache[i].start_seq = start_seq;
 		tp->recv_sack_cache[i].end_seq = end_seq;
@@ -1295,7 +1296,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	}
 
 	first_sack_index = 0;
-	if (flag)
+	if (force_one_sack)
 		num_sacks = 1;
 	else {
 		int j;
@@ -1321,9 +1322,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 		}
 	}
 
-	/* clear flag as used for different purpose in following code */
-	flag = 0;
-
 	/* Use SACK fastpath hint if valid */
 	cached_skb = tp->fastpath_skb_hint;
 	cached_fack_count = tp->fastpath_cnt_hint;
@@ -1615,7 +1613,7 @@ void tcp_enter_frto(struct sock *sk)
 		      !icsk->icsk_retransmits)) {
 		tp->prior_ssthresh = tcp_current_ssthresh(sk);
 		/* Our state is too optimistic in ssthresh() call because cwnd
-		 * is not reduced until tcp_enter_frto_loss() when previous FRTO
+		 * is not reduced until tcp_enter_frto_loss() when previous F-RTO
 		 * recovery has not yet completed. Pattern would be this: RTO,
 		 * Cumulative ACK, RTO (2xRTO for the same segment does not end
 		 * up here twice).
@@ -1801,7 +1799,7 @@ void tcp_enter_loss(struct sock *sk, int how)
 	tcp_set_ca_state(sk, TCP_CA_Loss);
 	tp->high_seq = tp->snd_nxt;
 	TCP_ECN_queue_cwr(tp);
-	/* Abort FRTO algorithm if one is in progress */
+	/* Abort F-RTO algorithm if one is in progress */
 	tp->frto_counter = 0;
 }
 
@@ -1946,7 +1944,7 @@ static int tcp_time_to_recover(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 packets_out;
 
-	/* Do not perform any recovery during FRTO algorithm */
+	/* Do not perform any recovery during F-RTO algorithm */
 	if (tp->frto_counter)
 		return 0;
 
@@ -2962,7 +2960,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 	}
 
 	if (tp->frto_counter == 1) {
-		/* Sending of the next skb must be allowed or no FRTO */
+		/* Sending of the next skb must be allowed or no F-RTO */
 		if (!tcp_send_head(sk) ||
 		    after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
 			  tp->snd_una + tp->snd_wnd)) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 38cf73a56731..ad759f1c3777 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1055,6 +1055,9 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
 	bp->pad = 0;
 	bp->protocol = protocol;
 	bp->len = htons(tcplen);
+
+	sg_init_table(sg, 4);
+
 	sg_set_buf(&sg[block++], bp, sizeof(*bp));
 	nbytes += sizeof(*bp);
 
@@ -1080,6 +1083,8 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
 		sg_set_buf(&sg[block++], key->key, key->keylen);
 	nbytes += key->keylen;
 
+	sg_mark_end(sg, block);
+
 	/* Now store the Hash into the packet */
 	err = crypto_hash_init(desc);
 	if (err)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 35d2b0e9e10b..4bc25b46f33f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1152,7 +1152,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 		return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);
 
 	sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest,
-			       skb->dev->ifindex, udptable );
+			       inet_iif(skb), udptable);
 
 	if (sk != NULL) {
 		int ret = udp_queue_rcv_skb(sk, skb);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 72a659806cad..f67d51a4e56d 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -109,6 +109,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 			if (!sg)
 				goto unlock;
 		}
+		sg_init_table(sg, nfrags);
 		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
 		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
 		if (unlikely(sg != &esp->sgbuf[0]))
@@ -205,6 +206,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 				goto out;
 			}
 		}
+		sg_init_table(sg, nfrags);
 		skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen);
 		ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
 		if (unlikely(sg != &esp->sgbuf[0]))
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 737b755342bd..06fa4baddf05 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -757,6 +757,8 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
 	bp->len = htonl(tcplen);
 	bp->protocol = htonl(protocol);
 
+	sg_init_table(sg, 4);
+
 	sg_set_buf(&sg[block++], bp, sizeof(*bp));
 	nbytes += sizeof(*bp);
 
@@ -778,6 +780,8 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
 		sg_set_buf(&sg[block++], key->key, key->keylen);
 	nbytes += key->keylen;
 
+	sg_mark_end(sg, block);
+
 	/* Now store the hash into the packet */
 	err = crypto_hash_init(desc);
 	if (err) {
@@ -1728,6 +1732,8 @@ process:
 	if (!sock_owned_by_user(sk)) {
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
+		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
+			tp->ucopy.dma_chan = get_softnet_dma();
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 3d241e415a2a..1120b150e211 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -77,7 +77,7 @@ static int ircomm_tty_read_proc(char *buf, char **start, off_t offset, int len,
 #endif /* CONFIG_PROC_FS */
 static struct tty_driver *driver;
 
-hashbin_t *ircomm_tty = NULL;
+static hashbin_t *ircomm_tty = NULL;
 
 static const struct tty_operations ops = {
 	.open = ircomm_tty_open,
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c
index f7ffeec3913f..fda0e06453e8 100644
--- a/net/mac80211/ieee80211_sta.c
+++ b/net/mac80211/ieee80211_sta.c
@@ -1184,7 +1184,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev,
 	printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x "
 	       "status=%d aid=%d)\n",
 	       dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa),
-	       capab_info, status_code, aid & ~(BIT(15) | BIT(14)));
+	       capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
 
 	if (status_code != WLAN_STATUS_SUCCESS) {
 		printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
@@ -2096,7 +2096,8 @@ static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta,
 {
 	int tmp, hidden_ssid;
 
-	if (!memcmp(ifsta->ssid, ssid, ssid_len))
+	if (ssid_len == ifsta->ssid_len &&
+	    !memcmp(ifsta->ssid, ssid, ssid_len))
 		return 1;
 
 	if (ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL)
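
The second hunk above fixes ieee80211_sta_match_ssid() by comparing lengths before memcmp(); comparing only ssid_len bytes would also report a match whenever one SSID is a prefix of the other. A minimal userspace illustration, with made-up match_ssid() helpers that are not mac80211 functions:

#include <stdio.h>
#include <string.h>

/* Wrong: a shorter candidate that is a prefix of the stored SSID matches. */
static int match_ssid_buggy(const char *stored, size_t stored_len,
			    const char *cand, size_t cand_len)
{
	(void)stored_len;	/* ignoring the stored length is the bug */
	return !memcmp(stored, cand, cand_len);
}

/* Fixed: the lengths must agree before the byte comparison means anything. */
static int match_ssid(const char *stored, size_t stored_len,
		      const char *cand, size_t cand_len)
{
	return cand_len == stored_len && !memcmp(stored, cand, cand_len);
}

int main(void)
{
	const char *stored = "guestnet";

	printf("buggy: %d\n", match_ssid_buggy(stored, 8, "guest", 5));	/* 1 */
	printf("fixed: %d\n", match_ssid(stored, 8, "guest", 5));	/* 0 */
	return 0;
}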
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index b6c844b7e1c1..b3675bd7db33 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -178,11 +178,9 @@ int netlbl_domhsh_init(u32 size)
 	for (iter = 0; iter < hsh_tbl->size; iter++)
 		INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
 
-	rcu_read_lock();
 	spin_lock(&netlbl_domhsh_lock);
 	rcu_assign_pointer(netlbl_domhsh, hsh_tbl);
 	spin_unlock(&netlbl_domhsh_lock);
-	rcu_read_unlock();
 
 	return 0;
 }
@@ -222,7 +220,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
 	entry->valid = 1;
 	INIT_RCU_HEAD(&entry->rcu);
 
-	ret_val = 0;
 	rcu_read_lock();
 	if (entry->domain != NULL) {
 		bkt = netlbl_domhsh_hash(entry->domain);
@@ -233,7 +230,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
 		else
 			ret_val = -EEXIST;
 		spin_unlock(&netlbl_domhsh_lock);
-	} else if (entry->domain == NULL) {
+	} else {
 		INIT_LIST_HEAD(&entry->list);
 		spin_lock(&netlbl_domhsh_def_lock);
 		if (rcu_dereference(netlbl_domhsh_def) == NULL)
@@ -241,9 +238,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
 		else
 			ret_val = -EEXIST;
 		spin_unlock(&netlbl_domhsh_def_lock);
-	} else
-		ret_val = -EINVAL;
-
+	}
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info);
 	if (audit_buf != NULL) {
 		audit_log_format(audit_buf,
@@ -262,7 +257,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
 		audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
 		audit_log_end(audit_buf);
 	}
-
 	rcu_read_unlock();
 
 	if (ret_val != 0) {
@@ -313,38 +307,30 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info)
 	struct audit_buffer *audit_buf;
 
 	rcu_read_lock();
-	if (domain != NULL)
-		entry = netlbl_domhsh_search(domain, 0);
-	else
-		entry = netlbl_domhsh_search(domain, 1);
+	entry = netlbl_domhsh_search(domain, (domain != NULL ? 0 : 1));
 	if (entry == NULL)
 		goto remove_return;
 	switch (entry->type) {
-	case NETLBL_NLTYPE_UNLABELED:
-		break;
 	case NETLBL_NLTYPE_CIPSOV4:
-		ret_val = cipso_v4_doi_domhsh_remove(entry->type_def.cipsov4,
-						     entry->domain);
-		if (ret_val != 0)
-			goto remove_return;
+		cipso_v4_doi_domhsh_remove(entry->type_def.cipsov4,
					   entry->domain);
 		break;
 	}
-	ret_val = 0;
 	if (entry != rcu_dereference(netlbl_domhsh_def)) {
 		spin_lock(&netlbl_domhsh_lock);
 		if (entry->valid) {
 			entry->valid = 0;
 			list_del_rcu(&entry->list);
-		} else
-			ret_val = -ENOENT;
+			ret_val = 0;
+		}
 		spin_unlock(&netlbl_domhsh_lock);
 	} else {
 		spin_lock(&netlbl_domhsh_def_lock);
 		if (entry->valid) {
 			entry->valid = 0;
 			rcu_assign_pointer(netlbl_domhsh_def, NULL);
-		} else
-			ret_val = -ENOENT;
+			ret_val = 0;
+		}
 		spin_unlock(&netlbl_domhsh_def_lock);
 	}
 
@@ -357,11 +343,10 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info)
 		audit_log_end(audit_buf);
 	}
 
-	if (ret_val == 0)
-		call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
-
 remove_return:
 	rcu_read_unlock();
+	if (ret_val == 0)
+		call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
 	return ret_val;
 }
 
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 5315dacc5222..56483377997a 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -85,11 +85,9 @@ static const struct nla_policy netlbl_mgmt_genl_policy[NLBL_MGMT_A_MAX + 1] = {
 */
 void netlbl_mgmt_protocount_inc(void)
 {
-	rcu_read_lock();
 	spin_lock(&netlabel_mgmt_protocount_lock);
 	netlabel_mgmt_protocount++;
 	spin_unlock(&netlabel_mgmt_protocount_lock);
-	rcu_read_unlock();
 }
 
 /**
@@ -103,12 +101,10 @@ void netlbl_mgmt_protocount_inc(void)
 */
 void netlbl_mgmt_protocount_dec(void)
 {
-	rcu_read_lock();
 	spin_lock(&netlabel_mgmt_protocount_lock);
 	if (netlabel_mgmt_protocount > 0)
 		netlabel_mgmt_protocount--;
 	spin_unlock(&netlabel_mgmt_protocount_lock);
-	rcu_read_unlock();
 }
 
 /**
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 5c303c68af1d..348292450deb 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -84,12 +84,10 @@ static void netlbl_unlabel_acceptflg_set(u8 value,
 	struct audit_buffer *audit_buf;
 	u8 old_val;
 
-	rcu_read_lock();
-	old_val = netlabel_unlabel_acceptflg;
 	spin_lock(&netlabel_unlabel_acceptflg_lock);
+	old_val = netlabel_unlabel_acceptflg;
 	netlabel_unlabel_acceptflg = value;
 	spin_unlock(&netlabel_unlabel_acceptflg_lock);
-	rcu_read_unlock();
 
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_ALLOW,
 					      audit_info);
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index fd7bca4d5c20..c3fde9180f9d 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -166,7 +166,7 @@ bad_mirred:
 		return TC_ACT_SHOT;
 	}
 
-	skb2 = skb_clone(skb, GFP_ATOMIC);
+	skb2 = skb_act_clone(skb, GFP_ATOMIC);
 	if (skb2 == NULL)
 		goto bad_mirred;
 	if (m->tcfm_eaction != TCA_EGRESS_MIRROR &&
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 621113a109b2..c9dbc3afa99f 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -107,7 +107,7 @@ struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp)
 }
 
 /* Free the shared key stucture */
-void sctp_auth_shkey_free(struct sctp_shared_key *sh_key)
+static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key)
 {
 	BUG_ON(!list_empty(&sh_key->key_list));
 	sctp_auth_key_put(sh_key->key);
@@ -220,7 +220,7 @@ static struct sctp_auth_bytes *sctp_auth_make_key_vector(
 
 
 /* Make a key vector based on our local parameters */
-struct sctp_auth_bytes *sctp_auth_make_local_vector(
+static struct sctp_auth_bytes *sctp_auth_make_local_vector(
 	const struct sctp_association *asoc,
 	gfp_t gfp)
 {
@@ -232,7 +232,7 @@ struct sctp_auth_bytes *sctp_auth_make_local_vector(
 }
 
 /* Make a key vector based on peer's parameters */
-struct sctp_auth_bytes *sctp_auth_make_peer_vector(
+static struct sctp_auth_bytes *sctp_auth_make_peer_vector(
 	const struct sctp_association *asoc,
 	gfp_t gfp)
 {
diff --git a/net/sctp/crc32c.c b/net/sctp/crc32c.c
index 59cf7b06d216..181edabdb8ca 100644
--- a/net/sctp/crc32c.c
+++ b/net/sctp/crc32c.c
@@ -170,6 +170,7 @@ __u32 sctp_update_cksum(__u8 *buffer, __u16 length, __u32 crc32)
 	return crc32;
 }
 
+#if 0
 __u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32)
 {
 	__u32 i;
@@ -186,6 +187,7 @@ __u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32)
 
 	return crc32;
 }
+#endif /* 0 */
 
 __u32 sctp_end_cksum(__u32 crc32)
 {
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index fa45989a716a..0426388d351d 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -553,7 +553,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 		if (copy > len)
 			copy = len;
 
-		sg_set_buf(&sg, skb->data + offset, copy);
+		sg_init_one(&sg, skb->data + offset, copy);
 
 		err = icv_update(desc, &sg, copy);
 		if (unlikely(err))
@@ -576,8 +576,9 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 		if (copy > len)
 			copy = len;
 
+		sg_init_table(&sg, 1);
 		sg_set_page(&sg, frag->page, copy,
 			    frag->page_offset + offset-start);
 
 		err = icv_update(desc, &sg, copy);
 		if (unlikely(err))