-rw-r--r--  Documentation/networking/ip-sysctl.txt | 6
-rw-r--r--  Documentation/networking/tc-actions-env-rules.txt | 29
-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  crypto/hmac.c | 10
-rw-r--r--  crypto/tcrypt.c | 24
-rw-r--r--  fs/proc/proc_net.c | 2
-rw-r--r--  include/linux/dccp.h | 12
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/net/inet_hashtables.h | 6
-rw-r--r--  include/net/inet_sock.h | 7
-rw-r--r--  include/net/irda/ircomm_tty.h | 1
-rw-r--r--  include/net/sch_generic.h | 15
-rw-r--r--  include/net/sctp/auth.h | 1
-rw-r--r--  include/net/sctp/sctp.h | 1
-rw-r--r--  net/core/dev.c | 3
-rw-r--r--  net/core/skbuff.c | 7
-rw-r--r--  net/core/sock.c | 1
-rw-r--r--  net/dccp/ccids/ccid2.c | 4
-rw-r--r--  net/dccp/ccids/ccid3.c | 15
-rw-r--r--  net/dccp/input.c | 48
-rw-r--r--  net/dccp/ipv4.c | 6
-rw-r--r--  net/dccp/ipv6.c | 4
-rw-r--r--  net/dccp/options.c | 33
-rw-r--r--  net/ipv4/cipso_ipv4.c | 39
-rw-r--r--  net/ipv4/esp4.c | 2
-rw-r--r--  net/ipv4/icmp.c | 1
-rw-r--r--  net/ipv4/proc.c | 8
-rw-r--r--  net/ipv4/tcp_input.c | 32
-rw-r--r--  net/ipv4/tcp_ipv4.c | 5
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv6/esp6.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 6
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 2
-rw-r--r--  net/mac80211/ieee80211_sta.c | 5
-rw-r--r--  net/netlabel/netlabel_domainhash.c | 37
-rw-r--r--  net/netlabel/netlabel_mgmt.c | 4
-rw-r--r--  net/netlabel/netlabel_unlabeled.c | 4
-rw-r--r--  net/sched/act_mirred.c | 2
-rw-r--r--  net/sctp/auth.c | 6
-rw-r--r--  net/sctp/crc32c.c | 2
-rw-r--r--  net/xfrm/xfrm_algo.c | 5
41 files changed, 228 insertions(+), 181 deletions(-)
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 747a5d15d529..6f7872ba1def 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -184,14 +184,14 @@ tcp_frto - INTEGER
184 F-RTO is an enhanced recovery algorithm for TCP retransmission 184 F-RTO is an enhanced recovery algorithm for TCP retransmission
185 timeouts. It is particularly beneficial in wireless environments 185 timeouts. It is particularly beneficial in wireless environments
186 where packet loss is typically due to random radio interference 186 where packet loss is typically due to random radio interference
187 rather than intermediate router congestion. FRTO is sender-side 187 rather than intermediate router congestion. F-RTO is sender-side
188 only modification. Therefore it does not require any support from 188 only modification. Therefore it does not require any support from
189 the peer, but in a typical case, however, where wireless link is 189 the peer, but in a typical case, however, where wireless link is
190 the local access link and most of the data flows downlink, the 190 the local access link and most of the data flows downlink, the
191 faraway servers should have FRTO enabled to take advantage of it. 191 faraway servers should have F-RTO enabled to take advantage of it.
192 If set to 1, basic version is enabled. 2 enables SACK enhanced 192 If set to 1, basic version is enabled. 2 enables SACK enhanced
193 F-RTO if flow uses SACK. The basic version can be used also when 193 F-RTO if flow uses SACK. The basic version can be used also when
194 SACK is in use though scenario(s) with it exists where FRTO 194 SACK is in use though scenario(s) with it exists where F-RTO
195 interacts badly with the packet counting of the SACK enabled TCP 195 interacts badly with the packet counting of the SACK enabled TCP
196 flow. 196 flow.
197 197
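
For reference, the tunable described above is exposed as /proc/sys/net/ipv4/tcp_frto. A minimal userspace sketch (illustration only, not part of the patch) that enables the SACK-enhanced variant on a sender; equivalent to "sysctl -w net.ipv4.tcp_frto=2" and needs root:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_frto", "w");

	if (f == NULL) {
		perror("tcp_frto");
		return 1;
	}
	fputs("2\n", f);	/* 0 = disabled, 1 = basic F-RTO, 2 = SACK-enhanced F-RTO */
	fclose(f);
	return 0;
}
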
diff --git a/Documentation/networking/tc-actions-env-rules.txt b/Documentation/networking/tc-actions-env-rules.txt
new file mode 100644
index 000000000000..01e716d185f4
--- /dev/null
+++ b/Documentation/networking/tc-actions-env-rules.txt
@@ -0,0 +1,29 @@
1
2The "enviromental" rules for authors of any new tc actions are:
3
41) If you stealeth or borroweth any packet thou shalt be branching
5from the righteous path and thou shalt cloneth.
6
7For example if your action queues a packet to be processed later
8or intentionaly branches by redirecting a packet then you need to
9clone the packet.
10There are certain fields in the skb tc_verd that need to be reset so we
11avoid loops etc. A few are generic enough so much so that skb_act_clone()
12resets them for you. So invoke skb_act_clone() rather than skb_clone()
13
142) If you munge any packet thou shalt call pskb_expand_head in the case
15someone else is referencing the skb. After that you "own" the skb.
16You must also tell us if it is ok to munge the packet (TC_OK2MUNGE),
17this way any action downstream can stomp on the packet.
18
193) dropping packets you dont own is a nono. You simply return
20TC_ACT_SHOT to the caller and they will drop it.
21
22The "enviromental" rules for callers of actions (qdiscs etc) are:
23
24*) thou art responsible for freeing anything returned as being
25TC_ACT_SHOT/STOLEN/QUEUED. If none of TC_ACT_SHOT/STOLEN/QUEUED is
26returned then all is great and you dont need to do anything.
27
28Post on netdev if something is unclear.
29
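
As a rough illustration of rule 1 above, a hypothetical mirror-style action (the function and device names are made up, not part of the patch) would branch a copy off with skb_act_clone() from include/net/sch_generic.h rather than skb_clone(), so the tc_verd bits are reset for the new skb:

/* Hypothetical sketch of rule 1: branch off a copy via skb_act_clone(). */
#include <linux/netdevice.h>
#include <linux/pkt_cls.h>
#include <net/sch_generic.h>

static int example_mirror(struct sk_buff *skb, struct net_device *to_dev)
{
	struct sk_buff *skb2 = skb_act_clone(skb, GFP_ATOMIC);

	if (skb2 == NULL)
		return TC_ACT_SHOT;	/* nothing stolen; the caller drops the original */

	skb2->dev = to_dev;
	dev_queue_xmit(skb2);		/* the clone takes the branch */

	/* The original skb continues down the action pipeline untouched. */
	return TC_ACT_PIPE;
}
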
diff --git a/MAINTAINERS b/MAINTAINERS
index f985dfa5941c..5b454627f211 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2449,13 +2449,15 @@ W: http://www.tazenda.demon.co.uk/phil/linux-hp
2449S: Maintained 2449S: Maintained
2450 2450
2451MAC80211 2451MAC80211
2452P: Jiri Benc
2453M: jbenc@suse.cz
2454P: Michael Wu 2452P: Michael Wu
2455M: flamingice@sourmilk.net 2453M: flamingice@sourmilk.net
2454P: Johannes Berg
2455M: johannes@sipsolutions.net
2456P: Jiri Benc
2457M: jbenc@suse.cz
2456L: linux-wireless@vger.kernel.org 2458L: linux-wireless@vger.kernel.org
2457W: http://linuxwireless.org/ 2459W: http://linuxwireless.org/
2458T: git kernel.org:/pub/scm/linux/kernel/git/jbenc/mac80211.git 2460T: git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git
2459S: Maintained 2461S: Maintained
2460 2462
2461MACVLAN DRIVER 2463MACVLAN DRIVER
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 6691981bda11..0f05be769c34 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -61,7 +61,7 @@ static int hmac_setkey(struct crypto_hash *parent,
61 desc.tfm = tfm; 61 desc.tfm = tfm;
62 desc.flags = crypto_hash_get_flags(parent); 62 desc.flags = crypto_hash_get_flags(parent);
63 desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP; 63 desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP;
64 sg_set_buf(&tmp, inkey, keylen); 64 sg_init_one(&tmp, inkey, keylen);
65 65
66 err = crypto_hash_digest(&desc, &tmp, keylen, digest); 66 err = crypto_hash_digest(&desc, &tmp, keylen, digest);
67 if (err) 67 if (err)
@@ -96,7 +96,7 @@ static int hmac_init(struct hash_desc *pdesc)
96 96
97 desc.tfm = ctx->child; 97 desc.tfm = ctx->child;
98 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; 98 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
99 sg_set_buf(&tmp, ipad, bs); 99 sg_init_one(&tmp, ipad, bs);
100 100
101 err = crypto_hash_init(&desc); 101 err = crypto_hash_init(&desc);
102 if (unlikely(err)) 102 if (unlikely(err))
@@ -131,7 +131,7 @@ static int hmac_final(struct hash_desc *pdesc, u8 *out)
131 131
132 desc.tfm = ctx->child; 132 desc.tfm = ctx->child;
133 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; 133 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
134 sg_set_buf(&tmp, opad, bs + ds); 134 sg_init_one(&tmp, opad, bs + ds);
135 135
136 err = crypto_hash_final(&desc, digest); 136 err = crypto_hash_final(&desc, digest);
137 if (unlikely(err)) 137 if (unlikely(err))
@@ -158,9 +158,11 @@ static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg,
158 desc.tfm = ctx->child; 158 desc.tfm = ctx->child;
159 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; 159 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
160 160
161 sg_init_table(sg1, 2);
161 sg_set_buf(sg1, ipad, bs); 162 sg_set_buf(sg1, ipad, bs);
163 sg_set_page(&sg1[1], (void *) sg, 0, 0);
162 164
163 sg_set_page(&sg[1], (void *) sg, 0, 0); 165 sg_init_table(sg2, 1);
164 sg_set_buf(sg2, opad, bs + ds); 166 sg_set_buf(sg2, opad, bs + ds);
165 167
166 err = crypto_hash_digest(&desc, sg1, nbytes + bs, digest); 168 err = crypto_hash_digest(&desc, sg1, nbytes + bs, digest);
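
The conversions above follow the scatterlist API rule that entries must be initialized before use: sg_init_one() for a single buffer, or sg_init_table() followed by sg_set_buf() for a multi-entry table. A minimal sketch of the two patterns (not from the patch):

#include <linux/scatterlist.h>

static void sg_init_examples(void *a, unsigned int alen, void *b, unsigned int blen)
{
	struct scatterlist one, tbl[2];

	/* Single-entry list: initialize and point at the buffer in one call. */
	sg_init_one(&one, a, alen);

	/* Multi-entry table: clear it and mark its end, then fill the slots. */
	sg_init_table(tbl, 2);
	sg_set_buf(&tbl[0], a, alen);
	sg_set_buf(&tbl[1], b, blen);
}
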
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d741c63af42c..c457bdb2a42b 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -139,7 +139,7 @@ static void test_hash(char *algo, struct hash_testvec *template,
139 printk("test %u:\n", i + 1); 139 printk("test %u:\n", i + 1);
140 memset(result, 0, 64); 140 memset(result, 0, 64);
141 141
142 sg_set_buf(&sg[0], hash_tv[i].plaintext, hash_tv[i].psize); 142 sg_init_one(&sg[0], hash_tv[i].plaintext, hash_tv[i].psize);
143 143
144 if (hash_tv[i].ksize) { 144 if (hash_tv[i].ksize) {
145 ret = crypto_hash_setkey(tfm, hash_tv[i].key, 145 ret = crypto_hash_setkey(tfm, hash_tv[i].key,
@@ -176,6 +176,7 @@ static void test_hash(char *algo, struct hash_testvec *template,
176 memset(result, 0, 64); 176 memset(result, 0, 64);
177 177
178 temp = 0; 178 temp = 0;
179 sg_init_table(sg, hash_tv[i].np);
179 for (k = 0; k < hash_tv[i].np; k++) { 180 for (k = 0; k < hash_tv[i].np; k++) {
180 memcpy(&xbuf[IDX[k]], 181 memcpy(&xbuf[IDX[k]],
181 hash_tv[i].plaintext + temp, 182 hash_tv[i].plaintext + temp,
@@ -289,8 +290,8 @@ static void test_cipher(char *algo, int enc,
289 goto out; 290 goto out;
290 } 291 }
291 292
292 sg_set_buf(&sg[0], cipher_tv[i].input, 293 sg_init_one(&sg[0], cipher_tv[i].input,
293 cipher_tv[i].ilen); 294 cipher_tv[i].ilen);
294 295
295 ablkcipher_request_set_crypt(req, sg, sg, 296 ablkcipher_request_set_crypt(req, sg, sg,
296 cipher_tv[i].ilen, 297 cipher_tv[i].ilen,
@@ -353,6 +354,7 @@ static void test_cipher(char *algo, int enc,
353 } 354 }
354 355
355 temp = 0; 356 temp = 0;
357 sg_init_table(sg, cipher_tv[i].np);
356 for (k = 0; k < cipher_tv[i].np; k++) { 358 for (k = 0; k < cipher_tv[i].np; k++) {
357 memcpy(&xbuf[IDX[k]], 359 memcpy(&xbuf[IDX[k]],
358 cipher_tv[i].input + temp, 360 cipher_tv[i].input + temp,
@@ -414,7 +416,7 @@ static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, char *p,
414 int bcount; 416 int bcount;
415 int ret; 417 int ret;
416 418
417 sg_set_buf(sg, p, blen); 419 sg_init_one(sg, p, blen);
418 420
419 for (start = jiffies, end = start + sec * HZ, bcount = 0; 421 for (start = jiffies, end = start + sec * HZ, bcount = 0;
420 time_before(jiffies, end); bcount++) { 422 time_before(jiffies, end); bcount++) {
@@ -440,7 +442,7 @@ static int test_cipher_cycles(struct blkcipher_desc *desc, int enc, char *p,
440 int ret = 0; 442 int ret = 0;
441 int i; 443 int i;
442 444
443 sg_set_buf(sg, p, blen); 445 sg_init_one(sg, p, blen);
444 446
445 local_bh_disable(); 447 local_bh_disable();
446 local_irq_disable(); 448 local_irq_disable();
@@ -572,7 +574,7 @@ static int test_hash_jiffies_digest(struct hash_desc *desc, char *p, int blen,
572 574
573 for (start = jiffies, end = start + sec * HZ, bcount = 0; 575 for (start = jiffies, end = start + sec * HZ, bcount = 0;
574 time_before(jiffies, end); bcount++) { 576 time_before(jiffies, end); bcount++) {
575 sg_set_buf(sg, p, blen); 577 sg_init_one(sg, p, blen);
576 ret = crypto_hash_digest(desc, sg, blen, out); 578 ret = crypto_hash_digest(desc, sg, blen, out);
577 if (ret) 579 if (ret)
578 return ret; 580 return ret;
@@ -601,7 +603,7 @@ static int test_hash_jiffies(struct hash_desc *desc, char *p, int blen,
601 if (ret) 603 if (ret)
602 return ret; 604 return ret;
603 for (pcount = 0; pcount < blen; pcount += plen) { 605 for (pcount = 0; pcount < blen; pcount += plen) {
604 sg_set_buf(sg, p + pcount, plen); 606 sg_init_one(sg, p + pcount, plen);
605 ret = crypto_hash_update(desc, sg, plen); 607 ret = crypto_hash_update(desc, sg, plen);
606 if (ret) 608 if (ret)
607 return ret; 609 return ret;
@@ -631,7 +633,7 @@ static int test_hash_cycles_digest(struct hash_desc *desc, char *p, int blen,
631 633
632 /* Warm-up run. */ 634 /* Warm-up run. */
633 for (i = 0; i < 4; i++) { 635 for (i = 0; i < 4; i++) {
634 sg_set_buf(sg, p, blen); 636 sg_init_one(sg, p, blen);
635 ret = crypto_hash_digest(desc, sg, blen, out); 637 ret = crypto_hash_digest(desc, sg, blen, out);
636 if (ret) 638 if (ret)
637 goto out; 639 goto out;
@@ -643,7 +645,7 @@ static int test_hash_cycles_digest(struct hash_desc *desc, char *p, int blen,
643 645
644 start = get_cycles(); 646 start = get_cycles();
645 647
646 sg_set_buf(sg, p, blen); 648 sg_init_one(sg, p, blen);
647 ret = crypto_hash_digest(desc, sg, blen, out); 649 ret = crypto_hash_digest(desc, sg, blen, out);
648 if (ret) 650 if (ret)
649 goto out; 651 goto out;
@@ -686,7 +688,7 @@ static int test_hash_cycles(struct hash_desc *desc, char *p, int blen,
686 if (ret) 688 if (ret)
687 goto out; 689 goto out;
688 for (pcount = 0; pcount < blen; pcount += plen) { 690 for (pcount = 0; pcount < blen; pcount += plen) {
689 sg_set_buf(sg, p + pcount, plen); 691 sg_init_one(sg, p + pcount, plen);
690 ret = crypto_hash_update(desc, sg, plen); 692 ret = crypto_hash_update(desc, sg, plen);
691 if (ret) 693 if (ret)
692 goto out; 694 goto out;
@@ -706,7 +708,7 @@ static int test_hash_cycles(struct hash_desc *desc, char *p, int blen,
706 if (ret) 708 if (ret)
707 goto out; 709 goto out;
708 for (pcount = 0; pcount < blen; pcount += plen) { 710 for (pcount = 0; pcount < blen; pcount += plen) {
709 sg_set_buf(sg, p + pcount, plen); 711 sg_init_one(sg, p + pcount, plen);
710 ret = crypto_hash_update(desc, sg, plen); 712 ret = crypto_hash_update(desc, sg, plen);
711 if (ret) 713 if (ret)
712 goto out; 714 goto out;
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 2e91fb756e9a..4edaad0d995f 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -185,7 +185,7 @@ static __net_exit void proc_net_ns_exit(struct net *net)
185 kfree(net->proc_net_root); 185 kfree(net->proc_net_root);
186} 186}
187 187
188struct pernet_operations __net_initdata proc_net_ns_ops = { 188static struct pernet_operations __net_initdata proc_net_ns_ops = {
189 .init = proc_net_ns_init, 189 .init = proc_net_ns_init,
190 .exit = proc_net_ns_exit, 190 .exit = proc_net_ns_exit,
191}; 191};
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index f3fc4392e93d..333c3ea82a5d 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -144,6 +144,8 @@ enum dccp_reset_codes {
144 DCCP_RESET_CODE_TOO_BUSY, 144 DCCP_RESET_CODE_TOO_BUSY,
145 DCCP_RESET_CODE_BAD_INIT_COOKIE, 145 DCCP_RESET_CODE_BAD_INIT_COOKIE,
146 DCCP_RESET_CODE_AGGRESSION_PENALTY, 146 DCCP_RESET_CODE_AGGRESSION_PENALTY,
147
148 DCCP_MAX_RESET_CODES /* Leave at the end! */
147}; 149};
148 150
149/* DCCP options */ 151/* DCCP options */
@@ -270,10 +272,9 @@ static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen)
270 return memset(skb_transport_header(skb), 0, headlen); 272 return memset(skb_transport_header(skb), 0, headlen);
271} 273}
272 274
273static inline struct dccp_hdr_ext *dccp_hdrx(const struct sk_buff *skb) 275static inline struct dccp_hdr_ext *dccp_hdrx(const struct dccp_hdr *dh)
274{ 276{
275 return (struct dccp_hdr_ext *)(skb_transport_header(skb) + 277 return (struct dccp_hdr_ext *)((unsigned char *)dh + sizeof(*dh));
276 sizeof(struct dccp_hdr));
277} 278}
278 279
279static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh) 280static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh)
@@ -287,13 +288,12 @@ static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb)
287 return __dccp_basic_hdr_len(dh); 288 return __dccp_basic_hdr_len(dh);
288} 289}
289 290
290static inline __u64 dccp_hdr_seq(const struct sk_buff *skb) 291static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh)
291{ 292{
292 const struct dccp_hdr *dh = dccp_hdr(skb);
293 __u64 seq_nr = ntohs(dh->dccph_seq); 293 __u64 seq_nr = ntohs(dh->dccph_seq);
294 294
295 if (dh->dccph_x != 0) 295 if (dh->dccph_x != 0)
296 seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(skb)->dccph_seq_low); 296 seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(dh)->dccph_seq_low);
297 else 297 else
298 seq_nr += (u32)dh->dccph_seq2 << 16; 298 seq_nr += (u32)dh->dccph_seq2 << 16;
299 299
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 811024e311bd..9b0c8f12373e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -390,7 +390,7 @@ static inline void napi_complete(struct napi_struct *n)
390static inline void napi_disable(struct napi_struct *n) 390static inline void napi_disable(struct napi_struct *n)
391{ 391{
392 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) 392 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
393 msleep_interruptible(1); 393 msleep(1);
394} 394}
395 395
396/** 396/**
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 8228b57eb18f..4427dcd1e53a 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -26,7 +26,6 @@
26 26
27#include <net/inet_connection_sock.h> 27#include <net/inet_connection_sock.h>
28#include <net/inet_sock.h> 28#include <net/inet_sock.h>
29#include <net/route.h>
30#include <net/sock.h> 29#include <net/sock.h>
31#include <net/tcp_states.h> 30#include <net/tcp_states.h>
32 31
@@ -266,11 +265,6 @@ out:
266 wake_up(&hashinfo->lhash_wait); 265 wake_up(&hashinfo->lhash_wait);
267} 266}
268 267
269static inline int inet_iif(const struct sk_buff *skb)
270{
271 return ((struct rtable *)skb->dst)->rt_iif;
272}
273
274extern struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo, 268extern struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo,
275 const __be32 daddr, 269 const __be32 daddr,
276 const unsigned short hnum, 270 const unsigned short hnum,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 62daf214931f..70013c5f4e59 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -24,6 +24,7 @@
24#include <net/flow.h> 24#include <net/flow.h>
25#include <net/sock.h> 25#include <net/sock.h>
26#include <net/request_sock.h> 26#include <net/request_sock.h>
27#include <net/route.h>
27 28
28/** struct ip_options - IP Options 29/** struct ip_options - IP Options
29 * 30 *
@@ -190,4 +191,10 @@ static inline int inet_sk_ehashfn(const struct sock *sk)
190 return inet_ehashfn(laddr, lport, faddr, fport); 191 return inet_ehashfn(laddr, lport, faddr, fport);
191} 192}
192 193
194
195static inline int inet_iif(const struct sk_buff *skb)
196{
197 return ((struct rtable *)skb->dst)->rt_iif;
198}
199
193#endif /* _INET_SOCK_H */ 200#endif /* _INET_SOCK_H */
diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
index 8dabdd603fe1..eea2e6152389 100644
--- a/include/net/irda/ircomm_tty.h
+++ b/include/net/irda/ircomm_tty.h
@@ -127,7 +127,6 @@ extern int ircomm_tty_ioctl(struct tty_struct *tty, struct file *file,
127 unsigned int cmd, unsigned long arg); 127 unsigned int cmd, unsigned long arg);
128extern void ircomm_tty_set_termios(struct tty_struct *tty, 128extern void ircomm_tty_set_termios(struct tty_struct *tty,
129 struct ktermios *old_termios); 129 struct ktermios *old_termios);
130extern hashbin_t *ircomm_tty;
131 130
132#endif 131#endif
133 132
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index a02ec9e5fea5..c9265518a378 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -316,4 +316,19 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
316 return rtab->data[slot]; 316 return rtab->data[slot];
317} 317}
318 318
319#ifdef CONFIG_NET_CLS_ACT
320static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
321{
322 struct sk_buff *n = skb_clone(skb, gfp_mask);
323
324 if (n) {
325 n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
326 n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
327 n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
328 n->iif = skb->iif;
329 }
330 return n;
331}
332#endif
333
319#endif 334#endif
diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h
index 4945954a16af..9e8f13b7da5a 100644
--- a/include/net/sctp/auth.h
+++ b/include/net/sctp/auth.h
@@ -88,7 +88,6 @@ static inline void sctp_auth_key_hold(struct sctp_auth_bytes *key)
88 88
89void sctp_auth_key_put(struct sctp_auth_bytes *key); 89void sctp_auth_key_put(struct sctp_auth_bytes *key);
90struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp); 90struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp);
91void sctp_auth_shkey_free(struct sctp_shared_key *sh_key);
92void sctp_auth_destroy_keys(struct list_head *keys); 91void sctp_auth_destroy_keys(struct list_head *keys);
93int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp); 92int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp);
94struct sctp_shared_key *sctp_auth_get_shkey( 93struct sctp_shared_key *sctp_auth_get_shkey(
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 119f5a1ed499..93eb708609e7 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -156,7 +156,6 @@ int sctp_primitive_ASCONF(struct sctp_association *, void *arg);
156__u32 sctp_start_cksum(__u8 *ptr, __u16 count); 156__u32 sctp_start_cksum(__u8 *ptr, __u16 count);
157__u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum); 157__u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum);
158__u32 sctp_end_cksum(__u32 cksum); 158__u32 sctp_end_cksum(__u32 cksum);
159__u32 sctp_update_copy_cksum(__u8 *, __u8 *, __u16 count, __u32 cksum);
160 159
161/* 160/*
162 * sctp/input.c 161 * sctp/input.c
diff --git a/net/core/dev.c b/net/core/dev.c
index f1647d7dd14b..ddfef3b45bab 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -883,6 +883,9 @@ int dev_change_name(struct net_device *dev, char *newname)
883 if (!dev_valid_name(newname)) 883 if (!dev_valid_name(newname))
884 return -EINVAL; 884 return -EINVAL;
885 885
886 if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
887 return 0;
888
886 memcpy(oldname, dev->name, IFNAMSIZ); 889 memcpy(oldname, dev->name, IFNAMSIZ);
887 890
888 if (strchr(newname, '%')) { 891 if (strchr(newname, '%')) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7b7c6c44c2da..573e17240197 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -415,13 +415,6 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
415 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; 415 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
416 n->nohdr = 0; 416 n->nohdr = 0;
417 n->destructor = NULL; 417 n->destructor = NULL;
418#ifdef CONFIG_NET_CLS_ACT
419 /* FIXME What is this and why don't we do it in copy_skb_header? */
420 n->tc_verd = SET_TC_VERD(n->tc_verd,0);
421 n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
422 n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
423 C(iif);
424#endif
425 C(truesize); 418 C(truesize);
426 atomic_set(&n->users, 1); 419 atomic_set(&n->users, 1);
427 C(head); 420 C(head);
diff --git a/net/core/sock.c b/net/core/sock.c
index febbcbcf8022..bba9949681ff 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1649,7 +1649,6 @@ void sock_enable_timestamp(struct sock *sk)
1649 net_enable_timestamp(); 1649 net_enable_timestamp();
1650 } 1650 }
1651} 1651}
1652EXPORT_SYMBOL(sock_enable_timestamp);
1653 1652
1654/* 1653/*
1655 * Get a socket option on an socket. 1654 * Get a socket option on an socket.
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 426008e3b7e3..d694656b8800 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -750,20 +750,16 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
750 */ 750 */
751 hctx->ccid2hctx_ssthresh = ~0; 751 hctx->ccid2hctx_ssthresh = ~0;
752 hctx->ccid2hctx_numdupack = 3; 752 hctx->ccid2hctx_numdupack = 3;
753 hctx->ccid2hctx_seqbufc = 0;
754 753
755 /* XXX init ~ to window size... */ 754 /* XXX init ~ to window size... */
756 if (ccid2_hc_tx_alloc_seq(hctx)) 755 if (ccid2_hc_tx_alloc_seq(hctx))
757 return -ENOMEM; 756 return -ENOMEM;
758 757
759 hctx->ccid2hctx_sent = 0;
760 hctx->ccid2hctx_rto = 3 * HZ; 758 hctx->ccid2hctx_rto = 3 * HZ;
761 ccid2_change_srtt(hctx, -1); 759 ccid2_change_srtt(hctx, -1);
762 hctx->ccid2hctx_rttvar = -1; 760 hctx->ccid2hctx_rttvar = -1;
763 hctx->ccid2hctx_lastrtt = 0;
764 hctx->ccid2hctx_rpdupack = -1; 761 hctx->ccid2hctx_rpdupack = -1;
765 hctx->ccid2hctx_last_cong = jiffies; 762 hctx->ccid2hctx_last_cong = jiffies;
766 hctx->ccid2hctx_high_ack = 0;
767 763
768 hctx->ccid2hctx_rtotimer.function = &ccid2_hc_tx_rto_expire; 764 hctx->ccid2hctx_rtotimer.function = &ccid2_hc_tx_rto_expire;
769 hctx->ccid2hctx_rtotimer.data = (unsigned long)sk; 765 hctx->ccid2hctx_rtotimer.data = (unsigned long)sk;
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 25772c326172..19b33586333d 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -40,6 +40,8 @@
40#include "lib/tfrc.h" 40#include "lib/tfrc.h"
41#include "ccid3.h" 41#include "ccid3.h"
42 42
43#include <asm/unaligned.h>
44
43#ifdef CONFIG_IP_DCCP_CCID3_DEBUG 45#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
44static int ccid3_debug; 46static int ccid3_debug;
45#define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a) 47#define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a)
@@ -544,6 +546,7 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
544 const struct dccp_sock *dp = dccp_sk(sk); 546 const struct dccp_sock *dp = dccp_sk(sk);
545 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 547 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
546 struct ccid3_options_received *opt_recv; 548 struct ccid3_options_received *opt_recv;
549 __be32 opt_val;
547 550
548 opt_recv = &hctx->ccid3hctx_options_received; 551 opt_recv = &hctx->ccid3hctx_options_received;
549 552
@@ -563,8 +566,8 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
563 dccp_role(sk), sk, len); 566 dccp_role(sk), sk, len);
564 rc = -EINVAL; 567 rc = -EINVAL;
565 } else { 568 } else {
566 opt_recv->ccid3or_loss_event_rate = 569 opt_val = get_unaligned((__be32 *)value);
567 ntohl(*(__be32 *)value); 570 opt_recv->ccid3or_loss_event_rate = ntohl(opt_val);
568 ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n", 571 ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
569 dccp_role(sk), sk, 572 dccp_role(sk), sk,
570 opt_recv->ccid3or_loss_event_rate); 573 opt_recv->ccid3or_loss_event_rate);
@@ -585,8 +588,8 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
585 dccp_role(sk), sk, len); 588 dccp_role(sk), sk, len);
586 rc = -EINVAL; 589 rc = -EINVAL;
587 } else { 590 } else {
588 opt_recv->ccid3or_receive_rate = 591 opt_val = get_unaligned((__be32 *)value);
589 ntohl(*(__be32 *)value); 592 opt_recv->ccid3or_receive_rate = ntohl(opt_val);
590 ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n", 593 ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n",
591 dccp_role(sk), sk, 594 dccp_role(sk), sk,
592 opt_recv->ccid3or_receive_rate); 595 opt_recv->ccid3or_receive_rate);
@@ -601,8 +604,6 @@ static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
601{ 604{
602 struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid); 605 struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid);
603 606
604 hctx->ccid3hctx_s = 0;
605 hctx->ccid3hctx_rtt = 0;
606 hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT; 607 hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT;
607 INIT_LIST_HEAD(&hctx->ccid3hctx_hist); 608 INIT_LIST_HEAD(&hctx->ccid3hctx_hist);
608 609
@@ -963,8 +964,6 @@ static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
963 INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist); 964 INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist);
964 hcrx->ccid3hcrx_tstamp_last_feedback = 965 hcrx->ccid3hcrx_tstamp_last_feedback =
965 hcrx->ccid3hcrx_tstamp_last_ack = ktime_get_real(); 966 hcrx->ccid3hcrx_tstamp_last_ack = ktime_get_real();
966 hcrx->ccid3hcrx_s = 0;
967 hcrx->ccid3hcrx_rtt = 0;
968 return 0; 967 return 0;
969} 968}
970 969
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 3560a2a875a0..1ce101062824 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -58,6 +58,42 @@ static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
58 dccp_send_close(sk, 0); 58 dccp_send_close(sk, 0);
59} 59}
60 60
61static u8 dccp_reset_code_convert(const u8 code)
62{
63 const u8 error_code[] = {
64 [DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */
65 [DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */
66 [DCCP_RESET_CODE_ABORTED] = ECONNRESET,
67
68 [DCCP_RESET_CODE_NO_CONNECTION] = ECONNREFUSED,
69 [DCCP_RESET_CODE_CONNECTION_REFUSED] = ECONNREFUSED,
70 [DCCP_RESET_CODE_TOO_BUSY] = EUSERS,
71 [DCCP_RESET_CODE_AGGRESSION_PENALTY] = EDQUOT,
72
73 [DCCP_RESET_CODE_PACKET_ERROR] = ENOMSG,
74 [DCCP_RESET_CODE_BAD_INIT_COOKIE] = EBADR,
75 [DCCP_RESET_CODE_BAD_SERVICE_CODE] = EBADRQC,
76 [DCCP_RESET_CODE_OPTION_ERROR] = EILSEQ,
77 [DCCP_RESET_CODE_MANDATORY_ERROR] = EOPNOTSUPP,
78 };
79
80 return code >= DCCP_MAX_RESET_CODES ? 0 : error_code[code];
81}
82
83static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
84{
85 u8 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);
86
87 sk->sk_err = err;
88
89 /* Queue the equivalent of TCP fin so that dccp_recvmsg exits the loop */
90 dccp_fin(sk, skb);
91
92 if (err && !sock_flag(sk, SOCK_DEAD))
93 sk_wake_async(sk, 0, POLL_ERR);
94 dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
95}
96
61static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb) 97static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
62{ 98{
63 struct dccp_sock *dp = dccp_sk(sk); 99 struct dccp_sock *dp = dccp_sk(sk);
@@ -191,9 +227,8 @@ static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
191 * S.state := TIMEWAIT 227 * S.state := TIMEWAIT
192 * Set TIMEWAIT timer 228 * Set TIMEWAIT timer
193 * Drop packet and return 229 * Drop packet and return
194 */ 230 */
195 dccp_fin(sk, skb); 231 dccp_rcv_reset(sk, skb);
196 dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
197 return 0; 232 return 0;
198 case DCCP_PKT_CLOSEREQ: 233 case DCCP_PKT_CLOSEREQ:
199 dccp_rcv_closereq(sk, skb); 234 dccp_rcv_closereq(sk, skb);
@@ -521,12 +556,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
521 * Drop packet and return 556 * Drop packet and return
522 */ 557 */
523 if (dh->dccph_type == DCCP_PKT_RESET) { 558 if (dh->dccph_type == DCCP_PKT_RESET) {
524 /* 559 dccp_rcv_reset(sk, skb);
525 * Queue the equivalent of TCP fin so that dccp_recvmsg
526 * exits the loop
527 */
528 dccp_fin(sk, skb);
529 dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
530 return 0; 560 return 0;
531 /* 561 /*
532 * Step 7: Check for unexpected packet types 562 * Step 7: Check for unexpected packet types
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 222549ab274a..01a6a808bdb7 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -241,8 +241,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
241 goto out; 241 goto out;
242 242
243 dp = dccp_sk(sk); 243 dp = dccp_sk(sk);
244 seq = dccp_hdr_seq(skb); 244 seq = dccp_hdr_seq(dh);
245 if (sk->sk_state != DCCP_LISTEN && 245 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
246 !between48(seq, dp->dccps_swl, dp->dccps_swh)) { 246 !between48(seq, dp->dccps_swl, dp->dccps_swh)) {
247 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); 247 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
248 goto out; 248 goto out;
@@ -795,7 +795,7 @@ static int dccp_v4_rcv(struct sk_buff *skb)
795 795
796 dh = dccp_hdr(skb); 796 dh = dccp_hdr(skb);
797 797
798 DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb); 798 DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
799 DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type; 799 DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
800 800
801 dccp_pr_debug("%8.8s " 801 dccp_pr_debug("%8.8s "
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index bbadd6681b83..62428ff137dd 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -173,7 +173,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
173 173
174 icmpv6_err_convert(type, code, &err); 174 icmpv6_err_convert(type, code, &err);
175 175
176 seq = DCCP_SKB_CB(skb)->dccpd_seq; 176 seq = dccp_hdr_seq(dh);
177 /* Might be for an request_sock */ 177 /* Might be for an request_sock */
178 switch (sk->sk_state) { 178 switch (sk->sk_state) {
179 struct request_sock *req, **prev; 179 struct request_sock *req, **prev;
@@ -787,7 +787,7 @@ static int dccp_v6_rcv(struct sk_buff *skb)
787 787
788 dh = dccp_hdr(skb); 788 dh = dccp_hdr(skb);
789 789
790 DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb); 790 DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
791 DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type; 791 DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
792 792
793 if (dccp_packet_without_ack(skb)) 793 if (dccp_packet_without_ack(skb))
diff --git a/net/dccp/options.c b/net/dccp/options.c
index d361b5533309..d286cffe2c49 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -14,6 +14,7 @@
14#include <linux/dccp.h> 14#include <linux/dccp.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <asm/unaligned.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/skbuff.h> 19#include <linux/skbuff.h>
19 20
@@ -59,6 +60,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
59 unsigned char opt, len; 60 unsigned char opt, len;
60 unsigned char *value; 61 unsigned char *value;
61 u32 elapsed_time; 62 u32 elapsed_time;
63 __be32 opt_val;
62 int rc; 64 int rc;
63 int mandatory = 0; 65 int mandatory = 0;
64 66
@@ -145,7 +147,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
145 if (len != 4) 147 if (len != 4)
146 goto out_invalid_option; 148 goto out_invalid_option;
147 149
148 opt_recv->dccpor_timestamp = ntohl(*(__be32 *)value); 150 opt_val = get_unaligned((__be32 *)value);
151 opt_recv->dccpor_timestamp = ntohl(opt_val);
149 152
150 dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp; 153 dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp;
151 dp->dccps_timestamp_time = ktime_get_real(); 154 dp->dccps_timestamp_time = ktime_get_real();
@@ -159,7 +162,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
159 if (len != 4 && len != 6 && len != 8) 162 if (len != 4 && len != 6 && len != 8)
160 goto out_invalid_option; 163 goto out_invalid_option;
161 164
162 opt_recv->dccpor_timestamp_echo = ntohl(*(__be32 *)value); 165 opt_val = get_unaligned((__be32 *)value);
166 opt_recv->dccpor_timestamp_echo = ntohl(opt_val);
163 167
164 dccp_pr_debug("%s rx opt: TIMESTAMP_ECHO=%u, len=%d, " 168 dccp_pr_debug("%s rx opt: TIMESTAMP_ECHO=%u, len=%d, "
165 "ackno=%llu", dccp_role(sk), 169 "ackno=%llu", dccp_role(sk),
@@ -168,16 +172,20 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
168 (unsigned long long) 172 (unsigned long long)
169 DCCP_SKB_CB(skb)->dccpd_ack_seq); 173 DCCP_SKB_CB(skb)->dccpd_ack_seq);
170 174
175 value += 4;
171 176
172 if (len == 4) { 177 if (len == 4) { /* no elapsed time included */
173 dccp_pr_debug_cat("\n"); 178 dccp_pr_debug_cat("\n");
174 break; 179 break;
175 } 180 }
176 181
177 if (len == 6) 182 if (len == 6) { /* 2-byte elapsed time */
178 elapsed_time = ntohs(*(__be16 *)(value + 4)); 183 __be16 opt_val2 = get_unaligned((__be16 *)value);
179 else 184 elapsed_time = ntohs(opt_val2);
180 elapsed_time = ntohl(*(__be32 *)(value + 4)); 185 } else { /* 4-byte elapsed time */
186 opt_val = get_unaligned((__be32 *)value);
187 elapsed_time = ntohl(opt_val);
188 }
181 189
182 dccp_pr_debug_cat(", ELAPSED_TIME=%u\n", elapsed_time); 190 dccp_pr_debug_cat(", ELAPSED_TIME=%u\n", elapsed_time);
183 191
@@ -192,10 +200,13 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
192 if (pkt_type == DCCP_PKT_DATA) 200 if (pkt_type == DCCP_PKT_DATA)
193 continue; 201 continue;
194 202
195 if (len == 2) 203 if (len == 2) {
196 elapsed_time = ntohs(*(__be16 *)value); 204 __be16 opt_val2 = get_unaligned((__be16 *)value);
197 else 205 elapsed_time = ntohs(opt_val2);
198 elapsed_time = ntohl(*(__be32 *)value); 206 } else {
207 opt_val = get_unaligned((__be32 *)value);
208 elapsed_time = ntohl(opt_val);
209 }
199 210
200 if (elapsed_time > opt_recv->dccpor_elapsed_time) 211 if (elapsed_time > opt_recv->dccpor_elapsed_time)
201 opt_recv->dccpor_elapsed_time = elapsed_time; 212 opt_recv->dccpor_elapsed_time = elapsed_time;
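
The option-parser changes above all follow one pattern: option values sit at arbitrary offsets inside the packet, so multi-byte fields are fetched with get_unaligned() before byte-order conversion rather than dereferenced directly. A minimal sketch (not from the patch):

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

/* Read a 32-bit big-endian option value from a possibly unaligned pointer. */
static u32 read_be32_option(unsigned char *value)
{
	__be32 opt_val = get_unaligned((__be32 *)value);

	return ntohl(opt_val);
}
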
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 805a78e6ed55..f18e88bc86ec 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -504,22 +504,16 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def)
504 INIT_RCU_HEAD(&doi_def->rcu); 504 INIT_RCU_HEAD(&doi_def->rcu);
505 INIT_LIST_HEAD(&doi_def->dom_list); 505 INIT_LIST_HEAD(&doi_def->dom_list);
506 506
507 rcu_read_lock();
508 if (cipso_v4_doi_search(doi_def->doi) != NULL)
509 goto doi_add_failure_rlock;
510 spin_lock(&cipso_v4_doi_list_lock); 507 spin_lock(&cipso_v4_doi_list_lock);
511 if (cipso_v4_doi_search(doi_def->doi) != NULL) 508 if (cipso_v4_doi_search(doi_def->doi) != NULL)
512 goto doi_add_failure_slock; 509 goto doi_add_failure;
513 list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); 510 list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list);
514 spin_unlock(&cipso_v4_doi_list_lock); 511 spin_unlock(&cipso_v4_doi_list_lock);
515 rcu_read_unlock();
516 512
517 return 0; 513 return 0;
518 514
519doi_add_failure_slock: 515doi_add_failure:
520 spin_unlock(&cipso_v4_doi_list_lock); 516 spin_unlock(&cipso_v4_doi_list_lock);
521doi_add_failure_rlock:
522 rcu_read_unlock();
523 return -EEXIST; 517 return -EEXIST;
524} 518}
525 519
@@ -543,29 +537,23 @@ int cipso_v4_doi_remove(u32 doi,
543 struct cipso_v4_doi *doi_def; 537 struct cipso_v4_doi *doi_def;
544 struct cipso_v4_domhsh_entry *dom_iter; 538 struct cipso_v4_domhsh_entry *dom_iter;
545 539
546 rcu_read_lock(); 540 spin_lock(&cipso_v4_doi_list_lock);
547 if (cipso_v4_doi_search(doi) != NULL) { 541 doi_def = cipso_v4_doi_search(doi);
548 spin_lock(&cipso_v4_doi_list_lock); 542 if (doi_def != NULL) {
549 doi_def = cipso_v4_doi_search(doi);
550 if (doi_def == NULL) {
551 spin_unlock(&cipso_v4_doi_list_lock);
552 rcu_read_unlock();
553 return -ENOENT;
554 }
555 doi_def->valid = 0; 543 doi_def->valid = 0;
556 list_del_rcu(&doi_def->list); 544 list_del_rcu(&doi_def->list);
557 spin_unlock(&cipso_v4_doi_list_lock); 545 spin_unlock(&cipso_v4_doi_list_lock);
546 rcu_read_lock();
558 list_for_each_entry_rcu(dom_iter, &doi_def->dom_list, list) 547 list_for_each_entry_rcu(dom_iter, &doi_def->dom_list, list)
559 if (dom_iter->valid) 548 if (dom_iter->valid)
560 netlbl_domhsh_remove(dom_iter->domain, 549 netlbl_domhsh_remove(dom_iter->domain,
561 audit_info); 550 audit_info);
562 cipso_v4_cache_invalidate();
563 rcu_read_unlock(); 551 rcu_read_unlock();
564 552 cipso_v4_cache_invalidate();
565 call_rcu(&doi_def->rcu, callback); 553 call_rcu(&doi_def->rcu, callback);
566 return 0; 554 return 0;
567 } 555 }
568 rcu_read_unlock(); 556 spin_unlock(&cipso_v4_doi_list_lock);
569 557
570 return -ENOENT; 558 return -ENOENT;
571} 559}
@@ -653,22 +641,19 @@ int cipso_v4_doi_domhsh_add(struct cipso_v4_doi *doi_def, const char *domain)
653 new_dom->valid = 1; 641 new_dom->valid = 1;
654 INIT_RCU_HEAD(&new_dom->rcu); 642 INIT_RCU_HEAD(&new_dom->rcu);
655 643
656 rcu_read_lock();
657 spin_lock(&cipso_v4_doi_list_lock); 644 spin_lock(&cipso_v4_doi_list_lock);
658 list_for_each_entry_rcu(iter, &doi_def->dom_list, list) 645 list_for_each_entry(iter, &doi_def->dom_list, list)
659 if (iter->valid && 646 if (iter->valid &&
660 ((domain != NULL && iter->domain != NULL && 647 ((domain != NULL && iter->domain != NULL &&
661 strcmp(iter->domain, domain) == 0) || 648 strcmp(iter->domain, domain) == 0) ||
662 (domain == NULL && iter->domain == NULL))) { 649 (domain == NULL && iter->domain == NULL))) {
663 spin_unlock(&cipso_v4_doi_list_lock); 650 spin_unlock(&cipso_v4_doi_list_lock);
664 rcu_read_unlock();
665 kfree(new_dom->domain); 651 kfree(new_dom->domain);
666 kfree(new_dom); 652 kfree(new_dom);
667 return -EEXIST; 653 return -EEXIST;
668 } 654 }
669 list_add_tail_rcu(&new_dom->list, &doi_def->dom_list); 655 list_add_tail_rcu(&new_dom->list, &doi_def->dom_list);
670 spin_unlock(&cipso_v4_doi_list_lock); 656 spin_unlock(&cipso_v4_doi_list_lock);
671 rcu_read_unlock();
672 657
673 return 0; 658 return 0;
674} 659}
@@ -689,9 +674,8 @@ int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def,
689{ 674{
690 struct cipso_v4_domhsh_entry *iter; 675 struct cipso_v4_domhsh_entry *iter;
691 676
692 rcu_read_lock();
693 spin_lock(&cipso_v4_doi_list_lock); 677 spin_lock(&cipso_v4_doi_list_lock);
694 list_for_each_entry_rcu(iter, &doi_def->dom_list, list) 678 list_for_each_entry(iter, &doi_def->dom_list, list)
695 if (iter->valid && 679 if (iter->valid &&
696 ((domain != NULL && iter->domain != NULL && 680 ((domain != NULL && iter->domain != NULL &&
697 strcmp(iter->domain, domain) == 0) || 681 strcmp(iter->domain, domain) == 0) ||
@@ -699,13 +683,10 @@ int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def,
699 iter->valid = 0; 683 iter->valid = 0;
700 list_del_rcu(&iter->list); 684 list_del_rcu(&iter->list);
701 spin_unlock(&cipso_v4_doi_list_lock); 685 spin_unlock(&cipso_v4_doi_list_lock);
702 rcu_read_unlock();
703 call_rcu(&iter->rcu, cipso_v4_doi_domhsh_free); 686 call_rcu(&iter->rcu, cipso_v4_doi_domhsh_free);
704
705 return 0; 687 return 0;
706 } 688 }
707 spin_unlock(&cipso_v4_doi_list_lock); 689 spin_unlock(&cipso_v4_doi_list_lock);
708 rcu_read_unlock();
709 690
710 return -ENOENT; 691 return -ENOENT;
711} 692}
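
The locking rework above converges on the usual RCU list pattern: writers serialize on the spinlock alone (no rcu_read_lock() wrapped around the update), readers traverse under rcu_read_lock(), and freeing is deferred with call_rcu(). A generic sketch with made-up names (not from the patch):

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_entry {
	struct list_head list;
	struct rcu_head rcu;
	int key;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

static void demo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_entry, rcu));
}

static int demo_remove(int key)
{
	struct demo_entry *e;

	spin_lock(&demo_lock);
	list_for_each_entry(e, &demo_list, list) {
		if (e->key != key)
			continue;
		list_del_rcu(&e->list);
		spin_unlock(&demo_lock);
		/* Readers may still see the entry until a grace period ends. */
		call_rcu(&e->rcu, demo_free_rcu);
		return 0;
	}
	spin_unlock(&demo_lock);
	return -ENOENT;
}
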
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 6b1a31a74cf2..ba9840195cf2 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -110,6 +110,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
110 if (!sg) 110 if (!sg)
111 goto unlock; 111 goto unlock;
112 } 112 }
113 sg_init_table(sg, nfrags);
113 skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); 114 skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
114 err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); 115 err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
115 if (unlikely(sg != &esp->sgbuf[0])) 116 if (unlikely(sg != &esp->sgbuf[0]))
@@ -201,6 +202,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
201 if (!sg) 202 if (!sg)
202 goto out; 203 goto out;
203 } 204 }
205 sg_init_table(sg, nfrags);
204 skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen); 206 skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen);
205 err = crypto_blkcipher_decrypt(&desc, sg, sg, elen); 207 err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
206 if (unlikely(sg != &esp->sgbuf[0])) 208 if (unlikely(sg != &esp->sgbuf[0]))
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 272c69e106e9..233de0634298 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1104,5 +1104,4 @@ void __init icmp_init(struct net_proto_family *ops)
1104EXPORT_SYMBOL(icmp_err_convert); 1104EXPORT_SYMBOL(icmp_err_convert);
1105EXPORT_SYMBOL(icmp_send); 1105EXPORT_SYMBOL(icmp_send);
1106EXPORT_SYMBOL(icmp_statistics); 1106EXPORT_SYMBOL(icmp_statistics);
1107EXPORT_SYMBOL(icmpmsg_statistics);
1108EXPORT_SYMBOL(xrlim_allow); 1107EXPORT_SYMBOL(xrlim_allow);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index fd16cb8f8abe..9be0daa9c0ec 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -121,14 +121,6 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
121 SNMP_MIB_SENTINEL 121 SNMP_MIB_SENTINEL
122}; 122};
123 123
124static const struct snmp_mib snmp4_icmp_list[] = {
125 SNMP_MIB_ITEM("InMsgs", ICMP_MIB_INMSGS),
126 SNMP_MIB_ITEM("InErrors", ICMP_MIB_INERRORS),
127 SNMP_MIB_ITEM("OutMsgs", ICMP_MIB_OUTMSGS),
128 SNMP_MIB_ITEM("OutErrors", ICMP_MIB_OUTERRORS),
129 SNMP_MIB_SENTINEL
130};
131
132static struct { 124static struct {
133 char *name; 125 char *name;
134 int index; 126 int index;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3dbbb44b3e7d..69d8c38ccd39 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -103,7 +103,7 @@ int sysctl_tcp_abc __read_mostly;
103#define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ 103#define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
104#define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ 104#define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */
105#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ 105#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
106#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained DSACK info */ 106#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
107#define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ 107#define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */
108 108
109#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) 109#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
@@ -866,7 +866,7 @@ static void tcp_disable_fack(struct tcp_sock *tp)
866 tp->rx_opt.sack_ok &= ~2; 866 tp->rx_opt.sack_ok &= ~2;
867} 867}
868 868
869/* Take a notice that peer is sending DSACKs */ 869/* Take a notice that peer is sending D-SACKs */
870static void tcp_dsack_seen(struct tcp_sock *tp) 870static void tcp_dsack_seen(struct tcp_sock *tp)
871{ 871{
872 tp->rx_opt.sack_ok |= 4; 872 tp->rx_opt.sack_ok |= 4;
@@ -1058,7 +1058,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
1058 * 1058 *
1059 * With D-SACK the lower bound is extended to cover sequence space below 1059 * With D-SACK the lower bound is extended to cover sequence space below
1060 * SND.UNA down to undo_marker, which is the last point of interest. Yet 1060 * SND.UNA down to undo_marker, which is the last point of interest. Yet
1061 * again, DSACK block must not to go across snd_una (for the same reason as 1061 * again, D-SACK block must not to go across snd_una (for the same reason as
1062 * for the normal SACK blocks, explained above). But there all simplicity 1062 * for the normal SACK blocks, explained above). But there all simplicity
1063 * ends, TCP might receive valid D-SACKs below that. As long as they reside 1063 * ends, TCP might receive valid D-SACKs below that. As long as they reside
1064 * fully below undo_marker they do not affect behavior in anyway and can 1064 * fully below undo_marker they do not affect behavior in anyway and can
@@ -1080,7 +1080,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
1080 if (!before(start_seq, tp->snd_nxt)) 1080 if (!before(start_seq, tp->snd_nxt))
1081 return 0; 1081 return 0;
1082 1082
1083 /* In outstanding window? ...This is valid exit for DSACKs too. 1083 /* In outstanding window? ...This is valid exit for D-SACKs too.
1084 * start_seq == snd_una is non-sensical (see comments above) 1084 * start_seq == snd_una is non-sensical (see comments above)
1085 */ 1085 */
1086 if (after(start_seq, tp->snd_una)) 1086 if (after(start_seq, tp->snd_una))
@@ -1204,8 +1204,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
1204 * which may fail and creates some hassle (caller must handle error case 1204 * which may fail and creates some hassle (caller must handle error case
1205 * returns). 1205 * returns).
1206 */ 1206 */
1207int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, 1207static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1208 u32 start_seq, u32 end_seq) 1208 u32 start_seq, u32 end_seq)
1209{ 1209{
1210 int in_sack, err; 1210 int in_sack, err;
1211 unsigned int pkt_len; 1211 unsigned int pkt_len;
@@ -1248,6 +1248,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1248 int cached_fack_count; 1248 int cached_fack_count;
1249 int i; 1249 int i;
1250 int first_sack_index; 1250 int first_sack_index;
1251 int force_one_sack;
1251 1252
1252 if (!tp->sacked_out) { 1253 if (!tp->sacked_out) {
1253 if (WARN_ON(tp->fackets_out)) 1254 if (WARN_ON(tp->fackets_out))
@@ -1272,18 +1273,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1272 * if the only SACK change is the increase of the end_seq of 1273 * if the only SACK change is the increase of the end_seq of
1273 * the first block then only apply that SACK block 1274 * the first block then only apply that SACK block
1274 * and use retrans queue hinting otherwise slowpath */ 1275 * and use retrans queue hinting otherwise slowpath */
1275 flag = 1; 1276 force_one_sack = 1;
1276 for (i = 0; i < num_sacks; i++) { 1277 for (i = 0; i < num_sacks; i++) {
1277 __be32 start_seq = sp[i].start_seq; 1278 __be32 start_seq = sp[i].start_seq;
1278 __be32 end_seq = sp[i].end_seq; 1279 __be32 end_seq = sp[i].end_seq;
1279 1280
1280 if (i == 0) { 1281 if (i == 0) {
1281 if (tp->recv_sack_cache[i].start_seq != start_seq) 1282 if (tp->recv_sack_cache[i].start_seq != start_seq)
1282 flag = 0; 1283 force_one_sack = 0;
1283 } else { 1284 } else {
1284 if ((tp->recv_sack_cache[i].start_seq != start_seq) || 1285 if ((tp->recv_sack_cache[i].start_seq != start_seq) ||
1285 (tp->recv_sack_cache[i].end_seq != end_seq)) 1286 (tp->recv_sack_cache[i].end_seq != end_seq))
1286 flag = 0; 1287 force_one_sack = 0;
1287 } 1288 }
1288 tp->recv_sack_cache[i].start_seq = start_seq; 1289 tp->recv_sack_cache[i].start_seq = start_seq;
1289 tp->recv_sack_cache[i].end_seq = end_seq; 1290 tp->recv_sack_cache[i].end_seq = end_seq;
@@ -1295,7 +1296,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1295 } 1296 }
1296 1297
1297 first_sack_index = 0; 1298 first_sack_index = 0;
1298 if (flag) 1299 if (force_one_sack)
1299 num_sacks = 1; 1300 num_sacks = 1;
1300 else { 1301 else {
1301 int j; 1302 int j;
@@ -1321,9 +1322,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1321 } 1322 }
1322 } 1323 }
1323 1324
1324 /* clear flag as used for different purpose in following code */
1325 flag = 0;
1326
1327 /* Use SACK fastpath hint if valid */ 1325 /* Use SACK fastpath hint if valid */
1328 cached_skb = tp->fastpath_skb_hint; 1326 cached_skb = tp->fastpath_skb_hint;
1329 cached_fack_count = tp->fastpath_cnt_hint; 1327 cached_fack_count = tp->fastpath_cnt_hint;
@@ -1615,7 +1613,7 @@ void tcp_enter_frto(struct sock *sk)
1615 !icsk->icsk_retransmits)) { 1613 !icsk->icsk_retransmits)) {
1616 tp->prior_ssthresh = tcp_current_ssthresh(sk); 1614 tp->prior_ssthresh = tcp_current_ssthresh(sk);
1617 /* Our state is too optimistic in ssthresh() call because cwnd 1615 /* Our state is too optimistic in ssthresh() call because cwnd
1618 * is not reduced until tcp_enter_frto_loss() when previous FRTO 1616 * is not reduced until tcp_enter_frto_loss() when previous F-RTO
1619 * recovery has not yet completed. Pattern would be this: RTO, 1617 * recovery has not yet completed. Pattern would be this: RTO,
1620 * Cumulative ACK, RTO (2xRTO for the same segment does not end 1618 * Cumulative ACK, RTO (2xRTO for the same segment does not end
1621 * up here twice). 1619 * up here twice).
@@ -1801,7 +1799,7 @@ void tcp_enter_loss(struct sock *sk, int how)
1801 tcp_set_ca_state(sk, TCP_CA_Loss); 1799 tcp_set_ca_state(sk, TCP_CA_Loss);
1802 tp->high_seq = tp->snd_nxt; 1800 tp->high_seq = tp->snd_nxt;
1803 TCP_ECN_queue_cwr(tp); 1801 TCP_ECN_queue_cwr(tp);
1804 /* Abort FRTO algorithm if one is in progress */ 1802 /* Abort F-RTO algorithm if one is in progress */
1805 tp->frto_counter = 0; 1803 tp->frto_counter = 0;
1806} 1804}
1807 1805
@@ -1946,7 +1944,7 @@ static int tcp_time_to_recover(struct sock *sk)
1946 struct tcp_sock *tp = tcp_sk(sk); 1944 struct tcp_sock *tp = tcp_sk(sk);
1947 __u32 packets_out; 1945 __u32 packets_out;
1948 1946
1949 /* Do not perform any recovery during FRTO algorithm */ 1947 /* Do not perform any recovery during F-RTO algorithm */
1950 if (tp->frto_counter) 1948 if (tp->frto_counter)
1951 return 0; 1949 return 0;
1952 1950
@@ -2962,7 +2960,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
2962 } 2960 }
2963 2961
2964 if (tp->frto_counter == 1) { 2962 if (tp->frto_counter == 1) {
2965 /* Sending of the next skb must be allowed or no FRTO */ 2963 /* Sending of the next skb must be allowed or no F-RTO */
2966 if (!tcp_send_head(sk) || 2964 if (!tcp_send_head(sk) ||
2967 after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, 2965 after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
2968 tp->snd_una + tp->snd_wnd)) { 2966 tp->snd_una + tp->snd_wnd)) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 38cf73a56731..ad759f1c3777 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1055,6 +1055,9 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1055 bp->pad = 0; 1055 bp->pad = 0;
1056 bp->protocol = protocol; 1056 bp->protocol = protocol;
1057 bp->len = htons(tcplen); 1057 bp->len = htons(tcplen);
1058
1059 sg_init_table(sg, 4);
1060
1058 sg_set_buf(&sg[block++], bp, sizeof(*bp)); 1061 sg_set_buf(&sg[block++], bp, sizeof(*bp));
1059 nbytes += sizeof(*bp); 1062 nbytes += sizeof(*bp);
1060 1063
@@ -1080,6 +1083,8 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1080 sg_set_buf(&sg[block++], key->key, key->keylen); 1083 sg_set_buf(&sg[block++], key->key, key->keylen);
1081 nbytes += key->keylen; 1084 nbytes += key->keylen;
1082 1085
1086 sg_mark_end(sg, block);
1087
1083 /* Now store the Hash into the packet */ 1088 /* Now store the Hash into the packet */
1084 err = crypto_hash_init(desc); 1089 err = crypto_hash_init(desc);
1085 if (err) 1090 if (err)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 35d2b0e9e10b..4bc25b46f33f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1152,7 +1152,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
1152 return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable); 1152 return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);
1153 1153
1154 sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest, 1154 sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest,
1155 skb->dev->ifindex, udptable ); 1155 inet_iif(skb), udptable);
1156 1156
1157 if (sk != NULL) { 1157 if (sk != NULL) {
1158 int ret = udp_queue_rcv_skb(sk, skb); 1158 int ret = udp_queue_rcv_skb(sk, skb);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 72a659806cad..f67d51a4e56d 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -109,6 +109,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
109 if (!sg) 109 if (!sg)
110 goto unlock; 110 goto unlock;
111 } 111 }
112 sg_init_table(sg, nfrags);
112 skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); 113 skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
113 err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); 114 err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
114 if (unlikely(sg != &esp->sgbuf[0])) 115 if (unlikely(sg != &esp->sgbuf[0]))
@@ -205,6 +206,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
205 goto out; 206 goto out;
206 } 207 }
207 } 208 }
209 sg_init_table(sg, nfrags);
208 skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen); 210 skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen);
209 ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen); 211 ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
210 if (unlikely(sg != &esp->sgbuf[0])) 212 if (unlikely(sg != &esp->sgbuf[0]))
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 737b755342bd..06fa4baddf05 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -757,6 +757,8 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
757 bp->len = htonl(tcplen); 757 bp->len = htonl(tcplen);
758 bp->protocol = htonl(protocol); 758 bp->protocol = htonl(protocol);
759 759
760 sg_init_table(sg, 4);
761
760 sg_set_buf(&sg[block++], bp, sizeof(*bp)); 762 sg_set_buf(&sg[block++], bp, sizeof(*bp));
761 nbytes += sizeof(*bp); 763 nbytes += sizeof(*bp);
762 764
@@ -778,6 +780,8 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
778 sg_set_buf(&sg[block++], key->key, key->keylen); 780 sg_set_buf(&sg[block++], key->key, key->keylen);
779 nbytes += key->keylen; 781 nbytes += key->keylen;
780 782
783 sg_mark_end(sg, block);
784
781 /* Now store the hash into the packet */ 785 /* Now store the hash into the packet */
782 err = crypto_hash_init(desc); 786 err = crypto_hash_init(desc);
783 if (err) { 787 if (err) {
@@ -1728,6 +1732,8 @@ process:
1728 if (!sock_owned_by_user(sk)) { 1732 if (!sock_owned_by_user(sk)) {
1729#ifdef CONFIG_NET_DMA 1733#ifdef CONFIG_NET_DMA
1730 struct tcp_sock *tp = tcp_sk(sk); 1734 struct tcp_sock *tp = tcp_sk(sk);
1735 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1736 tp->ucopy.dma_chan = get_softnet_dma();
1731 if (tp->ucopy.dma_chan) 1737 if (tp->ucopy.dma_chan)
1732 ret = tcp_v6_do_rcv(sk, skb); 1738 ret = tcp_v6_do_rcv(sk, skb);
1733 else 1739 else
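[note] The third tcp_ipv6.c hunk appears to bring the IPv6 receive path in line with IPv4's NET_DMA handling: an I/OAT channel is acquired lazily with get_softnet_dma(), and only when a pinned user buffer is already waiting, before the existing dma_chan test chooses the receive path. A compressed sketch of that ordering, in-kernel context assumed and the non-DMA fallback elided:

#include <net/tcp.h>

#ifdef CONFIG_NET_DMA
/*
 * Sketch: grab the DMA channel lazily, and only when a pinned user buffer is
 * waiting, then let the existing dma_chan test pick the direct path.
 * get_softnet_dma() is used here exactly as it appears in the hunk.
 */
static int demo_rcv_select(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
		tp->ucopy.dma_chan = get_softnet_dma();

	if (tp->ucopy.dma_chan)
		return 1;	/* take the direct tcp_v6_do_rcv() path */
	return 0;		/* otherwise fall back as before */
}
#endif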
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 3d241e415a2a..1120b150e211 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -77,7 +77,7 @@ static int ircomm_tty_read_proc(char *buf, char **start, off_t offset, int len,
77#endif /* CONFIG_PROC_FS */ 77#endif /* CONFIG_PROC_FS */
78static struct tty_driver *driver; 78static struct tty_driver *driver;
79 79
80hashbin_t *ircomm_tty = NULL; 80static hashbin_t *ircomm_tty = NULL;
81 81
82static const struct tty_operations ops = { 82static const struct tty_operations ops = {
83 .open = ircomm_tty_open, 83 .open = ircomm_tty_open,
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c
index f7ffeec3913f..fda0e06453e8 100644
--- a/net/mac80211/ieee80211_sta.c
+++ b/net/mac80211/ieee80211_sta.c
@@ -1184,7 +1184,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev,
1184 printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x " 1184 printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x "
1185 "status=%d aid=%d)\n", 1185 "status=%d aid=%d)\n",
1186 dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa), 1186 dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa),
1187 capab_info, status_code, aid & ~(BIT(15) | BIT(14))); 1187 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
1188 1188
1189 if (status_code != WLAN_STATUS_SUCCESS) { 1189 if (status_code != WLAN_STATUS_SUCCESS) {
1190 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", 1190 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
@@ -2096,7 +2096,8 @@ static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta,
2096{ 2096{
2097 int tmp, hidden_ssid; 2097 int tmp, hidden_ssid;
2098 2098
2099 if (!memcmp(ifsta->ssid, ssid, ssid_len)) 2099 if (ssid_len == ifsta->ssid_len &&
2100 !memcmp(ifsta->ssid, ssid, ssid_len))
2100 return 1; 2101 return 1;
2101 2102
2102 if (ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) 2103 if (ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL)
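[note] The ieee80211_sta_match_ssid() fix is a classic memcmp() pitfall: comparing over only one side's length treats any shared prefix as a match, so the lengths must be equal before the bytes are compared. A hypothetical helper showing the corrected comparison:

#include <linux/string.h>
#include <linux/types.h>

/*
 * Hypothetical helper: two SSIDs are equal only if their lengths match and
 * the bytes over that common length match as well.
 */
static int demo_ssid_equal(const u8 *a, size_t a_len,
			   const u8 *b, size_t b_len)
{
	return a_len == b_len && memcmp(a, b, a_len) == 0;
}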
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index b6c844b7e1c1..b3675bd7db33 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -178,11 +178,9 @@ int netlbl_domhsh_init(u32 size)
178 for (iter = 0; iter < hsh_tbl->size; iter++) 178 for (iter = 0; iter < hsh_tbl->size; iter++)
179 INIT_LIST_HEAD(&hsh_tbl->tbl[iter]); 179 INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
180 180
181 rcu_read_lock();
182 spin_lock(&netlbl_domhsh_lock); 181 spin_lock(&netlbl_domhsh_lock);
183 rcu_assign_pointer(netlbl_domhsh, hsh_tbl); 182 rcu_assign_pointer(netlbl_domhsh, hsh_tbl);
184 spin_unlock(&netlbl_domhsh_lock); 183 spin_unlock(&netlbl_domhsh_lock);
185 rcu_read_unlock();
186 184
187 return 0; 185 return 0;
188} 186}
@@ -222,7 +220,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
222 entry->valid = 1; 220 entry->valid = 1;
223 INIT_RCU_HEAD(&entry->rcu); 221 INIT_RCU_HEAD(&entry->rcu);
224 222
225 ret_val = 0;
226 rcu_read_lock(); 223 rcu_read_lock();
227 if (entry->domain != NULL) { 224 if (entry->domain != NULL) {
228 bkt = netlbl_domhsh_hash(entry->domain); 225 bkt = netlbl_domhsh_hash(entry->domain);
@@ -233,7 +230,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
233 else 230 else
234 ret_val = -EEXIST; 231 ret_val = -EEXIST;
235 spin_unlock(&netlbl_domhsh_lock); 232 spin_unlock(&netlbl_domhsh_lock);
236 } else if (entry->domain == NULL) { 233 } else {
237 INIT_LIST_HEAD(&entry->list); 234 INIT_LIST_HEAD(&entry->list);
238 spin_lock(&netlbl_domhsh_def_lock); 235 spin_lock(&netlbl_domhsh_def_lock);
239 if (rcu_dereference(netlbl_domhsh_def) == NULL) 236 if (rcu_dereference(netlbl_domhsh_def) == NULL)
@@ -241,9 +238,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
241 else 238 else
242 ret_val = -EEXIST; 239 ret_val = -EEXIST;
243 spin_unlock(&netlbl_domhsh_def_lock); 240 spin_unlock(&netlbl_domhsh_def_lock);
244 } else 241 }
245 ret_val = -EINVAL;
246
247 audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info); 242 audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info);
248 if (audit_buf != NULL) { 243 if (audit_buf != NULL) {
249 audit_log_format(audit_buf, 244 audit_log_format(audit_buf,
@@ -262,7 +257,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
262 audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0); 257 audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
263 audit_log_end(audit_buf); 258 audit_log_end(audit_buf);
264 } 259 }
265
266 rcu_read_unlock(); 260 rcu_read_unlock();
267 261
268 if (ret_val != 0) { 262 if (ret_val != 0) {
@@ -313,38 +307,30 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info)
313 struct audit_buffer *audit_buf; 307 struct audit_buffer *audit_buf;
314 308
315 rcu_read_lock(); 309 rcu_read_lock();
316 if (domain != NULL) 310 entry = netlbl_domhsh_search(domain, (domain != NULL ? 0 : 1));
317 entry = netlbl_domhsh_search(domain, 0);
318 else
319 entry = netlbl_domhsh_search(domain, 1);
320 if (entry == NULL) 311 if (entry == NULL)
321 goto remove_return; 312 goto remove_return;
322 switch (entry->type) { 313 switch (entry->type) {
323 case NETLBL_NLTYPE_UNLABELED:
324 break;
325 case NETLBL_NLTYPE_CIPSOV4: 314 case NETLBL_NLTYPE_CIPSOV4:
326 ret_val = cipso_v4_doi_domhsh_remove(entry->type_def.cipsov4, 315 cipso_v4_doi_domhsh_remove(entry->type_def.cipsov4,
327 entry->domain); 316 entry->domain);
328 if (ret_val != 0)
329 goto remove_return;
330 break; 317 break;
331 } 318 }
332 ret_val = 0;
333 if (entry != rcu_dereference(netlbl_domhsh_def)) { 319 if (entry != rcu_dereference(netlbl_domhsh_def)) {
334 spin_lock(&netlbl_domhsh_lock); 320 spin_lock(&netlbl_domhsh_lock);
335 if (entry->valid) { 321 if (entry->valid) {
336 entry->valid = 0; 322 entry->valid = 0;
337 list_del_rcu(&entry->list); 323 list_del_rcu(&entry->list);
338 } else 324 ret_val = 0;
339 ret_val = -ENOENT; 325 }
340 spin_unlock(&netlbl_domhsh_lock); 326 spin_unlock(&netlbl_domhsh_lock);
341 } else { 327 } else {
342 spin_lock(&netlbl_domhsh_def_lock); 328 spin_lock(&netlbl_domhsh_def_lock);
343 if (entry->valid) { 329 if (entry->valid) {
344 entry->valid = 0; 330 entry->valid = 0;
345 rcu_assign_pointer(netlbl_domhsh_def, NULL); 331 rcu_assign_pointer(netlbl_domhsh_def, NULL);
346 } else 332 ret_val = 0;
347 ret_val = -ENOENT; 333 }
348 spin_unlock(&netlbl_domhsh_def_lock); 334 spin_unlock(&netlbl_domhsh_def_lock);
349 } 335 }
350 336
@@ -357,11 +343,10 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info)
357 audit_log_end(audit_buf); 343 audit_log_end(audit_buf);
358 } 344 }
359 345
360 if (ret_val == 0)
361 call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
362
363remove_return: 346remove_return:
364 rcu_read_unlock(); 347 rcu_read_unlock();
348 if (ret_val == 0)
349 call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
365 return ret_val; 350 return ret_val;
366} 351}
367 352
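[note] The netlabel_domainhash.c changes are a writer-side cleanup: rcu_read_lock() around updates already serialised by a spinlock is dropped, the unreachable domain==NULL/else -EINVAL branch goes away, and in netlbl_domhsh_remove() the deferred free is queued only after leaving the read-side section used for the lookup. The netlabel_mgmt.c and netlabel_unlabeled.c hunks below make the same rcu_read_lock()-around-spinlock simplification. A reduced sketch, with hypothetical types and names, of the remove pattern the code converges on:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Writer side: spinlock protects the update; RCU defers only the free. */
struct demo_entry {
	struct list_head list;
	struct rcu_head rcu;
	int valid;
};

static DEFINE_SPINLOCK(demo_lock);

static void demo_free_entry(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_entry, rcu));
}

static int demo_remove(struct demo_entry *entry)
{
	int ret = -ENOENT;

	spin_lock(&demo_lock);
	if (entry->valid) {
		entry->valid = 0;
		list_del_rcu(&entry->list);
		ret = 0;
	}
	spin_unlock(&demo_lock);

	if (ret == 0)		/* outside any rcu_read_lock() section */
		call_rcu(&entry->rcu, demo_free_entry);
	return ret;
}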
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 5315dacc5222..56483377997a 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -85,11 +85,9 @@ static const struct nla_policy netlbl_mgmt_genl_policy[NLBL_MGMT_A_MAX + 1] = {
85 */ 85 */
86void netlbl_mgmt_protocount_inc(void) 86void netlbl_mgmt_protocount_inc(void)
87{ 87{
88 rcu_read_lock();
89 spin_lock(&netlabel_mgmt_protocount_lock); 88 spin_lock(&netlabel_mgmt_protocount_lock);
90 netlabel_mgmt_protocount++; 89 netlabel_mgmt_protocount++;
91 spin_unlock(&netlabel_mgmt_protocount_lock); 90 spin_unlock(&netlabel_mgmt_protocount_lock);
92 rcu_read_unlock();
93} 91}
94 92
95/** 93/**
@@ -103,12 +101,10 @@ void netlbl_mgmt_protocount_inc(void)
103 */ 101 */
104void netlbl_mgmt_protocount_dec(void) 102void netlbl_mgmt_protocount_dec(void)
105{ 103{
106 rcu_read_lock();
107 spin_lock(&netlabel_mgmt_protocount_lock); 104 spin_lock(&netlabel_mgmt_protocount_lock);
108 if (netlabel_mgmt_protocount > 0) 105 if (netlabel_mgmt_protocount > 0)
109 netlabel_mgmt_protocount--; 106 netlabel_mgmt_protocount--;
110 spin_unlock(&netlabel_mgmt_protocount_lock); 107 spin_unlock(&netlabel_mgmt_protocount_lock);
111 rcu_read_unlock();
112} 108}
113 109
114/** 110/**
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 5c303c68af1d..348292450deb 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -84,12 +84,10 @@ static void netlbl_unlabel_acceptflg_set(u8 value,
84 struct audit_buffer *audit_buf; 84 struct audit_buffer *audit_buf;
85 u8 old_val; 85 u8 old_val;
86 86
87 rcu_read_lock();
88 old_val = netlabel_unlabel_acceptflg;
89 spin_lock(&netlabel_unlabel_acceptflg_lock); 87 spin_lock(&netlabel_unlabel_acceptflg_lock);
88 old_val = netlabel_unlabel_acceptflg;
90 netlabel_unlabel_acceptflg = value; 89 netlabel_unlabel_acceptflg = value;
91 spin_unlock(&netlabel_unlabel_acceptflg_lock); 90 spin_unlock(&netlabel_unlabel_acceptflg_lock);
92 rcu_read_unlock();
93 91
94 audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_ALLOW, 92 audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_ALLOW,
95 audit_info); 93 audit_info);
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index fd7bca4d5c20..c3fde9180f9d 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -166,7 +166,7 @@ bad_mirred:
166 return TC_ACT_SHOT; 166 return TC_ACT_SHOT;
167 } 167 }
168 168
169 skb2 = skb_clone(skb, GFP_ATOMIC); 169 skb2 = skb_act_clone(skb, GFP_ATOMIC);
170 if (skb2 == NULL) 170 if (skb2 == NULL)
171 goto bad_mirred; 171 goto bad_mirred;
172 if (m->tcfm_eaction != TCA_EGRESS_MIRROR && 172 if (m->tcfm_eaction != TCA_EGRESS_MIRROR &&
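[note] act_mirred switches from skb_clone() to the skb_act_clone() helper added to include/net/sch_generic.h in this same series (see the diffstat), so the clone used for mirror/redirect carries tc-action bookkeeping consistent with the new tc-actions-env-rules.txt. A sketch of what such a wrapper plausibly looks like; the tc_verd fix-up shown is an assumption, not a quote of the real helper:

#include <linux/skbuff.h>

/*
 * Assumed shape of a tc-action-aware clone wrapper: clone the skb as before,
 * then (assumption, placeholder only) reset the action-verdict bookkeeping on
 * the copy so it starts its own trip through the action chain.
 */
static inline struct sk_buff *demo_act_clone(struct sk_buff *skb, gfp_t gfp)
{
	struct sk_buff *n = skb_clone(skb, gfp);

#ifdef CONFIG_NET_CLS_ACT
	if (n)
		n->tc_verd = 0;		/* placeholder for the real fix-ups */
#endif
	return n;
}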
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 621113a109b2..c9dbc3afa99f 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -107,7 +107,7 @@ struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp)
107} 107}
108 108
109/* Free the shared key stucture */ 109/* Free the shared key stucture */
110void sctp_auth_shkey_free(struct sctp_shared_key *sh_key) 110static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key)
111{ 111{
112 BUG_ON(!list_empty(&sh_key->key_list)); 112 BUG_ON(!list_empty(&sh_key->key_list));
113 sctp_auth_key_put(sh_key->key); 113 sctp_auth_key_put(sh_key->key);
@@ -220,7 +220,7 @@ static struct sctp_auth_bytes *sctp_auth_make_key_vector(
220 220
221 221
222/* Make a key vector based on our local parameters */ 222/* Make a key vector based on our local parameters */
223struct sctp_auth_bytes *sctp_auth_make_local_vector( 223static struct sctp_auth_bytes *sctp_auth_make_local_vector(
224 const struct sctp_association *asoc, 224 const struct sctp_association *asoc,
225 gfp_t gfp) 225 gfp_t gfp)
226{ 226{
@@ -232,7 +232,7 @@ struct sctp_auth_bytes *sctp_auth_make_local_vector(
232} 232}
233 233
234/* Make a key vector based on peer's parameters */ 234/* Make a key vector based on peer's parameters */
235struct sctp_auth_bytes *sctp_auth_make_peer_vector( 235static struct sctp_auth_bytes *sctp_auth_make_peer_vector(
236 const struct sctp_association *asoc, 236 const struct sctp_association *asoc,
237 gfp_t gfp) 237 gfp_t gfp)
238{ 238{
diff --git a/net/sctp/crc32c.c b/net/sctp/crc32c.c
index 59cf7b06d216..181edabdb8ca 100644
--- a/net/sctp/crc32c.c
+++ b/net/sctp/crc32c.c
@@ -170,6 +170,7 @@ __u32 sctp_update_cksum(__u8 *buffer, __u16 length, __u32 crc32)
170 return crc32; 170 return crc32;
171} 171}
172 172
173#if 0
173__u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32) 174__u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32)
174{ 175{
175 __u32 i; 176 __u32 i;
@@ -186,6 +187,7 @@ __u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32)
186 187
187 return crc32; 188 return crc32;
188} 189}
190#endif /* 0 */
189 191
190__u32 sctp_end_cksum(__u32 crc32) 192__u32 sctp_end_cksum(__u32 crc32)
191{ 193{
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index fa45989a716a..0426388d351d 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -553,7 +553,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
553 if (copy > len) 553 if (copy > len)
554 copy = len; 554 copy = len;
555 555
556 sg_set_buf(&sg, skb->data + offset, copy); 556 sg_init_one(&sg, skb->data + offset, copy);
557 557
558 err = icv_update(desc, &sg, copy); 558 err = icv_update(desc, &sg, copy);
559 if (unlikely(err)) 559 if (unlikely(err))
@@ -576,8 +576,9 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
576 if (copy > len) 576 if (copy > len)
577 copy = len; 577 copy = len;
578 578
579 sg_init_table(&sg, 1);
579 sg_set_page(&sg, frag->page, copy, 580 sg_set_page(&sg, frag->page, copy,
580 frag->page_offset + offset-start); 581 frag->page_offset + offset-start);
581 582
582 err = icv_update(desc, &sg, copy); 583 err = icv_update(desc, &sg, copy);
583 if (unlikely(err)) 584 if (unlikely(err))
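[note] skb_icv_walk() gets the same scatterlist treatment: the single-entry list over linear data becomes sg_init_one(), which initialises and terminates the entry in one call, and the page-fragment case initialises its one-slot table before sg_set_page(). A minimal sketch of both single-entry forms; the wrapper is illustrative and in-kernel context is assumed:

#include <linux/scatterlist.h>

/*
 * Both single-entry forms from the hunks: sg_init_one() for linear data,
 * sg_init_table() followed by sg_set_page() for a page fragment.
 */
static void demo_single_sg(struct scatterlist *sg, void *buf, unsigned int len,
			   struct page *page, unsigned int plen,
			   unsigned int offset)
{
	sg_init_one(sg, buf, len);		/* linear data */

	sg_init_table(sg, 1);			/* page fragment */
	sg_set_page(sg, page, plen, offset);
}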