author     David Vrabel <david.vrabel@csr.com>   2009-02-02 12:52:39 -0500
committer  David Vrabel <david.vrabel@csr.com>   2009-02-02 12:52:39 -0500
commit     8f04915532485d81e7f6c580a396ea7b01094221 (patch)
tree       c5740e961a025f2fb6b520a2bc5937f19d4345ab /net
parent     8f5140a6a0b1a9aa79585b0008e88c5d266c5c1d (diff)
parent     45c82b5a770be66845687a7d027c8b52946d59af (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-upstream
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c | 4
-rw-r--r--  net/9p/client.c | 2
-rw-r--r--  net/Kconfig | 8
-rw-r--r--  net/core/dev.c | 15
-rw-r--r--  net/core/net_namespace.c | 2
-rw-r--r--  net/core/skbuff.c | 85
-rw-r--r--  net/ipv4/ipconfig.c | 6
-rw-r--r--  net/ipv4/tcp.c | 3
-rw-r--r--  net/ipv4/udp.c | 55
-rw-r--r--  net/ipv6/addrconf.c | 2
-rw-r--r--  net/ipv6/af_inet6.c | 7
-rw-r--r--  net/ipv6/icmp.c | 4
-rw-r--r--  net/ipv6/ip6_input.c | 2
-rw-r--r--  net/ipv6/ip6mr.c | 24
-rw-r--r--  net/ipv6/route.c | 2
-rw-r--r--  net/key/af_key.c | 1
-rw-r--r--  net/mac80211/mlme.c | 4
-rw-r--r--  net/mac80211/sta_info.h | 1
-rw-r--r--  net/mac80211/tx.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 3
-rw-r--r--  net/packet/af_packet.c | 9
-rw-r--r--  net/sctp/input.c | 13
-rw-r--r--  net/sctp/output.c | 7
-rw-r--r--  net/sctp/outqueue.c | 3
-rw-r--r--  net/sunrpc/Kconfig | 78
-rw-r--r--  net/wimax/debugfs.c | 11
-rw-r--r--  net/wimax/stack.c | 13
-rw-r--r--  net/wireless/reg.c | 142
-rw-r--r--  net/xfrm/xfrm_user.c | 11
29 files changed, 391 insertions, 132 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 6c1323940263..e9db889d6222 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -62,13 +62,13 @@ struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
         return vlan_dev_info(dev)->real_dev;
 }
-EXPORT_SYMBOL_GPL(vlan_dev_real_dev);
+EXPORT_SYMBOL(vlan_dev_real_dev);
 
 u16 vlan_dev_vlan_id(const struct net_device *dev)
 {
         return vlan_dev_info(dev)->vlan_id;
 }
-EXPORT_SYMBOL_GPL(vlan_dev_vlan_id);
+EXPORT_SYMBOL(vlan_dev_vlan_id);
 
 static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
                            unsigned int vlan_tci, struct sk_buff *skb)
diff --git a/net/9p/client.c b/net/9p/client.c
index 821f1ec0b2c3..1eb580c38fbb 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -618,7 +618,7 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt)
                 return ERR_PTR(-ENOMEM);
 
         ret = p9_idpool_get(clnt->fidpool);
-        if (fid->fid < 0) {
+        if (ret < 0) {
                 ret = -ENOSPC;
                 goto error;
         }
diff --git a/net/Kconfig b/net/Kconfig
index bf2776018f71..cdb8fdef6c4a 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -24,14 +24,6 @@ if NET
 
 menu "Networking options"
 
-config NET_NS
-        bool "Network namespace support"
-        default n
-        depends on EXPERIMENTAL && NAMESPACES
-        help
-          Allow user space to create what appear to be multiple instances
-          of the network stack.
-
 config COMPAT_NET_DEV_OPS
         def_bool y
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 8d675975d85b..5379b0c1190a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1534,7 +1534,19 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
         skb->mac_len = skb->network_header - skb->mac_header;
         __skb_pull(skb, skb->mac_len);
 
-        if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
+        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+                struct net_device *dev = skb->dev;
+                struct ethtool_drvinfo info = {};
+
+                if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
+                        dev->ethtool_ops->get_drvinfo(dev, &info);
+
+                WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
+                        "ip_summed=%d",
+                     info.driver, dev ? dev->features : 0L,
+                     skb->sk ? skb->sk->sk_route_caps : 0L,
+                     skb->len, skb->data_len, skb->ip_summed);
+
                 if (skb_header_cloned(skb) &&
                     (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
                         return ERR_PTR(err);
@@ -2524,6 +2536,7 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
 
         if (!pskb_may_pull(skb, ETH_HLEN)) {
                 napi_reuse_skb(napi, skb);
+                skb = NULL;
                 goto out;
         }
 
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 55cffad2f328..55151faaf90c 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -341,8 +341,8 @@ again:
         rv = register_pernet_operations(first_device, ops);
         if (rv < 0)
                 ida_remove(&net_generic_ids, *id);
-        mutex_unlock(&net_mutex);
 out:
+        mutex_unlock(&net_mutex);
         return rv;
 }
 EXPORT_SYMBOL_GPL(register_pernet_gen_subsys);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 65eac7739033..da74b844f4ea 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -73,17 +73,13 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
                                   struct pipe_buffer *buf)
 {
-        struct sk_buff *skb = (struct sk_buff *) buf->private;
-
-        kfree_skb(skb);
+        put_page(buf->page);
 }
 
 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
                               struct pipe_buffer *buf)
 {
-        struct sk_buff *skb = (struct sk_buff *) buf->private;
-
-        skb_get(skb);
+        get_page(buf->page);
 }
 
 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
@@ -1334,9 +1330,19 @@ fault:
  */
 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 {
-        struct sk_buff *skb = (struct sk_buff *) spd->partial[i].private;
+        put_page(spd->pages[i]);
+}
 
-        kfree_skb(skb);
+static inline struct page *linear_to_page(struct page *page, unsigned int len,
+                                          unsigned int offset)
+{
+        struct page *p = alloc_pages(GFP_KERNEL, 0);
+
+        if (!p)
+                return NULL;
+        memcpy(page_address(p) + offset, page_address(page) + offset, len);
+
+        return p;
 }
 
 /*
@@ -1344,16 +1350,23 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
  */
 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
                                 unsigned int len, unsigned int offset,
-                                struct sk_buff *skb)
+                                struct sk_buff *skb, int linear)
 {
         if (unlikely(spd->nr_pages == PIPE_BUFFERS))
                 return 1;
 
+        if (linear) {
+                page = linear_to_page(page, len, offset);
+                if (!page)
+                        return 1;
+        } else
+                get_page(page);
+
         spd->pages[spd->nr_pages] = page;
         spd->partial[spd->nr_pages].len = len;
         spd->partial[spd->nr_pages].offset = offset;
-        spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb);
         spd->nr_pages++;
+
         return 0;
 }
 
@@ -1369,7 +1382,7 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
 static inline int __splice_segment(struct page *page, unsigned int poff,
                                    unsigned int plen, unsigned int *off,
                                    unsigned int *len, struct sk_buff *skb,
-                                   struct splice_pipe_desc *spd)
+                                   struct splice_pipe_desc *spd, int linear)
 {
         if (!*len)
                 return 1;
@@ -1392,7 +1405,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
                 /* the linear region may spread across several pages */
                 flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
-                if (spd_fill_page(spd, page, flen, poff, skb))
+                if (spd_fill_page(spd, page, flen, poff, skb, linear))
                         return 1;
 
                 __segment_seek(&page, &poff, &plen, flen);
@@ -1419,7 +1432,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
         if (__splice_segment(virt_to_page(skb->data),
                              (unsigned long) skb->data & (PAGE_SIZE - 1),
                              skb_headlen(skb),
-                             offset, len, skb, spd))
+                             offset, len, skb, spd, 1))
                 return 1;
 
         /*
@@ -1429,7 +1442,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
                 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
                 if (__splice_segment(f->page, f->page_offset, f->size,
-                                     offset, len, skb, spd))
+                                     offset, len, skb, spd, 0))
                         return 1;
         }
 
@@ -1442,7 +1455,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
  * the frag list, if such a thing exists. We'd probably need to recurse to
  * handle that cleanly.
  */
-int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
                     struct pipe_inode_info *pipe, unsigned int tlen,
                     unsigned int flags)
 {
@@ -1455,16 +1468,6 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
                 .ops = &sock_pipe_buf_ops,
                 .spd_release = sock_spd_release,
         };
-        struct sk_buff *skb;
-
-        /*
-         * I'd love to avoid the clone here, but tcp_read_sock()
-         * ignores reference counts and unconditonally kills the sk_buff
-         * on return from the actor.
-         */
-        skb = skb_clone(__skb, GFP_KERNEL);
-        if (unlikely(!skb))
-                return -ENOMEM;
 
         /*
          * __skb_splice_bits() only fails if the output has no room left,
@@ -1488,15 +1491,9 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
         }
 
 done:
-        /*
-         * drop our reference to the clone, the pipe consumption will
-         * drop the rest.
-         */
-        kfree_skb(skb);
-
         if (spd.nr_pages) {
+                struct sock *sk = skb->sk;
                 int ret;
-                struct sock *sk = __skb->sk;
 
                 /*
                  * Drop the socket lock, otherwise we have reverse
@@ -2215,10 +2212,10 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
                 return 0;
 
 next_skb:
-        block_limit = skb_headlen(st->cur_skb);
+        block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
         if (abs_offset < block_limit) {
-                *data = st->cur_skb->data + abs_offset;
+                *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
                 return block_limit - abs_offset;
         }
 
@@ -2253,13 +2250,14 @@ next_skb:
                 st->frag_data = NULL;
         }
 
-        if (st->cur_skb->next) {
-                st->cur_skb = st->cur_skb->next;
+        if (st->root_skb == st->cur_skb &&
+            skb_shinfo(st->root_skb)->frag_list) {
+                st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
                 st->frag_idx = 0;
                 goto next_skb;
-        } else if (st->root_skb == st->cur_skb &&
-                   skb_shinfo(st->root_skb)->frag_list) {
-                st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
+        } else if (st->cur_skb->next) {
+                st->cur_skb = st->cur_skb->next;
+                st->frag_idx = 0;
                 goto next_skb;
         }
 
@@ -2588,8 +2586,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
         struct sk_buff *nskb;
         unsigned int headroom;
         unsigned int hlen = p->data - skb_mac_header(p);
+        unsigned int len = skb->len;
 
-        if (hlen + p->len + skb->len >= 65536)
+        if (hlen + p->len + len >= 65536)
                 return -E2BIG;
 
         if (skb_shinfo(p)->frag_list)
@@ -2651,9 +2650,9 @@ merge:
 
 done:
         NAPI_GRO_CB(p)->count++;
-        p->data_len += skb->len;
-        p->truesize += skb->len;
-        p->len += skb->len;
+        p->data_len += len;
+        p->truesize += len;
+        p->len += len;
 
         NAPI_GRO_CB(skb)->same_flow = 1;
         return 0;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 42a0f3dd3fd6..d722013c1cae 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1268,6 +1268,9 @@ __be32 __init root_nfs_parse_addr(char *name)
 static int __init ip_auto_config(void)
 {
         __be32 addr;
+#ifdef IPCONFIG_DYNAMIC
+        int retries = CONF_OPEN_RETRIES;
+#endif
 
 #ifdef CONFIG_PROC_FS
         proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
@@ -1304,9 +1307,6 @@ static int __init ip_auto_config(void)
 #endif
             ic_first_dev->next) {
 #ifdef IPCONFIG_DYNAMIC
-
-                int retries = CONF_OPEN_RETRIES;
-
                 if (ic_dynamic() < 0) {
                         ic_close_devs();
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0cd71b84e483..76b148bcb0dc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -524,7 +524,8 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
         struct tcp_splice_state *tss = rd_desc->arg.data;
         int ret;
 
-        ret = skb_splice_bits(skb, offset, tss->pipe, rd_desc->count, tss->flags);
+        ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
+                              tss->flags);
         if (ret > 0)
                 rd_desc->count -= ret;
         return ret;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cf5ab0581eba..b7faffe5c029 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -120,8 +120,11 @@ EXPORT_SYMBOL(sysctl_udp_wmem_min);
 atomic_t udp_memory_allocated;
 EXPORT_SYMBOL(udp_memory_allocated);
 
+#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE)
+
 static int udp_lib_lport_inuse(struct net *net, __u16 num,
                                const struct udp_hslot *hslot,
+                               unsigned long *bitmap,
                                struct sock *sk,
                                int (*saddr_comp)(const struct sock *sk1,
                                                  const struct sock *sk2))
@@ -132,12 +135,17 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
         sk_nulls_for_each(sk2, node, &hslot->head)
                 if (net_eq(sock_net(sk2), net) &&
                     sk2 != sk &&
-                    sk2->sk_hash == num &&
+                    (bitmap || sk2->sk_hash == num) &&
                     (!sk2->sk_reuse || !sk->sk_reuse) &&
                     (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
                      || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
-                    (*saddr_comp)(sk, sk2))
-                        return 1;
+                    (*saddr_comp)(sk, sk2)) {
+                        if (bitmap)
+                                __set_bit(sk2->sk_hash / UDP_HTABLE_SIZE,
+                                          bitmap);
+                        else
+                                return 1;
+                }
         return 0;
 }
 
@@ -160,32 +168,47 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
         if (!snum) {
                 int low, high, remaining;
                 unsigned rand;
-                unsigned short first;
+                unsigned short first, last;
+                DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
 
                 inet_get_local_port_range(&low, &high);
                 remaining = (high - low) + 1;
 
                 rand = net_random();
-                snum = first = rand % remaining + low;
-                rand |= 1;
-                for (;;) {
-                        hslot = &udptable->hash[udp_hashfn(net, snum)];
+                first = (((u64)rand * remaining) >> 32) + low;
+                /*
+                 * force rand to be an odd multiple of UDP_HTABLE_SIZE
+                 */
+                rand = (rand | 1) * UDP_HTABLE_SIZE;
+                for (last = first + UDP_HTABLE_SIZE; first != last; first++) {
+                        hslot = &udptable->hash[udp_hashfn(net, first)];
+                        bitmap_zero(bitmap, PORTS_PER_CHAIN);
                         spin_lock_bh(&hslot->lock);
-                        if (!udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
-                                break;
-                        spin_unlock_bh(&hslot->lock);
+                        udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
+                                            saddr_comp);
+
+                        snum = first;
+                        /*
+                         * Iterate on all possible values of snum for this hash.
+                         * Using steps of an odd multiple of UDP_HTABLE_SIZE
+                         * give us randomization and full range coverage.
+                         */
                         do {
-                                snum = snum + rand;
-                        } while (snum < low || snum > high);
-                        if (snum == first)
-                                goto fail;
+                                if (low <= snum && snum <= high &&
+                                    !test_bit(snum / UDP_HTABLE_SIZE, bitmap))
+                                        goto found;
+                                snum += rand;
+                        } while (snum != first);
+                        spin_unlock_bh(&hslot->lock);
                 }
+                goto fail;
         } else {
                 hslot = &udptable->hash[udp_hashfn(net, snum)];
                 spin_lock_bh(&hslot->lock);
-                if (udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
+                if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp))
                         goto fail_unlock;
         }
+found:
         inet_sk(sk)->num = snum;
         sk->sk_hash = snum;
         if (sk_unhashed(sk)) {
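A side note on the port-selection loop above: the in-tree comment relies on the fact that repeatedly adding an odd multiple of UDP_HTABLE_SIZE to a 16-bit port number walks through every port that hashes to the same slot exactly once before wrapping back to the start. The stand-alone user-space sketch below is not part of the commit; it only checks that coverage property, assuming UDP_HTABLE_SIZE is 128 as in this kernel version, with an arbitrary start port and odd factor.

/* Sketch (not kernel code): verify that stepping a 16-bit port by an odd
 * multiple of UDP_HTABLE_SIZE visits every port of one hash chain exactly
 * once before returning to the starting port.
 */
#include <stdio.h>
#include <string.h>

#define UDP_HTABLE_SIZE 128
#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE)

int main(void)
{
        unsigned short first = 32771;           /* arbitrary start port */
        unsigned short snum = first;
        unsigned int rand = (12345 | 1) * UDP_HTABLE_SIZE; /* odd multiple */
        char seen[PORTS_PER_CHAIN];
        unsigned int visited = 0;

        memset(seen, 0, sizeof(seen));
        do {
                /* every visited value stays in first's hash chain ... */
                if (snum % UDP_HTABLE_SIZE != first % UDP_HTABLE_SIZE)
                        return 1;
                /* ... and is never visited twice */
                if (seen[snum / UDP_HTABLE_SIZE]++)
                        return 1;
                visited++;
                snum += rand;                   /* wraps modulo 65536 */
        } while (snum != first);

        /* all 512 slots of the chain were covered */
        printf("visited %u of %d chain slots\n", visited, PORTS_PER_CHAIN);
        return visited == PORTS_PER_CHAIN ? 0 : 1;
}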
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e92ad8455c63..f9afb452249c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4250,7 +4250,7 @@ static struct addrconf_sysctl_table
                 .procname       =       "mc_forwarding",
                 .data           =       &ipv6_devconf.mc_forwarding,
                 .maxlen         =       sizeof(int),
-                .mode           =       0644,
+                .mode           =       0444,
                 .proc_handler   =       proc_dointvec,
         },
 #endif
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 94f74f5b0cbf..c802bc1658a8 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -797,6 +797,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
         unsigned int nlen;
         int flush = 1;
         int proto;
+        __wsum csum;
 
         if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
                 goto out;
@@ -808,6 +809,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
         rcu_read_lock();
         proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr);
+        iph = ipv6_hdr(skb);
         IPV6_GRO_CB(skb)->proto = proto;
         ops = rcu_dereference(inet6_protos[proto]);
         if (!ops || !ops->gro_receive)
@@ -839,8 +841,13 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
         NAPI_GRO_CB(skb)->flush |= flush;
 
+        csum = skb->csum;
+        skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
+
         pp = ops->gro_receive(head, skb);
 
+        skb->csum = csum;
+
 out_unlock:
         rcu_read_unlock();
 
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 4f433847d95f..36dff8807183 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -443,10 +443,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
                 if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
                         goto relookup_failed;
 
-                if (ip6_dst_lookup(sk, &dst2, &fl))
+                if (ip6_dst_lookup(sk, &dst2, &fl2))
                         goto relookup_failed;
 
-                err = xfrm_lookup(net, &dst2, &fl, sk, XFRM_LOOKUP_ICMP);
+                err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
                 switch (err) {
                 case 0:
                         dst_release(dst);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 936f48946e20..f171e8dbac91 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -255,6 +255,7 @@ int ip6_mc_input(struct sk_buff *skb)
          *      IPv6 multicast router mode is now supported ;)
          */
         if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
+            !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) &&
             likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
                 /*
                  * Okay, we try to forward - split and duplicate
@@ -316,7 +317,6 @@ int ip6_mc_input(struct sk_buff *skb)
                 }
 
                 if (skb2) {
-                        skb2->dev = skb2->dst->dev;
                         ip6_mr_input(skb2);
                 }
         }
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 3c51b2d827f4..228be551e9c1 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -48,6 +48,7 @@
 #include <linux/pim.h>
 #include <net/addrconf.h>
 #include <linux/netfilter_ipv6.h>
+#include <net/ip6_checksum.h>
 
 /* Big lock, protecting vif table, mrt cache and mroute socket state.
    Note that the changes are semaphored via rtnl_lock.
@@ -365,7 +366,9 @@ static int pim6_rcv(struct sk_buff *skb)
         pim = (struct pimreghdr *)skb_transport_header(skb);
         if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
             (pim->flags & PIM_NULL_REGISTER) ||
-            (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
+            (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+                             sizeof(*pim), IPPROTO_PIM,
+                             csum_partial((void *)pim, sizeof(*pim), 0)) &&
              csum_fold(skb_checksum(skb, 0, skb->len, 0))))
                 goto drop;
 
@@ -392,7 +395,7 @@ static int pim6_rcv(struct sk_buff *skb)
         skb_pull(skb, (u8 *)encap - skb->data);
         skb_reset_network_header(skb);
         skb->dev = reg_dev;
-        skb->protocol = htons(ETH_P_IP);
+        skb->protocol = htons(ETH_P_IPV6);
         skb->ip_summed = 0;
         skb->pkt_type = PACKET_HOST;
         dst_release(skb->dst);
@@ -481,6 +484,7 @@ static int mif6_delete(struct net *net, int vifi)
 {
         struct mif_device *v;
         struct net_device *dev;
+        struct inet6_dev *in6_dev;
         if (vifi < 0 || vifi >= net->ipv6.maxvif)
                 return -EADDRNOTAVAIL;
 
@@ -513,6 +517,10 @@ static int mif6_delete(struct net *net, int vifi)
 
         dev_set_allmulti(dev, -1);
 
+        in6_dev = __in6_dev_get(dev);
+        if (in6_dev)
+                in6_dev->cnf.mc_forwarding--;
+
         if (v->flags & MIFF_REGISTER)
                 unregister_netdevice(dev);
 
@@ -622,6 +630,7 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
         int vifi = vifc->mif6c_mifi;
         struct mif_device *v = &net->ipv6.vif6_table[vifi];
         struct net_device *dev;
+        struct inet6_dev *in6_dev;
         int err;
 
         /* Is vif busy ? */
@@ -662,6 +671,10 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
                 return -EINVAL;
         }
 
+        in6_dev = __in6_dev_get(dev);
+        if (in6_dev)
+                in6_dev->cnf.mc_forwarding++;
+
         /*
          *      Fill in the VIF structures
          */
@@ -838,8 +851,6 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
 
                 skb->dst = dst_clone(pkt->dst);
                 skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-                skb_pull(skb, sizeof(struct ipv6hdr));
         }
 
         if (net->ipv6.mroute6_sk == NULL) {
@@ -1222,8 +1233,10 @@ static int ip6mr_sk_init(struct sock *sk)
 
         rtnl_lock();
         write_lock_bh(&mrt_lock);
-        if (likely(net->ipv6.mroute6_sk == NULL))
+        if (likely(net->ipv6.mroute6_sk == NULL)) {
                 net->ipv6.mroute6_sk = sk;
+                net->ipv6.devconf_all->mc_forwarding++;
+        }
         else
                 err = -EADDRINUSE;
         write_unlock_bh(&mrt_lock);
@@ -1242,6 +1255,7 @@ int ip6mr_sk_done(struct sock *sk)
         if (sk == net->ipv6.mroute6_sk) {
                 write_lock_bh(&mrt_lock);
                 net->ipv6.mroute6_sk = NULL;
+                net->ipv6.devconf_all->mc_forwarding--;
                 write_unlock_bh(&mrt_lock);
 
                 mroute_clean_tables(net);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c4a59824ac2c..9c574235c905 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -794,7 +794,7 @@ void ip6_route_input(struct sk_buff *skb)
                 .proto = iph->nexthdr,
         };
 
-        if (rt6_need_strict(&iph->daddr))
+        if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
                 flags |= RT6_LOOKUP_F_IFACE;
 
         skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index f8bd8df5e257..7dcbde3ea7d9 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1285,6 +1285,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                                 ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1];
                         natt->encap_dport = n_port->sadb_x_nat_t_port_port;
                 }
+                memset(&natt->encap_oa, 0, sizeof(natt->encap_oa));
         }
 
         err = xfrm_init_state(x);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 5ba721b6a399..2b890af01ba4 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -620,8 +620,8 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
         if (use_short_slot != bss_conf->use_short_slot) {
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
                 if (net_ratelimit()) {
-                        printk(KERN_DEBUG "%s: switched to %s slot"
-                                " (BSSID=%s)\n",
+                        printk(KERN_DEBUG "%s: switched to %s slot time"
+                                " (BSSID=%pM)\n",
                                sdata->dev->name,
                                use_short_slot ? "short" : "long",
                                ifsta->bssid);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index dc2606d0ae77..e49a5b99cf10 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -195,7 +195,6 @@ struct sta_ampdu_mlme {
  * @tx_packets: number of RX/TX MSDUs
  * @tx_bytes: number of bytes transmitted to this STA
  * @tx_fragments: number of transmitted MPDUs
- * @last_txrate: description of the last used transmit rate
  * @tid_seq: per-TID sequence numbers for sending to this STA
  * @ampdu_mlme: A-MPDU state machine state
  * @timer_to_tid: identity mapping to ID timers
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index a4af3a124cce..4278e545638f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1307,8 +1307,10 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 if (is_multicast_ether_addr(hdr->addr3))
                         memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
                 else
-                        if (mesh_nexthop_lookup(skb, osdata))
-                                return 0;
+                        if (mesh_nexthop_lookup(skb, osdata)) {
+                                dev_put(odev);
+                                return 0;
+                        }
                 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
                         IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
                                                      fwded_frames);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 3dddec6d2f7e..c32a7e8e3a1b 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -831,13 +831,16 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
         if (!parse_nat_setup) {
 #ifdef CONFIG_MODULES
                 rcu_read_unlock();
+                spin_unlock_bh(&nf_conntrack_lock);
                 nfnl_unlock();
                 if (request_module("nf-nat-ipv4") < 0) {
                         nfnl_lock();
+                        spin_lock_bh(&nf_conntrack_lock);
                         rcu_read_lock();
                         return -EOPNOTSUPP;
                 }
                 nfnl_lock();
+                spin_lock_bh(&nf_conntrack_lock);
                 rcu_read_lock();
                 if (nfnetlink_parse_nat_setup_hook)
                         return -EAGAIN;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 5f94db2f3e9e..9454d4ae46df 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -77,6 +77,7 @@
 #include <linux/poll.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/mutex.h>
 
 #ifdef CONFIG_INET
 #include <net/inet_common.h>
@@ -175,6 +176,7 @@ struct packet_sock {
 #endif
         struct packet_type      prot_hook;
         spinlock_t              bind_lock;
+        struct mutex            pg_vec_lock;
         unsigned int            running:1,      /* prot_hook is attached*/
                                 auxdata:1,
                                 origdev:1;
@@ -1069,6 +1071,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
          */
 
         spin_lock_init(&po->bind_lock);
+        mutex_init(&po->pg_vec_lock);
         po->prot_hook.func = packet_rcv;
 
         if (sock->type == SOCK_PACKET)
@@ -1865,6 +1868,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
         synchronize_net();
 
         err = -EBUSY;
+        mutex_lock(&po->pg_vec_lock);
         if (closing || atomic_read(&po->mapped) == 0) {
                 err = 0;
 #define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
@@ -1886,6 +1890,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
                 if (atomic_read(&po->mapped))
                         printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
         }
+        mutex_unlock(&po->pg_vec_lock);
 
         spin_lock(&po->bind_lock);
         if (was_running && !po->running) {
@@ -1918,7 +1923,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
 
         size = vma->vm_end - vma->vm_start;
 
-        lock_sock(sk);
+        mutex_lock(&po->pg_vec_lock);
         if (po->pg_vec == NULL)
                 goto out;
         if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
@@ -1941,7 +1946,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
         err = 0;
 
 out:
-        release_sock(sk);
+        mutex_unlock(&po->pg_vec_lock);
         return err;
 }
 #endif
diff --git a/net/sctp/input.c b/net/sctp/input.c
index bf612d954d41..2e4a8646dbc3 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -249,6 +249,19 @@ int sctp_rcv(struct sk_buff *skb)
          */
         sctp_bh_lock_sock(sk);
 
+        if (sk != rcvr->sk) {
+                /* Our cached sk is different from the rcvr->sk. This is
+                 * because migrate()/accept() may have moved the association
+                 * to a new socket and released all the sockets. So now we
+                 * are holding a lock on the old socket while the user may
+                 * be doing something with the new socket. Switch our veiw
+                 * of the current sk.
+                 */
+                sctp_bh_unlock_sock(sk);
+                sk = rcvr->sk;
+                sctp_bh_lock_sock(sk);
+        }
+
         if (sock_owned_by_user(sk)) {
                 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
                 sctp_add_backlog(sk, skb);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index c3f417f7ec6e..73639355157e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -324,14 +324,16 @@ append:
         switch (chunk->chunk_hdr->type) {
             case SCTP_CID_DATA:
                 retval = sctp_packet_append_data(packet, chunk);
+                if (SCTP_XMIT_OK != retval)
+                        goto finish;
                 /* Disallow SACK bundling after DATA. */
                 packet->has_sack = 1;
                 /* Disallow AUTH bundling after DATA */
                 packet->has_auth = 1;
                 /* Let it be knows that packet has DATA in it */
                 packet->has_data = 1;
-                if (SCTP_XMIT_OK != retval)
-                        goto finish;
+                /* timestamp the chunk for rtx purposes */
+                chunk->sent_at = jiffies;
                 break;
             case SCTP_CID_COOKIE_ECHO:
                 packet->has_cookie_echo = 1;
@@ -470,7 +472,6 @@ int sctp_packet_transmit(struct sctp_packet *packet)
                         } else
                                 chunk->resent = 1;
 
-                        chunk->sent_at = jiffies;
                         has_data = 1;
                 }
 
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 247ebc95c1e5..bc411c896216 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -929,7 +929,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
         }
 
         /* Finally, transmit new packets.  */
-        start_timer = 0;
         while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
                 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
                  * stream identifier.
@@ -1028,7 +1027,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                 list_add_tail(&chunk->transmitted_list,
                               &transport->transmitted);
 
-                sctp_transport_reset_timers(transport, start_timer-1);
+                sctp_transport_reset_timers(transport, 0);
 
                 q->empty = 0;
 
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
new file mode 100644
index 000000000000..dcef600d0bf5
--- /dev/null
+++ b/net/sunrpc/Kconfig
@@ -0,0 +1,78 @@
+config SUNRPC
+	tristate
+
+config SUNRPC_GSS
+	tristate
+
+config SUNRPC_XPRT_RDMA
+	tristate
+	depends on SUNRPC && INFINIBAND && EXPERIMENTAL
+	default SUNRPC && INFINIBAND
+	help
+	  This option allows the NFS client and server to support
+	  an RDMA-enabled transport.
+
+	  To compile RPC client RDMA transport support as a module,
+	  choose M here: the module will be called xprtrdma.
+
+	  If unsure, say N.
+
+config SUNRPC_REGISTER_V4
+	bool "Register local RPC services via rpcbind v4 (EXPERIMENTAL)"
+	depends on SUNRPC && EXPERIMENTAL
+	default n
+	help
+	  Sun added support for registering RPC services at an IPv6
+	  address by creating two new versions of the rpcbind protocol
+	  (RFC 1833).
+
+	  This option enables support in the kernel RPC server for
+	  registering kernel RPC services via version 4 of the rpcbind
+	  protocol. If you enable this option, you must run a portmapper
+	  daemon that supports rpcbind protocol version 4.
+
+	  Serving NFS over IPv6 from knfsd (the kernel's NFS server)
+	  requires that you enable this option and use a portmapper that
+	  supports rpcbind version 4.
+
+	  If unsure, say N to get traditional behavior (register kernel
+	  RPC services using only rpcbind version 2). Distributions
+	  using the legacy Linux portmapper daemon must say N here.
+
+config RPCSEC_GSS_KRB5
+	tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)"
+	depends on SUNRPC && EXPERIMENTAL
+	select SUNRPC_GSS
+	select CRYPTO
+	select CRYPTO_MD5
+	select CRYPTO_DES
+	select CRYPTO_CBC
+	help
+	  Choose Y here to enable Secure RPC using the Kerberos version 5
+	  GSS-API mechanism (RFC 1964).
+
+	  Secure RPC calls with Kerberos require an auxiliary user-space
+	  daemon which may be found in the Linux nfs-utils package
+	  available from http://linux-nfs.org/. In addition, user-space
+	  Kerberos support should be installed.
+
+	  If unsure, say N.
+
+config RPCSEC_GSS_SPKM3
+	tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)"
+	depends on SUNRPC && EXPERIMENTAL
+	select SUNRPC_GSS
+	select CRYPTO
+	select CRYPTO_MD5
+	select CRYPTO_DES
+	select CRYPTO_CAST5
+	select CRYPTO_CBC
+	help
+	  Choose Y here to enable Secure RPC using the SPKM3 public key
+	  GSS-API mechansim (RFC 2025).
+
+	  Secure RPC calls with SPKM3 require an auxiliary userspace
+	  daemon which may be found in the Linux nfs-utils package
+	  available from http://linux-nfs.org/.
+
+	  If unsure, say N.
diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c
index 87cf4430079c..94d216a46407 100644
--- a/net/wimax/debugfs.c
+++ b/net/wimax/debugfs.c
@@ -28,17 +28,6 @@
 #include "debug-levels.h"
 
 
-/* Debug framework control of debug levels */
-struct d_level D_LEVEL[] = {
-        D_SUBMODULE_DEFINE(debugfs),
-        D_SUBMODULE_DEFINE(id_table),
-        D_SUBMODULE_DEFINE(op_msg),
-        D_SUBMODULE_DEFINE(op_reset),
-        D_SUBMODULE_DEFINE(op_rfkill),
-        D_SUBMODULE_DEFINE(stack),
-};
-size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
-
 #define __debugfs_register(prefix, name, parent)                        \
 do {                                                                    \
         result = d_level_register_debugfs(prefix, name, parent);       \
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index d4da92f8981a..3869c0327882 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -516,6 +516,19 @@ void wimax_dev_rm(struct wimax_dev *wimax_dev)
 }
 EXPORT_SYMBOL_GPL(wimax_dev_rm);
 
+
+/* Debug framework control of debug levels */
+struct d_level D_LEVEL[] = {
+        D_SUBMODULE_DEFINE(debugfs),
+        D_SUBMODULE_DEFINE(id_table),
+        D_SUBMODULE_DEFINE(op_msg),
+        D_SUBMODULE_DEFINE(op_reset),
+        D_SUBMODULE_DEFINE(op_rfkill),
+        D_SUBMODULE_DEFINE(stack),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+
+
 struct genl_family wimax_gnl_family = {
         .id = GENL_ID_GENERATE,
         .name = "WiMAX",
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 4f877535e666..85c9034c59b2 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -421,6 +421,31 @@ static u32 freq_max_bandwidth(const struct ieee80211_freq_range *freq_range,
         return 0;
 }
 
+/**
+ * freq_in_rule_band - tells us if a frequency is in a frequency band
+ * @freq_range: frequency rule we want to query
+ * @freq_khz: frequency we are inquiring about
+ *
+ * This lets us know if a specific frequency rule is or is not relevant to
+ * a specific frequency's band. Bands are device specific and artificial
+ * definitions (the "2.4 GHz band" and the "5 GHz band"), however it is
+ * safe for now to assume that a frequency rule should not be part of a
+ * frequency's band if the start freq or end freq are off by more than 2 GHz.
+ * This resolution can be lowered and should be considered as we add
+ * regulatory rule support for other "bands".
+ **/
+static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
+                              u32 freq_khz)
+{
+#define ONE_GHZ_IN_KHZ 1000000
+        if (abs(freq_khz - freq_range->start_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
+                return true;
+        if (abs(freq_khz - freq_range->end_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
+                return true;
+        return false;
+#undef ONE_GHZ_IN_KHZ
+}
+
 /* Converts a country IE to a regulatory domain. A regulatory domain
  * structure has a lot of information which the IE doesn't yet have,
  * so for the other values we use upper max values as we will intersect
@@ -473,6 +498,7 @@ static struct ieee80211_regdomain *country_ie_2_rd(
          * calculate the number of reg rules we will need. We will need one
          * for each channel subband */
         while (country_ie_len >= 3) {
+                int end_channel = 0;
                 struct ieee80211_country_ie_triplet *triplet =
                         (struct ieee80211_country_ie_triplet *) country_ie;
                 int cur_sub_max_channel = 0, cur_channel = 0;
@@ -484,9 +510,25 @@ static struct ieee80211_regdomain *country_ie_2_rd(
                         continue;
                 }
 
+                /* 2 GHz */
+                if (triplet->chans.first_channel <= 14)
+                        end_channel = triplet->chans.first_channel +
+                                triplet->chans.num_channels;
+                else
+                        /*
+                         * 5 GHz -- For example in country IEs if the first
+                         * channel given is 36 and the number of channels is 4
+                         * then the individual channel numbers defined for the
+                         * 5 GHz PHY by these parameters are: 36, 40, 44, and 48
+                         * and not 36, 37, 38, 39.
+                         *
+                         * See: http://tinyurl.com/11d-clarification
+                         */
+                        end_channel = triplet->chans.first_channel +
+                                (4 * (triplet->chans.num_channels - 1));
+
                 cur_channel = triplet->chans.first_channel;
-                cur_sub_max_channel = ieee80211_channel_to_frequency(
-                        cur_channel + triplet->chans.num_channels);
+                cur_sub_max_channel = end_channel;
 
                 /* Basic sanity check */
                 if (cur_sub_max_channel < cur_channel)
@@ -538,6 +580,7 @@ static struct ieee80211_regdomain *country_ie_2_rd( | |||
538 | 580 | ||
539 | /* This time around we fill in the rd */ | 581 | /* This time around we fill in the rd */ |
540 | while (country_ie_len >= 3) { | 582 | while (country_ie_len >= 3) { |
583 | int end_channel = 0; | ||
541 | struct ieee80211_country_ie_triplet *triplet = | 584 | struct ieee80211_country_ie_triplet *triplet = |
542 | (struct ieee80211_country_ie_triplet *) country_ie; | 585 | (struct ieee80211_country_ie_triplet *) country_ie; |
543 | struct ieee80211_reg_rule *reg_rule = NULL; | 586 | struct ieee80211_reg_rule *reg_rule = NULL; |
@@ -559,6 +602,14 @@ static struct ieee80211_regdomain *country_ie_2_rd( | |||
559 | 602 | ||
560 | reg_rule->flags = flags; | 603 | reg_rule->flags = flags; |
561 | 604 | ||
605 | /* 2 GHz */ | ||
606 | if (triplet->chans.first_channel <= 14) | ||
607 | end_channel = triplet->chans.first_channel + | ||
608 | triplet->chans.num_channels; | ||
609 | else | ||
610 | end_channel = triplet->chans.first_channel + | ||
611 | (4 * (triplet->chans.num_channels - 1)); | ||
612 | |||
562 | /* The +10 is since the regulatory domain expects | 613 | /* The +10 is since the regulatory domain expects |
563 | * the actual band edge, not the center of freq for | 614 | * the actual band edge, not the center of freq for |
564 | * its start and end freqs, assuming 20 MHz bandwidth on | 615 | * its start and end freqs, assuming 20 MHz bandwidth on |
@@ -568,8 +619,7 @@ static struct ieee80211_regdomain *country_ie_2_rd( | |||
568 | triplet->chans.first_channel) - 10); | 619 | triplet->chans.first_channel) - 10); |
569 | freq_range->end_freq_khz = | 620 | freq_range->end_freq_khz = |
570 | MHZ_TO_KHZ(ieee80211_channel_to_frequency( | 621 | MHZ_TO_KHZ(ieee80211_channel_to_frequency( |
571 | triplet->chans.first_channel + | 622 | end_channel) + 10); |
572 | triplet->chans.num_channels) + 10); | ||
573 | 623 | ||
574 | /* Large arbitrary values, we intersect later */ | 624 | /* Large arbitrary values, we intersect later */ |
575 | /* Increment this if we ever support >= 40 MHz channels | 625 | /* Increment this if we ever support >= 40 MHz channels |
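A worked instance of the band-edge conversion above (illustrative numbers only; ieee80211_channel_to_frequency() maps 5 GHz channel n to 5000 + 5 * n MHz):

    /*
     * Triplet (first_channel = 36, num_channels = 4), 5 GHz:
     *
     *   end_channel    = 36 + 4 * (4 - 1)      = 48
     *   start_freq_khz = MHZ_TO_KHZ(5180 - 10) = 5170000
     *   end_freq_khz   = MHZ_TO_KHZ(5240 + 10) = 5250000
     *
     * i.e. the resulting rule spans 5170-5250 MHz, the outer edges of
     * four 20 MHz channels centered on 5180, 5200, 5220 and 5240 MHz.
     */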
@@ -748,12 +798,23 @@ static u32 map_regdom_flags(u32 rd_flags) | |||
748 | * this value to the maximum allowed bandwidth. | 798 | * this value to the maximum allowed bandwidth. |
749 | * @reg_rule: the regulatory rule which we have for this frequency | 799 | * @reg_rule: the regulatory rule which we have for this frequency |
750 | * | 800 | * |
751 | * Use this function to get the regulatory rule for a specific frequency. | 801 | * Use this function to get the regulatory rule for a specific frequency on |
802 | * a given wireless device. If the device has a specific regulatory domain | ||
803 | * it wants to follow, we respect that unless a country IE has been received | ||
804 | * and processed already. | ||
805 | * | ||
806 | * Returns 0 if it was able to find a valid regulatory rule which does | ||
807 | * apply to the given center_freq; otherwise it returns non-zero. It will | ||
808 | * also return -ERANGE if we determine the given center_freq does not even have | ||
809 | * a regulatory rule for a frequency range in the center_freq's band. See | ||
810 | * freq_in_rule_band() for our current definition of a band -- this is purely | ||
811 | * subjective and right now it's 802.11 specific. | ||
752 | */ | 812 | */ |
753 | static int freq_reg_info(u32 center_freq, u32 *bandwidth, | 813 | static int freq_reg_info(u32 center_freq, u32 *bandwidth, |
754 | const struct ieee80211_reg_rule **reg_rule) | 814 | const struct ieee80211_reg_rule **reg_rule) |
755 | { | 815 | { |
756 | int i; | 816 | int i; |
817 | bool band_rule_found = false; | ||
757 | u32 max_bandwidth = 0; | 818 | u32 max_bandwidth = 0; |
758 | 819 | ||
759 | if (!cfg80211_regdomain) | 820 | if (!cfg80211_regdomain) |
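To make the kerneldoc above concrete, a hypothetical caller might look like this (a sketch only -- freq_reg_info() is static to reg.c, so its real callers are the handlers below; the error handling simply mirrors the documented return values):

    const struct ieee80211_reg_rule *reg_rule = NULL;
    u32 max_bandwidth = 0;  /* 0: no bandwidth requirement of our own */
    int r;

    /* Which rule, if any, governs 5180 MHz (channel 36)? */
    r = freq_reg_info(MHZ_TO_KHZ(5180), &max_bandwidth, &reg_rule);
    if (r == -ERANGE) {
            /* No rule at all in the band containing 5180 MHz. */
    } else if (r) {
            /* The band is covered, but no rule matched this frequency. */
    } else {
            /* Found: max_bandwidth and reg_rule->power_rule now apply. */
    }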
@@ -767,7 +828,15 @@ static int freq_reg_info(u32 center_freq, u32 *bandwidth, | |||
767 | rr = &cfg80211_regdomain->reg_rules[i]; | 828 | rr = &cfg80211_regdomain->reg_rules[i]; |
768 | fr = &rr->freq_range; | 829 | fr = &rr->freq_range; |
769 | pr = &rr->power_rule; | 830 | pr = &rr->power_rule; |
831 | |||
832 | /* We only need to know if one frequency rule was | ||
833 | * in center_freq's band, that's enough, so let's | ||
834 | * not overwrite it once found */ | ||
835 | if (!band_rule_found) | ||
836 | band_rule_found = freq_in_rule_band(fr, center_freq); | ||
837 | |||
770 | max_bandwidth = freq_max_bandwidth(fr, center_freq); | 838 | max_bandwidth = freq_max_bandwidth(fr, center_freq); |
839 | |||
771 | if (max_bandwidth && *bandwidth <= max_bandwidth) { | 840 | if (max_bandwidth && *bandwidth <= max_bandwidth) { |
772 | *reg_rule = rr; | 841 | *reg_rule = rr; |
773 | *bandwidth = max_bandwidth; | 842 | *bandwidth = max_bandwidth; |
@@ -775,23 +844,64 @@ static int freq_reg_info(u32 center_freq, u32 *bandwidth, | |||
775 | } | 844 | } |
776 | } | 845 | } |
777 | 846 | ||
847 | if (!band_rule_found) | ||
848 | return -ERANGE; | ||
849 | |||
778 | return !max_bandwidth; | 850 | return !max_bandwidth; |
779 | } | 851 | } |
780 | 852 | ||
781 | static void handle_channel(struct ieee80211_channel *chan) | 853 | static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band, |
854 | unsigned int chan_idx) | ||
782 | { | 855 | { |
783 | int r; | 856 | int r; |
784 | u32 flags = chan->orig_flags; | 857 | u32 flags; |
785 | u32 max_bandwidth = 0; | 858 | u32 max_bandwidth = 0; |
786 | const struct ieee80211_reg_rule *reg_rule = NULL; | 859 | const struct ieee80211_reg_rule *reg_rule = NULL; |
787 | const struct ieee80211_power_rule *power_rule = NULL; | 860 | const struct ieee80211_power_rule *power_rule = NULL; |
861 | struct ieee80211_supported_band *sband; | ||
862 | struct ieee80211_channel *chan; | ||
863 | |||
864 | sband = wiphy->bands[band]; | ||
865 | BUG_ON(chan_idx >= sband->n_channels); | ||
866 | chan = &sband->channels[chan_idx]; | ||
867 | |||
868 | flags = chan->orig_flags; | ||
788 | 869 | ||
789 | r = freq_reg_info(MHZ_TO_KHZ(chan->center_freq), | 870 | r = freq_reg_info(MHZ_TO_KHZ(chan->center_freq), |
790 | &max_bandwidth, ®_rule); | 871 | &max_bandwidth, ®_rule); |
791 | 872 | ||
792 | if (r) { | 873 | if (r) { |
793 | flags |= IEEE80211_CHAN_DISABLED; | 874 | /* This means no regulatory rule was found in the country IE |
794 | chan->flags = flags; | 875 | * with a frequency range on the center_freq's band. Since |
876 | * IEEE-802.11 allows for a country IE to have a subset of the | ||
877 | * regulatory information provided in a country, we do not | ||
878 | * disable the channel unless at least one reg rule was | ||
879 | * found on the center_freq's band. For details see this | ||
880 | * clarification: | ||
881 | * | ||
882 | * http://tinyurl.com/11d-clarification | ||
883 | */ | ||
884 | if (r == -ERANGE && | ||
885 | last_request->initiator == REGDOM_SET_BY_COUNTRY_IE) { | ||
886 | #ifdef CONFIG_CFG80211_REG_DEBUG | ||
887 | printk(KERN_DEBUG "cfg80211: Leaving channel %d MHz " | ||
888 | "intact on %s - no rule found in band on " | ||
889 | "Country IE\n", | ||
890 | chan->center_freq, wiphy_name(wiphy)); | ||
891 | #endif | ||
892 | } else { | ||
893 | /* In this case we know the country IE has at least one reg rule | ||
894 | * for the band, so we respect its band definitions */ | ||
895 | #ifdef CONFIG_CFG80211_REG_DEBUG | ||
896 | if (last_request->initiator == REGDOM_SET_BY_COUNTRY_IE) | ||
897 | printk(KERN_DEBUG "cfg80211: Disabling " | ||
898 | "channel %d MHz on %s due to " | ||
899 | "Country IE\n", | ||
900 | chan->center_freq, wiphy_name(wiphy)); | ||
901 | #endif | ||
902 | flags |= IEEE80211_CHAN_DISABLED; | ||
903 | chan->flags = flags; | ||
904 | } | ||
795 | return; | 905 | return; |
796 | } | 906 | } |
797 | 907 | ||
@@ -808,12 +918,16 @@ static void handle_channel(struct ieee80211_channel *chan) | |||
808 | chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); | 918 | chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); |
809 | } | 919 | } |
810 | 920 | ||
811 | static void handle_band(struct ieee80211_supported_band *sband) | 921 | static void handle_band(struct wiphy *wiphy, enum ieee80211_band band) |
812 | { | 922 | { |
813 | int i; | 923 | unsigned int i; |
924 | struct ieee80211_supported_band *sband; | ||
925 | |||
926 | BUG_ON(!wiphy->bands[band]); | ||
927 | sband = wiphy->bands[band]; | ||
814 | 928 | ||
815 | for (i = 0; i < sband->n_channels; i++) | 929 | for (i = 0; i < sband->n_channels; i++) |
816 | handle_channel(&sband->channels[i]); | 930 | handle_channel(wiphy, band, i); |
817 | } | 931 | } |
818 | 932 | ||
819 | static bool ignore_reg_update(struct wiphy *wiphy, enum reg_set_by setby) | 933 | static bool ignore_reg_update(struct wiphy *wiphy, enum reg_set_by setby) |
@@ -840,7 +954,7 @@ void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby) | |||
840 | enum ieee80211_band band; | 954 | enum ieee80211_band band; |
841 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | 955 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { |
842 | if (wiphy->bands[band]) | 956 | if (wiphy->bands[band]) |
843 | handle_band(wiphy->bands[band]); | 957 | handle_band(wiphy, band); |
844 | if (wiphy->reg_notifier) | 958 | if (wiphy->reg_notifier) |
845 | wiphy->reg_notifier(wiphy, setby); | 959 | wiphy->reg_notifier(wiphy, setby); |
846 | } | 960 | } |
@@ -1170,7 +1284,7 @@ static void reg_country_ie_process_debug( | |||
1170 | if (intersected_rd) { | 1284 | if (intersected_rd) { |
1171 | printk(KERN_DEBUG "cfg80211: We intersect both of these " | 1285 | printk(KERN_DEBUG "cfg80211: We intersect both of these " |
1172 | "and get:\n"); | 1286 | "and get:\n"); |
1173 | print_regdomain_info(rd); | 1287 | print_regdomain_info(intersected_rd); |
1174 | return; | 1288 | return; |
1175 | } | 1289 | } |
1176 | printk(KERN_DEBUG "cfg80211: Intersection between both failed\n"); | 1290 | printk(KERN_DEBUG "cfg80211: Intersection between both failed\n"); |
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 7877e7975dae..b95a2d64eb59 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -1914,17 +1914,10 @@ static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type, | |||
1914 | } | 1914 | } |
1915 | #endif | 1915 | #endif |
1916 | 1916 | ||
1917 | /* For the xfrm_usersa_info cases we have to work around some 32-bit vs. | ||
1918 | * 64-bit compatability issues. On 32-bit the structure is 220 bytes, but | ||
1919 | * for 64-bit it gets padded out to 224 bytes. Those bytes are just | ||
1920 | * padding and don't have any content we care about. Therefore as long | ||
1921 | * as we have enough bytes for the content we can make both cases work. | ||
1922 | */ | ||
1923 | |||
1924 | #define XMSGSIZE(type) sizeof(struct type) | 1917 | #define XMSGSIZE(type) sizeof(struct type) |
1925 | 1918 | ||
1926 | static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { | 1919 | static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { |
1927 | [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = 220, /* see above */ | 1920 | [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), |
1928 | [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), | 1921 | [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), |
1929 | [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), | 1922 | [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), |
1930 | [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), | 1923 | [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), |
@@ -1934,7 +1927,7 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { | |||
1934 | [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire), | 1927 | [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire), |
1935 | [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire), | 1928 | [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire), |
1936 | [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), | 1929 | [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), |
1937 | [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = 220, /* see above */ | 1930 | [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), |
1938 | [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire), | 1931 | [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire), |
1939 | [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush), | 1932 | [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush), |
1940 | [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0, | 1933 | [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0, |