author     Takashi Iwai <tiwai@suse.de>   2009-02-09 11:19:21 -0500
committer  Takashi Iwai <tiwai@suse.de>   2009-02-09 11:19:21 -0500
commit     2a074f4a54d11ec0ce16b35e9a88feaa2fd886aa (patch)
tree       b81ab05dac66fa0cccf44896d2d86be7ba52aa49 /net
parent     23c7b521c250b261dd97a7a06d5a2e74b56233d5 (diff)
parent     8bd4bb7a35e8ebb015a531218614c48e10a3c4ee (diff)
Merge branch 'topic/quirk-cleanup' into topic/hda
Diffstat (limited to 'net')
39 files changed, 478 insertions, 199 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 6c1323940263..e9db889d6222 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -62,13 +62,13 @@ struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
 	return vlan_dev_info(dev)->real_dev;
 }
-EXPORT_SYMBOL_GPL(vlan_dev_real_dev);
+EXPORT_SYMBOL(vlan_dev_real_dev);
 
 u16 vlan_dev_vlan_id(const struct net_device *dev)
 {
 	return vlan_dev_info(dev)->vlan_id;
 }
-EXPORT_SYMBOL_GPL(vlan_dev_vlan_id);
+EXPORT_SYMBOL(vlan_dev_vlan_id);
 
 static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 			   unsigned int vlan_tci, struct sk_buff *skb)
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index 0663f99e977a..7ed75c7bd5d1 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -23,7 +23,7 @@ config NET_9P_VIRTIO
 	  guest partitions and a host partition.
 
 config NET_9P_RDMA
-	depends on INET && INFINIBAND && EXPERIMENTAL
+	depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL
 	tristate "9P RDMA Transport (Experimental)"
 	help
 	  This builds support for an RDMA transport.
diff --git a/net/9p/client.c b/net/9p/client.c
index 821f1ec0b2c3..1eb580c38fbb 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -618,7 +618,7 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt)
 		return ERR_PTR(-ENOMEM);
 
 	ret = p9_idpool_get(clnt->fidpool);
-	if (fid->fid < 0) {
+	if (ret < 0) {
 		ret = -ENOSPC;
 		goto error;
 	}
diff --git a/net/Kconfig b/net/Kconfig
index bf2776018f71..cdb8fdef6c4a 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -24,14 +24,6 @@ if NET
 
 menu "Networking options"
 
-config NET_NS
-	bool "Network namespace support"
-	default n
-	depends on EXPERIMENTAL && NAMESPACES
-	help
-	  Allow user space to create what appear to be multiple instances
-	  of the network stack.
-
 config COMPAT_NET_DEV_OPS
 	def_bool y
 
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index a65e43a17fbb..cf754ace0b75 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -58,11 +58,11 @@ static struct ctl_table_header *brnf_sysctl_header;
 static int brnf_call_iptables __read_mostly = 1;
 static int brnf_call_ip6tables __read_mostly = 1;
 static int brnf_call_arptables __read_mostly = 1;
-static int brnf_filter_vlan_tagged __read_mostly = 1;
-static int brnf_filter_pppoe_tagged __read_mostly = 1;
+static int brnf_filter_vlan_tagged __read_mostly = 0;
+static int brnf_filter_pppoe_tagged __read_mostly = 0;
 #else
-#define brnf_filter_vlan_tagged 1
-#define brnf_filter_pppoe_tagged 1
+#define brnf_filter_vlan_tagged 0
+#define brnf_filter_pppoe_tagged 0
 #endif
 
 static inline __be16 vlan_proto(const struct sk_buff *skb)
@@ -686,8 +686,11 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
 	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
 	    IS_PPPOE_IP(skb))
 		pf = PF_INET;
-	else
+	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
+		 IS_PPPOE_IPV6(skb))
 		pf = PF_INET6;
+	else
+		return NF_ACCEPT;
 
 	nf_bridge_pull_encap_header(skb);
 
@@ -828,8 +831,11 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
 	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
 	    IS_PPPOE_IP(skb))
 		pf = PF_INET;
-	else
+	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
+		 IS_PPPOE_IPV6(skb))
 		pf = PF_INET6;
+	else
+		return NF_ACCEPT;
 
 #ifdef CONFIG_NETFILTER_DEBUG
 	if (skb->dst == NULL) {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 8a8743d7d6e7..820252aee81f 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -79,7 +79,7 @@ static inline int ebt_do_match (struct ebt_entry_match *m,
 {
 	par->match = m->u.match;
 	par->matchinfo = m->data;
-	return m->u.match->match(skb, par);
+	return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
 }
 
 static inline int ebt_dev_check(char *entry, const struct net_device *device)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 1649c8ab2c2f..b7c7d4651136 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -347,51 +347,54 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
 	struct bcm_op *op = (struct bcm_op *)data;
 	struct bcm_msg_head msg_head;
 
-	/* create notification to user */
-	msg_head.opcode = TX_EXPIRED;
-	msg_head.flags = op->flags;
-	msg_head.count = op->count;
-	msg_head.ival1 = op->ival1;
-	msg_head.ival2 = op->ival2;
-	msg_head.can_id = op->can_id;
-	msg_head.nframes = 0;
-
-	bcm_send_to_user(op, &msg_head, NULL, 0);
-}
-
-/*
- * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions
- */
-static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
-{
-	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
-	enum hrtimer_restart ret = HRTIMER_NORESTART;
-
 	if (op->kt_ival1.tv64 && (op->count > 0)) {
 
 		op->count--;
-		if (!op->count && (op->flags & TX_COUNTEVT))
-			tasklet_schedule(&op->tsklet);
+		if (!op->count && (op->flags & TX_COUNTEVT)) {
+
+			/* create notification to user */
+			msg_head.opcode = TX_EXPIRED;
+			msg_head.flags = op->flags;
+			msg_head.count = op->count;
+			msg_head.ival1 = op->ival1;
+			msg_head.ival2 = op->ival2;
+			msg_head.can_id = op->can_id;
+			msg_head.nframes = 0;
+
+			bcm_send_to_user(op, &msg_head, NULL, 0);
+		}
 	}
 
 	if (op->kt_ival1.tv64 && (op->count > 0)) {
 
 		/* send (next) frame */
 		bcm_can_tx(op);
-		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival1);
-		ret = HRTIMER_RESTART;
+		hrtimer_start(&op->timer,
+			      ktime_add(ktime_get(), op->kt_ival1),
+			      HRTIMER_MODE_ABS);
 
 	} else {
 		if (op->kt_ival2.tv64) {
 
 			/* send (next) frame */
 			bcm_can_tx(op);
-			hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
-			ret = HRTIMER_RESTART;
+			hrtimer_start(&op->timer,
+				      ktime_add(ktime_get(), op->kt_ival2),
+				      HRTIMER_MODE_ABS);
 		}
 	}
+}
 
-	return ret;
+/*
+ * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions
+ */
+static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
+{
+	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+
+	tasklet_schedule(&op->tsklet);
+
+	return HRTIMER_NORESTART;
 }
 
 /*
diff --git a/net/core/dev.c b/net/core/dev.c
index b715a55cccc4..5379b0c1190a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1534,7 +1534,19 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
 	skb->mac_len = skb->network_header - skb->mac_header;
 	__skb_pull(skb, skb->mac_len);
 
-	if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
+	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+		struct net_device *dev = skb->dev;
+		struct ethtool_drvinfo info = {};
+
+		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
+			dev->ethtool_ops->get_drvinfo(dev, &info);
+
+		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
+			"ip_summed=%d",
+		     info.driver, dev ? dev->features : 0L,
+		     skb->sk ? skb->sk->sk_route_caps : 0L,
+		     skb->len, skb->data_len, skb->ip_summed);
+
 		if (skb_header_cloned(skb) &&
 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
 			return ERR_PTR(err);
@@ -2392,6 +2404,9 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
 
+	if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
+		goto normal;
+
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, head, list) {
 		struct sk_buff *p;
@@ -2488,12 +2503,6 @@ EXPORT_SYMBOL(napi_gro_receive);
 
 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 {
-	skb_shinfo(skb)->nr_frags = 0;
-
-	skb->len -= skb->data_len;
-	skb->truesize -= skb->data_len;
-	skb->data_len = 0;
-
 	__skb_pull(skb, skb_headlen(skb));
 	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
 
@@ -2527,6 +2536,7 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
 
 	if (!pskb_may_pull(skb, ETH_HLEN)) {
 		napi_reuse_skb(napi, skb);
+		skb = NULL;
 		goto out;
 	}
 
@@ -4434,6 +4444,45 @@ err_uninit:
 }
 
 /**
+ * init_dummy_netdev - init a dummy network device for NAPI
+ * @dev: device to init
+ *
+ * This takes a network device structure and initialize the minimum
+ * amount of fields so it can be used to schedule NAPI polls without
+ * registering a full blown interface. This is to be used by drivers
+ * that need to tie several hardware interfaces to a single NAPI
+ * poll scheduler due to HW limitations.
+ */
+int init_dummy_netdev(struct net_device *dev)
+{
+	/* Clear everything. Note we don't initialize spinlocks
+	 * are they aren't supposed to be taken by any of the
+	 * NAPI code and this dummy netdev is supposed to be
+	 * only ever used for NAPI polls
+	 */
+	memset(dev, 0, sizeof(struct net_device));
+
+	/* make sure we BUG if trying to hit standard
+	 * register/unregister code path
+	 */
+	dev->reg_state = NETREG_DUMMY;
+
+	/* initialize the ref count */
+	atomic_set(&dev->refcnt, 1);
+
+	/* NAPI wants this */
+	INIT_LIST_HEAD(&dev->napi_list);
+
+	/* a dummy interface is started by default */
+	set_bit(__LINK_STATE_PRESENT, &dev->state);
+	set_bit(__LINK_STATE_START, &dev->state);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(init_dummy_netdev);
+
+
+/**
  * register_netdev - register a network device
  * @dev: device to register
  *
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 55cffad2f328..55151faaf90c 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -341,8 +341,8 @@ again:
 	rv = register_pernet_operations(first_device, ops);
 	if (rv < 0)
 		ida_remove(&net_generic_ids, *id);
-	mutex_unlock(&net_mutex);
 out:
+	mutex_unlock(&net_mutex);
 	return rv;
 }
 EXPORT_SYMBOL_GPL(register_pernet_gen_subsys);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5110b359c758..2e5f2ca3bdcd 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -73,17 +73,13 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
 				  struct pipe_buffer *buf)
 {
-	struct sk_buff *skb = (struct sk_buff *) buf->private;
-
-	kfree_skb(skb);
+	put_page(buf->page);
 }
 
 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
 				struct pipe_buffer *buf)
 {
-	struct sk_buff *skb = (struct sk_buff *) buf->private;
-
-	skb_get(skb);
+	get_page(buf->page);
 }
 
 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
@@ -1334,9 +1330,19 @@ fault:
  */
 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 {
-	struct sk_buff *skb = (struct sk_buff *) spd->partial[i].private;
+	put_page(spd->pages[i]);
+}
 
-	kfree_skb(skb);
+static inline struct page *linear_to_page(struct page *page, unsigned int len,
+					  unsigned int offset)
+{
+	struct page *p = alloc_pages(GFP_KERNEL, 0);
+
+	if (!p)
+		return NULL;
+	memcpy(page_address(p) + offset, page_address(page) + offset, len);
+
+	return p;
 }
 
 /*
@@ -1344,16 +1350,23 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
  */
 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
 				unsigned int len, unsigned int offset,
-				struct sk_buff *skb)
+				struct sk_buff *skb, int linear)
 {
 	if (unlikely(spd->nr_pages == PIPE_BUFFERS))
 		return 1;
 
+	if (linear) {
+		page = linear_to_page(page, len, offset);
+		if (!page)
+			return 1;
+	} else
+		get_page(page);
+
 	spd->pages[spd->nr_pages] = page;
 	spd->partial[spd->nr_pages].len = len;
 	spd->partial[spd->nr_pages].offset = offset;
-	spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb);
 	spd->nr_pages++;
+
 	return 0;
 }
 
@@ -1369,7 +1382,7 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
 static inline int __splice_segment(struct page *page, unsigned int poff,
 				   unsigned int plen, unsigned int *off,
 				   unsigned int *len, struct sk_buff *skb,
-				   struct splice_pipe_desc *spd)
+				   struct splice_pipe_desc *spd, int linear)
 {
 	if (!*len)
 		return 1;
@@ -1392,7 +1405,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
 		/* the linear region may spread across several pages  */
 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
-		if (spd_fill_page(spd, page, flen, poff, skb))
+		if (spd_fill_page(spd, page, flen, poff, skb, linear))
 			return 1;
 
 		__segment_seek(&page, &poff, &plen, flen);
@@ -1419,7 +1432,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 	if (__splice_segment(virt_to_page(skb->data),
 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
 			     skb_headlen(skb),
-			     offset, len, skb, spd))
+			     offset, len, skb, spd, 1))
 		return 1;
 
 	/*
@@ -1429,7 +1442,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
 		if (__splice_segment(f->page, f->page_offset, f->size,
-				     offset, len, skb, spd))
+				     offset, len, skb, spd, 0))
 			return 1;
 	}
 
@@ -1442,7 +1455,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
  * the frag list, if such a thing exists. We'd probably need to recurse to
  * handle that cleanly.
  */
-int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		    struct pipe_inode_info *pipe, unsigned int tlen,
 		    unsigned int flags)
 {
@@ -1455,16 +1468,6 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
 		.ops = &sock_pipe_buf_ops,
 		.spd_release = sock_spd_release,
 	};
-	struct sk_buff *skb;
-
-	/*
-	 * I'd love to avoid the clone here, but tcp_read_sock()
-	 * ignores reference counts and unconditonally kills the sk_buff
-	 * on return from the actor.
-	 */
-	skb = skb_clone(__skb, GFP_KERNEL);
-	if (unlikely(!skb))
-		return -ENOMEM;
 
 	/*
 	 * __skb_splice_bits() only fails if the output has no room left,
@@ -1488,15 +1491,9 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
 	}
 
 done:
-	/*
-	 * drop our reference to the clone, the pipe consumption will
-	 * drop the rest.
-	 */
-	kfree_skb(skb);
-
 	if (spd.nr_pages) {
+		struct sock *sk = skb->sk;
 		int ret;
-		struct sock *sk = __skb->sk;
 
 		/*
 		 * Drop the socket lock, otherwise we have reverse
@@ -2588,8 +2585,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	struct sk_buff *nskb;
 	unsigned int headroom;
 	unsigned int hlen = p->data - skb_mac_header(p);
+	unsigned int len = skb->len;
 
-	if (hlen + p->len + skb->len >= 65536)
+	if (hlen + p->len + len >= 65536)
 		return -E2BIG;
 
 	if (skb_shinfo(p)->frag_list)
@@ -2602,6 +2600,12 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		       skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
 
 		skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
+		skb_shinfo(skb)->nr_frags = 0;
+
+		skb->truesize -= skb->data_len;
+		skb->len -= skb->data_len;
+		skb->data_len = 0;
+
 		NAPI_GRO_CB(skb)->free = 1;
 		goto done;
 	}
@@ -2645,9 +2649,9 @@ merge:
 
 done:
 	NAPI_GRO_CB(p)->count++;
-	p->data_len += skb->len;
-	p->truesize += skb->len;
-	p->len += skb->len;
+	p->data_len += len;
+	p->truesize += len;
+	p->len += len;
 
 	NAPI_GRO_CB(skb)->same_flow = 1;
 	return 0;
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index c9224310ebae..52cb6939d093 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -93,13 +93,8 @@ ipt_local_out_hook(unsigned int hook,
 {
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("iptable_filter: ignoring short SOCK_RAW "
-			       "packet.\n");
+	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
-
 	return ipt_do_table(skb, hook, in, out,
 			    dev_net(out)->ipv4.iptable_filter);
 }
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 69f2c4287146..3929d20b9e45 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -132,12 +132,8 @@ ipt_local_hook(unsigned int hook,
 
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr)
-	    || ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("iptable_mangle: ignoring short SOCK_RAW "
-			       "packet.\n");
+	    || ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 
 	/* Save things which could affect route */
 	mark = skb->mark;
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 8faebfe638f1..7f65d18333e3 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -65,12 +65,8 @@ ipt_local_hook(unsigned int hook,
 {
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("iptable_raw: ignoring short SOCK_RAW "
-			       "packet.\n");
+	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 	return ipt_do_table(skb, hook, in, out,
 			    dev_net(out)->ipv4.iptable_raw);
 }
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index 36f3be3cc428..a52a35f4a584 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -96,12 +96,8 @@ ipt_local_out_hook(unsigned int hook,
 {
 	/* Somebody is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr)
-	    || ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk(KERN_INFO "iptable_security: ignoring short "
-			       "SOCK_RAW packet.\n");
+	    || ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 	return ipt_do_table(skb, hook, in, out,
 			    dev_net(out)->ipv4.iptable_security);
 }
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index b2141e11575e..4beb04fac588 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -145,11 +145,8 @@ static unsigned int ipv4_conntrack_local(unsigned int hooknum,
 {
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("ipt_hook: happy cracking.\n");
+	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 	return nf_conntrack_in(dev_net(out), PF_INET, hooknum, skb);
 }
 
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 1fd3ef7718b6..2a8bee26f43d 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -20,7 +20,7 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_log.h>
 
-static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ;
+static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
 
 static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
 			      struct nf_conntrack_tuple *tuple)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ce572f9dff02..0cd71b84e483 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -522,8 +522,12 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
 				unsigned int offset, size_t len)
 {
 	struct tcp_splice_state *tss = rd_desc->arg.data;
+	int ret;
 
-	return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
+	ret = skb_splice_bits(skb, offset, tss->pipe, rd_desc->count, tss->flags);
+	if (ret > 0)
+		rd_desc->count -= ret;
+	return ret;
 }
 
 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
@@ -531,6 +535,7 @@ static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
 	/* Store TCP splice context information in read_descriptor_t. */
 	read_descriptor_t rd_desc = {
 		.arg.data = tss,
+		.count = tss->len,
 	};
 
 	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
@@ -611,11 +616,13 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 		tss.len -= ret;
 		spliced += ret;
 
+		if (!timeo)
+			break;
 		release_sock(sk);
 		lock_sock(sk);
 
 		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
-		    (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
+		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
 		    signal_pending(current))
 			break;
 	}
@@ -2382,7 +2389,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	unsigned int seq;
 	__be32 delta;
 	unsigned int oldlen;
-	unsigned int len;
+	unsigned int mss;
 
 	if (!pskb_may_pull(skb, sizeof(*th)))
 		goto out;
@@ -2398,10 +2405,13 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	oldlen = (u16)~skb->len;
 	__skb_pull(skb, thlen);
 
+	mss = skb_shinfo(skb)->gso_size;
+	if (unlikely(skb->len <= mss))
+		goto out;
+
 	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
 		/* Packet is from an untrusted source, reset gso_segs. */
 		int type = skb_shinfo(skb)->gso_type;
-		int mss;
 
 		if (unlikely(type &
 			     ~(SKB_GSO_TCPV4 |
@@ -2412,7 +2422,6 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
 			goto out;
 
-		mss = skb_shinfo(skb)->gso_size;
 		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
 
 		segs = NULL;
@@ -2423,8 +2432,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	if (IS_ERR(segs))
 		goto out;
 
-	len = skb_shinfo(skb)->gso_size;
-	delta = htonl(oldlen + (thlen + len));
+	delta = htonl(oldlen + (thlen + mss));
 
 	skb = segs;
 	th = tcp_hdr(skb);
@@ -2440,7 +2448,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 				       csum_fold(csum_partial(skb_transport_header(skb),
 							      thlen, skb->csum));
 
-		seq += len;
+		seq += mss;
 		skb = skb->next;
 		th = tcp_hdr(skb);
 
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 94f74f5b0cbf..c802bc1658a8 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -797,6 +797,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 	unsigned int nlen;
 	int flush = 1;
 	int proto;
+	__wsum csum;
 
 	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
 		goto out;
@@ -808,6 +809,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
 	rcu_read_lock();
 	proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr);
+	iph = ipv6_hdr(skb);
 	IPV6_GRO_CB(skb)->proto = proto;
 	ops = rcu_dereference(inet6_protos[proto]);
 	if (!ops || !ops->gro_receive)
@@ -839,8 +841,13 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
 	NAPI_GRO_CB(skb)->flush |= flush;
 
+	csum = skb->csum;
+	skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
+
 	pp = ops->gro_receive(head, skb);
 
+	skb->csum = csum;
+
 out_unlock:
 	rcu_read_unlock();
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 29c7c99e69f7..52ee1dced2ff 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -298,6 +298,10 @@ static void fib6_dump_end(struct netlink_callback *cb)
 	struct fib6_walker_t *w = (void*)cb->args[2];
 
 	if (w) {
+		if (cb->args[4]) {
+			cb->args[4] = 0;
+			fib6_walker_unlink(w);
+		}
 		cb->args[2] = 0;
 		kfree(w);
 	}
@@ -330,15 +334,12 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
 		read_lock_bh(&table->tb6_lock);
 		res = fib6_walk_continue(w);
 		read_unlock_bh(&table->tb6_lock);
-		if (res != 0) {
-			if (res < 0)
-				fib6_walker_unlink(w);
-			goto end;
+		if (res <= 0) {
+			fib6_walker_unlink(w);
+			cb->args[4] = 0;
 		}
-		fib6_walker_unlink(w);
-		cb->args[4] = 0;
 	}
-end:
+
 	return res;
 }
 
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index bd52151d31e9..c455cf4ee756 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -26,7 +26,7 @@
 #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
 #include <net/netfilter/nf_log.h>
 
-static unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
+static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
 
 static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
 				unsigned int dataoff,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index f8bd8df5e257..7dcbde3ea7d9 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1285,6 +1285,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 				ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1];
 			natt->encap_dport = n_port->sadb_x_nat_t_port_port;
 		}
+		memset(&natt->encap_oa, 0, sizeof(natt->encap_oa));
 	}
 
 	err = xfrm_init_state(x);
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 5f510a13b9f0..c5c0c5271096 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -469,7 +469,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 	struct ieee80211_sub_if_data *sdata;
 	u16 start_seq_num;
 	u8 *state;
-	int ret;
+	int ret = 0;
 
 	if ((tid >= STA_TID_NUM) || !(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
 		return -EINVAL;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5abbc3f07dd6..b9074824862a 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -699,7 +699,8 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
 		return 0;
 
 	/* Setting ad-hoc mode on non-IBSS channel is not supported. */
-	if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS)
+	if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS &&
+	    type == NL80211_IFTYPE_ADHOC)
 		return -EOPNOTSUPP;
 
 	/*
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 929ba542fd72..1159bdb4119c 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -107,6 +107,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
 
 	sta->flags = WLAN_STA_AUTHORIZED;
 	sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
+	rate_control_rate_init(sta);
 
 	return sta;
 }
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 5ba721b6a399..2b890af01ba4 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -620,8 +620,8 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
 	if (use_short_slot != bss_conf->use_short_slot) {
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 		if (net_ratelimit()) {
-			printk(KERN_DEBUG "%s: switched to %s slot"
-			       " (BSSID=%s)\n",
+			printk(KERN_DEBUG "%s: switched to %s slot time"
+			       " (BSSID=%pM)\n",
 			       sdata->dev->name,
 			       use_short_slot ? "short" : "long",
 			       ifsta->bssid);
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 2b3b490a6073..3824990d340b 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -395,13 +395,15 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
 {
 	struct minstrel_sta_info *mi = priv_sta;
 	struct minstrel_priv *mp = priv;
-	struct minstrel_rate *mr_ctl;
+	struct ieee80211_local *local = hw_to_local(mp->hw);
+	struct ieee80211_rate *ctl_rate;
 	unsigned int i, n = 0;
 	unsigned int t_slot = 9; /* FIXME: get real slot time */
 
 	mi->lowest_rix = rate_lowest_index(sband, sta);
-	mr_ctl = &mi->r[rix_to_ndx(mi, mi->lowest_rix)];
-	mi->sp_ack_dur = mr_ctl->ack_time;
+	ctl_rate = &sband->bitrates[mi->lowest_rix];
+	mi->sp_ack_dur = ieee80211_frame_duration(local, 10, ctl_rate->bitrate,
+				!!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
 
 	for (i = 0; i < sband->n_bitrates; i++) {
 		struct minstrel_rate *mr = &mi->r[n];
@@ -416,7 +418,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
 
 		mr->rix = i;
 		mr->bitrate = sband->bitrates[i].bitrate / 5;
-		calc_rate_durations(mi, hw_to_local(mp->hw), mr,
+		calc_rate_durations(mi, local, mr,
 				    &sband->bitrates[i]);
 
 		/* calculate maximum number of retransmissions before
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index dc2606d0ae77..e49a5b99cf10 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -195,7 +195,6 @@ struct sta_ampdu_mlme {
  * @tx_packets: number of RX/TX MSDUs
  * @tx_bytes: number of bytes transmitted to this STA
  * @tx_fragments: number of transmitted MPDUs
- * @last_txrate: description of the last used transmit rate
  * @tid_seq: per-TID sequence numbers for sending to this STA
  * @ampdu_mlme: A-MPDU state machine state
  * @timer_to_tid: identity mapping to ID timers
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index a4af3a124cce..4278e545638f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1307,8 +1307,10 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (is_multicast_ether_addr(hdr->addr3))
 			memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
 		else
-			if (mesh_nexthop_lookup(skb, osdata))
-				return 0;
+			if (mesh_nexthop_lookup(skb, osdata)) {
+				dev_put(odev);
+				return 0;
+			}
 		if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
 			IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
 						     fwded_frames);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 7e83f74cd5de..90ce9ddb9451 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -469,7 +469,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
 				   const struct nf_conntrack_tuple *repl,
 				   gfp_t gfp)
 {
-	struct nf_conn *ct = NULL;
+	struct nf_conn *ct;
 
 	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
 		get_random_bytes(&nf_conntrack_hash_rnd, 4);
@@ -551,7 +551,7 @@ init_conntrack(struct net *net,
 	}
 
 	ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
-	if (ct == NULL || IS_ERR(ct)) {
+	if (IS_ERR(ct)) {
 		pr_debug("Can't allocate conntrack.\n");
 		return (struct nf_conntrack_tuple_hash *)ct;
 	}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 00e8c27130ff..c32a7e8e3a1b 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -831,13 +831,16 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
 	if (!parse_nat_setup) {
 #ifdef CONFIG_MODULES
 		rcu_read_unlock();
+		spin_unlock_bh(&nf_conntrack_lock);
 		nfnl_unlock();
 		if (request_module("nf-nat-ipv4") < 0) {
 			nfnl_lock();
+			spin_lock_bh(&nf_conntrack_lock);
 			rcu_read_lock();
 			return -EOPNOTSUPP;
 		}
 		nfnl_lock();
+		spin_lock_bh(&nf_conntrack_lock);
 		rcu_read_lock();
 		if (nfnetlink_parse_nat_setup_hook)
 			return -EAGAIN;
@@ -1134,7 +1137,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
 	struct nf_conntrack_helper *helper;
 
 	ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC);
-	if (ct == NULL || IS_ERR(ct))
+	if (IS_ERR(ct))
 		return -ENOMEM;
 
 	if (!cda[CTA_TIMEOUT])
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 89837a4eef76..bfbf521f6ea5 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -273,6 +273,10 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
 			have_rev = 1;
 		}
 	}
+
+	if (af != NFPROTO_UNSPEC && !have_rev)
+		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
+
 	return have_rev;
 }
 
@@ -289,6 +293,10 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
 			have_rev = 1;
 		}
 	}
+
+	if (af != NFPROTO_UNSPEC && !have_rev)
+		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
+
 	return have_rev;
 }
 
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 29375ba8db73..93acaa59d108 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -243,6 +243,17 @@ static struct xt_match xt_time_mt_reg __read_mostly = {
 
 static int __init time_mt_init(void)
 {
+	int minutes = sys_tz.tz_minuteswest;
+
+	if (minutes < 0) /* east of Greenwich */
+		printk(KERN_INFO KBUILD_MODNAME
+		       ": kernel timezone is +%02d%02d\n",
+		       -minutes / 60, -minutes % 60);
+	else /* west of Greenwich */
+		printk(KERN_INFO KBUILD_MODNAME
+		       ": kernel timezone is -%02d%02d\n",
+		       minutes / 60, minutes % 60);
+
 	return xt_register_match(&xt_time_mt_reg);
 }
 
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 5070643ce534..2f0f0b04d3fb 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -661,12 +661,13 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
  * next pending event (0 for no event in pq).
  * Note: Applied are events whose have cl->pq_key <= q->now.
  */
-static psched_time_t htb_do_events(struct htb_sched *q, int level)
+static psched_time_t htb_do_events(struct htb_sched *q, int level,
+				   unsigned long start)
 {
 	/* don't run for longer than 2 jiffies; 2 is used instead of
 	   1 to simplify things when jiffy is going to be incremented
 	   too soon */
-	unsigned long stop_at = jiffies + 2;
+	unsigned long stop_at = start + 2;
 	while (time_before(jiffies, stop_at)) {
 		struct htb_class *cl;
 		long diff;
@@ -685,8 +686,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level)
 		if (cl->cmode != HTB_CAN_SEND)
 			htb_add_to_wait_tree(q, cl, diff);
 	}
-	/* too much load - let's continue on next jiffie */
-	return q->now + PSCHED_TICKS_PER_SEC / HZ;
+	/* too much load - let's continue on next jiffie (including above) */
+	return q->now + 2 * PSCHED_TICKS_PER_SEC / HZ;
 }
 
 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
@@ -845,6 +846,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 	struct htb_sched *q = qdisc_priv(sch);
 	int level;
 	psched_time_t next_event;
+	unsigned long start_at;
 
 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 	skb = __skb_dequeue(&q->direct_queue);
@@ -857,6 +859,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 	if (!sch->q.qlen)
 		goto fin;
 	q->now = psched_get_time();
+	start_at = jiffies;
 
 	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
 
@@ -866,14 +869,14 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 		psched_time_t event;
 
 		if (q->now >= q->near_ev_cache[level]) {
-			event = htb_do_events(q, level);
+			event = htb_do_events(q, level, start_at);
 			if (!event)
 				event = q->now + PSCHED_TICKS_PER_SEC;
 			q->near_ev_cache[level] = event;
 		} else
 			event = q->near_ev_cache[level];
 
-		if (event && next_event > event)
+		if (next_event > event)
 			next_event = event;
 
 		m = ~q->row_mask[level];
diff --git a/net/sctp/input.c b/net/sctp/input.c index bf612d954d41..2e4a8646dbc3 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -249,6 +249,19 @@ int sctp_rcv(struct sk_buff *skb) | |||
249 | */ | 249 | */ |
250 | sctp_bh_lock_sock(sk); | 250 | sctp_bh_lock_sock(sk); |
251 | 251 | ||
252 | if (sk != rcvr->sk) { | ||
253 | /* Our cached sk is different from the rcvr->sk. This is | ||
254 | * because migrate()/accept() may have moved the association | ||
255 | * to a new socket and released all the sockets. So now we | ||
256 | * are holding a lock on the old socket while the user may | ||
257 | * be doing something with the new socket. Switch our veiw | ||
258 | * of the current sk. | ||
259 | */ | ||
260 | sctp_bh_unlock_sock(sk); | ||
261 | sk = rcvr->sk; | ||
262 | sctp_bh_lock_sock(sk); | ||
263 | } | ||
264 | |||
252 | if (sock_owned_by_user(sk)) { | 265 | if (sock_owned_by_user(sk)) { |
253 | SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG); | 266 | SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG); |
254 | sctp_add_backlog(sk, skb); | 267 | sctp_add_backlog(sk, skb); |
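The block added above revalidates the socket after taking its lock: migrate()/accept() may have re-homed the association, so the cached sk can go stale between lookup and lock. A generic sketch of that lock-then-revalidate pattern, using purely hypothetical types and lock primitives (not SCTP code):

struct owner { int id; };			/* stands in for a socket */
struct resource { struct owner *owner; };	/* owner may change on migration */

static void take_lock(struct owner *o);		/* hypothetical lock primitives */
static void drop_lock(struct owner *o);

/*
 * Lock the owner, then re-check that it still owns the resource; if the
 * resource was re-homed in the meantime, switch to the new owner's lock.
 */
static struct owner *lock_current_owner(struct owner *sk, struct resource *rcvr)
{
	take_lock(sk);
	if (sk != rcvr->owner) {
		drop_lock(sk);
		sk = rcvr->owner;
		take_lock(sk);
	}
	return sk;
}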
diff --git a/net/sctp/output.c b/net/sctp/output.c index c3f417f7ec6e..73639355157e 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -324,14 +324,16 @@ append: | |||
324 | switch (chunk->chunk_hdr->type) { | 324 | switch (chunk->chunk_hdr->type) { |
325 | case SCTP_CID_DATA: | 325 | case SCTP_CID_DATA: |
326 | retval = sctp_packet_append_data(packet, chunk); | 326 | retval = sctp_packet_append_data(packet, chunk); |
327 | if (SCTP_XMIT_OK != retval) | ||
328 | goto finish; | ||
327 | /* Disallow SACK bundling after DATA. */ | 329 | /* Disallow SACK bundling after DATA. */ |
328 | packet->has_sack = 1; | 330 | packet->has_sack = 1; |
329 | /* Disallow AUTH bundling after DATA */ | 331 | /* Disallow AUTH bundling after DATA */ |
330 | packet->has_auth = 1; | 332 | packet->has_auth = 1; |
331 | /* Let it be known that packet has DATA in it */ | 333 | /* Let it be known that packet has DATA in it */ |
332 | packet->has_data = 1; | 334 | packet->has_data = 1; |
333 | if (SCTP_XMIT_OK != retval) | 335 | /* timestamp the chunk for rtx purposes */ |
334 | goto finish; | 336 | chunk->sent_at = jiffies; |
335 | break; | 337 | break; |
336 | case SCTP_CID_COOKIE_ECHO: | 338 | case SCTP_CID_COOKIE_ECHO: |
337 | packet->has_cookie_echo = 1; | 339 | packet->has_cookie_echo = 1; |
@@ -470,7 +472,6 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
470 | } else | 472 | } else |
471 | chunk->resent = 1; | 473 | chunk->resent = 1; |
472 | 474 | ||
473 | chunk->sent_at = jiffies; | ||
474 | has_data = 1; | 475 | has_data = 1; |
475 | } | 476 | } |
476 | 477 | ||
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 247ebc95c1e5..bc411c896216 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -929,7 +929,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | |||
929 | } | 929 | } |
930 | 930 | ||
931 | /* Finally, transmit new packets. */ | 931 | /* Finally, transmit new packets. */ |
932 | start_timer = 0; | ||
933 | while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { | 932 | while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { |
934 | /* RFC 2960 6.5 Every DATA chunk MUST carry a valid | 933 | /* RFC 2960 6.5 Every DATA chunk MUST carry a valid |
935 | * stream identifier. | 934 | * stream identifier. |
@@ -1028,7 +1027,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | |||
1028 | list_add_tail(&chunk->transmitted_list, | 1027 | list_add_tail(&chunk->transmitted_list, |
1029 | &transport->transmitted); | 1028 | &transport->transmitted); |
1030 | 1029 | ||
1031 | sctp_transport_reset_timers(transport, start_timer-1); | 1030 | sctp_transport_reset_timers(transport, 0); |
1032 | 1031 | ||
1033 | q->empty = 0; | 1032 | q->empty = 0; |
1034 | 1033 | ||
diff --git a/net/socket.c b/net/socket.c index 06603d73c411..35dd7371752a 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -1214,7 +1214,7 @@ int sock_create_kern(int family, int type, int protocol, struct socket **res) | |||
1214 | return __sock_create(&init_net, family, type, protocol, res, 1); | 1214 | return __sock_create(&init_net, family, type, protocol, res, 1); |
1215 | } | 1215 | } |
1216 | 1216 | ||
1217 | asmlinkage long sys_socket(int family, int type, int protocol) | 1217 | SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) |
1218 | { | 1218 | { |
1219 | int retval; | 1219 | int retval; |
1220 | struct socket *sock; | 1220 | struct socket *sock; |
@@ -1255,8 +1255,8 @@ out_release: | |||
1255 | * Create a pair of connected sockets. | 1255 | * Create a pair of connected sockets. |
1256 | */ | 1256 | */ |
1257 | 1257 | ||
1258 | asmlinkage long sys_socketpair(int family, int type, int protocol, | 1258 | SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol, |
1259 | int __user *usockvec) | 1259 | int __user *, usockvec) |
1260 | { | 1260 | { |
1261 | struct socket *sock1, *sock2; | 1261 | struct socket *sock1, *sock2; |
1262 | int fd1, fd2, err; | 1262 | int fd1, fd2, err; |
@@ -1356,7 +1356,7 @@ out_fd1: | |||
1356 | * the protocol layer (having also checked the address is ok). | 1356 | * the protocol layer (having also checked the address is ok). |
1357 | */ | 1357 | */ |
1358 | 1358 | ||
1359 | asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen) | 1359 | SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen) |
1360 | { | 1360 | { |
1361 | struct socket *sock; | 1361 | struct socket *sock; |
1362 | struct sockaddr_storage address; | 1362 | struct sockaddr_storage address; |
@@ -1385,7 +1385,7 @@ asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen) | |||
1385 | * ready for listening. | 1385 | * ready for listening. |
1386 | */ | 1386 | */ |
1387 | 1387 | ||
1388 | asmlinkage long sys_listen(int fd, int backlog) | 1388 | SYSCALL_DEFINE2(listen, int, fd, int, backlog) |
1389 | { | 1389 | { |
1390 | struct socket *sock; | 1390 | struct socket *sock; |
1391 | int err, fput_needed; | 1391 | int err, fput_needed; |
@@ -1418,8 +1418,8 @@ asmlinkage long sys_listen(int fd, int backlog) | |||
1418 | * clean when we restructure accept also. | 1418 | * clean when we restructure accept also. |
1419 | */ | 1419 | */ |
1420 | 1420 | ||
1421 | asmlinkage long sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, | 1421 | SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, |
1422 | int __user *upeer_addrlen, int flags) | 1422 | int __user *, upeer_addrlen, int, flags) |
1423 | { | 1423 | { |
1424 | struct socket *sock, *newsock; | 1424 | struct socket *sock, *newsock; |
1425 | struct file *newfile; | 1425 | struct file *newfile; |
@@ -1502,8 +1502,8 @@ out_fd: | |||
1502 | goto out_put; | 1502 | goto out_put; |
1503 | } | 1503 | } |
1504 | 1504 | ||
1505 | asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, | 1505 | SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr, |
1506 | int __user *upeer_addrlen) | 1506 | int __user *, upeer_addrlen) |
1507 | { | 1507 | { |
1508 | return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0); | 1508 | return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0); |
1509 | } | 1509 | } |
@@ -1520,8 +1520,8 @@ asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, | |||
1520 | * include the -EINPROGRESS status for such sockets. | 1520 | * include the -EINPROGRESS status for such sockets. |
1521 | */ | 1521 | */ |
1522 | 1522 | ||
1523 | asmlinkage long sys_connect(int fd, struct sockaddr __user *uservaddr, | 1523 | SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr, |
1524 | int addrlen) | 1524 | int, addrlen) |
1525 | { | 1525 | { |
1526 | struct socket *sock; | 1526 | struct socket *sock; |
1527 | struct sockaddr_storage address; | 1527 | struct sockaddr_storage address; |
@@ -1552,8 +1552,8 @@ out: | |||
1552 | * name to user space. | 1552 | * name to user space. |
1553 | */ | 1553 | */ |
1554 | 1554 | ||
1555 | asmlinkage long sys_getsockname(int fd, struct sockaddr __user *usockaddr, | 1555 | SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr, |
1556 | int __user *usockaddr_len) | 1556 | int __user *, usockaddr_len) |
1557 | { | 1557 | { |
1558 | struct socket *sock; | 1558 | struct socket *sock; |
1559 | struct sockaddr_storage address; | 1559 | struct sockaddr_storage address; |
@@ -1583,8 +1583,8 @@ out: | |||
1583 | * name to user space. | 1583 | * name to user space. |
1584 | */ | 1584 | */ |
1585 | 1585 | ||
1586 | asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr, | 1586 | SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr, |
1587 | int __user *usockaddr_len) | 1587 | int __user *, usockaddr_len) |
1588 | { | 1588 | { |
1589 | struct socket *sock; | 1589 | struct socket *sock; |
1590 | struct sockaddr_storage address; | 1590 | struct sockaddr_storage address; |
@@ -1615,9 +1615,9 @@ asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr, | |||
1615 | * the protocol. | 1615 | * the protocol. |
1616 | */ | 1616 | */ |
1617 | 1617 | ||
1618 | asmlinkage long sys_sendto(int fd, void __user *buff, size_t len, | 1618 | SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, |
1619 | unsigned flags, struct sockaddr __user *addr, | 1619 | unsigned, flags, struct sockaddr __user *, addr, |
1620 | int addr_len) | 1620 | int, addr_len) |
1621 | { | 1621 | { |
1622 | struct socket *sock; | 1622 | struct socket *sock; |
1623 | struct sockaddr_storage address; | 1623 | struct sockaddr_storage address; |
@@ -1660,7 +1660,8 @@ out: | |||
1660 | * Send a datagram down a socket. | 1660 | * Send a datagram down a socket. |
1661 | */ | 1661 | */ |
1662 | 1662 | ||
1663 | asmlinkage long sys_send(int fd, void __user *buff, size_t len, unsigned flags) | 1663 | SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len, |
1664 | unsigned, flags) | ||
1664 | { | 1665 | { |
1665 | return sys_sendto(fd, buff, len, flags, NULL, 0); | 1666 | return sys_sendto(fd, buff, len, flags, NULL, 0); |
1666 | } | 1667 | } |
@@ -1671,9 +1672,9 @@ asmlinkage long sys_send(int fd, void __user *buff, size_t len, unsigned flags) | |||
1671 | * sender address from kernel to user space. | 1672 | * sender address from kernel to user space. |
1672 | */ | 1673 | */ |
1673 | 1674 | ||
1674 | asmlinkage long sys_recvfrom(int fd, void __user *ubuf, size_t size, | 1675 | SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, |
1675 | unsigned flags, struct sockaddr __user *addr, | 1676 | unsigned, flags, struct sockaddr __user *, addr, |
1676 | int __user *addr_len) | 1677 | int __user *, addr_len) |
1677 | { | 1678 | { |
1678 | struct socket *sock; | 1679 | struct socket *sock; |
1679 | struct iovec iov; | 1680 | struct iovec iov; |
@@ -1725,8 +1726,8 @@ asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size, | |||
1725 | * to pass the user mode parameter for the protocols to sort out. | 1726 | * to pass the user mode parameter for the protocols to sort out. |
1726 | */ | 1727 | */ |
1727 | 1728 | ||
1728 | asmlinkage long sys_setsockopt(int fd, int level, int optname, | 1729 | SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname, |
1729 | char __user *optval, int optlen) | 1730 | char __user *, optval, int, optlen) |
1730 | { | 1731 | { |
1731 | int err, fput_needed; | 1732 | int err, fput_needed; |
1732 | struct socket *sock; | 1733 | struct socket *sock; |
@@ -1759,8 +1760,8 @@ out_put: | |||
1759 | * to pass a user mode parameter for the protocols to sort out. | 1760 | * to pass a user mode parameter for the protocols to sort out. |
1760 | */ | 1761 | */ |
1761 | 1762 | ||
1762 | asmlinkage long sys_getsockopt(int fd, int level, int optname, | 1763 | SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname, |
1763 | char __user *optval, int __user *optlen) | 1764 | char __user *, optval, int __user *, optlen) |
1764 | { | 1765 | { |
1765 | int err, fput_needed; | 1766 | int err, fput_needed; |
1766 | struct socket *sock; | 1767 | struct socket *sock; |
@@ -1789,7 +1790,7 @@ out_put: | |||
1789 | * Shutdown a socket. | 1790 | * Shutdown a socket. |
1790 | */ | 1791 | */ |
1791 | 1792 | ||
1792 | asmlinkage long sys_shutdown(int fd, int how) | 1793 | SYSCALL_DEFINE2(shutdown, int, fd, int, how) |
1793 | { | 1794 | { |
1794 | int err, fput_needed; | 1795 | int err, fput_needed; |
1795 | struct socket *sock; | 1796 | struct socket *sock; |
@@ -1815,7 +1816,7 @@ asmlinkage long sys_shutdown(int fd, int how) | |||
1815 | * BSD sendmsg interface | 1816 | * BSD sendmsg interface |
1816 | */ | 1817 | */ |
1817 | 1818 | ||
1818 | asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags) | 1819 | SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags) |
1819 | { | 1820 | { |
1820 | struct compat_msghdr __user *msg_compat = | 1821 | struct compat_msghdr __user *msg_compat = |
1821 | (struct compat_msghdr __user *)msg; | 1822 | (struct compat_msghdr __user *)msg; |
@@ -1921,8 +1922,8 @@ out: | |||
1921 | * BSD recvmsg interface | 1922 | * BSD recvmsg interface |
1922 | */ | 1923 | */ |
1923 | 1924 | ||
1924 | asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, | 1925 | SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, |
1925 | unsigned int flags) | 1926 | unsigned int, flags) |
1926 | { | 1927 | { |
1927 | struct compat_msghdr __user *msg_compat = | 1928 | struct compat_msghdr __user *msg_compat = |
1928 | (struct compat_msghdr __user *)msg; | 1929 | (struct compat_msghdr __user *)msg; |
@@ -2045,7 +2046,7 @@ static const unsigned char nargs[19]={ | |||
2045 | * it is set by the callees. | 2046 | * it is set by the callees. |
2046 | */ | 2047 | */ |
2047 | 2048 | ||
2048 | asmlinkage long sys_socketcall(int call, unsigned long __user *args) | 2049 | SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) |
2049 | { | 2050 | { |
2050 | unsigned long a[6]; | 2051 | unsigned long a[6]; |
2051 | unsigned long a0, a1; | 2052 | unsigned long a0, a1; |
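The socket.c hunks above replace the open-coded asmlinkage definitions with the SYSCALL_DEFINEn macros, whose purpose is to let 64-bit architectures force proper sign extension of 32-bit syscall arguments that arrive in 64-bit registers. A rough, hedged sketch of what the first conversion amounts to (the exact expansion is architecture- and config-dependent):

/*
 * Approximate expansion of SYSCALL_DEFINE3(socket, int, family, int, type,
 * int, protocol) on an architecture without CONFIG_HAVE_SYSCALL_WRAPPERS.
 * With the wrappers enabled, the macro additionally emits a variant that
 * takes every argument as a long and narrows it before entering the body,
 * which is what guarantees correct sign extension.
 */
asmlinkage long sys_socket(int family, int type, int protocol)
{
	/* ... unchanged function body ... */
	return 0;	/* placeholder for the real return value */
}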
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig new file mode 100644 index 000000000000..dcef600d0bf5 --- /dev/null +++ b/net/sunrpc/Kconfig | |||
@@ -0,0 +1,78 @@ | |||
1 | config SUNRPC | ||
2 | tristate | ||
3 | |||
4 | config SUNRPC_GSS | ||
5 | tristate | ||
6 | |||
7 | config SUNRPC_XPRT_RDMA | ||
8 | tristate | ||
9 | depends on SUNRPC && INFINIBAND && EXPERIMENTAL | ||
10 | default SUNRPC && INFINIBAND | ||
11 | help | ||
12 | This option allows the NFS client and server to support | ||
13 | an RDMA-enabled transport. | ||
14 | |||
15 | To compile RPC client RDMA transport support as a module, | ||
16 | choose M here: the module will be called xprtrdma. | ||
17 | |||
18 | If unsure, say N. | ||
19 | |||
20 | config SUNRPC_REGISTER_V4 | ||
21 | bool "Register local RPC services via rpcbind v4 (EXPERIMENTAL)" | ||
22 | depends on SUNRPC && EXPERIMENTAL | ||
23 | default n | ||
24 | help | ||
25 | Sun added support for registering RPC services at an IPv6 | ||
26 | address by creating two new versions of the rpcbind protocol | ||
27 | (RFC 1833). | ||
28 | |||
29 | This option enables support in the kernel RPC server for | ||
30 | registering kernel RPC services via version 4 of the rpcbind | ||
31 | protocol. If you enable this option, you must run a portmapper | ||
32 | daemon that supports rpcbind protocol version 4. | ||
33 | |||
34 | Serving NFS over IPv6 from knfsd (the kernel's NFS server) | ||
35 | requires that you enable this option and use a portmapper that | ||
36 | supports rpcbind version 4. | ||
37 | |||
38 | If unsure, say N to get traditional behavior (register kernel | ||
39 | RPC services using only rpcbind version 2). Distributions | ||
40 | using the legacy Linux portmapper daemon must say N here. | ||
41 | |||
42 | config RPCSEC_GSS_KRB5 | ||
43 | tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)" | ||
44 | depends on SUNRPC && EXPERIMENTAL | ||
45 | select SUNRPC_GSS | ||
46 | select CRYPTO | ||
47 | select CRYPTO_MD5 | ||
48 | select CRYPTO_DES | ||
49 | select CRYPTO_CBC | ||
50 | help | ||
51 | Choose Y here to enable Secure RPC using the Kerberos version 5 | ||
52 | GSS-API mechanism (RFC 1964). | ||
53 | |||
54 | Secure RPC calls with Kerberos require an auxiliary user-space | ||
55 | daemon which may be found in the Linux nfs-utils package | ||
56 | available from http://linux-nfs.org/. In addition, user-space | ||
57 | Kerberos support should be installed. | ||
58 | |||
59 | If unsure, say N. | ||
60 | |||
61 | config RPCSEC_GSS_SPKM3 | ||
62 | tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)" | ||
63 | depends on SUNRPC && EXPERIMENTAL | ||
64 | select SUNRPC_GSS | ||
65 | select CRYPTO | ||
66 | select CRYPTO_MD5 | ||
67 | select CRYPTO_DES | ||
68 | select CRYPTO_CAST5 | ||
69 | select CRYPTO_CBC | ||
70 | help | ||
71 | Choose Y here to enable Secure RPC using the SPKM3 public key | ||
72 | GSS-API mechanism (RFC 2025). | ||
73 | |||
74 | Secure RPC calls with SPKM3 require an auxiliary userspace | ||
75 | daemon which may be found in the Linux nfs-utils package | ||
76 | available from http://linux-nfs.org/. | ||
77 | |||
78 | If unsure, say N. | ||
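For context, a purely illustrative .config fragment that would build the Kerberos-secured RPC pieces defined above as modules; the symbol names come from this Kconfig file, the chosen values are hypothetical:

CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
CONFIG_RPCSEC_GSS_KRB5=m
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SUNRPC_REGISTER_V4 is not set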
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 4f877535e666..bc494cef2102 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -421,6 +421,31 @@ static u32 freq_max_bandwidth(const struct ieee80211_freq_range *freq_range, | |||
421 | return 0; | 421 | return 0; |
422 | } | 422 | } |
423 | 423 | ||
424 | /** | ||
425 | * freq_in_rule_band - tells us if a frequency is in a frequency band | ||
426 | * @freq_range: frequency rule we want to query | ||
427 | * @freq_khz: frequency we are inquiring about | ||
428 | * | ||
429 | * This lets us know if a specific frequency rule is or is not relevant to | ||
430 | * a specific frequency's band. Bands are device specific and artificial | ||
431 | * definitions (the "2.4 GHz band" and the "5 GHz band"); however, it is | ||
432 | * safe for now to assume that a frequency rule should not be part of a | ||
433 | * frequency's band if the start freq or end freq are off by more than 2 GHz. | ||
434 | * This resolution can be lowered and should be considered as we add | ||
435 | * regulatory rule support for other "bands". | ||
436 | **/ | ||
437 | static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range, | ||
438 | u32 freq_khz) | ||
439 | { | ||
440 | #define ONE_GHZ_IN_KHZ 1000000 | ||
441 | if (abs(freq_khz - freq_range->start_freq_khz) <= (2 * ONE_GHZ_IN_KHZ)) | ||
442 | return true; | ||
443 | if (abs(freq_khz - freq_range->end_freq_khz) <= (2 * ONE_GHZ_IN_KHZ)) | ||
444 | return true; | ||
445 | return false; | ||
446 | #undef ONE_GHZ_IN_KHZ | ||
447 | } | ||
448 | |||
424 | /* Converts a country IE to a regulatory domain. A regulatory domain | 449 | /* Converts a country IE to a regulatory domain. A regulatory domain |
425 | * structure has a lot of information which the IE doesn't yet have, | 450 | * structure has a lot of information which the IE doesn't yet have, |
426 | * so for the other values we use upper max values as we will intersect | 451 | * so for the other values we use upper max values as we will intersect |
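A quick worked usage of freq_in_rule_band() as added above, with hypothetical numbers: against a 2.4 GHz rule spanning 2402-2482 MHz, 2437 MHz is within 2 GHz of both edges, while 5180 MHz is roughly 2.7 GHz away from either edge and so is treated as belonging to a different band:

static void freq_in_rule_band_example(void)
{
	/* Hypothetical rule covering 2402-2482 MHz (the 2.4 GHz band). */
	const struct ieee80211_freq_range rule = {
		.start_freq_khz = 2402000,
		.end_freq_khz = 2482000,
	};

	/* true:  |2437000 - 2402000| kHz = 35 MHz, well inside 2 GHz */
	freq_in_rule_band(&rule, MHZ_TO_KHZ(2437));

	/* false: 5180000 kHz is ~2.7 GHz above either edge */
	freq_in_rule_band(&rule, MHZ_TO_KHZ(5180));
}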
@@ -538,6 +563,7 @@ static struct ieee80211_regdomain *country_ie_2_rd( | |||
538 | 563 | ||
539 | /* This time around we fill in the rd */ | 564 | /* This time around we fill in the rd */ |
540 | while (country_ie_len >= 3) { | 565 | while (country_ie_len >= 3) { |
566 | int end_channel = 0; | ||
541 | struct ieee80211_country_ie_triplet *triplet = | 567 | struct ieee80211_country_ie_triplet *triplet = |
542 | (struct ieee80211_country_ie_triplet *) country_ie; | 568 | (struct ieee80211_country_ie_triplet *) country_ie; |
543 | struct ieee80211_reg_rule *reg_rule = NULL; | 569 | struct ieee80211_reg_rule *reg_rule = NULL; |
@@ -559,6 +585,23 @@ static struct ieee80211_regdomain *country_ie_2_rd( | |||
559 | 585 | ||
560 | reg_rule->flags = flags; | 586 | reg_rule->flags = flags; |
561 | 587 | ||
588 | /* 2 GHz */ | ||
589 | if (triplet->chans.first_channel <= 14) | ||
590 | end_channel = triplet->chans.first_channel + | ||
591 | triplet->chans.num_channels; | ||
592 | else | ||
593 | /* | ||
594 | * 5 GHz -- For example in country IEs if the first | ||
595 | * channel given is 36 and the number of channels is 4 | ||
596 | * then the individual channel numbers defined for the | ||
597 | * 5 GHz PHY by these parameters are: 36, 40, 44, and 48 | ||
598 | * and not 36, 37, 38, 39. | ||
599 | * | ||
600 | * See: http://tinyurl.com/11d-clarification | ||
601 | */ | ||
602 | end_channel = triplet->chans.first_channel + | ||
603 | (4 * (triplet->chans.num_channels - 1)); | ||
604 | |||
562 | /* The +10 is since the regulatory domain expects | 605 | /* The +10 is since the regulatory domain expects |
563 | * the actual band edge, not the center of freq for | 606 | * the actual band edge, not the center of freq for |
564 | * its start and end freqs, assuming 20 MHz bandwidth on | 607 | * its start and end freqs, assuming 20 MHz bandwidth on |
@@ -568,8 +611,7 @@ static struct ieee80211_regdomain *country_ie_2_rd( | |||
568 | triplet->chans.first_channel) - 10); | 611 | triplet->chans.first_channel) - 10); |
569 | freq_range->end_freq_khz = | 612 | freq_range->end_freq_khz = |
570 | MHZ_TO_KHZ(ieee80211_channel_to_frequency( | 613 | MHZ_TO_KHZ(ieee80211_channel_to_frequency( |
571 | triplet->chans.first_channel + | 614 | end_channel) + 10); |
572 | triplet->chans.num_channels) + 10); | ||
573 | 615 | ||
574 | /* Large arbitrary values, we intersect later */ | 616 | /* Large arbitrary values, we intersect later */ |
575 | /* Increment this if we ever support >= 40 MHz channels | 617 | /* Increment this if we ever support >= 40 MHz channels |
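The channel arithmetic in the hunks above condenses into a small helper; a hypothetical sketch (not part of the patch) that mirrors the computation:

/*
 * Last channel covered by a country-IE triplet: 2.4 GHz channels are
 * numbered consecutively, 5 GHz channels are spaced 4 apart (36, 40, ...).
 */
static int country_ie_end_channel(int first_channel, int num_channels)
{
	if (first_channel <= 14)	/* 2 GHz */
		return first_channel + num_channels;
	return first_channel + 4 * (num_channels - 1);	/* 5 GHz */
}

/* Example from the comment above: first_channel = 36, num_channels = 4
 * gives 36 + 4 * 3 = 48, i.e. channels 36, 40, 44 and 48. */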
@@ -748,12 +790,23 @@ static u32 map_regdom_flags(u32 rd_flags) | |||
748 | * this value to the maximum allowed bandwidth. | 790 | * this value to the maximum allowed bandwidth. |
749 | * @reg_rule: the regulatory rule which we have for this frequency | 791 | * @reg_rule: the regulatory rule which we have for this frequency |
750 | * | 792 | * |
751 | * Use this function to get the regulatory rule for a specific frequency. | 793 | * Use this function to get the regulatory rule for a specific frequency on |
794 | * a given wireless device. If the device has a specific regulatory domain | ||
795 | * it wants to follow, we respect that unless a country IE has been received | ||
796 | * and processed already. | ||
797 | * | ||
798 | * Returns 0 if it was able to find a valid regulatory rule which does | ||
799 | * apply to the given center_freq; otherwise it returns non-zero. It will | ||
800 | * also return -ERANGE if we determine the given center_freq does not even have | ||
801 | * a regulatory rule for a frequency range in the center_freq's band. See | ||
802 | * freq_in_rule_band() for our current definition of a band -- this is purely | ||
803 | * subjective and right now it's 802.11 specific. | ||
752 | */ | 804 | */ |
753 | static int freq_reg_info(u32 center_freq, u32 *bandwidth, | 805 | static int freq_reg_info(u32 center_freq, u32 *bandwidth, |
754 | const struct ieee80211_reg_rule **reg_rule) | 806 | const struct ieee80211_reg_rule **reg_rule) |
755 | { | 807 | { |
756 | int i; | 808 | int i; |
809 | bool band_rule_found = false; | ||
757 | u32 max_bandwidth = 0; | 810 | u32 max_bandwidth = 0; |
758 | 811 | ||
759 | if (!cfg80211_regdomain) | 812 | if (!cfg80211_regdomain) |
@@ -767,7 +820,15 @@ static int freq_reg_info(u32 center_freq, u32 *bandwidth, | |||
767 | rr = &cfg80211_regdomain->reg_rules[i]; | 820 | rr = &cfg80211_regdomain->reg_rules[i]; |
768 | fr = &rr->freq_range; | 821 | fr = &rr->freq_range; |
769 | pr = &rr->power_rule; | 822 | pr = &rr->power_rule; |
823 | |||
824 | /* We only need to know if one frequency rule was | ||
825 | * in center_freq's band, that's enough, so let's | ||
826 | * not overwrite it once found */ | ||
827 | if (!band_rule_found) | ||
828 | band_rule_found = freq_in_rule_band(fr, center_freq); | ||
829 | |||
770 | max_bandwidth = freq_max_bandwidth(fr, center_freq); | 830 | max_bandwidth = freq_max_bandwidth(fr, center_freq); |
831 | |||
771 | if (max_bandwidth && *bandwidth <= max_bandwidth) { | 832 | if (max_bandwidth && *bandwidth <= max_bandwidth) { |
772 | *reg_rule = rr; | 833 | *reg_rule = rr; |
773 | *bandwidth = max_bandwidth; | 834 | *bandwidth = max_bandwidth; |
@@ -775,23 +836,64 @@ static int freq_reg_info(u32 center_freq, u32 *bandwidth, | |||
775 | } | 836 | } |
776 | } | 837 | } |
777 | 838 | ||
839 | if (!band_rule_found) | ||
840 | return -ERANGE; | ||
841 | |||
778 | return !max_bandwidth; | 842 | return !max_bandwidth; |
779 | } | 843 | } |
780 | 844 | ||
781 | static void handle_channel(struct ieee80211_channel *chan) | 845 | static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band, |
846 | unsigned int chan_idx) | ||
782 | { | 847 | { |
783 | int r; | 848 | int r; |
784 | u32 flags = chan->orig_flags; | 849 | u32 flags; |
785 | u32 max_bandwidth = 0; | 850 | u32 max_bandwidth = 0; |
786 | const struct ieee80211_reg_rule *reg_rule = NULL; | 851 | const struct ieee80211_reg_rule *reg_rule = NULL; |
787 | const struct ieee80211_power_rule *power_rule = NULL; | 852 | const struct ieee80211_power_rule *power_rule = NULL; |
853 | struct ieee80211_supported_band *sband; | ||
854 | struct ieee80211_channel *chan; | ||
855 | |||
856 | sband = wiphy->bands[band]; | ||
857 | BUG_ON(chan_idx >= sband->n_channels); | ||
858 | chan = &sband->channels[chan_idx]; | ||
859 | |||
860 | flags = chan->orig_flags; | ||
788 | 861 | ||
789 | r = freq_reg_info(MHZ_TO_KHZ(chan->center_freq), | 862 | r = freq_reg_info(MHZ_TO_KHZ(chan->center_freq), |
790 | &max_bandwidth, ®_rule); | 863 | &max_bandwidth, ®_rule); |
791 | 864 | ||
792 | if (r) { | 865 | if (r) { |
793 | flags |= IEEE80211_CHAN_DISABLED; | 866 | /* This means no regulatory rule was found in the country IE |
794 | chan->flags = flags; | 867 | * with a frequency range on the center_freq's band. Since |
868 | * IEEE-802.11 allows a country IE to carry only a subset of a | ||
869 | * country's regulatory information, we do not disable the | ||
870 | * channel unless at least one reg rule was | ||
871 | * found on the center_freq's band. For details see this | ||
872 | * clarification: | ||
873 | * | ||
874 | * http://tinyurl.com/11d-clarification | ||
875 | */ | ||
876 | if (r == -ERANGE && | ||
877 | last_request->initiator == REGDOM_SET_BY_COUNTRY_IE) { | ||
878 | #ifdef CONFIG_CFG80211_REG_DEBUG | ||
879 | printk(KERN_DEBUG "cfg80211: Leaving channel %d MHz " | ||
880 | "intact on %s - no rule found in band on " | ||
881 | "Country IE\n", | ||
882 | chan->center_freq, wiphy_name(wiphy)); | ||
883 | #endif | ||
884 | } else { | ||
885 | /* In this case we know the country IE has at least one reg rule | ||
886 | * for the band so we respect its band definitions */ | ||
887 | #ifdef CONFIG_CFG80211_REG_DEBUG | ||
888 | if (last_request->initiator == REGDOM_SET_BY_COUNTRY_IE) | ||
889 | printk(KERN_DEBUG "cfg80211: Disabling " | ||
890 | "channel %d MHz on %s due to " | ||
891 | "Country IE\n", | ||
892 | chan->center_freq, wiphy_name(wiphy)); | ||
893 | #endif | ||
894 | flags |= IEEE80211_CHAN_DISABLED; | ||
895 | chan->flags = flags; | ||
896 | } | ||
795 | return; | 897 | return; |
796 | } | 898 | } |
797 | 899 | ||
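Taken together, freq_reg_info() now gives its callers a three-way contract: 0 when a usable rule was found, -ERANGE when no rule exists in the frequency's band at all, and any other non-zero value when rules exist for the band but none permit the frequency. A condensed, hypothetical restatement of how handle_channel() above consumes that contract:

r = freq_reg_info(MHZ_TO_KHZ(chan->center_freq), &max_bandwidth, &reg_rule);
if (r == -ERANGE &&
    last_request->initiator == REGDOM_SET_BY_COUNTRY_IE) {
	/* the country IE simply had no rule for this band:
	 * leave the channel as the driver configured it */
	return;
} else if (r) {
	/* rules exist for the band but none permit this frequency */
	chan->flags = chan->orig_flags | IEEE80211_CHAN_DISABLED;
	return;
}
/* r == 0: apply reg_rule's power rule and max_bandwidth to the channel */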
@@ -808,12 +910,16 @@ static void handle_channel(struct ieee80211_channel *chan) | |||
808 | chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); | 910 | chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); |
809 | } | 911 | } |
810 | 912 | ||
811 | static void handle_band(struct ieee80211_supported_band *sband) | 913 | static void handle_band(struct wiphy *wiphy, enum ieee80211_band band) |
812 | { | 914 | { |
813 | int i; | 915 | unsigned int i; |
916 | struct ieee80211_supported_band *sband; | ||
917 | |||
918 | BUG_ON(!wiphy->bands[band]); | ||
919 | sband = wiphy->bands[band]; | ||
814 | 920 | ||
815 | for (i = 0; i < sband->n_channels; i++) | 921 | for (i = 0; i < sband->n_channels; i++) |
816 | handle_channel(&sband->channels[i]); | 922 | handle_channel(wiphy, band, i); |
817 | } | 923 | } |
818 | 924 | ||
819 | static bool ignore_reg_update(struct wiphy *wiphy, enum reg_set_by setby) | 925 | static bool ignore_reg_update(struct wiphy *wiphy, enum reg_set_by setby) |
@@ -840,7 +946,7 @@ void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby) | |||
840 | enum ieee80211_band band; | 946 | enum ieee80211_band band; |
841 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | 947 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { |
842 | if (wiphy->bands[band]) | 948 | if (wiphy->bands[band]) |
843 | handle_band(wiphy->bands[band]); | 949 | handle_band(wiphy, band); |
844 | if (wiphy->reg_notifier) | 950 | if (wiphy->reg_notifier) |
845 | wiphy->reg_notifier(wiphy, setby); | 951 | wiphy->reg_notifier(wiphy, setby); |
846 | } | 952 | } |