author     Rafael J. Wysocki <rjw@sisk.pl>    2011-12-21 15:59:45 -0500
committer  Rafael J. Wysocki <rjw@sisk.pl>    2011-12-21 15:59:45 -0500
commit     b00f4dc5ff022cb9cbaffd376d9454d7fa1e496f (patch)
tree       40f1b232e2f1e8ac365317a14fdcbcb331722b46 /net
parent     1eac8111e0763853266a171ce11214da3a347a0a (diff)
parent     b9e26dfdad5a4f9cbdaacafac6998614cc9c41bc (diff)
Merge branch 'master' into pm-sleep
* master: (848 commits)
SELinux: Fix RCU deref check warning in sel_netport_insert()
binary_sysctl(): fix memory leak
mm/vmalloc.c: remove static declaration of va from __get_vm_area_node
ipmi_watchdog: restore settings when BMC reset
oom: fix integer overflow of points in oom_badness
memcg: keep root group unchanged if creation fails
nilfs2: potential integer overflow in nilfs_ioctl_clean_segments()
nilfs2: unbreak compat ioctl
cpusets: stall when updating mems_allowed for mempolicy or disjoint nodemask
evm: prevent racing during tfm allocation
evm: key must be set once during initialization
mmc: vub300: fix type of firmware_rom_wait_states module parameter
Revert "mmc: enable runtime PM by default"
mmc: sdhci: remove "state" argument from sdhci_suspend_host
x86, dumpstack: Fix code bytes breakage due to missing KERN_CONT
IB/qib: Correct sense on freectxts increment and decrement
RDMA/cma: Verify private data length
cgroups: fix a css_set not found bug in cgroup_attach_proc
oprofile: Fix uninitialized memory access when writing to writing to oprofilefs
Revert "xen/pv-on-hvm kexec: add xs_reset_watches to shutdown watches from old kernel"
...
Conflicts:
kernel/cgroup_freezer.c
Diffstat (limited to 'net')
60 files changed, 514 insertions, 324 deletions
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index c7aafc7c5ed4..5f09a578d49d 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -245,9 +245,11 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 	if (tt_global_entry) {
 		/* This node is probably going to update its tt table */
 		tt_global_entry->orig_node->tt_poss_change = true;
-		/* The global entry has to be marked as PENDING and has to be
+		/* The global entry has to be marked as ROAMING and has to be
 		 * kept for consistency purpose */
-		tt_global_entry->flags |= TT_CLIENT_PENDING;
+		tt_global_entry->flags |= TT_CLIENT_ROAM;
+		tt_global_entry->roam_at = jiffies;
+
 		send_roam_adv(bat_priv, tt_global_entry->addr,
 			      tt_global_entry->orig_node);
 	}
@@ -694,6 +696,7 @@ void tt_global_del(struct bat_priv *bat_priv,
 		   const char *message, bool roaming)
 {
 	struct tt_global_entry *tt_global_entry = NULL;
+	struct tt_local_entry *tt_local_entry = NULL;
 
 	tt_global_entry = tt_global_hash_find(bat_priv, addr);
 	if (!tt_global_entry)
@@ -701,15 +704,29 @@ void tt_global_del(struct bat_priv *bat_priv,
 
 	if (tt_global_entry->orig_node == orig_node) {
 		if (roaming) {
-			tt_global_entry->flags |= TT_CLIENT_ROAM;
-			tt_global_entry->roam_at = jiffies;
-			goto out;
+			/* if we are deleting a global entry due to a roam
+			 * event, there are two possibilities:
+			 * 1) the client roamed from node A to node B => we mark
+			 *    it with TT_CLIENT_ROAM, we start a timer and we
+			 *    wait for node B to claim it. In case of timeout
+			 *    the entry is purged.
+			 * 2) the client roamed to us => we can directly delete
+			 *    the global entry, since it is useless now. */
+			tt_local_entry = tt_local_hash_find(bat_priv,
+							    tt_global_entry->addr);
+			if (!tt_local_entry) {
+				tt_global_entry->flags |= TT_CLIENT_ROAM;
+				tt_global_entry->roam_at = jiffies;
+				goto out;
+			}
 		}
 		_tt_global_del(bat_priv, tt_global_entry, message);
 	}
 out:
 	if (tt_global_entry)
 		tt_global_entry_free_ref(tt_global_entry);
+	if (tt_local_entry)
+		tt_local_entry_free_ref(tt_local_entry);
 }
 
 void tt_global_del_orig(struct bat_priv *bat_priv,
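The tt_global_del() change above encodes a two-way decision on a roam event. A minimal standalone C sketch of just that decision, with invented names and plain booleans standing in for the kernel's flag and hash lookup:

    #include <stdbool.h>
    #include <stdio.h>

    struct global_entry { bool roaming; long roam_at; };

    /* Keep (and mark) the global entry only while no local entry exists yet;
     * once the client is known locally the global entry is simply deleted. */
    static void handle_roam(struct global_entry *ge, bool have_local_entry, long now)
    {
        if (!have_local_entry) {
            ge->roaming = true;   /* analogue of TT_CLIENT_ROAM */
            ge->roam_at = now;    /* lets a purge timer reclaim it later */
            return;
        }
        printf("client roamed to us: delete global entry\n");
    }

    int main(void)
    {
        struct global_entry ge = { false, 0 };

        handle_roam(&ge, false, 100);
        printf("roaming=%d roam_at=%ld\n", ge.roaming, ge.roam_at);
        handle_roam(&ge, true, 200);
        return 0;
    }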
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 91bcd3a961ec..1eea8208b2cc 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -79,17 +79,12 @@ static struct bnep_session *__bnep_get_session(u8 *dst)
 
 static void __bnep_link_session(struct bnep_session *s)
 {
-	/* It's safe to call __module_get() here because sessions are added
-	   by the socket layer which has to hold the reference to this module.
-	 */
-	__module_get(THIS_MODULE);
 	list_add(&s->list, &bnep_session_list);
 }
 
 static void __bnep_unlink_session(struct bnep_session *s)
 {
 	list_del(&s->list);
-	module_put(THIS_MODULE);
 }
 
 static int bnep_send(struct bnep_session *s, void *data, size_t len)
@@ -530,6 +525,7 @@ static int bnep_session(void *arg)
 
 	up_write(&bnep_session_sem);
 	free_netdev(dev);
+	module_put_and_exit(0);
 	return 0;
 }
 
@@ -616,9 +612,11 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
 
 	__bnep_link_session(s);
 
+	__module_get(THIS_MODULE);
 	s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
 	if (IS_ERR(s->task)) {
 		/* Session thread start failed, gotta cleanup. */
+		module_put(THIS_MODULE);
 		unregister_netdev(dev);
 		__bnep_unlink_session(s);
 		err = PTR_ERR(s->task);
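Both Bluetooth hunks (bnep here, cmtp below) move the module reference so that it is taken before the session kthread is started, dropped on the start-failure path, and otherwise released by the thread itself via module_put_and_exit(). A rough userspace analogue of that lifetime rule, using pthreads and an atomic counter as stand-ins (build with -pthread; names invented for the demo):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <time.h>

    static atomic_int refcount = 1;        /* stand-in for the module count */

    static void ref_get(void) { atomic_fetch_add(&refcount, 1); }
    static void ref_put(void) { atomic_fetch_sub(&refcount, 1); }

    static void *session_thread(void *arg)
    {
        (void)arg;                         /* ... run the session ... */
        ref_put();                         /* analogue of module_put_and_exit(0) */
        return NULL;
    }

    static int add_connection(void)
    {
        pthread_t task;

        ref_get();                         /* pin before starting the thread */
        if (pthread_create(&task, NULL, session_thread, NULL) != 0) {
            ref_put();                     /* start failed: undo the reference */
            return -1;
        }
        pthread_detach(task);
        return 0;
    }

    int main(void)
    {
        struct timespec ts = { 0, 100 * 1000 * 1000 };

        add_connection();
        nanosleep(&ts, NULL);              /* crude wait, good enough for a demo */
        printf("refcount now %d\n", atomic_load(&refcount));
        return 0;
    }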
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 7d00ddf9e9dc..5a6e634f7fca 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -67,14 +67,12 @@ static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr)
 
 static void __cmtp_link_session(struct cmtp_session *session)
 {
-	__module_get(THIS_MODULE);
 	list_add(&session->list, &cmtp_session_list);
 }
 
 static void __cmtp_unlink_session(struct cmtp_session *session)
 {
 	list_del(&session->list);
-	module_put(THIS_MODULE);
 }
 
 static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
@@ -327,6 +325,7 @@ static int cmtp_session(void *arg)
 	up_write(&cmtp_session_sem);
 
 	kfree(session);
+	module_put_and_exit(0);
 	return 0;
 }
 
@@ -376,9 +375,11 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
 
 	__cmtp_link_session(session);
 
+	__module_get(THIS_MODULE);
 	session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
 				    session->num);
 	if (IS_ERR(session->task)) {
+		module_put(THIS_MODULE);
 		err = PTR_ERR(session->task);
 		goto unlink;
 	}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index d7d96b6b1f0d..643a41b76e2e 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -545,7 +545,7 @@ static void hci_setup(struct hci_dev *hdev)
 {
 	hci_setup_event_mask(hdev);
 
-	if (hdev->lmp_ver > 1)
+	if (hdev->hci_ver > 1)
 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
 
 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e5f9ece3c9a0..a1daf8227ed1 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -18,6 +18,7 @@
 #include <net/sock.h>
 
 #include "br_private.h"
+#include "br_private_stp.h"
 
 static inline size_t br_nlmsg_size(void)
 {
@@ -188,6 +189,11 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 
 	p->state = new_state;
 	br_log_state(p);
+
+	spin_lock_bh(&p->br->lock);
+	br_port_state_selection(p->br);
+	spin_unlock_bh(&p->br->lock);
+
 	br_ifinfo_notify(RTM_NEWLINK, p);
 
 	return 0;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index ad0a3f7cf6cc..dd147d78a588 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -399,25 +399,24 @@ void br_port_state_selection(struct net_bridge *br)
 	struct net_bridge_port *p;
 	unsigned int liveports = 0;
 
-	/* Don't change port states if userspace is handling STP */
-	if (br->stp_enabled == BR_USER_STP)
-		return;
-
 	list_for_each_entry(p, &br->port_list, list) {
 		if (p->state == BR_STATE_DISABLED)
 			continue;
 
-		if (p->port_no == br->root_port) {
-			p->config_pending = 0;
-			p->topology_change_ack = 0;
-			br_make_forwarding(p);
-		} else if (br_is_designated_port(p)) {
-			del_timer(&p->message_age_timer);
-			br_make_forwarding(p);
-		} else {
-			p->config_pending = 0;
-			p->topology_change_ack = 0;
-			br_make_blocking(p);
+		/* Don't change port states if userspace is handling STP */
+		if (br->stp_enabled != BR_USER_STP) {
+			if (p->port_no == br->root_port) {
+				p->config_pending = 0;
+				p->topology_change_ack = 0;
+				br_make_forwarding(p);
+			} else if (br_is_designated_port(p)) {
+				del_timer(&p->message_age_timer);
+				br_make_forwarding(p);
+			} else {
+				p->config_pending = 0;
+				p->topology_change_ack = 0;
+				br_make_blocking(p);
+			}
 		}
 
 		if (p->state == BR_STATE_FORWARDING)
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index f39921171d0d..d3ca87bf23b7 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -136,20 +136,21 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
 
 static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
-	int tmp;
 	u16 chks;
 	u16 len;
+	__le16 data;
+
 	struct cffrml *this = container_obj(layr);
 	if (this->dofcs) {
 		chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
-		tmp = cpu_to_le16(chks);
-		cfpkt_add_trail(pkt, &tmp, 2);
+		data = cpu_to_le16(chks);
+		cfpkt_add_trail(pkt, &data, 2);
 	} else {
 		cfpkt_pad_trail(pkt, 2);
 	}
 	len = cfpkt_getlen(pkt);
-	tmp = cpu_to_le16(len);
-	cfpkt_add_head(pkt, &tmp, 2);
+	data = cpu_to_le16(len);
+	cfpkt_add_head(pkt, &data, 2);
 	cfpkt_info(pkt)->hdr_len += 2;
 	if (cfpkt_erroneous(pkt)) {
 		pr_err("Packet is erroneous!\n");
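The cffrml change replaces an int temporary with a __le16-sized one, so that exactly two little-endian bytes are appended regardless of host byte order. A small standalone demonstration of the difference (glibc's htole16() assumed; this is an illustration, not the CAIF code):

    #include <endian.h>    /* htole16() */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint16_t len = 0x1234;
        unsigned char trailer[2];

        /* A 16-bit object holds exactly the two bytes we want, already in
         * little-endian order.  Copying two bytes out of an int (the old
         * 'tmp') only happens to work on little-endian machines. */
        uint16_t data = htole16(len);
        memcpy(trailer, &data, sizeof(trailer));

        printf("trailer bytes: %02x %02x\n", trailer[0], trailer[1]);   /* 34 12 */
        return 0;
    }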
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 42599e31dcad..3a94eae7abe9 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -477,7 +477,6 @@ int crush_do_rule(struct crush_map *map,
 	int i, j;
 	int numrep;
 	int firstn;
-	int rc = -1;
 
 	BUG_ON(ruleno >= map->max_rules);
 
@@ -491,23 +490,18 @@
 	 * that this may or may not correspond to the specific types
 	 * referenced by the crush rule.
 	 */
-	if (force >= 0) {
-		if (force >= map->max_devices ||
-		    map->device_parents[force] == 0) {
-			/*dprintk("CRUSH: forcefed device dne\n");*/
-			rc = -1;  /* force fed device dne */
-			goto out;
-		}
-		if (!is_out(map, weight, force, x)) {
-			while (1) {
-				force_context[++force_pos] = force;
-				if (force >= 0)
-					force = map->device_parents[force];
-				else
-					force = map->bucket_parents[-1-force];
-				if (force == 0)
-					break;
-			}
+	if (force >= 0 &&
+	    force < map->max_devices &&
+	    map->device_parents[force] != 0 &&
+	    !is_out(map, weight, force, x)) {
+		while (1) {
+			force_context[++force_pos] = force;
+			if (force >= 0)
+				force = map->device_parents[force];
+			else
+				force = map->bucket_parents[-1-force];
+			if (force == 0)
+				break;
 		}
 	}
 
@@ -600,10 +594,7 @@
 			BUG_ON(1);
 		}
 	}
-	rc = result_len;
-
-out:
-	return rc;
+	return result_len;
 }
 
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ba50a1e404c..5a13edfc9f73 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1396,7 +1396,7 @@ rollback:
 	for_each_net(net) {
 		for_each_netdev(net, dev) {
 			if (dev == last)
-				break;
+				goto outroll;
 
 			if (dev->flags & IFF_UP) {
 				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
@@ -1407,6 +1407,7 @@ rollback:
 		}
 	}
 
+outroll:
 	raw_notifier_chain_unregister(&netdev_chain, nb);
 	goto unlock;
 }
@@ -4282,6 +4283,12 @@ static int dev_seq_open(struct inode *inode, struct file *file)
 			    sizeof(struct dev_iter_state));
 }
 
+int dev_seq_open_ops(struct inode *inode, struct file *file,
+		     const struct seq_operations *ops)
+{
+	return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+}
+
 static const struct file_operations dev_seq_fops = {
 	.owner	 = THIS_MODULE,
 	.open    = dev_seq_open,
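The rollback hunk swaps break for goto outroll because a plain break only leaves the inner for_each_netdev() loop; the outer for_each_net() loop would then keep notifying devices that never saw the original event. A tiny standalone illustration of that control flow (toy loop bounds only):

    #include <stdio.h>

    int main(void)
    {
        int stop_net = 1, stop_dev = 2;

        for (int net = 0; net < 3; net++) {
            for (int dev = 0; dev < 4; dev++) {
                if (net == stop_net && dev == stop_dev)
                    goto outroll;                 /* leave both loops at once */
                printf("rolling back net %d dev %d\n", net, dev);
            }
        }
    outroll:
        printf("unregister notifier\n");
        return 0;
    }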
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 277faef9148d..febba516db62 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -696,8 +696,7 @@ static const struct seq_operations dev_mc_seq_ops = {
 
 static int dev_mc_seq_open(struct inode *inode, struct file *file)
 {
-	return seq_open_net(inode, file, &dev_mc_seq_ops,
-			    sizeof(struct seq_net_private));
+	return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
 }
 
 static const struct file_operations dev_mc_seq_fops = {
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 039d51e6c284..5ac07d31fbc9 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2397,7 +2397,10 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
 	struct net *net = seq_file_net(seq);
 	struct neigh_table *tbl = state->tbl;
 
-	pn = pn->next;
+	do {
+		pn = pn->next;
+	} while (pn && !net_eq(pneigh_net(pn), net));
+
 	while (!pn) {
 		if (++state->bucket > PNEIGH_HASHMASK)
 			break;
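pneigh_get_next() now advances at least one step and then keeps skipping proxy entries that belong to a different network namespace. The same do/while shape on a plain singly-linked list, with invented types and net_eq() reduced to an integer compare:

    #include <stdio.h>

    struct pneigh { int netns_id; struct pneigh *next; };

    /* Advance once, then keep advancing while the entry is in another netns. */
    static struct pneigh *pneigh_next(struct pneigh *pn, int net)
    {
        do {
            pn = pn->next;
        } while (pn && pn->netns_id != net);
        return pn;
    }

    int main(void)
    {
        struct pneigh c = { 1, NULL }, b = { 2, &c }, a = { 1, &b };

        /* Iterating for netns 1 skips b, which belongs to netns 2. */
        for (struct pneigh *p = &a; p; p = pneigh_next(p, 1))
            printf("entry in netns %d\n", p->netns_id);
        return 0;
    }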
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 182236b2510a..9b570a6a33c5 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -26,10 +26,11 @@
  * but then some measure against one socket starving all other sockets
  * would be needed.
  *
- * It was 128 by default. Experiments with real servers show, that
+ * The minimum value of it is 128. Experiments with real servers show that
  * it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems. This value is adjusted to 128 for very small machines
- * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
+ * of problems.
+ * This value is adjusted to 128 for low memory machines,
+ * and it will increase in proportion to the memory of machine.
  * Note : Dont forget somaxconn that may limit backlog too.
  */
 int sysctl_max_syn_backlog = 256;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 025233de25f9..925991ae6f52 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -19,6 +19,7 @@ static int __init net_secret_init(void)
 }
 late_initcall(net_secret_init);
 
+#ifdef CONFIG_INET
 static u32 seq_scale(u32 seq)
 {
 	/*
@@ -33,6 +34,7 @@ static u32 seq_scale(u32 seq)
 	 */
 	return seq + (ktime_to_ns(ktime_get_real()) >> 6);
 }
+#endif
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 18a3cebb753d..3c30ee4a5710 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2230,7 +2230,7 @@ static int skb_prepare_for_shift(struct sk_buff *skb)
  * @shiftlen: shift up to this many bytes
  *
  * Attempts to shift up to shiftlen worth of bytes, which may be less than
- * the length of the skb, from tgt to skb. Returns number bytes shifted.
+ * the length of the skb, from skb to tgt. Returns number bytes shifted.
  * It's up to caller to free skb if everything was shifted.
  *
  * If @tgt runs out of frags, the whole operation is aborted.
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 90a919afbed7..3f4e5414c8e5 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -111,6 +111,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
 			       inet->inet_sport, inet->inet_dport, sk);
 	if (IS_ERR(rt)) {
+		err = PTR_ERR(rt);
 		rt = NULL;
 		goto failure;
 	}
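The one-line dccp fix captures the error code from the returned error pointer before discarding it, so the caller no longer sees a stale err value. A standalone sketch of that bug class, with tiny stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers (invented userspace imitations, not the kernel API):

    #include <errno.h>
    #include <stdio.h>

    static void *err_ptr(long err)      { return (void *)err; }
    static int   is_err(const void *p)  { return (unsigned long)p >= (unsigned long)-4095; }
    static long  ptr_err(const void *p) { return (long)p; }

    static void *route_newports(void)   { return err_ptr(-ENETUNREACH); }

    static int connect_route(void)
    {
        int err = 0;
        void *rt = route_newports();

        if (is_err(rt)) {
            err = ptr_err(rt);   /* the line the fix adds; without it err stays 0 */
            rt = NULL;
            goto failure;
        }
        return 0;
    failure:
        return err;
    }

    int main(void)
    {
        printf("connect_route() = %d\n", connect_route());   /* -101 on Linux */
        return 0;
    }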
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index a77d16158eb6..94f4ec036669 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -112,7 +112,7 @@ static unsigned long dn_rt_deadline;
 static int dn_dst_gc(struct dst_ops *ops);
 static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static unsigned int dn_dst_mtu(const struct dst_entry *dst);
 static void dn_dst_destroy(struct dst_entry *);
 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
 static void dn_dst_link_failure(struct sk_buff *);
@@ -135,7 +135,7 @@ static struct dst_ops dn_dst_ops = {
 	.gc =			dn_dst_gc,
 	.check =		dn_dst_check,
 	.default_advmss =	dn_dst_default_advmss,
-	.default_mtu =		dn_dst_default_mtu,
+	.mtu =			dn_dst_mtu,
 	.cow_metrics =		dst_cow_metrics_generic,
 	.destroy =		dn_dst_destroy,
 	.negative_advice =	dn_dst_negative_advice,
@@ -825,9 +825,11 @@ static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
 	return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
 }
 
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst)
+static unsigned int dn_dst_mtu(const struct dst_entry *dst)
 {
-	return dst->dev->mtu;
+	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+	return mtu ? : dst->dev->mtu;
 }
 
 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
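Both dn_dst_mtu() here and ipv4_mtu() further down follow the same shape: prefer an explicit RTAX_MTU metric and fall back to the device MTU only when the metric is unset (zero). The "?:" form used in the patch is the GNU shorthand for "a ? a : b"; a compact standalone version (hypothetical helper name):

    #include <stdio.h>

    static unsigned int route_mtu(unsigned int metric_mtu, unsigned int dev_mtu)
    {
        return metric_mtu ? : dev_mtu;     /* GCC/Clang extension: a ?: b */
    }

    int main(void)
    {
        printf("%u\n", route_mtu(0, 1500));     /* no metric set -> device MTU */
        printf("%u\n", route_mtu(1280, 1500));  /* explicit metric wins */
        return 0;
    }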
diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c
index 67f691bd4acf..d9c150cc59a9 100644
--- a/net/decnet/dn_timer.c
+++ b/net/decnet/dn_timer.c
@@ -36,16 +36,13 @@ static void dn_slow_timer(unsigned long arg);
 
 void dn_start_slow_timer(struct sock *sk)
 {
-	sk->sk_timer.expires	= jiffies + SLOW_INTERVAL;
-	sk->sk_timer.function	= dn_slow_timer;
-	sk->sk_timer.data	= (unsigned long)sk;
-
-	add_timer(&sk->sk_timer);
+	setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
+	sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
 }
 
 void dn_stop_slow_timer(struct sock *sk)
 {
-	del_timer(&sk->sk_timer);
+	sk_stop_timer(sk, &sk->sk_timer);
 }
 
 static void dn_slow_timer(unsigned long arg)
@@ -53,12 +50,10 @@ static void dn_slow_timer(unsigned long arg)
 	struct sock *sk = (struct sock *)arg;
 	struct dn_scp *scp = DN_SK(sk);
 
-	sock_hold(sk);
 	bh_lock_sock(sk);
 
 	if (sock_owned_by_user(sk)) {
-		sk->sk_timer.expires = jiffies + HZ / 10;
-		add_timer(&sk->sk_timer);
+		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
 		goto out;
 	}
 
@@ -100,9 +95,7 @@ static void dn_slow_timer(unsigned long arg)
 			scp->keepalive_fxn(sk);
 	}
 
-	sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
-
-	add_timer(&sk->sk_timer);
+	sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
 out:
 	bh_unlock_sock(sk);
 	sock_put(sk);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index c6b5092f29a1..65f01dc47565 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
 			     void __user *buffer,
 			     size_t *lenp, loff_t *ppos)
 {
+	int old_value = *(int *)ctl->data;
 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+	int new_value = *(int *)ctl->data;
 
 	if (write) {
 		struct ipv4_devconf *cnf = ctl->extra1;
@@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
 
 		if (cnf == net->ipv4.devconf_dflt)
 			devinet_copy_dflt_conf(net, i);
+		if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
+			if ((new_value == 0) && (old_value != 0))
+				rt_cache_flush(net, 0);
 	}
 
 	return ret;
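devinet_conf_proc() now snapshots the sysctl value before and after proc_dointvec() so that the route cache is flushed only on an actual non-zero-to-zero transition of accept_local. A small standalone model of that old/new comparison (invented names, just the transition logic):

    #include <stdio.h>

    static int flushes;

    static void conf_write(int *value, int new_value)
    {
        int old_value = *value;

        *value = new_value;                 /* what proc_dointvec() would do */

        if (new_value == 0 && old_value != 0) {
            flushes++;                      /* analogue of rt_cache_flush() */
            printf("flushing route cache\n");
        }
    }

    int main(void)
    {
        int accept_local = 1;

        conf_write(&accept_local, 1);   /* no transition, no flush */
        conf_write(&accept_local, 0);   /* 1 -> 0, flush */
        conf_write(&accept_local, 0);   /* already 0, no flush */
        printf("flushes = %d\n", flushes);
        return 0;
    }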
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index c7472eff2d51..b2ca095cb9da 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1716,7 +1716,8 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 	if (err) {
 		int j;
 
-		pmc->sfcount[sfmode]--;
+		if (!delta)
+			pmc->sfcount[sfmode]--;
 		for (j=0; j<i; j++)
 			(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
 	} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 68e8ac514383..ccee270a9b65 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -108,9 +108,6 @@ static int inet_csk_diag_fill(struct sock *sk,
 			       icsk->icsk_ca_ops->name);
 	}
 
-	if ((ext & (1 << (INET_DIAG_TOS - 1))) && (sk->sk_family != AF_INET6))
-		RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
-
 	r->idiag_family = sk->sk_family;
 	r->idiag_state = sk->sk_state;
 	r->idiag_timer = 0;
@@ -125,16 +122,23 @@ static int inet_csk_diag_fill(struct sock *sk,
 	r->id.idiag_src[0] = inet->inet_rcv_saddr;
 	r->id.idiag_dst[0] = inet->inet_daddr;
 
+	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
+	 * hence this needs to be included regardless of socket family.
+	 */
+	if (ext & (1 << (INET_DIAG_TOS - 1)))
+		RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
+
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
 	if (r->idiag_family == AF_INET6) {
 		const struct ipv6_pinfo *np = inet6_sk(sk);
 
+		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
+			RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
+
 		ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
 			       &np->rcv_saddr);
 		ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
 			       &np->daddr);
-		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
-			RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
 	}
 #endif
 
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 3b34d1c86270..29a07b6c7168 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
 
 	rt = skb_rtable(skb);
 
-	if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway)
+	if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
 		goto sr_failed;
 
 	if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 05d20cca9d66..1e60f7679075 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -568,12 +568,13 @@ void ip_forward_options(struct sk_buff *skb)
 		     ) {
 			if (srrptr + 3 > srrspace)
 				break;
-			if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0)
+			if (memcmp(&opt->nexthop, &optptr[srrptr-1], 4) == 0)
 				break;
 		}
 		if (srrptr + 3 <= srrspace) {
 			opt->is_changed = 1;
 			ip_rt_get_source(&optptr[srrptr-1], skb, rt);
+			ip_hdr(skb)->daddr = opt->nexthop;
 			optptr[2] = srrptr+4;
 		} else if (net_ratelimit())
 			printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
@@ -640,7 +641,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 	}
 	if (srrptr <= srrspace) {
 		opt->srr_is_hit = 1;
-		iph->daddr = nexthop;
+		opt->nexthop = nexthop;
 		opt->is_changed = 1;
 	}
 	return 0;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 065effd8349a..0b2e7329abda 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -285,6 +285,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
 	if (register_netdevice(dev) < 0)
 		goto failed_free;
 
+	strcpy(nt->parms.name, dev->name);
+
 	dev_hold(dev);
 	ipip_tunnel_link(ipn, nt);
 	return nt;
@@ -759,7 +761,6 @@ static int ipip_tunnel_init(struct net_device *dev)
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 
 	tunnel->dev = dev;
-	strcpy(tunnel->parms.name, dev->name);
 
 	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
 	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -825,6 +826,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
 static int __net_init ipip_init_net(struct net *net)
 {
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
+	struct ip_tunnel *t;
 	int err;
 
 	ipn->tunnels[0] = ipn->tunnels_wc;
@@ -848,6 +850,9 @@ static int __net_init ipip_init_net(struct net *net)
 	if ((err = register_netdev(ipn->fb_tunnel_dev)))
 		goto err_reg_dev;
 
+	t = netdev_priv(ipn->fb_tunnel_dev);
+
+	strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
 	return 0;
 
 err_reg_dev:
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 9899619ab9b8..4f47e064e262 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -64,7 +64,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
 	/* Change in oif may mean change in hh_len. */
 	hh_len = skb_dst(skb)->dev->hard_header_len;
 	if (skb_headroom(skb) < hh_len &&
-	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
+	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
+				0, GFP_ATOMIC))
 		return -1;
 
 	return 0;
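The headroom expansion is now rounded up with HH_DATA_ALIGN() (alignment to HH_DATA_MOD, 16 bytes) rather than grown by the bare difference, which keeps the reallocated skb head aligned for the hardware header. A generic power-of-two round-up of the same shape (ALIGN_UP is an invented stand-in):

    #include <stdio.h>

    /* Round x up to the next multiple of a, where a is a power of two. */
    #define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        for (unsigned int need = 1; need <= 33; need += 8)
            printf("need %2u -> reserve %2u\n", need, ALIGN_UP(need, 16));
        return 0;
    }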
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 1dfc18a03fd4..f19f2182894c 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -325,7 +325,6 @@ config IP_NF_TARGET_TTL
 # raw + specific targets
 config IP_NF_RAW
 	tristate 'raw table support (required for NOTRACK/TRACE)'
-	depends on NETFILTER_ADVANCED
 	help
 	  This option adds a `raw' table to iptables. This table is the very
 	  first in the netfilter framework and hooks in at the PREROUTING
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 0c74da8a0473..46af62363b8c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -112,7 +112,7 @@
 #include <net/secure_seq.h>
 
 #define RT_FL_TOS(oldflp4) \
-    ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
+	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
 
 #define IP_MAX_MTU	0xFFF0
 
@@ -131,6 +131,7 @@ static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
 static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly	= 256;
 static int rt_chain_length_max __read_mostly	= 20;
+static int redirect_genid;
 
 /*
  *	Interface to generic destination cache.
@@ -138,7 +139,7 @@ static int rt_chain_length_max __read_mostly = 20;
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
-static unsigned int	 ipv4_default_mtu(const struct dst_entry *dst);
+static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
 static void		 ipv4_dst_destroy(struct dst_entry *dst);
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 static void		 ipv4_link_failure(struct sk_buff *skb);
@@ -193,7 +194,7 @@ static struct dst_ops ipv4_dst_ops = {
 	.gc =			rt_garbage_collect,
 	.check =		ipv4_dst_check,
 	.default_advmss =	ipv4_default_advmss,
-	.default_mtu =		ipv4_default_mtu,
+	.mtu =			ipv4_mtu,
 	.cow_metrics =		ipv4_cow_metrics,
 	.destroy =		ipv4_dst_destroy,
 	.ifdown =		ipv4_dst_ifdown,
@@ -416,9 +417,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
 	else {
 		struct rtable *r = v;
 		struct neighbour *n;
-		int len;
+		int len, HHUptod;
 
+		rcu_read_lock();
 		n = dst_get_neighbour(&r->dst);
+		HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
+		rcu_read_unlock();
+
 		seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
 			      "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
 			r->dst.dev ? r->dst.dev->name : "*",
@@ -432,7 +437,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
 			      dst_metric(&r->dst, RTAX_RTTVAR)),
 			r->rt_key_tos,
 			-1,
-			(n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0,
+			HHUptod,
 			r->rt_spec_dst, &len);
 
 		seq_printf(seq, "%*s\n", 127 - len, "");
@@ -837,6 +842,7 @@ static void rt_cache_invalidate(struct net *net)
 
 	get_random_bytes(&shuffle, sizeof(shuffle));
 	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+	redirect_genid++;
 }
 
 /*
@@ -1304,7 +1310,7 @@ static void rt_del(unsigned hash, struct rtable *rt)
 	spin_unlock_bh(rt_hash_lock_addr(hash));
 }
 
-static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
 {
 	struct rtable *rt = (struct rtable *) dst;
 	__be32 orig_gw = rt->rt_gateway;
@@ -1315,21 +1321,19 @@ static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
 	rt->rt_gateway = peer->redirect_learned.a4;
 
 	n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
-	if (IS_ERR(n))
-		return PTR_ERR(n);
+	if (IS_ERR(n)) {
+		rt->rt_gateway = orig_gw;
+		return;
+	}
 	old_n = xchg(&rt->dst._neighbour, n);
 	if (old_n)
 		neigh_release(old_n);
-	if (!n || !(n->nud_state & NUD_VALID)) {
-		if (n)
-			neigh_event_send(n, NULL);
-		rt->rt_gateway = orig_gw;
-		return -EAGAIN;
+	if (!(n->nud_state & NUD_VALID)) {
+		neigh_event_send(n, NULL);
 	} else {
 		rt->rt_flags |= RTCF_REDIRECTED;
 		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
 	}
-	return 0;
 }
 
 /* called in rcu_read_lock() section */
@@ -1391,8 +1395,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 
 			peer = rt->peer;
 			if (peer) {
-				if (peer->redirect_learned.a4 != new_gw) {
+				if (peer->redirect_learned.a4 != new_gw ||
+				    peer->redirect_genid != redirect_genid) {
 					peer->redirect_learned.a4 = new_gw;
+					peer->redirect_genid = redirect_genid;
 					atomic_inc(&__rt_peer_genid);
 				}
 				check_peer_redir(&rt->dst, peer);
@@ -1685,12 +1691,8 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 }
 
 
-static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+static void ipv4_validate_peer(struct rtable *rt)
 {
-	struct rtable *rt = (struct rtable *) dst;
-
-	if (rt_is_expired(rt))
-		return NULL;
 	if (rt->rt_peer_genid != rt_peer_genid()) {
 		struct inet_peer *peer;
 
@@ -1699,17 +1701,26 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 
 		peer = rt->peer;
 		if (peer) {
-			check_peer_pmtu(dst, peer);
+			check_peer_pmtu(&rt->dst, peer);
 
+			if (peer->redirect_genid != redirect_genid)
+				peer->redirect_learned.a4 = 0;
 			if (peer->redirect_learned.a4 &&
-			    peer->redirect_learned.a4 != rt->rt_gateway) {
-				if (check_peer_redir(dst, peer))
-					return NULL;
-			}
+			    peer->redirect_learned.a4 != rt->rt_gateway)
+				check_peer_redir(&rt->dst, peer);
 		}
 
 		rt->rt_peer_genid = rt_peer_genid();
 	}
+}
+
+static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+{
+	struct rtable *rt = (struct rtable *) dst;
+
+	if (rt_is_expired(rt))
+		return NULL;
+	ipv4_validate_peer(rt);
 	return dst;
 }
 
@@ -1814,12 +1825,17 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
 	return advmss;
 }
 
-static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_mtu(const struct dst_entry *dst)
 {
-	unsigned int mtu = dst->dev->mtu;
+	const struct rtable *rt = (const struct rtable *) dst;
+	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+	if (mtu && rt_is_output_route(rt))
+		return mtu;
+
+	mtu = dst->dev->mtu;
 
 	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
-		const struct rtable *rt = (const struct rtable *) dst;
 
 		if (rt->rt_gateway != rt->rt_dst && mtu > 576)
 			mtu = 576;
@@ -1852,6 +1868,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
 		dst_init_metrics(&rt->dst, peer->metrics, false);
 
 		check_peer_pmtu(&rt->dst, peer);
+		if (peer->redirect_genid != redirect_genid)
+			peer->redirect_learned.a4 = 0;
 		if (peer->redirect_learned.a4 &&
 		    peer->redirect_learned.a4 != rt->rt_gateway) {
 			rt->rt_gateway = peer->redirect_learned.a4;
@@ -2357,6 +2375,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 		    rth->rt_mark == skb->mark &&
 		    net_eq(dev_net(rth->dst.dev), net) &&
 		    !rt_is_expired(rth)) {
+			ipv4_validate_peer(rth);
 			if (noref) {
 				dst_use_noref(&rth->dst, jiffies);
 				skb_dst_set_noref(skb, &rth->dst);
@@ -2415,11 +2434,11 @@ EXPORT_SYMBOL(ip_route_input_common);
 static struct rtable *__mkroute_output(const struct fib_result *res,
 				       const struct flowi4 *fl4,
 				       __be32 orig_daddr, __be32 orig_saddr,
-				       int orig_oif, struct net_device *dev_out,
+				       int orig_oif, __u8 orig_rtos,
+				       struct net_device *dev_out,
 				       unsigned int flags)
 {
 	struct fib_info *fi = res->fi;
-	u32 tos = RT_FL_TOS(fl4);
 	struct in_device *in_dev;
 	u16 type = res->type;
 	struct rtable *rth;
@@ -2470,7 +2489,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 	rth->rt_genid = rt_genid(dev_net(dev_out));
 	rth->rt_flags	= flags;
 	rth->rt_type	= type;
-	rth->rt_key_tos	= tos;
+	rth->rt_key_tos	= orig_rtos;
 	rth->rt_dst	= fl4->daddr;
 	rth->rt_src	= fl4->saddr;
 	rth->rt_route_iif = 0;
@@ -2520,7 +2539,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
 {
 	struct net_device *dev_out = NULL;
-	u32 tos	= RT_FL_TOS(fl4);
+	__u8 tos = RT_FL_TOS(fl4);
 	unsigned int flags = 0;
 	struct fib_result res;
 	struct rtable *rth;
@@ -2696,7 +2715,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
 
 make_route:
 	rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
-			       dev_out, flags);
+			       tos, dev_out, flags);
 	if (!IS_ERR(rth)) {
 		unsigned int hash;
 
@@ -2732,6 +2751,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
 			    (IPTOS_RT_MASK | RTO_ONLINK)) &&
 		    net_eq(dev_net(rth->dst.dev), net) &&
 		    !rt_is_expired(rth)) {
+			ipv4_validate_peer(rth);
 			dst_use(&rth->dst, jiffies);
 			RT_CACHE_STAT_INC(out_hit);
 			rcu_read_unlock_bh();
@@ -2755,9 +2775,11 @@ static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 coo
 	return NULL;
 }
 
-static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
 {
-	return 0;
+	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+	return mtu ? : dst->dev->mtu;
 }
 
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -2775,7 +2797,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
 	.protocol		=	cpu_to_be16(ETH_P_IP),
 	.destroy		=	ipv4_dst_destroy,
 	.check			=	ipv4_blackhole_dst_check,
-	.default_mtu		=	ipv4_blackhole_default_mtu,
+	.mtu			=	ipv4_blackhole_mtu,
 	.default_advmss		=	ipv4_default_advmss,
 	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
 	.cow_metrics		=	ipv4_rt_blackhole_cow_metrics,
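Among the route.c changes, RT_FL_TOS() is rewritten to parenthesize its parameter (and to drop the u32 cast now that the result feeds a __u8 tos). Parenthesization matters whenever a macro argument is itself an expression; a minimal standalone demonstration with an invented mask macro:

    #include <stdio.h>

    #define MASK_BAD(x)   (x & 0x1E)       /* unparenthesized parameter */
    #define MASK_GOOD(x)  ((x) & 0x1E)

    int main(void)
    {
        int a = 0x20, b = 0x03;

        /* MASK_BAD(a | b) expands to (a | b & 0x1E); '&' binds tighter than '|',
         * so the high bit escapes the mask. */
        printf("bad  = 0x%02x\n", MASK_BAD(a | b));    /* 0x22 */
        printf("good = 0x%02x\n", MASK_GOOD(a | b));   /* 0x02 */
        return 0;
    }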
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index ab0966df1e2a..5a65eeac1d29 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1164,7 +1164,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1164 | struct inet_sock *inet = inet_sk(sk); | 1164 | struct inet_sock *inet = inet_sk(sk); |
1165 | struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; | 1165 | struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; |
1166 | struct sk_buff *skb; | 1166 | struct sk_buff *skb; |
1167 | unsigned int ulen; | 1167 | unsigned int ulen, copied; |
1168 | int peeked; | 1168 | int peeked; |
1169 | int err; | 1169 | int err; |
1170 | int is_udplite = IS_UDPLITE(sk); | 1170 | int is_udplite = IS_UDPLITE(sk); |
@@ -1186,9 +1186,10 @@ try_again: | |||
1186 | goto out; | 1186 | goto out; |
1187 | 1187 | ||
1188 | ulen = skb->len - sizeof(struct udphdr); | 1188 | ulen = skb->len - sizeof(struct udphdr); |
1189 | if (len > ulen) | 1189 | copied = len; |
1190 | len = ulen; | 1190 | if (copied > ulen) |
1191 | else if (len < ulen) | 1191 | copied = ulen; |
1192 | else if (copied < ulen) | ||
1192 | msg->msg_flags |= MSG_TRUNC; | 1193 | msg->msg_flags |= MSG_TRUNC; |
1193 | 1194 | ||
1194 | /* | 1195 | /* |
@@ -1197,14 +1198,14 @@ try_again: | |||
1197 | * coverage checksum (UDP-Lite), do it before the copy. | 1198 | * coverage checksum (UDP-Lite), do it before the copy. |
1198 | */ | 1199 | */ |
1199 | 1200 | ||
1200 | if (len < ulen || UDP_SKB_CB(skb)->partial_cov) { | 1201 | if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { |
1201 | if (udp_lib_checksum_complete(skb)) | 1202 | if (udp_lib_checksum_complete(skb)) |
1202 | goto csum_copy_err; | 1203 | goto csum_copy_err; |
1203 | } | 1204 | } |
1204 | 1205 | ||
1205 | if (skb_csum_unnecessary(skb)) | 1206 | if (skb_csum_unnecessary(skb)) |
1206 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), | 1207 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), |
1207 | msg->msg_iov, len); | 1208 | msg->msg_iov, copied); |
1208 | else { | 1209 | else { |
1209 | err = skb_copy_and_csum_datagram_iovec(skb, | 1210 | err = skb_copy_and_csum_datagram_iovec(skb, |
1210 | sizeof(struct udphdr), | 1211 | sizeof(struct udphdr), |
@@ -1233,7 +1234,7 @@ try_again: | |||
1233 | if (inet->cmsg_flags) | 1234 | if (inet->cmsg_flags) |
1234 | ip_cmsg_recv(msg, skb); | 1235 | ip_cmsg_recv(msg, skb); |
1235 | 1236 | ||
1236 | err = len; | 1237 | err = copied; |
1237 | if (flags & MSG_TRUNC) | 1238 | if (flags & MSG_TRUNC) |
1238 | err = ulen; | 1239 | err = ulen; |
1239 | 1240 | ||
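
The udp_recvmsg() change keeps the caller's len intact and tracks the possibly truncated copy length in a separate copied, so the value returned to userspace matches what was copied while MSG_TRUNC still reports that the datagram was larger. The same contract is visible from userspace with recvmsg(); a minimal receiver sketch (port and address are invented, not from the patch):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in addr = { .sin_family = AF_INET,
                                    .sin_port = htons(9999),
                                    .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
        char buf[16];                       /* deliberately small buffer */
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
        ssize_t n;

        if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                return 1;

        /* Blocks until a datagram arrives on 127.0.0.1:9999. */
        n = recvmsg(fd, &msg, 0);
        if (n < 0)
                return 1;

        /* n is what was copied; MSG_TRUNC tells us the datagram was longer. */
        printf("copied %zd bytes%s\n", n,
               (msg.msg_flags & MSG_TRUNC) ? " (datagram truncated)" : "");

        close(fd);
        return 0;
}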
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index cf88df82e2c2..36806def8cfd 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1805,7 +1805,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev) | |||
1805 | return ERR_PTR(-EACCES); | 1805 | return ERR_PTR(-EACCES); |
1806 | 1806 | ||
1807 | /* Add default multicast route */ | 1807 | /* Add default multicast route */ |
1808 | addrconf_add_mroute(dev); | 1808 | if (!(dev->flags & IFF_LOOPBACK)) |
1809 | addrconf_add_mroute(dev); | ||
1809 | 1810 | ||
1810 | /* Add link local route */ | 1811 | /* Add link local route */ |
1811 | addrconf_add_lroute(dev); | 1812 | addrconf_add_lroute(dev); |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index fee46d5a2f12..1567fb120392 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -85,7 +85,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk, | |||
85 | * request_sock (formerly open request) hash tables. | 85 | * request_sock (formerly open request) hash tables. |
86 | */ | 86 | */ |
87 | static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport, | 87 | static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport, |
88 | const u32 rnd, const u16 synq_hsize) | 88 | const u32 rnd, const u32 synq_hsize) |
89 | { | 89 | { |
90 | u32 c; | 90 | u32 c; |
91 | 91 | ||
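
Widening synq_hsize from u16 to u32 matters because the caller can pass a table size larger than 65535; through a u16 parameter it would be silently truncated before the hashing step. A trivial demonstration of that truncation, with a made-up bucket count:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t nbuckets = 131072;       /* e.g. a large syn-queue hash */
        uint16_t truncated = nbuckets;    /* what a u16 parameter would receive */

        /* 131072 does not fit in 16 bits: the callee would see 0 buckets,
         * and any "hash % nbuckets" step becomes nonsense (division by zero). */
        printf("declared buckets: %u, seen through u16: %u\n",
               nbuckets, truncated);
        return 0;
}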
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index c99e3ee9781f..26cb08c84b74 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -503,7 +503,7 @@ done: | |||
503 | goto e_inval; | 503 | goto e_inval; |
504 | if (val > 255 || val < -1) | 504 | if (val > 255 || val < -1) |
505 | goto e_inval; | 505 | goto e_inval; |
506 | np->mcast_hops = val; | 506 | np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val); |
507 | retv = 0; | 507 | retv = 0; |
508 | break; | 508 | break; |
509 | 509 | ||
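
After this change a value of -1 for IPV6_MULTICAST_HOPS is stored as the protocol default rather than kept verbatim. A compact sketch of the validation plus mapping (IPV6_DEFAULT_MCASTHOPS is assumed to be 1, the kernel's default multicast hop limit):

#include <stdio.h>

#define IPV6_DEFAULT_MCASTHOPS 1   /* assumed default multicast hop limit */

/* Returns the hop limit to store, or -1 on invalid input; a sketch of the
 * setsockopt(IPV6_MULTICAST_HOPS) handling shown in the hunk above. */
static int mcast_hops_store(int val)
{
        if (val > 255 || val < -1)
                return -1;                          /* e_inval */
        return val == -1 ? IPV6_DEFAULT_MCASTHOPS : val;
}

int main(void)
{
        printf("-1  -> %d\n", mcast_hops_store(-1));   /* default */
        printf("64  -> %d\n", mcast_hops_store(64));   /* stored as-is */
        printf("300 -> %d\n", mcast_hops_store(300));  /* rejected */
        return 0;
}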
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 44e5b7f2a6c1..0cb78d7ddaf5 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -1571,7 +1571,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, | |||
1571 | } | 1571 | } |
1572 | if (!rt->rt6i_peer) | 1572 | if (!rt->rt6i_peer) |
1573 | rt6_bind_peer(rt, 1); | 1573 | rt6_bind_peer(rt, 1); |
1574 | if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ)) | 1574 | if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ)) |
1575 | goto release; | 1575 | goto release; |
1576 | 1576 | ||
1577 | if (dev->addr_len) { | 1577 | if (dev->addr_len) { |
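
The ndisc_send_redirect() fix is a polarity bug: inet_peer_xrlim_allow() returns true when sending is allowed, so the early exit has to fire on the negated result. A toy rate limiter with the same "allow" polarity (interval and timing invented):

#include <stdio.h>
#include <time.h>

/* Returns 1 if enough time has passed since the last send (allowed), 0 otherwise. */
static int xrlim_allow(time_t *last, time_t min_interval)
{
        time_t now = time(NULL);

        if (now - *last < min_interval)
                return 0;
        *last = now;
        return 1;
}

int main(void)
{
        time_t last = 0;
        int i;

        for (i = 0; i < 3; i++) {
                /* Correct polarity: skip the send when NOT allowed. */
                if (!xrlim_allow(&last, 1)) {
                        printf("send %d: rate limited\n", i);
                        continue;
                }
                printf("send %d: sent\n", i);
        }
        return 0;
}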
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 448464844a25..f792b34cbe9c 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig | |||
@@ -186,7 +186,6 @@ config IP6_NF_MANGLE | |||
186 | 186 | ||
187 | config IP6_NF_RAW | 187 | config IP6_NF_RAW |
188 | tristate 'raw table support (required for TRACE)' | 188 | tristate 'raw table support (required for TRACE)' |
189 | depends on NETFILTER_ADVANCED | ||
190 | help | 189 | help |
191 | This option adds a `raw' table to ip6tables. This table is the very | 190 | This option adds a `raw' table to ip6tables. This table is the very |
192 | first in the netfilter framework and hooks in at the PREROUTING | 191 | first in the netfilter framework and hooks in at the PREROUTING |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 8473016bba4a..b582a0a0f1c5 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -77,7 +77,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, | |||
77 | const struct in6_addr *dest); | 77 | const struct in6_addr *dest); |
78 | static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); | 78 | static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); |
79 | static unsigned int ip6_default_advmss(const struct dst_entry *dst); | 79 | static unsigned int ip6_default_advmss(const struct dst_entry *dst); |
80 | static unsigned int ip6_default_mtu(const struct dst_entry *dst); | 80 | static unsigned int ip6_mtu(const struct dst_entry *dst); |
81 | static struct dst_entry *ip6_negative_advice(struct dst_entry *); | 81 | static struct dst_entry *ip6_negative_advice(struct dst_entry *); |
82 | static void ip6_dst_destroy(struct dst_entry *); | 82 | static void ip6_dst_destroy(struct dst_entry *); |
83 | static void ip6_dst_ifdown(struct dst_entry *, | 83 | static void ip6_dst_ifdown(struct dst_entry *, |
@@ -144,7 +144,7 @@ static struct dst_ops ip6_dst_ops_template = { | |||
144 | .gc_thresh = 1024, | 144 | .gc_thresh = 1024, |
145 | .check = ip6_dst_check, | 145 | .check = ip6_dst_check, |
146 | .default_advmss = ip6_default_advmss, | 146 | .default_advmss = ip6_default_advmss, |
147 | .default_mtu = ip6_default_mtu, | 147 | .mtu = ip6_mtu, |
148 | .cow_metrics = ipv6_cow_metrics, | 148 | .cow_metrics = ipv6_cow_metrics, |
149 | .destroy = ip6_dst_destroy, | 149 | .destroy = ip6_dst_destroy, |
150 | .ifdown = ip6_dst_ifdown, | 150 | .ifdown = ip6_dst_ifdown, |
@@ -155,9 +155,11 @@ static struct dst_ops ip6_dst_ops_template = { | |||
155 | .neigh_lookup = ip6_neigh_lookup, | 155 | .neigh_lookup = ip6_neigh_lookup, |
156 | }; | 156 | }; |
157 | 157 | ||
158 | static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst) | 158 | static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst) |
159 | { | 159 | { |
160 | return 0; | 160 | unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); |
161 | |||
162 | return mtu ? : dst->dev->mtu; | ||
161 | } | 163 | } |
162 | 164 | ||
163 | static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) | 165 | static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) |
@@ -175,7 +177,7 @@ static struct dst_ops ip6_dst_blackhole_ops = { | |||
175 | .protocol = cpu_to_be16(ETH_P_IPV6), | 177 | .protocol = cpu_to_be16(ETH_P_IPV6), |
176 | .destroy = ip6_dst_destroy, | 178 | .destroy = ip6_dst_destroy, |
177 | .check = ip6_dst_check, | 179 | .check = ip6_dst_check, |
178 | .default_mtu = ip6_blackhole_default_mtu, | 180 | .mtu = ip6_blackhole_mtu, |
179 | .default_advmss = ip6_default_advmss, | 181 | .default_advmss = ip6_default_advmss, |
180 | .update_pmtu = ip6_rt_blackhole_update_pmtu, | 182 | .update_pmtu = ip6_rt_blackhole_update_pmtu, |
181 | .cow_metrics = ip6_rt_blackhole_cow_metrics, | 183 | .cow_metrics = ip6_rt_blackhole_cow_metrics, |
@@ -726,7 +728,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort, | |||
726 | int attempts = !in_softirq(); | 728 | int attempts = !in_softirq(); |
727 | 729 | ||
728 | if (!(rt->rt6i_flags&RTF_GATEWAY)) { | 730 | if (!(rt->rt6i_flags&RTF_GATEWAY)) { |
729 | if (rt->rt6i_dst.plen != 128 && | 731 | if (ort->rt6i_dst.plen != 128 && |
730 | ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) | 732 | ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) |
731 | rt->rt6i_flags |= RTF_ANYCAST; | 733 | rt->rt6i_flags |= RTF_ANYCAST; |
732 | ipv6_addr_copy(&rt->rt6i_gateway, daddr); | 734 | ipv6_addr_copy(&rt->rt6i_gateway, daddr); |
@@ -1041,10 +1043,15 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst) | |||
1041 | return mtu; | 1043 | return mtu; |
1042 | } | 1044 | } |
1043 | 1045 | ||
1044 | static unsigned int ip6_default_mtu(const struct dst_entry *dst) | 1046 | static unsigned int ip6_mtu(const struct dst_entry *dst) |
1045 | { | 1047 | { |
1046 | unsigned int mtu = IPV6_MIN_MTU; | ||
1047 | struct inet6_dev *idev; | 1048 | struct inet6_dev *idev; |
1049 | unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); | ||
1050 | |||
1051 | if (mtu) | ||
1052 | return mtu; | ||
1053 | |||
1054 | mtu = IPV6_MIN_MTU; | ||
1048 | 1055 | ||
1049 | rcu_read_lock(); | 1056 | rcu_read_lock(); |
1050 | idev = __in6_dev_get(dst->dev); | 1057 | idev = __in6_dev_get(dst->dev); |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index a7a18602a046..96f3623618e3 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -263,6 +263,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, | |||
263 | if (register_netdevice(dev) < 0) | 263 | if (register_netdevice(dev) < 0) |
264 | goto failed_free; | 264 | goto failed_free; |
265 | 265 | ||
266 | strcpy(nt->parms.name, dev->name); | ||
267 | |||
266 | dev_hold(dev); | 268 | dev_hold(dev); |
267 | 269 | ||
268 | ipip6_tunnel_link(sitn, nt); | 270 | ipip6_tunnel_link(sitn, nt); |
@@ -1144,7 +1146,6 @@ static int ipip6_tunnel_init(struct net_device *dev) | |||
1144 | struct ip_tunnel *tunnel = netdev_priv(dev); | 1146 | struct ip_tunnel *tunnel = netdev_priv(dev); |
1145 | 1147 | ||
1146 | tunnel->dev = dev; | 1148 | tunnel->dev = dev; |
1147 | strcpy(tunnel->parms.name, dev->name); | ||
1148 | 1149 | ||
1149 | memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); | 1150 | memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); |
1150 | memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); | 1151 | memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); |
@@ -1207,6 +1208,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea | |||
1207 | static int __net_init sit_init_net(struct net *net) | 1208 | static int __net_init sit_init_net(struct net *net) |
1208 | { | 1209 | { |
1209 | struct sit_net *sitn = net_generic(net, sit_net_id); | 1210 | struct sit_net *sitn = net_generic(net, sit_net_id); |
1211 | struct ip_tunnel *t; | ||
1210 | int err; | 1212 | int err; |
1211 | 1213 | ||
1212 | sitn->tunnels[0] = sitn->tunnels_wc; | 1214 | sitn->tunnels[0] = sitn->tunnels_wc; |
@@ -1231,6 +1233,9 @@ static int __net_init sit_init_net(struct net *net) | |||
1231 | if ((err = register_netdev(sitn->fb_tunnel_dev))) | 1233 | if ((err = register_netdev(sitn->fb_tunnel_dev))) |
1232 | goto err_reg_dev; | 1234 | goto err_reg_dev; |
1233 | 1235 | ||
1236 | t = netdev_priv(sitn->fb_tunnel_dev); | ||
1237 | |||
1238 | strcpy(t->parms.name, sitn->fb_tunnel_dev->name); | ||
1234 | return 0; | 1239 | return 0; |
1235 | 1240 | ||
1236 | err_reg_dev: | 1241 | err_reg_dev: |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 36131d122a6f..2dea4bb7b54a 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -1255,6 +1255,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1255 | if (!want_cookie || tmp_opt.tstamp_ok) | 1255 | if (!want_cookie || tmp_opt.tstamp_ok) |
1256 | TCP_ECN_create_request(req, tcp_hdr(skb)); | 1256 | TCP_ECN_create_request(req, tcp_hdr(skb)); |
1257 | 1257 | ||
1258 | treq->iif = sk->sk_bound_dev_if; | ||
1259 | |||
1260 | /* So that link locals have meaning */ | ||
1261 | if (!sk->sk_bound_dev_if && | ||
1262 | ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) | ||
1263 | treq->iif = inet6_iif(skb); | ||
1264 | |||
1258 | if (!isn) { | 1265 | if (!isn) { |
1259 | struct inet_peer *peer = NULL; | 1266 | struct inet_peer *peer = NULL; |
1260 | 1267 | ||
@@ -1264,12 +1271,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1264 | atomic_inc(&skb->users); | 1271 | atomic_inc(&skb->users); |
1265 | treq->pktopts = skb; | 1272 | treq->pktopts = skb; |
1266 | } | 1273 | } |
1267 | treq->iif = sk->sk_bound_dev_if; | ||
1268 | |||
1269 | /* So that link locals have meaning */ | ||
1270 | if (!sk->sk_bound_dev_if && | ||
1271 | ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) | ||
1272 | treq->iif = inet6_iif(skb); | ||
1273 | 1274 | ||
1274 | if (want_cookie) { | 1275 | if (want_cookie) { |
1275 | isn = cookie_v6_init_sequence(sk, skb, &req->mss); | 1276 | isn = cookie_v6_init_sequence(sk, skb, &req->mss); |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 846f4757eb8d..8c2541915183 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -340,7 +340,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
340 | struct ipv6_pinfo *np = inet6_sk(sk); | 340 | struct ipv6_pinfo *np = inet6_sk(sk); |
341 | struct inet_sock *inet = inet_sk(sk); | 341 | struct inet_sock *inet = inet_sk(sk); |
342 | struct sk_buff *skb; | 342 | struct sk_buff *skb; |
343 | unsigned int ulen; | 343 | unsigned int ulen, copied; |
344 | int peeked; | 344 | int peeked; |
345 | int err; | 345 | int err; |
346 | int is_udplite = IS_UDPLITE(sk); | 346 | int is_udplite = IS_UDPLITE(sk); |
@@ -363,9 +363,10 @@ try_again: | |||
363 | goto out; | 363 | goto out; |
364 | 364 | ||
365 | ulen = skb->len - sizeof(struct udphdr); | 365 | ulen = skb->len - sizeof(struct udphdr); |
366 | if (len > ulen) | 366 | copied = len; |
367 | len = ulen; | 367 | if (copied > ulen) |
368 | else if (len < ulen) | 368 | copied = ulen; |
369 | else if (copied < ulen) | ||
369 | msg->msg_flags |= MSG_TRUNC; | 370 | msg->msg_flags |= MSG_TRUNC; |
370 | 371 | ||
371 | is_udp4 = (skb->protocol == htons(ETH_P_IP)); | 372 | is_udp4 = (skb->protocol == htons(ETH_P_IP)); |
@@ -376,14 +377,14 @@ try_again: | |||
376 | * coverage checksum (UDP-Lite), do it before the copy. | 377 | * coverage checksum (UDP-Lite), do it before the copy. |
377 | */ | 378 | */ |
378 | 379 | ||
379 | if (len < ulen || UDP_SKB_CB(skb)->partial_cov) { | 380 | if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { |
380 | if (udp_lib_checksum_complete(skb)) | 381 | if (udp_lib_checksum_complete(skb)) |
381 | goto csum_copy_err; | 382 | goto csum_copy_err; |
382 | } | 383 | } |
383 | 384 | ||
384 | if (skb_csum_unnecessary(skb)) | 385 | if (skb_csum_unnecessary(skb)) |
385 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), | 386 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), |
386 | msg->msg_iov,len); | 387 | msg->msg_iov, copied ); |
387 | else { | 388 | else { |
388 | err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); | 389 | err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); |
389 | if (err == -EINVAL) | 390 | if (err == -EINVAL) |
@@ -432,7 +433,7 @@ try_again: | |||
432 | datagram_recv_ctl(sk, msg, skb); | 433 | datagram_recv_ctl(sk, msg, skb); |
433 | } | 434 | } |
434 | 435 | ||
435 | err = len; | 436 | err = copied; |
436 | if (flags & MSG_TRUNC) | 437 | if (flags & MSG_TRUNC) |
437 | err = ulen; | 438 | err = ulen; |
438 | 439 | ||
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index cf0f308abf5e..89ff8c67943e 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -1072,7 +1072,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len | |||
1072 | 1072 | ||
1073 | /* Get routing info from the tunnel socket */ | 1073 | /* Get routing info from the tunnel socket */ |
1074 | skb_dst_drop(skb); | 1074 | skb_dst_drop(skb); |
1075 | skb_dst_set(skb, dst_clone(__sk_dst_get(sk))); | 1075 | skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0))); |
1076 | 1076 | ||
1077 | inet = inet_sk(sk); | 1077 | inet = inet_sk(sk); |
1078 | fl = &inet->cork.fl; | 1078 | fl = &inet->cork.fl; |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index b3f65520e7a7..2e4b961648d4 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -161,6 +161,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
161 | return -ENOENT; | 161 | return -ENOENT; |
162 | } | 162 | } |
163 | 163 | ||
164 | /* if we're already stopping ignore any new requests to stop */ | ||
165 | if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { | ||
166 | spin_unlock_bh(&sta->lock); | ||
167 | return -EALREADY; | ||
168 | } | ||
169 | |||
164 | if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { | 170 | if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { |
165 | /* not even started yet! */ | 171 | /* not even started yet! */ |
166 | ieee80211_assign_tid_tx(sta, tid, NULL); | 172 | ieee80211_assign_tid_tx(sta, tid, NULL); |
@@ -169,6 +175,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
169 | return 0; | 175 | return 0; |
170 | } | 176 | } |
171 | 177 | ||
178 | set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); | ||
179 | |||
172 | spin_unlock_bh(&sta->lock); | 180 | spin_unlock_bh(&sta->lock); |
173 | 181 | ||
174 | #ifdef CONFIG_MAC80211_HT_DEBUG | 182 | #ifdef CONFIG_MAC80211_HT_DEBUG |
@@ -176,8 +184,6 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
176 | sta->sta.addr, tid); | 184 | sta->sta.addr, tid); |
177 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 185 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
178 | 186 | ||
179 | set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); | ||
180 | |||
181 | del_timer_sync(&tid_tx->addba_resp_timer); | 187 | del_timer_sync(&tid_tx->addba_resp_timer); |
182 | 188 | ||
183 | /* | 189 | /* |
@@ -187,6 +193,20 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
187 | */ | 193 | */ |
188 | clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); | 194 | clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); |
189 | 195 | ||
196 | /* | ||
197 | * There might be a few packets being processed right now (on | ||
198 | * another CPU) that have already gotten past the aggregation | ||
199 | * check when it was still OPERATIONAL and consequently have | ||
200 | * IEEE80211_TX_CTL_AMPDU set. In that case, this code might | ||
201 | * call into the driver at the same time or even before the | ||
202 | * TX paths calls into it, which could confuse the driver. | ||
203 | * | ||
204 | * Wait for all currently running TX paths to finish before | ||
205 | * telling the driver. New packets will not go through since | ||
206 | * the aggregation session is no longer OPERATIONAL. | ||
207 | */ | ||
208 | synchronize_net(); | ||
209 | |||
190 | tid_tx->stop_initiator = initiator; | 210 | tid_tx->stop_initiator = initiator; |
191 | tid_tx->tx_stop = tx; | 211 | tid_tx->tx_stop = tx; |
192 | 212 | ||
@@ -283,6 +303,38 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid) | |||
283 | __release(agg_queue); | 303 | __release(agg_queue); |
284 | } | 304 | } |
285 | 305 | ||
306 | /* | ||
307 | * splice packets from the STA's pending to the local pending, | ||
308 | * requires a call to ieee80211_agg_splice_finish later | ||
309 | */ | ||
310 | static void __acquires(agg_queue) | ||
311 | ieee80211_agg_splice_packets(struct ieee80211_local *local, | ||
312 | struct tid_ampdu_tx *tid_tx, u16 tid) | ||
313 | { | ||
314 | int queue = ieee80211_ac_from_tid(tid); | ||
315 | unsigned long flags; | ||
316 | |||
317 | ieee80211_stop_queue_agg(local, tid); | ||
318 | |||
319 | if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates" | ||
320 | " from the pending queue\n", tid)) | ||
321 | return; | ||
322 | |||
323 | if (!skb_queue_empty(&tid_tx->pending)) { | ||
324 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | ||
325 | /* copy over remaining packets */ | ||
326 | skb_queue_splice_tail_init(&tid_tx->pending, | ||
327 | &local->pending[queue]); | ||
328 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | ||
329 | } | ||
330 | } | ||
331 | |||
332 | static void __releases(agg_queue) | ||
333 | ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid) | ||
334 | { | ||
335 | ieee80211_wake_queue_agg(local, tid); | ||
336 | } | ||
337 | |||
286 | void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) | 338 | void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) |
287 | { | 339 | { |
288 | struct tid_ampdu_tx *tid_tx; | 340 | struct tid_ampdu_tx *tid_tx; |
@@ -294,19 +346,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) | |||
294 | tid_tx = rcu_dereference_protected_tid_tx(sta, tid); | 346 | tid_tx = rcu_dereference_protected_tid_tx(sta, tid); |
295 | 347 | ||
296 | /* | 348 | /* |
297 | * While we're asking the driver about the aggregation, | 349 | * Start queuing up packets for this aggregation session. |
298 | * stop the AC queue so that we don't have to worry | 350 | * We're going to release them once the driver is OK with |
299 | * about frames that came in while we were doing that, | 351 | * that. |
300 | * which would require us to put them to the AC pending | ||
301 | * afterwards which just makes the code more complex. | ||
302 | */ | 352 | */ |
303 | ieee80211_stop_queue_agg(local, tid); | ||
304 | |||
305 | clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); | 353 | clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); |
306 | 354 | ||
307 | /* | 355 | /* |
308 | * make sure no packets are being processed to get | 356 | * Make sure no packets are being processed. This ensures that |
309 | * valid starting sequence number | 357 | * we have a valid starting sequence number and that in-flight |
358 | * packets have been flushed out and no packets for this TID | ||
359 | * will go into the driver during the ampdu_action call. | ||
310 | */ | 360 | */ |
311 | synchronize_net(); | 361 | synchronize_net(); |
312 | 362 | ||
@@ -320,17 +370,15 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) | |||
320 | " tid %d\n", tid); | 370 | " tid %d\n", tid); |
321 | #endif | 371 | #endif |
322 | spin_lock_bh(&sta->lock); | 372 | spin_lock_bh(&sta->lock); |
373 | ieee80211_agg_splice_packets(local, tid_tx, tid); | ||
323 | ieee80211_assign_tid_tx(sta, tid, NULL); | 374 | ieee80211_assign_tid_tx(sta, tid, NULL); |
375 | ieee80211_agg_splice_finish(local, tid); | ||
324 | spin_unlock_bh(&sta->lock); | 376 | spin_unlock_bh(&sta->lock); |
325 | 377 | ||
326 | ieee80211_wake_queue_agg(local, tid); | ||
327 | kfree_rcu(tid_tx, rcu_head); | 378 | kfree_rcu(tid_tx, rcu_head); |
328 | return; | 379 | return; |
329 | } | 380 | } |
330 | 381 | ||
331 | /* we can take packets again now */ | ||
332 | ieee80211_wake_queue_agg(local, tid); | ||
333 | |||
334 | /* activate the timer for the recipient's addBA response */ | 382 | /* activate the timer for the recipient's addBA response */ |
335 | mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL); | 383 | mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL); |
336 | #ifdef CONFIG_MAC80211_HT_DEBUG | 384 | #ifdef CONFIG_MAC80211_HT_DEBUG |
@@ -446,38 +494,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, | |||
446 | } | 494 | } |
447 | EXPORT_SYMBOL(ieee80211_start_tx_ba_session); | 495 | EXPORT_SYMBOL(ieee80211_start_tx_ba_session); |
448 | 496 | ||
449 | /* | ||
450 | * splice packets from the STA's pending to the local pending, | ||
451 | * requires a call to ieee80211_agg_splice_finish later | ||
452 | */ | ||
453 | static void __acquires(agg_queue) | ||
454 | ieee80211_agg_splice_packets(struct ieee80211_local *local, | ||
455 | struct tid_ampdu_tx *tid_tx, u16 tid) | ||
456 | { | ||
457 | int queue = ieee80211_ac_from_tid(tid); | ||
458 | unsigned long flags; | ||
459 | |||
460 | ieee80211_stop_queue_agg(local, tid); | ||
461 | |||
462 | if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates" | ||
463 | " from the pending queue\n", tid)) | ||
464 | return; | ||
465 | |||
466 | if (!skb_queue_empty(&tid_tx->pending)) { | ||
467 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | ||
468 | /* copy over remaining packets */ | ||
469 | skb_queue_splice_tail_init(&tid_tx->pending, | ||
470 | &local->pending[queue]); | ||
471 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | ||
472 | } | ||
473 | } | ||
474 | |||
475 | static void __releases(agg_queue) | ||
476 | ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid) | ||
477 | { | ||
478 | ieee80211_wake_queue_agg(local, tid); | ||
479 | } | ||
480 | |||
481 | static void ieee80211_agg_tx_operational(struct ieee80211_local *local, | 497 | static void ieee80211_agg_tx_operational(struct ieee80211_local *local, |
482 | struct sta_info *sta, u16 tid) | 498 | struct sta_info *sta, u16 tid) |
483 | { | 499 | { |
@@ -757,11 +773,27 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, | |||
757 | goto out; | 773 | goto out; |
758 | } | 774 | } |
759 | 775 | ||
760 | del_timer(&tid_tx->addba_resp_timer); | 776 | del_timer_sync(&tid_tx->addba_resp_timer); |
761 | 777 | ||
762 | #ifdef CONFIG_MAC80211_HT_DEBUG | 778 | #ifdef CONFIG_MAC80211_HT_DEBUG |
763 | printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); | 779 | printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); |
764 | #endif | 780 | #endif |
781 | |||
782 | /* | ||
783 | * addba_resp_timer may have fired before we got here, and | ||
784 | * caused WANT_STOP to be set. If the stop then was already | ||
785 | * processed further, STOPPING might be set. | ||
786 | */ | ||
787 | if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) || | ||
788 | test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { | ||
789 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
790 | printk(KERN_DEBUG | ||
791 | "got addBA resp for tid %d but we already gave up\n", | ||
792 | tid); | ||
793 | #endif | ||
794 | goto out; | ||
795 | } | ||
796 | |||
765 | /* | 797 | /* |
766 | * IEEE 802.11-2007 7.3.1.14: | 798 | * IEEE 802.11-2007 7.3.1.14: |
767 | * In an ADDBA Response frame, when the Status Code field | 799 | * In an ADDBA Response frame, when the Status Code field |
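
The aggregation-teardown changes follow one ordering: flip the session state first (set STOPPING, clear OPERATIONAL), wait with synchronize_net() until every TX path that raced past the old state has drained, and only then call into the driver; the addBA-response path likewise switches to del_timer_sync() and rechecks the state. As a loose userspace analogue of "flip the flag, drain in-flight work, then tear down", using a mutex and condition variable instead of RCU (illustrative only, not the kernel mechanism; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  idle = PTHREAD_COND_INITIALIZER;
static int operational = 1;     /* like HT_AGG_STATE_OPERATIONAL */
static int in_flight;           /* work currently inside the TX path */

static void *tx_path(void *arg)
{
        pthread_mutex_lock(&lock);
        if (!operational) {             /* new packets no longer enter */
                pthread_mutex_unlock(&lock);
                return NULL;
        }
        in_flight++;
        pthread_mutex_unlock(&lock);

        usleep(1000);                   /* pretend to transmit */

        pthread_mutex_lock(&lock);
        if (--in_flight == 0)
                pthread_cond_signal(&idle);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t[4];
        int i;

        for (i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, tx_path, NULL);

        /* Stop sequence: clear the state, then wait for in-flight work. */
        pthread_mutex_lock(&lock);
        operational = 0;
        while (in_flight)
                pthread_cond_wait(&idle, &lock);
        pthread_mutex_unlock(&lock);

        printf("all TX paths drained; safe to notify the driver\n");

        for (i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        return 0;
}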
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index c5f341798c16..3110cbdc501b 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c | |||
@@ -274,9 +274,9 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, | |||
274 | 274 | ||
275 | PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack"); | 275 | PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack"); |
276 | 276 | ||
277 | PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: " | ||
278 | "3839 bytes"); | ||
279 | PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: " | 277 | PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: " |
278 | "3839 bytes"); | ||
279 | PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: " | ||
280 | "7935 bytes"); | 280 | "7935 bytes"); |
281 | 281 | ||
282 | /* | 282 | /* |
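
The debugfs fix swaps two labels that were printed the wrong way around: in the HT capability field, bit 11 clear means a 3839-byte maximum A-MSDU and bit 11 set means 7935 bytes. Reduced to a helper (constants follow the surrounding code):

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

/* Bit 11 of the HT capability info: 0 -> 3839-byte, 1 -> 7935-byte A-MSDU. */
static unsigned int max_amsdu_len(uint16_t ht_cap)
{
        return (ht_cap & BIT(11)) ? 7935 : 3839;
}

int main(void)
{
        printf("cap=0x0000 -> %u bytes\n", max_amsdu_len(0x0000));
        printf("cap=0x0800 -> %u bytes\n", max_amsdu_len(0x0800));  /* bit 11 set */
        return 0;
}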
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index d999bf3b84e1..cae443563ec9 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -757,6 +757,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
757 | if (!local->int_scan_req) | 757 | if (!local->int_scan_req) |
758 | return -ENOMEM; | 758 | return -ENOMEM; |
759 | 759 | ||
760 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | ||
761 | if (!local->hw.wiphy->bands[band]) | ||
762 | continue; | ||
763 | local->int_scan_req->rates[band] = (u32) -1; | ||
764 | } | ||
765 | |||
760 | /* if low-level driver supports AP, we also support VLAN */ | 766 | /* if low-level driver supports AP, we also support VLAN */ |
761 | if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) { | 767 | if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) { |
762 | hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); | 768 | hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); |
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 80de436eae20..16518f386117 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
@@ -260,7 +260,7 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band | |||
260 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 260 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
261 | struct ieee80211_radiotap_header *rthdr; | 261 | struct ieee80211_radiotap_header *rthdr; |
262 | unsigned char *pos; | 262 | unsigned char *pos; |
263 | __le16 txflags; | 263 | u16 txflags; |
264 | 264 | ||
265 | rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len); | 265 | rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len); |
266 | 266 | ||
@@ -290,13 +290,13 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band | |||
290 | txflags = 0; | 290 | txflags = 0; |
291 | if (!(info->flags & IEEE80211_TX_STAT_ACK) && | 291 | if (!(info->flags & IEEE80211_TX_STAT_ACK) && |
292 | !is_multicast_ether_addr(hdr->addr1)) | 292 | !is_multicast_ether_addr(hdr->addr1)) |
293 | txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL); | 293 | txflags |= IEEE80211_RADIOTAP_F_TX_FAIL; |
294 | 294 | ||
295 | if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || | 295 | if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || |
296 | (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) | 296 | (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) |
297 | txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS); | 297 | txflags |= IEEE80211_RADIOTAP_F_TX_CTS; |
298 | else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) | 298 | else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) |
299 | txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS); | 299 | txflags |= IEEE80211_RADIOTAP_F_TX_RTS; |
300 | 300 | ||
301 | put_unaligned_le16(txflags, pos); | 301 | put_unaligned_le16(txflags, pos); |
302 | pos += 2; | 302 | pos += 2; |
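
The radiotap change removes a double endian conversion: put_unaligned_le16() already converts to little-endian on store, so the flags must be accumulated as a plain host-order u16. The same rule applies to htole16() in userspace; a sketch with put_unaligned_le16 approximated by memcpy (glibc <endian.h> assumed):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>

#define F_TX_FAIL 0x0001
#define F_TX_CTS  0x0002

/* Rough userspace stand-in for put_unaligned_le16(): convert once, then store. */
static void put_unaligned_le16(uint16_t val, void *p)
{
        uint16_t le = htole16(val);

        memcpy(p, &le, sizeof(le));
}

int main(void)
{
        unsigned char buf[2];
        uint16_t txflags = 0;             /* accumulate in host order */

        txflags |= F_TX_FAIL;
        txflags |= F_TX_CTS;

        put_unaligned_le16(txflags, buf); /* single, final conversion */
        printf("on the wire: %02x %02x\n", buf[0], buf[1]);   /* 03 00 */
        return 0;
}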
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index eca0fad09709..d5230ecc784d 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -1039,7 +1039,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1039 | struct ieee80211_sub_if_data, | 1039 | struct ieee80211_sub_if_data, |
1040 | u.ap); | 1040 | u.ap); |
1041 | 1041 | ||
1042 | memset(&sta->sta.drv_priv, 0, hw->sta_data_size); | ||
1043 | WARN_ON(drv_sta_add(local, sdata, &sta->sta)); | 1042 | WARN_ON(drv_sta_add(local, sdata, &sta->sta)); |
1044 | } | 1043 | } |
1045 | } | 1044 | } |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 8260b13d93c9..d5597b759ba3 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -201,7 +201,6 @@ config NF_CONNTRACK_BROADCAST | |||
201 | 201 | ||
202 | config NF_CONNTRACK_NETBIOS_NS | 202 | config NF_CONNTRACK_NETBIOS_NS |
203 | tristate "NetBIOS name service protocol support" | 203 | tristate "NetBIOS name service protocol support" |
204 | depends on NETFILTER_ADVANCED | ||
205 | select NF_CONNTRACK_BROADCAST | 204 | select NF_CONNTRACK_BROADCAST |
206 | help | 205 | help |
207 | NetBIOS name service requests are sent as broadcast messages from an | 206 | NetBIOS name service requests are sent as broadcast messages from an |
@@ -542,7 +541,6 @@ config NETFILTER_XT_TARGET_NOTRACK | |||
542 | tristate '"NOTRACK" target support' | 541 | tristate '"NOTRACK" target support' |
543 | depends on IP_NF_RAW || IP6_NF_RAW | 542 | depends on IP_NF_RAW || IP6_NF_RAW |
544 | depends on NF_CONNTRACK | 543 | depends on NF_CONNTRACK |
545 | depends on NETFILTER_ADVANCED | ||
546 | help | 544 | help |
547 | The NOTRACK target allows a select rule to specify | 545 | The NOTRACK target allows a select rule to specify |
548 | which packets *not* to enter the conntrack/NAT | 546 | which packets *not* to enter the conntrack/NAT |
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c index 6ee10f5d59bd..37d667e3f6f8 100644 --- a/net/netfilter/ipset/ip_set_hash_ipport.c +++ b/net/netfilter/ipset/ip_set_hash_ipport.c | |||
@@ -158,7 +158,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
158 | const struct ip_set_hash *h = set->data; | 158 | const struct ip_set_hash *h = set->data; |
159 | ipset_adtfn adtfn = set->variant->adt[adt]; | 159 | ipset_adtfn adtfn = set->variant->adt[adt]; |
160 | struct hash_ipport4_elem data = { }; | 160 | struct hash_ipport4_elem data = { }; |
161 | u32 ip, ip_to, p = 0, port, port_to; | 161 | u32 ip, ip_to = 0, p = 0, port, port_to; |
162 | u32 timeout = h->timeout; | 162 | u32 timeout = h->timeout; |
163 | bool with_ports = false; | 163 | bool with_ports = false; |
164 | int ret; | 164 | int ret; |
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c index fb90e344e907..e69e2718fbe1 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportip.c +++ b/net/netfilter/ipset/ip_set_hash_ipportip.c | |||
@@ -162,7 +162,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
162 | const struct ip_set_hash *h = set->data; | 162 | const struct ip_set_hash *h = set->data; |
163 | ipset_adtfn adtfn = set->variant->adt[adt]; | 163 | ipset_adtfn adtfn = set->variant->adt[adt]; |
164 | struct hash_ipportip4_elem data = { }; | 164 | struct hash_ipportip4_elem data = { }; |
165 | u32 ip, ip_to, p = 0, port, port_to; | 165 | u32 ip, ip_to = 0, p = 0, port, port_to; |
166 | u32 timeout = h->timeout; | 166 | u32 timeout = h->timeout; |
167 | bool with_ports = false; | 167 | bool with_ports = false; |
168 | int ret; | 168 | int ret; |
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index deb3e3dfa5fc..64199b4e93c9 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c | |||
@@ -184,7 +184,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
184 | const struct ip_set_hash *h = set->data; | 184 | const struct ip_set_hash *h = set->data; |
185 | ipset_adtfn adtfn = set->variant->adt[adt]; | 185 | ipset_adtfn adtfn = set->variant->adt[adt]; |
186 | struct hash_ipportnet4_elem data = { .cidr = HOST_MASK }; | 186 | struct hash_ipportnet4_elem data = { .cidr = HOST_MASK }; |
187 | u32 ip, ip_to, p = 0, port, port_to; | 187 | u32 ip, ip_to = 0, p = 0, port, port_to; |
188 | u32 ip2_from = 0, ip2_to, ip2_last, ip2; | 188 | u32 ip2_from = 0, ip2_to, ip2_last, ip2; |
189 | u32 timeout = h->timeout; | 189 | u32 timeout = h->timeout; |
190 | bool with_ports = false; | 190 | bool with_ports = false; |
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index 6b368be937c6..b62c4148b921 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c | |||
@@ -27,22 +27,17 @@ | |||
27 | 27 | ||
28 | static DEFINE_MUTEX(nf_ct_ecache_mutex); | 28 | static DEFINE_MUTEX(nf_ct_ecache_mutex); |
29 | 29 | ||
30 | struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly; | ||
31 | EXPORT_SYMBOL_GPL(nf_conntrack_event_cb); | ||
32 | |||
33 | struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly; | ||
34 | EXPORT_SYMBOL_GPL(nf_expect_event_cb); | ||
35 | |||
36 | /* deliver cached events and clear cache entry - must be called with locally | 30 | /* deliver cached events and clear cache entry - must be called with locally |
37 | * disabled softirqs */ | 31 | * disabled softirqs */ |
38 | void nf_ct_deliver_cached_events(struct nf_conn *ct) | 32 | void nf_ct_deliver_cached_events(struct nf_conn *ct) |
39 | { | 33 | { |
34 | struct net *net = nf_ct_net(ct); | ||
40 | unsigned long events; | 35 | unsigned long events; |
41 | struct nf_ct_event_notifier *notify; | 36 | struct nf_ct_event_notifier *notify; |
42 | struct nf_conntrack_ecache *e; | 37 | struct nf_conntrack_ecache *e; |
43 | 38 | ||
44 | rcu_read_lock(); | 39 | rcu_read_lock(); |
45 | notify = rcu_dereference(nf_conntrack_event_cb); | 40 | notify = rcu_dereference(net->ct.nf_conntrack_event_cb); |
46 | if (notify == NULL) | 41 | if (notify == NULL) |
47 | goto out_unlock; | 42 | goto out_unlock; |
48 | 43 | ||
@@ -83,19 +78,20 @@ out_unlock: | |||
83 | } | 78 | } |
84 | EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events); | 79 | EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events); |
85 | 80 | ||
86 | int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new) | 81 | int nf_conntrack_register_notifier(struct net *net, |
82 | struct nf_ct_event_notifier *new) | ||
87 | { | 83 | { |
88 | int ret = 0; | 84 | int ret = 0; |
89 | struct nf_ct_event_notifier *notify; | 85 | struct nf_ct_event_notifier *notify; |
90 | 86 | ||
91 | mutex_lock(&nf_ct_ecache_mutex); | 87 | mutex_lock(&nf_ct_ecache_mutex); |
92 | notify = rcu_dereference_protected(nf_conntrack_event_cb, | 88 | notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb, |
93 | lockdep_is_held(&nf_ct_ecache_mutex)); | 89 | lockdep_is_held(&nf_ct_ecache_mutex)); |
94 | if (notify != NULL) { | 90 | if (notify != NULL) { |
95 | ret = -EBUSY; | 91 | ret = -EBUSY; |
96 | goto out_unlock; | 92 | goto out_unlock; |
97 | } | 93 | } |
98 | RCU_INIT_POINTER(nf_conntrack_event_cb, new); | 94 | RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new); |
99 | mutex_unlock(&nf_ct_ecache_mutex); | 95 | mutex_unlock(&nf_ct_ecache_mutex); |
100 | return ret; | 96 | return ret; |
101 | 97 | ||
@@ -105,32 +101,34 @@ out_unlock: | |||
105 | } | 101 | } |
106 | EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier); | 102 | EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier); |
107 | 103 | ||
108 | void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new) | 104 | void nf_conntrack_unregister_notifier(struct net *net, |
105 | struct nf_ct_event_notifier *new) | ||
109 | { | 106 | { |
110 | struct nf_ct_event_notifier *notify; | 107 | struct nf_ct_event_notifier *notify; |
111 | 108 | ||
112 | mutex_lock(&nf_ct_ecache_mutex); | 109 | mutex_lock(&nf_ct_ecache_mutex); |
113 | notify = rcu_dereference_protected(nf_conntrack_event_cb, | 110 | notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb, |
114 | lockdep_is_held(&nf_ct_ecache_mutex)); | 111 | lockdep_is_held(&nf_ct_ecache_mutex)); |
115 | BUG_ON(notify != new); | 112 | BUG_ON(notify != new); |
116 | RCU_INIT_POINTER(nf_conntrack_event_cb, NULL); | 113 | RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL); |
117 | mutex_unlock(&nf_ct_ecache_mutex); | 114 | mutex_unlock(&nf_ct_ecache_mutex); |
118 | } | 115 | } |
119 | EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); | 116 | EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); |
120 | 117 | ||
121 | int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new) | 118 | int nf_ct_expect_register_notifier(struct net *net, |
119 | struct nf_exp_event_notifier *new) | ||
122 | { | 120 | { |
123 | int ret = 0; | 121 | int ret = 0; |
124 | struct nf_exp_event_notifier *notify; | 122 | struct nf_exp_event_notifier *notify; |
125 | 123 | ||
126 | mutex_lock(&nf_ct_ecache_mutex); | 124 | mutex_lock(&nf_ct_ecache_mutex); |
127 | notify = rcu_dereference_protected(nf_expect_event_cb, | 125 | notify = rcu_dereference_protected(net->ct.nf_expect_event_cb, |
128 | lockdep_is_held(&nf_ct_ecache_mutex)); | 126 | lockdep_is_held(&nf_ct_ecache_mutex)); |
129 | if (notify != NULL) { | 127 | if (notify != NULL) { |
130 | ret = -EBUSY; | 128 | ret = -EBUSY; |
131 | goto out_unlock; | 129 | goto out_unlock; |
132 | } | 130 | } |
133 | RCU_INIT_POINTER(nf_expect_event_cb, new); | 131 | RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new); |
134 | mutex_unlock(&nf_ct_ecache_mutex); | 132 | mutex_unlock(&nf_ct_ecache_mutex); |
135 | return ret; | 133 | return ret; |
136 | 134 | ||
@@ -140,15 +138,16 @@ out_unlock: | |||
140 | } | 138 | } |
141 | EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier); | 139 | EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier); |
142 | 140 | ||
143 | void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new) | 141 | void nf_ct_expect_unregister_notifier(struct net *net, |
142 | struct nf_exp_event_notifier *new) | ||
144 | { | 143 | { |
145 | struct nf_exp_event_notifier *notify; | 144 | struct nf_exp_event_notifier *notify; |
146 | 145 | ||
147 | mutex_lock(&nf_ct_ecache_mutex); | 146 | mutex_lock(&nf_ct_ecache_mutex); |
148 | notify = rcu_dereference_protected(nf_expect_event_cb, | 147 | notify = rcu_dereference_protected(net->ct.nf_expect_event_cb, |
149 | lockdep_is_held(&nf_ct_ecache_mutex)); | 148 | lockdep_is_held(&nf_ct_ecache_mutex)); |
150 | BUG_ON(notify != new); | 149 | BUG_ON(notify != new); |
151 | RCU_INIT_POINTER(nf_expect_event_cb, NULL); | 150 | RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL); |
152 | mutex_unlock(&nf_ct_ecache_mutex); | 151 | mutex_unlock(&nf_ct_ecache_mutex); |
153 | } | 152 | } |
154 | EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier); | 153 | EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier); |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index e58aa9b1fe8a..ef21b221f036 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * (C) 2001 by Jay Schulist <jschlst@samba.org> | 4 | * (C) 2001 by Jay Schulist <jschlst@samba.org> |
5 | * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> | 5 | * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> |
6 | * (C) 2003 by Patrick Mchardy <kaber@trash.net> | 6 | * (C) 2003 by Patrick Mchardy <kaber@trash.net> |
7 | * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org> | 7 | * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org> |
8 | * | 8 | * |
9 | * Initial connection tracking via netlink development funded and | 9 | * Initial connection tracking via netlink development funded and |
10 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) | 10 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) |
@@ -2163,6 +2163,54 @@ MODULE_ALIAS("ip_conntrack_netlink"); | |||
2163 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); | 2163 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); |
2164 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); | 2164 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); |
2165 | 2165 | ||
2166 | static int __net_init ctnetlink_net_init(struct net *net) | ||
2167 | { | ||
2168 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | ||
2169 | int ret; | ||
2170 | |||
2171 | ret = nf_conntrack_register_notifier(net, &ctnl_notifier); | ||
2172 | if (ret < 0) { | ||
2173 | pr_err("ctnetlink_init: cannot register notifier.\n"); | ||
2174 | goto err_out; | ||
2175 | } | ||
2176 | |||
2177 | ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp); | ||
2178 | if (ret < 0) { | ||
2179 | pr_err("ctnetlink_init: cannot expect register notifier.\n"); | ||
2180 | goto err_unreg_notifier; | ||
2181 | } | ||
2182 | #endif | ||
2183 | return 0; | ||
2184 | |||
2185 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | ||
2186 | err_unreg_notifier: | ||
2187 | nf_conntrack_unregister_notifier(net, &ctnl_notifier); | ||
2188 | err_out: | ||
2189 | return ret; | ||
2190 | #endif | ||
2191 | } | ||
2192 | |||
2193 | static void ctnetlink_net_exit(struct net *net) | ||
2194 | { | ||
2195 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | ||
2196 | nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp); | ||
2197 | nf_conntrack_unregister_notifier(net, &ctnl_notifier); | ||
2198 | #endif | ||
2199 | } | ||
2200 | |||
2201 | static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list) | ||
2202 | { | ||
2203 | struct net *net; | ||
2204 | |||
2205 | list_for_each_entry(net, net_exit_list, exit_list) | ||
2206 | ctnetlink_net_exit(net); | ||
2207 | } | ||
2208 | |||
2209 | static struct pernet_operations ctnetlink_net_ops = { | ||
2210 | .init = ctnetlink_net_init, | ||
2211 | .exit_batch = ctnetlink_net_exit_batch, | ||
2212 | }; | ||
2213 | |||
2166 | static int __init ctnetlink_init(void) | 2214 | static int __init ctnetlink_init(void) |
2167 | { | 2215 | { |
2168 | int ret; | 2216 | int ret; |
@@ -2180,28 +2228,15 @@ static int __init ctnetlink_init(void) | |||
2180 | goto err_unreg_subsys; | 2228 | goto err_unreg_subsys; |
2181 | } | 2229 | } |
2182 | 2230 | ||
2183 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | 2231 | if (register_pernet_subsys(&ctnetlink_net_ops)) { |
2184 | ret = nf_conntrack_register_notifier(&ctnl_notifier); | 2232 | pr_err("ctnetlink_init: cannot register pernet operations\n"); |
2185 | if (ret < 0) { | ||
2186 | pr_err("ctnetlink_init: cannot register notifier.\n"); | ||
2187 | goto err_unreg_exp_subsys; | 2233 | goto err_unreg_exp_subsys; |
2188 | } | 2234 | } |
2189 | 2235 | ||
2190 | ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp); | ||
2191 | if (ret < 0) { | ||
2192 | pr_err("ctnetlink_init: cannot expect register notifier.\n"); | ||
2193 | goto err_unreg_notifier; | ||
2194 | } | ||
2195 | #endif | ||
2196 | |||
2197 | return 0; | 2236 | return 0; |
2198 | 2237 | ||
2199 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | ||
2200 | err_unreg_notifier: | ||
2201 | nf_conntrack_unregister_notifier(&ctnl_notifier); | ||
2202 | err_unreg_exp_subsys: | 2238 | err_unreg_exp_subsys: |
2203 | nfnetlink_subsys_unregister(&ctnl_exp_subsys); | 2239 | nfnetlink_subsys_unregister(&ctnl_exp_subsys); |
2204 | #endif | ||
2205 | err_unreg_subsys: | 2240 | err_unreg_subsys: |
2206 | nfnetlink_subsys_unregister(&ctnl_subsys); | 2241 | nfnetlink_subsys_unregister(&ctnl_subsys); |
2207 | err_out: | 2242 | err_out: |
@@ -2213,11 +2248,7 @@ static void __exit ctnetlink_exit(void) | |||
2213 | pr_info("ctnetlink: unregistering from nfnetlink.\n"); | 2248 | pr_info("ctnetlink: unregistering from nfnetlink.\n"); |
2214 | 2249 | ||
2215 | nf_ct_remove_userspace_expectations(); | 2250 | nf_ct_remove_userspace_expectations(); |
2216 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | 2251 | unregister_pernet_subsys(&ctnetlink_net_ops); |
2217 | nf_ct_expect_unregister_notifier(&ctnl_notifier_exp); | ||
2218 | nf_conntrack_unregister_notifier(&ctnl_notifier); | ||
2219 | #endif | ||
2220 | |||
2221 | nfnetlink_subsys_unregister(&ctnl_exp_subsys); | 2252 | nfnetlink_subsys_unregister(&ctnl_exp_subsys); |
2222 | nfnetlink_subsys_unregister(&ctnl_subsys); | 2253 | nfnetlink_subsys_unregister(&ctnl_subsys); |
2223 | } | 2254 | } |
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index 9c24de10a657..824f184f7a9b 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c | |||
@@ -111,8 +111,6 @@ int netlbl_cfg_unlbl_map_add(const char *domain, | |||
111 | struct netlbl_domaddr_map *addrmap = NULL; | 111 | struct netlbl_domaddr_map *addrmap = NULL; |
112 | struct netlbl_domaddr4_map *map4 = NULL; | 112 | struct netlbl_domaddr4_map *map4 = NULL; |
113 | struct netlbl_domaddr6_map *map6 = NULL; | 113 | struct netlbl_domaddr6_map *map6 = NULL; |
114 | const struct in_addr *addr4, *mask4; | ||
115 | const struct in6_addr *addr6, *mask6; | ||
116 | 114 | ||
117 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); | 115 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); |
118 | if (entry == NULL) | 116 | if (entry == NULL) |
@@ -133,9 +131,9 @@ int netlbl_cfg_unlbl_map_add(const char *domain, | |||
133 | INIT_LIST_HEAD(&addrmap->list6); | 131 | INIT_LIST_HEAD(&addrmap->list6); |
134 | 132 | ||
135 | switch (family) { | 133 | switch (family) { |
136 | case AF_INET: | 134 | case AF_INET: { |
137 | addr4 = addr; | 135 | const struct in_addr *addr4 = addr; |
138 | mask4 = mask; | 136 | const struct in_addr *mask4 = mask; |
139 | map4 = kzalloc(sizeof(*map4), GFP_ATOMIC); | 137 | map4 = kzalloc(sizeof(*map4), GFP_ATOMIC); |
140 | if (map4 == NULL) | 138 | if (map4 == NULL) |
141 | goto cfg_unlbl_map_add_failure; | 139 | goto cfg_unlbl_map_add_failure; |
@@ -148,9 +146,11 @@ int netlbl_cfg_unlbl_map_add(const char *domain, | |||
148 | if (ret_val != 0) | 146 | if (ret_val != 0) |
149 | goto cfg_unlbl_map_add_failure; | 147 | goto cfg_unlbl_map_add_failure; |
150 | break; | 148 | break; |
151 | case AF_INET6: | 149 | } |
152 | addr6 = addr; | 150 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
153 | mask6 = mask; | 151 | case AF_INET6: { |
152 | const struct in6_addr *addr6 = addr; | ||
153 | const struct in6_addr *mask6 = mask; | ||
154 | map6 = kzalloc(sizeof(*map6), GFP_ATOMIC); | 154 | map6 = kzalloc(sizeof(*map6), GFP_ATOMIC); |
155 | if (map6 == NULL) | 155 | if (map6 == NULL) |
156 | goto cfg_unlbl_map_add_failure; | 156 | goto cfg_unlbl_map_add_failure; |
@@ -162,11 +162,13 @@ int netlbl_cfg_unlbl_map_add(const char *domain, | |||
162 | map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3]; | 162 | map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3]; |
163 | ipv6_addr_copy(&map6->list.mask, mask6); | 163 | ipv6_addr_copy(&map6->list.mask, mask6); |
164 | map6->list.valid = 1; | 164 | map6->list.valid = 1; |
165 | ret_val = netlbl_af4list_add(&map4->list, | 165 | ret_val = netlbl_af6list_add(&map6->list, |
166 | &addrmap->list4); | 166 | &addrmap->list6); |
167 | if (ret_val != 0) | 167 | if (ret_val != 0) |
168 | goto cfg_unlbl_map_add_failure; | 168 | goto cfg_unlbl_map_add_failure; |
169 | break; | 169 | break; |
170 | } | ||
171 | #endif /* IPv6 */ | ||
170 | default: | 172 | default: |
171 | goto cfg_unlbl_map_add_failure; | 173 | goto cfg_unlbl_map_add_failure; |
172 | break; | 174 | break; |
@@ -225,9 +227,11 @@ int netlbl_cfg_unlbl_static_add(struct net *net, | |||
225 | case AF_INET: | 227 | case AF_INET: |
226 | addr_len = sizeof(struct in_addr); | 228 | addr_len = sizeof(struct in_addr); |
227 | break; | 229 | break; |
230 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
228 | case AF_INET6: | 231 | case AF_INET6: |
229 | addr_len = sizeof(struct in6_addr); | 232 | addr_len = sizeof(struct in6_addr); |
230 | break; | 233 | break; |
234 | #endif /* IPv6 */ | ||
231 | default: | 235 | default: |
232 | return -EPFNOSUPPORT; | 236 | return -EPFNOSUPPORT; |
233 | } | 237 | } |
@@ -266,9 +270,11 @@ int netlbl_cfg_unlbl_static_del(struct net *net, | |||
266 | case AF_INET: | 270 | case AF_INET: |
267 | addr_len = sizeof(struct in_addr); | 271 | addr_len = sizeof(struct in_addr); |
268 | break; | 272 | break; |
273 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
269 | case AF_INET6: | 274 | case AF_INET6: |
270 | addr_len = sizeof(struct in6_addr); | 275 | addr_len = sizeof(struct in6_addr); |
271 | break; | 276 | break; |
277 | #endif /* IPv6 */ | ||
272 | default: | 278 | default: |
273 | return -EPFNOSUPPORT; | 279 | return -EPFNOSUPPORT; |
274 | } | 280 | } |
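
The netlabel change gives each address family its own braced case block, so the typed aliases live only where they are used, and compiles the AF_INET6 arms only when IPv6 is configured; it also fixes the v6 entry being added to the v4 list. The case-local-variable pattern, reduced to a standalone example with invented families:

#include <stdio.h>
#include <stdint.h>

#define MY_AF_INET  4
#define MY_AF_INET6 6

static int handle(int family, const void *addr)
{
        switch (family) {
        case MY_AF_INET: {
                /* Braces give the case its own scope for typed aliases. */
                const uint32_t *addr4 = addr;
                printf("v4: %08x\n", *addr4);
                break;
        }
#if defined(WITH_IPV6)
        case MY_AF_INET6: {
                const uint8_t *addr6 = addr;
                printf("v6: first byte %02x\n", addr6[0]);
                break;
        }
#endif
        default:
                return -1;
        }
        return 0;
}

int main(void)
{
        uint32_t v4 = 0xc0a80001;         /* sample address in host order */
        return handle(MY_AF_INET, &v4) ? 1 : 0;
}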
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index b9493a09a870..6cd8ddfb512d 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c | |||
@@ -385,7 +385,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp, | |||
385 | struct gred_sched_data *q; | 385 | struct gred_sched_data *q; |
386 | 386 | ||
387 | if (table->tab[dp] == NULL) { | 387 | if (table->tab[dp] == NULL) { |
388 | table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL); | 388 | table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC); |
389 | if (table->tab[dp] == NULL) | 389 | if (table->tab[dp] == NULL) |
390 | return -ENOMEM; | 390 | return -ENOMEM; |
391 | } | 391 | } |
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 6649463da1b6..d617161f8dd3 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
@@ -209,8 +209,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) | |||
209 | ctl->Plog, ctl->Scell_log, | 209 | ctl->Plog, ctl->Scell_log, |
210 | nla_data(tb[TCA_RED_STAB])); | 210 | nla_data(tb[TCA_RED_STAB])); |
211 | 211 | ||
212 | if (skb_queue_empty(&sch->q)) | 212 | if (!q->qdisc->q.qlen) |
213 | red_end_of_idle_period(&q->parms); | 213 | red_start_of_idle_period(&q->parms); |
214 | 214 | ||
215 | sch_tree_unlock(sch); | 215 | sch_tree_unlock(sch); |
216 | return 0; | 216 | return 0; |
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index a3b7120fcc74..4f4c52c0eeb3 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -225,11 +225,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) | |||
225 | 225 | ||
226 | 226 | ||
227 | static int | 227 | static int |
228 | __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) | 228 | __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, |
229 | struct net_device *dev, struct netdev_queue *txq, | ||
230 | struct neighbour *mn) | ||
229 | { | 231 | { |
230 | struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); | 232 | struct teql_sched_data *q = qdisc_priv(txq->qdisc); |
231 | struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc); | ||
232 | struct neighbour *mn = dst_get_neighbour(skb_dst(skb)); | ||
233 | struct neighbour *n = q->ncache; | 233 | struct neighbour *n = q->ncache; |
234 | 234 | ||
235 | if (mn->tbl == NULL) | 235 | if (mn->tbl == NULL) |
@@ -262,17 +262,26 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device * | |||
262 | } | 262 | } |
263 | 263 | ||
264 | static inline int teql_resolve(struct sk_buff *skb, | 264 | static inline int teql_resolve(struct sk_buff *skb, |
265 | struct sk_buff *skb_res, struct net_device *dev) | 265 | struct sk_buff *skb_res, |
266 | struct net_device *dev, | ||
267 | struct netdev_queue *txq) | ||
266 | { | 268 | { |
267 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | 269 | struct dst_entry *dst = skb_dst(skb); |
270 | struct neighbour *mn; | ||
271 | int res; | ||
272 | |||
268 | if (txq->qdisc == &noop_qdisc) | 273 | if (txq->qdisc == &noop_qdisc) |
269 | return -ENODEV; | 274 | return -ENODEV; |
270 | 275 | ||
271 | if (dev->header_ops == NULL || | 276 | if (!dev->header_ops || !dst) |
272 | skb_dst(skb) == NULL || | ||
273 | dst_get_neighbour(skb_dst(skb)) == NULL) | ||
274 | return 0; | 277 | return 0; |
275 | return __teql_resolve(skb, skb_res, dev); | 278 | |
279 | rcu_read_lock(); | ||
280 | mn = dst_get_neighbour(dst); | ||
281 | res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0; | ||
282 | rcu_read_unlock(); | ||
283 | |||
284 | return res; | ||
276 | } | 285 | } |
277 | 286 | ||
278 | static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) | 287 | static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) |
@@ -307,7 +316,7 @@ restart: | |||
307 | continue; | 316 | continue; |
308 | } | 317 | } |
309 | 318 | ||
310 | switch (teql_resolve(skb, skb_res, slave)) { | 319 | switch (teql_resolve(skb, skb_res, slave, slave_txq)) { |
311 | case 0: | 320 | case 0: |
312 | if (__netif_tx_trylock(slave_txq)) { | 321 | if (__netif_tx_trylock(slave_txq)) { |
313 | unsigned int length = qdisc_pkt_len(skb); | 322 | unsigned int length = qdisc_pkt_len(skb); |
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 865e68fef21c..bf812048cf6f 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
@@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp) | |||
82 | struct sctp_auth_bytes *key; | 82 | struct sctp_auth_bytes *key; |
83 | 83 | ||
84 | /* Verify that we are not going to overflow INT_MAX */ | 84 | /* Verify that we are not going to overflow INT_MAX */ |
85 | if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes)) | 85 | if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes))) |
86 | return NULL; | 86 | return NULL; |
87 | 87 | ||
88 | /* Allocate the shared key */ | 88 | /* Allocate the shared key */ |
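
The sctp_auth_create_key() guard is rewritten so the arithmetic that could wrap is done on the constant side: comparing key_len against the headroom left below INT_MAX cannot overflow for any unsigned key_len. The general pattern for guarding a size addition before allocating, with illustrative types:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <limits.h>

struct key_hdr {
        uint32_t len;
        /* payload follows */
};

static void *create_key(uint32_t key_len)
{
        /* Overflow-safe: compare against the headroom left below INT_MAX. */
        if (key_len > INT_MAX - sizeof(struct key_hdr))
                return NULL;

        return malloc(sizeof(struct key_hdr) + key_len);
}

int main(void)
{
        void *ok  = create_key(64);
        void *bad = create_key(UINT32_MAX);   /* rejected instead of wrapping */

        printf("64 bytes: %s, UINT32_MAX: %s\n",
               ok ? "allocated" : "rejected",
               bad ? "allocated" : "rejected");
        free(ok);
        free(bad);
        return 0;
}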
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 5317b9341b53..3341d8962786 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -591,6 +591,27 @@ void rpc_prepare_task(struct rpc_task *task) | |||
591 | task->tk_ops->rpc_call_prepare(task, task->tk_calldata); | 591 | task->tk_ops->rpc_call_prepare(task, task->tk_calldata); |
592 | } | 592 | } |
593 | 593 | ||
594 | static void | ||
595 | rpc_init_task_statistics(struct rpc_task *task) | ||
596 | { | ||
597 | /* Initialize retry counters */ | ||
598 | task->tk_garb_retry = 2; | ||
599 | task->tk_cred_retry = 2; | ||
600 | task->tk_rebind_retry = 2; | ||
601 | |||
602 | /* starting timestamp */ | ||
603 | task->tk_start = ktime_get(); | ||
604 | } | ||
605 | |||
606 | static void | ||
607 | rpc_reset_task_statistics(struct rpc_task *task) | ||
608 | { | ||
609 | task->tk_timeouts = 0; | ||
610 | task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT); | ||
611 | |||
612 | rpc_init_task_statistics(task); | ||
613 | } | ||
614 | |||
594 | /* | 615 | /* |
595 | * Helper that calls task->tk_ops->rpc_call_done if it exists | 616 | * Helper that calls task->tk_ops->rpc_call_done if it exists |
596 | */ | 617 | */ |
@@ -603,6 +624,7 @@ void rpc_exit_task(struct rpc_task *task) | |||
603 | WARN_ON(RPC_ASSASSINATED(task)); | 624 | WARN_ON(RPC_ASSASSINATED(task)); |
604 | /* Always release the RPC slot and buffer memory */ | 625 | /* Always release the RPC slot and buffer memory */ |
605 | xprt_release(task); | 626 | xprt_release(task); |
627 | rpc_reset_task_statistics(task); | ||
606 | } | 628 | } |
607 | } | 629 | } |
608 | } | 630 | } |
@@ -805,11 +827,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta | |||
805 | task->tk_calldata = task_setup_data->callback_data; | 827 | task->tk_calldata = task_setup_data->callback_data; |
806 | INIT_LIST_HEAD(&task->tk_task); | 828 | INIT_LIST_HEAD(&task->tk_task); |
807 | 829 | ||
808 | /* Initialize retry counters */ | ||
809 | task->tk_garb_retry = 2; | ||
810 | task->tk_cred_retry = 2; | ||
811 | task->tk_rebind_retry = 2; | ||
812 | |||
813 | task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; | 830 | task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; |
814 | task->tk_owner = current->tgid; | 831 | task->tk_owner = current->tgid; |
815 | 832 | ||
@@ -819,8 +836,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta | |||
819 | if (task->tk_ops->rpc_call_prepare != NULL) | 836 | if (task->tk_ops->rpc_call_prepare != NULL) |
820 | task->tk_action = rpc_prepare_task; | 837 | task->tk_action = rpc_prepare_task; |
821 | 838 | ||
822 | /* starting timestamp */ | 839 | rpc_init_task_statistics(task); |
823 | task->tk_start = ktime_get(); | ||
824 | 840 | ||
825 | dprintk("RPC: new task initialized, procpid %u\n", | 841 | dprintk("RPC: new task initialized, procpid %u\n", |
826 | task_pid_nr(current)); | 842 | task_pid_nr(current)); |
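
The sunrpc/sched.c hunks move the retry counters and the start timestamp out of rpc_init_task() into rpc_init_task_statistics(), and have rpc_exit_task() call rpc_reset_task_statistics() after xprt_release(), so a task that gets re-run starts with fresh counters, a fresh timestamp, and the MAJORSEEN/KILLED/SENT flags cleared. The sketch below shows the shape of that init/reset split with a stand-in task structure; it is not the kernel's rpc_task.

/* Sketch of the init/reset split: reset clears the per-run state and then
 * reuses the same initializer the task setup path calls. */
#include <stdio.h>
#include <time.h>

#define FLAG_MAJORSEEN 0x01
#define FLAG_KILLED    0x02
#define FLAG_SENT      0x04

struct task_sketch {
    int garb_retry, cred_retry, rebind_retry;
    int timeouts;
    unsigned flags;
    time_t start;
};

static void init_task_statistics(struct task_sketch *t)
{
    /* retry budgets and the starting timestamp, set once per (re)run */
    t->garb_retry = 2;
    t->cred_retry = 2;
    t->rebind_retry = 2;
    t->start = time(NULL);
}

static void reset_task_statistics(struct task_sketch *t)
{
    /* called when a finished task is about to be re-run */
    t->timeouts = 0;
    t->flags &= ~(FLAG_MAJORSEEN | FLAG_KILLED | FLAG_SENT);
    init_task_statistics(t);
}

int main(void)
{
    struct task_sketch t = { .timeouts = 7, .flags = FLAG_SENT | FLAG_KILLED };

    init_task_statistics(&t);       /* what rpc_init_task() now delegates to */
    reset_task_statistics(&t);      /* what rpc_exit_task() calls after xprt_release() */
    printf("timeouts=%d flags=%#x retries=%d\n", t.timeouts, t.flags, t.garb_retry);
    return 0;
}
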
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index f4385e45a5fc..c64c0ef519b5 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -995,13 +995,11 @@ out_init_req: | |||
995 | 995 | ||
996 | static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) | 996 | static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) |
997 | { | 997 | { |
998 | if (xprt_dynamic_free_slot(xprt, req)) | ||
999 | return; | ||
1000 | |||
1001 | memset(req, 0, sizeof(*req)); /* mark unused */ | ||
1002 | |||
1003 | spin_lock(&xprt->reserve_lock); | 998 | spin_lock(&xprt->reserve_lock); |
1004 | list_add(&req->rq_list, &xprt->free); | 999 | if (!xprt_dynamic_free_slot(xprt, req)) { |
1000 | memset(req, 0, sizeof(*req)); /* mark unused */ | ||
1001 | list_add(&req->rq_list, &xprt->free); | ||
1002 | } | ||
1005 | rpc_wake_up_next(&xprt->backlog); | 1003 | rpc_wake_up_next(&xprt->backlog); |
1006 | spin_unlock(&xprt->reserve_lock); | 1004 | spin_unlock(&xprt->reserve_lock); |
1007 | } | 1005 | } |
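
The xprt_free_slot() hunk moves the xprt_dynamic_free_slot() decision and the memset under xprt->reserve_lock, so the choice between releasing a dynamically allocated slot and putting it back on xprt->free can no longer race with a concurrent slot allocation. The sketch below illustrates the lock-then-decide pattern with a pthread mutex and a plain array standing in for the kernel's lock and list; the dynamic_free_slot() policy here is a placeholder.

/* Sketch of the locking change: decide between "release the slot entirely"
 * and "return it to the free list" only while holding the list lock. */
#include <pthread.h>
#include <stdbool.h>
#include <string.h>

struct rqst_sketch { char payload[32]; bool in_use; };

static pthread_mutex_t reserve_lock = PTHREAD_MUTEX_INITIALIZER;
static struct rqst_sketch *free_list[16];
static int free_count;

/* assumed policy hook: returns true if the slot should be released, not recycled */
static bool dynamic_free_slot(struct rqst_sketch *req)
{
    return req == NULL;     /* placeholder policy for the sketch */
}

static void free_slot(struct rqst_sketch *req)
{
    pthread_mutex_lock(&reserve_lock);
    if (!dynamic_free_slot(req)) {
        memset(req, 0, sizeof(*req));           /* mark unused */
        if (free_count < 16)
            free_list[free_count++] = req;      /* back onto the free list */
    }
    /* waking the next waiter would also happen here, before dropping the lock */
    pthread_mutex_unlock(&reserve_lock);
}

int main(void)
{
    struct rqst_sketch req = { .in_use = true };

    free_slot(&req);
    return free_count == 1 ? 0 : 1;
}
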
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 2d78d95955ab..55472c48825e 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -496,7 +496,7 @@ static int xs_nospace(struct rpc_task *task) | |||
496 | struct rpc_rqst *req = task->tk_rqstp; | 496 | struct rpc_rqst *req = task->tk_rqstp; |
497 | struct rpc_xprt *xprt = req->rq_xprt; | 497 | struct rpc_xprt *xprt = req->rq_xprt; |
498 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 498 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
499 | int ret = 0; | 499 | int ret = -EAGAIN; |
500 | 500 | ||
501 | dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", | 501 | dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", |
502 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, | 502 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, |
@@ -508,7 +508,6 @@ static int xs_nospace(struct rpc_task *task) | |||
508 | /* Don't race with disconnect */ | 508 | /* Don't race with disconnect */ |
509 | if (xprt_connected(xprt)) { | 509 | if (xprt_connected(xprt)) { |
510 | if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { | 510 | if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { |
511 | ret = -EAGAIN; | ||
512 | /* | 511 | /* |
513 | * Notify TCP that we're limited by the application | 512 | * Notify TCP that we're limited by the application |
514 | * window size | 513 | * window size |
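
The xs_nospace() hunk changes the default return value from 0 to -EAGAIN and drops the assignment inside the SOCK_ASYNC_NOSPACE branch: previously, a connected socket whose NOSPACE bit had already cleared fell through and returned 0, which the caller reads as success even though nothing was transmitted. The sketch below shows the default-to-retry pattern; the -ENOTCONN disconnect branch is assumed from context rather than visible in this hunk.

/* Sketch of the return-value change: treat "could not make progress" as the
 * default (-EAGAIN) and only switch to another code when a specific condition
 * is detected, instead of defaulting to 0 and hoping every branch overrides it. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int handle_nospace(bool connected, bool async_nospace_flag)
{
    int ret = -EAGAIN;                      /* new default: caller must retry */

    if (connected) {
        if (async_nospace_flag) {
            /* ask the transport to notify us when space opens up */
        }
    } else {
        ret = -ENOTCONN;                    /* assumed disconnect handling */
    }
    return ret;
}

int main(void)
{
    /* connected, but the NOSPACE bit already cleared: the old code would have
     * returned 0 here; the new default reports -EAGAIN so the caller retries. */
    printf("%d\n", handle_nospace(true, false));
    return 0;
}
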
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 466fbcc5cf77..b595a3d8679f 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -1957,6 +1957,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1957 | if ((UNIXCB(skb).pid != siocb->scm->pid) || | 1957 | if ((UNIXCB(skb).pid != siocb->scm->pid) || |
1958 | (UNIXCB(skb).cred != siocb->scm->cred)) { | 1958 | (UNIXCB(skb).cred != siocb->scm->cred)) { |
1959 | skb_queue_head(&sk->sk_receive_queue, skb); | 1959 | skb_queue_head(&sk->sk_receive_queue, skb); |
1960 | sk->sk_data_ready(sk, skb->len); | ||
1960 | break; | 1961 | break; |
1961 | } | 1962 | } |
1962 | } else { | 1963 | } else { |
@@ -1974,6 +1975,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1974 | chunk = min_t(unsigned int, skb->len, size); | 1975 | chunk = min_t(unsigned int, skb->len, size); |
1975 | if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { | 1976 | if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { |
1976 | skb_queue_head(&sk->sk_receive_queue, skb); | 1977 | skb_queue_head(&sk->sk_receive_queue, skb); |
1978 | sk->sk_data_ready(sk, skb->len); | ||
1977 | if (copied == 0) | 1979 | if (copied == 0) |
1978 | copied = -EFAULT; | 1980 | copied = -EFAULT; |
1979 | break; | 1981 | break; |
@@ -1991,6 +1993,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1991 | /* put the skb back if we didn't use it up.. */ | 1993 | /* put the skb back if we didn't use it up.. */ |
1992 | if (skb->len) { | 1994 | if (skb->len) { |
1993 | skb_queue_head(&sk->sk_receive_queue, skb); | 1995 | skb_queue_head(&sk->sk_receive_queue, skb); |
1996 | sk->sk_data_ready(sk, skb->len); | ||
1994 | break; | 1997 | break; |
1995 | } | 1998 | } |
1996 | 1999 | ||
@@ -2006,6 +2009,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
2006 | 2009 | ||
2007 | /* put message back and return */ | 2010 | /* put message back and return */ |
2008 | skb_queue_head(&sk->sk_receive_queue, skb); | 2011 | skb_queue_head(&sk->sk_receive_queue, skb); |
2012 | sk->sk_data_ready(sk, skb->len); | ||
2009 | break; | 2013 | break; |
2010 | } | 2014 | } |
2011 | } while (size); | 2015 | } while (size); |
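
Each unix_stream_recvmsg() hunk adds sk->sk_data_ready(sk, skb->len) right after the skb is pushed back onto sk_receive_queue, so waiters are told the data is still there instead of sleeping on a non-empty queue. The sketch below shows the requeue-then-notify pattern with a mutex/condvar pair standing in for the socket queue and sk_data_ready(); it only illustrates the ordering, not the socket code.

/* Sketch of the requeue-and-notify pattern: whenever data is put back on the
 * receive queue instead of being consumed, waiters are woken again so the
 * data is not stranded. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  q_ready = PTHREAD_COND_INITIALIZER;
static int queued_bytes;

static void data_ready(int len)             /* stand-in for sk->sk_data_ready() */
{
    (void)len;
    pthread_cond_broadcast(&q_ready);
}

static void requeue_head(int len)
{
    pthread_mutex_lock(&q_lock);
    queued_bytes += len;                    /* like skb_queue_head() on the receive queue */
    pthread_mutex_unlock(&q_lock);
    data_ready(len);                        /* the call added by the hunks above */
}

int main(void)
{
    requeue_head(512);
    printf("queued=%d\n", queued_bytes);
    return 0;
}
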
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index b3a476fe8272..ffafda5022c2 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -89,8 +89,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { | |||
89 | [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, | 89 | [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, |
90 | [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, | 90 | [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, |
91 | 91 | ||
92 | [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN }, | 92 | [NL80211_ATTR_MAC] = { .len = ETH_ALEN }, |
93 | [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN }, | 93 | [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN }, |
94 | 94 | ||
95 | [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, | 95 | [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, |
96 | [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, | 96 | [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, |
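
The nl80211 policy hunk drops NLA_BINARY from the MAC and PREV_BSSID entries and keeps only .len = ETH_ALEN. For NLA_BINARY the .len field is an upper bound, so a shorter attribute still validates and later users of the "MAC address" can read past its end; with the default (unspecified) attribute type, .len acts as a required minimum and undersized attributes are rejected. The sketch below contrasts the two length semantics in plain C; it is not the netlink validation code itself.

/* Sketch of the validation difference: "len as a maximum" accepts short
 * payloads, "len as a required minimum" rejects them. */
#include <stdbool.h>
#include <stdio.h>

#define ETH_ALEN 6

static bool valid_as_binary_max(int payload_len)   /* old policy semantics */
{
    return payload_len <= ETH_ALEN;                /* a 2-byte "MAC" passes */
}

static bool valid_as_min_len(int payload_len)      /* new policy semantics */
{
    return payload_len >= ETH_ALEN;                /* short payloads rejected */
}

int main(void)
{
    int short_attr = 2;

    printf("old accepts short MAC: %d\n", valid_as_binary_max(short_attr)); /* 1 */
    printf("new accepts short MAC: %d\n", valid_as_min_len(short_attr));    /* 0 */
    return 0;
}
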
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index e71f5a66574e..3302c56f60d1 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -57,8 +57,17 @@ | |||
57 | #define REG_DBG_PRINT(args...) | 57 | #define REG_DBG_PRINT(args...) |
58 | #endif | 58 | #endif |
59 | 59 | ||
60 | static struct regulatory_request core_request_world = { | ||
61 | .initiator = NL80211_REGDOM_SET_BY_CORE, | ||
62 | .alpha2[0] = '0', | ||
63 | .alpha2[1] = '0', | ||
64 | .intersect = false, | ||
65 | .processed = true, | ||
66 | .country_ie_env = ENVIRON_ANY, | ||
67 | }; | ||
68 | |||
60 | /* Receipt of information from last regulatory request */ | 69 | /* Receipt of information from last regulatory request */ |
61 | static struct regulatory_request *last_request; | 70 | static struct regulatory_request *last_request = &core_request_world; |
62 | 71 | ||
63 | /* To trigger userspace events */ | 72 | /* To trigger userspace events */ |
64 | static struct platform_device *reg_pdev; | 73 | static struct platform_device *reg_pdev; |
@@ -150,7 +159,7 @@ static char user_alpha2[2]; | |||
150 | module_param(ieee80211_regdom, charp, 0444); | 159 | module_param(ieee80211_regdom, charp, 0444); |
151 | MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); | 160 | MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); |
152 | 161 | ||
153 | static void reset_regdomains(void) | 162 | static void reset_regdomains(bool full_reset) |
154 | { | 163 | { |
155 | /* avoid freeing static information or freeing something twice */ | 164 | /* avoid freeing static information or freeing something twice */ |
156 | if (cfg80211_regdomain == cfg80211_world_regdom) | 165 | if (cfg80211_regdomain == cfg80211_world_regdom) |
@@ -165,6 +174,13 @@ static void reset_regdomains(void) | |||
165 | 174 | ||
166 | cfg80211_world_regdom = &world_regdom; | 175 | cfg80211_world_regdom = &world_regdom; |
167 | cfg80211_regdomain = NULL; | 176 | cfg80211_regdomain = NULL; |
177 | |||
178 | if (!full_reset) | ||
179 | return; | ||
180 | |||
181 | if (last_request != &core_request_world) | ||
182 | kfree(last_request); | ||
183 | last_request = &core_request_world; | ||
168 | } | 184 | } |
169 | 185 | ||
170 | /* | 186 | /* |
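
The reg.c hunks introduce a statically allocated core_request_world, make it the initial value of last_request, and teach reset_regdomains() to take a full_reset flag: on a full reset the previous dynamically allocated request is freed, the static default never is, and last_request is pointed back at it. The sketch below shows that ownership rule with stand-in types; names other than the ones visible in the hunks are illustrative only.

/* Sketch of the ownership rule: a static "world" request is the permanent
 * fallback, heap-allocated requests replace it temporarily, and reset code
 * frees only what was actually allocated. */
#include <stdlib.h>
#include <string.h>

struct reg_request_sketch { char alpha2[2]; int processed; };

static struct reg_request_sketch core_request_world = {
    .alpha2 = { '0', '0' },
    .processed = 1,
};

static struct reg_request_sketch *last_request = &core_request_world;

static void reset_requests(int full_reset)
{
    /* regdomain pointers would be reset here in either case */
    if (!full_reset)
        return;
    if (last_request != &core_request_world)    /* never free the static default */
        free(last_request);
    last_request = &core_request_world;
}

static void take_new_request(const char *alpha2)
{
    struct reg_request_sketch *req = calloc(1, sizeof(*req));

    if (!req)
        return;
    memcpy(req->alpha2, alpha2, 2);
    if (last_request != &core_request_world)    /* same guard when replacing */
        free(last_request);
    last_request = req;
}

int main(void)
{
    reset_requests(1);          /* safe even when nothing was ever allocated */
    take_new_request("US");
    reset_requests(1);          /* frees the "US" request, restores the default */
    return last_request == &core_request_world ? 0 : 1;
}
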
@@ -175,7 +191,7 @@ static void update_world_regdomain(const struct ieee80211_regdomain *rd) | |||
175 | { | 191 | { |
176 | BUG_ON(!last_request); | 192 | BUG_ON(!last_request); |
177 | 193 | ||
178 | reset_regdomains(); | 194 | reset_regdomains(false); |
179 | 195 | ||
180 | cfg80211_world_regdom = rd; | 196 | cfg80211_world_regdom = rd; |
181 | cfg80211_regdomain = rd; | 197 | cfg80211_regdomain = rd; |
@@ -1407,7 +1423,8 @@ static int __regulatory_hint(struct wiphy *wiphy, | |||
1407 | } | 1423 | } |
1408 | 1424 | ||
1409 | new_request: | 1425 | new_request: |
1410 | kfree(last_request); | 1426 | if (last_request != &core_request_world) |
1427 | kfree(last_request); | ||
1411 | 1428 | ||
1412 | last_request = pending_request; | 1429 | last_request = pending_request; |
1413 | last_request->intersect = intersect; | 1430 | last_request->intersect = intersect; |
@@ -1577,9 +1594,6 @@ static int regulatory_hint_core(const char *alpha2) | |||
1577 | { | 1594 | { |
1578 | struct regulatory_request *request; | 1595 | struct regulatory_request *request; |
1579 | 1596 | ||
1580 | kfree(last_request); | ||
1581 | last_request = NULL; | ||
1582 | |||
1583 | request = kzalloc(sizeof(struct regulatory_request), | 1597 | request = kzalloc(sizeof(struct regulatory_request), |
1584 | GFP_KERNEL); | 1598 | GFP_KERNEL); |
1585 | if (!request) | 1599 | if (!request) |
@@ -1777,7 +1791,7 @@ static void restore_regulatory_settings(bool reset_user) | |||
1777 | mutex_lock(&cfg80211_mutex); | 1791 | mutex_lock(&cfg80211_mutex); |
1778 | mutex_lock(®_mutex); | 1792 | mutex_lock(®_mutex); |
1779 | 1793 | ||
1780 | reset_regdomains(); | 1794 | reset_regdomains(true); |
1781 | restore_alpha2(alpha2, reset_user); | 1795 | restore_alpha2(alpha2, reset_user); |
1782 | 1796 | ||
1783 | /* | 1797 | /* |
@@ -2037,12 +2051,18 @@ static int __set_regdom(const struct ieee80211_regdomain *rd) | |||
2037 | } | 2051 | } |
2038 | 2052 | ||
2039 | request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); | 2053 | request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); |
2054 | if (!request_wiphy && | ||
2055 | (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER || | ||
2056 | last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) { | ||
2057 | schedule_delayed_work(®_timeout, 0); | ||
2058 | return -ENODEV; | ||
2059 | } | ||
2040 | 2060 | ||
2041 | if (!last_request->intersect) { | 2061 | if (!last_request->intersect) { |
2042 | int r; | 2062 | int r; |
2043 | 2063 | ||
2044 | if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) { | 2064 | if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) { |
2045 | reset_regdomains(); | 2065 | reset_regdomains(false); |
2046 | cfg80211_regdomain = rd; | 2066 | cfg80211_regdomain = rd; |
2047 | return 0; | 2067 | return 0; |
2048 | } | 2068 | } |
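
The __set_regdom() hunk above adds a guard for the case where the wiphy that issued a driver or country-IE hint has disappeared before the regdomain answer arrives: instead of applying the answer against a stale request, it schedules the reg_timeout work immediately and returns -ENODEV. The sketch below shows that bail-out pattern; everything except the error code and the initiator distinction is a stand-in.

/* Sketch of the missing-requester guard: when the device that issued the
 * pending hint is gone, defer cleanup to a timeout worker and report -ENODEV
 * rather than applying the regdomain against a stale request. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum initiator { SET_BY_CORE, SET_BY_USER, SET_BY_DRIVER, SET_BY_COUNTRY_IE };

static bool timeout_work_scheduled;

static void schedule_timeout_work(void)        /* stand-in for schedule_delayed_work() */
{
    timeout_work_scheduled = true;
}

static int apply_regdom(enum initiator init, bool requester_present)
{
    if (!requester_present &&
        (init == SET_BY_DRIVER || init == SET_BY_COUNTRY_IE)) {
        schedule_timeout_work();               /* let the timeout path restore settings */
        return -ENODEV;
    }
    /* normal regdomain application continues here */
    return 0;
}

int main(void)
{
    printf("%d\n", apply_regdom(SET_BY_DRIVER, false));   /* -ENODEV, work scheduled */
    printf("scheduled=%d\n", timeout_work_scheduled);
    return 0;
}
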
@@ -2063,7 +2083,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd) | |||
2063 | if (r) | 2083 | if (r) |
2064 | return r; | 2084 | return r; |
2065 | 2085 | ||
2066 | reset_regdomains(); | 2086 | reset_regdomains(false); |
2067 | cfg80211_regdomain = rd; | 2087 | cfg80211_regdomain = rd; |
2068 | return 0; | 2088 | return 0; |
2069 | } | 2089 | } |
@@ -2088,7 +2108,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd) | |||
2088 | 2108 | ||
2089 | rd = NULL; | 2109 | rd = NULL; |
2090 | 2110 | ||
2091 | reset_regdomains(); | 2111 | reset_regdomains(false); |
2092 | cfg80211_regdomain = intersected_rd; | 2112 | cfg80211_regdomain = intersected_rd; |
2093 | 2113 | ||
2094 | return 0; | 2114 | return 0; |
@@ -2108,7 +2128,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd) | |||
2108 | kfree(rd); | 2128 | kfree(rd); |
2109 | rd = NULL; | 2129 | rd = NULL; |
2110 | 2130 | ||
2111 | reset_regdomains(); | 2131 | reset_regdomains(false); |
2112 | cfg80211_regdomain = intersected_rd; | 2132 | cfg80211_regdomain = intersected_rd; |
2113 | 2133 | ||
2114 | return 0; | 2134 | return 0; |
@@ -2261,11 +2281,8 @@ void /* __init_or_exit */ regulatory_exit(void) | |||
2261 | mutex_lock(&cfg80211_mutex); | 2281 | mutex_lock(&cfg80211_mutex); |
2262 | mutex_lock(®_mutex); | 2282 | mutex_lock(®_mutex); |
2263 | 2283 | ||
2264 | reset_regdomains(); | 2284 | reset_regdomains(true); |
2265 | |||
2266 | kfree(last_request); | ||
2267 | 2285 | ||
2268 | last_request = NULL; | ||
2269 | dev_set_uevent_suppress(®_pdev->dev, true); | 2286 | dev_set_uevent_suppress(®_pdev->dev, true); |
2270 | 2287 | ||
2271 | platform_device_unregister(reg_pdev); | 2288 | platform_device_unregister(reg_pdev); |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 552df27dcf53..2118d6446630 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -2382,9 +2382,11 @@ static unsigned int xfrm_default_advmss(const struct dst_entry *dst) | |||
2382 | return dst_metric_advmss(dst->path); | 2382 | return dst_metric_advmss(dst->path); |
2383 | } | 2383 | } |
2384 | 2384 | ||
2385 | static unsigned int xfrm_default_mtu(const struct dst_entry *dst) | 2385 | static unsigned int xfrm_mtu(const struct dst_entry *dst) |
2386 | { | 2386 | { |
2387 | return dst_mtu(dst->path); | 2387 | unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); |
2388 | |||
2389 | return mtu ? : dst_mtu(dst->path); | ||
2388 | } | 2390 | } |
2389 | 2391 | ||
2390 | static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr) | 2392 | static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr) |
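
The xfrm_policy.c hunk renames xfrm_default_mtu() to xfrm_mtu() and makes it prefer an RTAX_MTU metric stored on the xfrm dst itself, falling back to dst_mtu(dst->path) only when no metric is set (the GNU `mtu ? : ...` form); the following hunk then wires it up as dst_ops->mtu in place of the removed default_mtu hook. The sketch below reproduces just the selection logic with stand-in structures.

/* Sketch of the MTU lookup order: prefer an MTU explicitly stored on this
 * dst's metrics, otherwise fall back to the underlying path. Only the
 * "metric ?: fallback" selection is the point here. */
#include <stdio.h>

struct dst_sketch {
    unsigned int mtu_metric;            /* 0 means "no RTAX_MTU metric set" */
    unsigned int path_mtu;              /* what dst_mtu(dst->path) would return */
};

static unsigned int xfrm_mtu_sketch(const struct dst_sketch *dst)
{
    unsigned int mtu = dst->mtu_metric;

    return mtu ? mtu : dst->path_mtu;   /* same selection as `mtu ? : dst_mtu(...)` */
}

int main(void)
{
    struct dst_sketch with_metric = { .mtu_metric = 1400, .path_mtu = 1500 };
    struct dst_sketch without     = { .mtu_metric = 0,    .path_mtu = 1500 };

    printf("%u %u\n", xfrm_mtu_sketch(&with_metric), xfrm_mtu_sketch(&without));
    return 0;
}
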
@@ -2411,8 +2413,8 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) | |||
2411 | dst_ops->check = xfrm_dst_check; | 2413 | dst_ops->check = xfrm_dst_check; |
2412 | if (likely(dst_ops->default_advmss == NULL)) | 2414 | if (likely(dst_ops->default_advmss == NULL)) |
2413 | dst_ops->default_advmss = xfrm_default_advmss; | 2415 | dst_ops->default_advmss = xfrm_default_advmss; |
2414 | if (likely(dst_ops->default_mtu == NULL)) | 2416 | if (likely(dst_ops->mtu == NULL)) |
2415 | dst_ops->default_mtu = xfrm_default_mtu; | 2417 | dst_ops->mtu = xfrm_mtu; |
2416 | if (likely(dst_ops->negative_advice == NULL)) | 2418 | if (likely(dst_ops->negative_advice == NULL)) |
2417 | dst_ops->negative_advice = xfrm_negative_advice; | 2419 | dst_ops->negative_advice = xfrm_negative_advice; |
2418 | if (likely(dst_ops->link_failure == NULL)) | 2420 | if (likely(dst_ops->link_failure == NULL)) |