Diffstat (limited to 'net')
34 files changed, 187 insertions, 171 deletions
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 29b1b220d6cf..ef09c7b3a858 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -78,7 +78,7 @@ static struct bnep_session *__bnep_get_session(u8 *dst)
 static void __bnep_link_session(struct bnep_session *s)
 {
 /* It's safe to call __module_get() here because sessions are added
-by the socket layer which has to hold the refference to this module.
+by the socket layer which has to hold the reference to this module.
 */
 __module_get(THIS_MODULE);
 list_add(&s->list, &bnep_session_list);
@@ -632,7 +632,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
 s = __bnep_get_session(req->dst);
 if (s) {
 /* Wakeup user-space which is polling for socket errors.
-* This is temporary hack untill we have shutdown in L2CAP */
+* This is temporary hack until we have shutdown in L2CAP */
 s->sock->sk->sk_err = EUNATCH;

 /* Kill session thread */
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 569750010fd3..18e7f5a43dc4 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -770,7 +770,7 @@ static int hidp_setup_hid(struct hidp_session *session,

 hid = hid_allocate_device();
 if (IS_ERR(hid))
-return PTR_ERR(session->hid);
+return PTR_ERR(hid);

 session->hid = hid;
 session->req = req;
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 5129b88c8e5b..1120cf14a548 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1212,6 +1212,7 @@ static void l2cap_monitor_timeout(unsigned long arg)
 bh_lock_sock(sk);
 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
+bh_unlock_sock(sk);
 return;
 }

@@ -3435,8 +3436,8 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
 (pi->unacked_frames > 0))
 __mod_retrans_timer();

-l2cap_ertm_send(sk);
 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+l2cap_ertm_send(sk);
 }
 break;

@@ -3471,9 +3472,9 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

 if (rx_control & L2CAP_CTRL_POLL) {
-l2cap_retransmit_frame(sk, tx_seq);
 pi->expected_ack_seq = tx_seq;
 l2cap_drop_acked_frames(sk);
+l2cap_retransmit_frame(sk, tx_seq);
 l2cap_ertm_send(sk);
 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
 pi->srej_save_reqseq = tx_seq;
diff --git a/net/compat.c b/net/compat.c
index e1a56ade803b..a1fb1b079a82 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -754,26 +754,21 @@ asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,

 asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
 unsigned vlen, unsigned int flags,
-struct timespec __user *timeout)
+struct compat_timespec __user *timeout)
 {
 int datagrams;
 struct timespec ktspec;
-struct compat_timespec __user *utspec;

 if (timeout == NULL)
 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
 flags | MSG_CMSG_COMPAT, NULL);

-utspec = (struct compat_timespec __user *)timeout;
-if (get_user(ktspec.tv_sec, &utspec->tv_sec) ||
-get_user(ktspec.tv_nsec, &utspec->tv_nsec))
+if (get_compat_timespec(&ktspec, timeout))
 return -EFAULT;

 datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
 flags | MSG_CMSG_COMPAT, &ktspec);
-if (datagrams > 0 &&
-(put_user(ktspec.tv_sec, &utspec->tv_sec) ||
-put_user(ktspec.tv_nsec, &utspec->tv_nsec)))
+if (datagrams > 0 && put_compat_timespec(&ktspec, timeout))
 datagrams = -EFAULT;

 return datagrams;
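Note on the net/compat.c hunk above: the open-coded get_user()/put_user() pairs are replaced by the existing get_compat_timespec()/put_compat_timespec() helpers. The userspace sketch below only illustrates the sec/nsec widening and narrowing that such a conversion performs; the struct layout and function names here are illustrative, not the kernel's.

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* Hypothetical 32-bit layout, standing in for what a compat caller passes. */
struct compat_timespec32 {
    int32_t tv_sec;
    int32_t tv_nsec;
};

/* Widen the 32-bit fields into the native struct timespec. */
static void get_compat_ts(struct timespec *ts, const struct compat_timespec32 *cts)
{
    ts->tv_sec = cts->tv_sec;
    ts->tv_nsec = cts->tv_nsec;
}

/* Narrow the native fields back for the 32-bit caller. */
static void put_compat_ts(const struct timespec *ts, struct compat_timespec32 *cts)
{
    cts->tv_sec = (int32_t)ts->tv_sec;
    cts->tv_nsec = (int32_t)ts->tv_nsec;
}

int main(void)
{
    struct compat_timespec32 cts = { .tv_sec = 5, .tv_nsec = 250000000 };
    struct timespec ts;

    get_compat_ts(&ts, &cts);
    ts.tv_nsec /= 2;            /* pretend the call updated the remaining timeout */
    put_compat_ts(&ts, &cts);
    printf("%ld.%09ld\n", (long)cts.tv_sec, (long)cts.tv_nsec);
    return 0;
}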
diff --git a/net/core/dev.c b/net/core/dev.c
index c36a17aafcf3..be9924f60ec3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4771,21 +4771,23 @@ static void net_set_todo(struct net_device *dev)

 static void rollback_registered_many(struct list_head *head)
 {
-struct net_device *dev;
+struct net_device *dev, *tmp;

 BUG_ON(dev_boot_phase);
 ASSERT_RTNL();

-list_for_each_entry(dev, head, unreg_list) {
+list_for_each_entry_safe(dev, tmp, head, unreg_list) {
 /* Some devices call without registering
-* for initialization unwind.
+* for initialization unwind. Remove those
+* devices and proceed with the remaining.
 */
 if (dev->reg_state == NETREG_UNINITIALIZED) {
 pr_debug("unregister_netdevice: device %s/%p never "
 "was registered\n", dev->name, dev);

 WARN_ON(1);
-return;
+list_del(&dev->unreg_list);
+continue;
 }

 BUG_ON(dev->reg_state != NETREG_REGISTERED);
@@ -5033,6 +5035,11 @@ int register_netdevice(struct net_device *dev)
 rollback_registered(dev);
 dev->reg_state = NETREG_UNREGISTERED;
 }
+/*
+* Prevent userspace races by waiting until the network
+* device is fully setup before sending notifications.
+*/
+rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

 out:
 return ret;
@@ -5595,6 +5602,12 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 /* Notify protocols, that a new device appeared. */
 call_netdevice_notifiers(NETDEV_REGISTER, dev);

+/*
+* Prevent userspace races by waiting until the network
+* device is fully setup before sending notifications.
+*/
+rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+
 synchronize_net();
 err = 0;
 out:
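Note on the rollback_registered_many() hunk above: switching to list_for_each_entry_safe() lets an entry be unlinked (list_del() plus continue) without breaking the walk, because the macro caches the next entry in an extra cursor. A minimal userspace sketch of the same save-the-next-node idiom follows, using a toy singly linked list rather than the kernel's list.h API.

#include <stdio.h>
#include <stdlib.h>

struct node {
    int value;
    struct node *next;
};

static struct node *push(struct node *head, int value)
{
    struct node *n = malloc(sizeof(*n));

    if (!n)
        exit(1);
    n->value = value;
    n->next = head;
    return n;
}

int main(void)
{
    struct node *head = NULL, *cur, *next, **link;
    int i;

    for (i = 0; i < 6; i++)
        head = push(head, i);

    /* Drop odd values while walking: fetch 'next' before possibly freeing
     * the current node, the same trick the _safe iteration macro relies on. */
    link = &head;
    for (cur = head; cur; cur = next) {
        next = cur->next;
        if (cur->value & 1) {
            *link = next;   /* unlink the current node */
            free(cur);      /* safe: we already saved its successor */
            continue;
        }
        link = &cur->next;
    }

    for (cur = head; cur; cur = cur->next)
        printf("%d\n", cur->value);
    return 0;
}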
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 33148a568199..794bcb897ff0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1364,15 +1364,15 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
 case NETDEV_UNREGISTER:
 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
 break;
-case NETDEV_REGISTER:
-rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
-break;
 case NETDEV_UP:
 case NETDEV_DOWN:
 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
 break;
+case NETDEV_POST_INIT:
+case NETDEV_REGISTER:
 case NETDEV_CHANGE:
 case NETDEV_GOING_DOWN:
+case NETDEV_UNREGISTER_BATCH:
 break;
 default:
 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index 549e206cdd42..ea5cea2415c1 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -50,7 +50,7 @@ set_ect_tcp(struct sk_buff *skb, const struct ipt_ECN_info *einfo)
 struct tcphdr _tcph, *tcph;
 __be16 oldval;

-/* Not enought header? */
+/* Not enough header? */
 tcph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
 if (!tcph)
 return false;
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index fa2d6b6fc3e5..331ead3ebd1b 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -14,6 +14,7 @@
 #include <net/route.h>
 #include <net/ip.h>

+#include <linux/netfilter_bridge.h>
 #include <linux/netfilter_ipv4.h>
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>

@@ -34,6 +35,20 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 return err;
 }

+static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
+struct sk_buff *skb)
+{
+#ifdef CONFIG_BRIDGE_NETFILTER
+if (skb->nf_bridge &&
+skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
+return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
+#endif
+if (hooknum == NF_INET_PRE_ROUTING)
+return IP_DEFRAG_CONNTRACK_IN;
+else
+return IP_DEFRAG_CONNTRACK_OUT;
+}
+
 static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
 struct sk_buff *skb,
 const struct net_device *in,
@@ -50,10 +65,8 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
 #endif
 /* Gather fragments. */
 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
-if (nf_ct_ipv4_gather_frags(skb,
-hooknum == NF_INET_PRE_ROUTING ?
-IP_DEFRAG_CONNTRACK_IN :
-IP_DEFRAG_CONNTRACK_OUT))
+enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
+if (nf_ct_ipv4_gather_frags(skb, user))
 return NF_STOLEN;
 }
 return NF_ACCEPT;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 26399ad2a289..66fd80ef2473 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -277,6 +277,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,

 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);

+/* check for timestamp cookie support */
+memset(&tcp_opt, 0, sizeof(tcp_opt));
+tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+
+if (tcp_opt.saw_tstamp)
+cookie_check_timestamp(&tcp_opt);
+
 ret = NULL;
 req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
 if (!req)
@@ -292,6 +299,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 ireq->loc_addr = ip_hdr(skb)->daddr;
 ireq->rmt_addr = ip_hdr(skb)->saddr;
 ireq->ecn_ok = 0;
+ireq->snd_wscale = tcp_opt.snd_wscale;
+ireq->rcv_wscale = tcp_opt.rcv_wscale;
+ireq->sack_ok = tcp_opt.sack_ok;
+ireq->wscale_ok = tcp_opt.wscale_ok;
+ireq->tstamp_ok = tcp_opt.saw_tstamp;
+req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;

 /* We throwed the options of the initial SYN away, so we hope
 * the ACK carries the same options again (see RFC1122 4.2.3.8)
@@ -340,20 +353,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 }
 }

-/* check for timestamp cookie support */
-memset(&tcp_opt, 0, sizeof(tcp_opt));
-tcp_parse_options(skb, &tcp_opt, &hash_location, 0, &rt->u.dst);
-
-if (tcp_opt.saw_tstamp)
-cookie_check_timestamp(&tcp_opt);
-
-ireq->snd_wscale = tcp_opt.snd_wscale;
-ireq->rcv_wscale = tcp_opt.rcv_wscale;
-ireq->sack_ok = tcp_opt.sack_ok;
-ireq->wscale_ok = tcp_opt.wscale_ok;
-ireq->tstamp_ok = tcp_opt.saw_tstamp;
-req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-
 /* Try to redo what tcp_v4_send_synack did. */
 req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW);

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 12cab7d74dba..28e029632493 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3727,7 +3727,7 @@ old_ack:
 * the fast version below fails.
 */
 void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
-u8 **hvpp, int estab, struct dst_entry *dst)
+u8 **hvpp, int estab)
 {
 unsigned char *ptr;
 struct tcphdr *th = tcp_hdr(skb);
@@ -3766,8 +3766,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 break;
 case TCPOPT_WINDOW:
 if (opsize == TCPOLEN_WINDOW && th->syn &&
-!estab && sysctl_tcp_window_scaling &&
-!dst_feature(dst, RTAX_FEATURE_NO_WSCALE)) {
+!estab && sysctl_tcp_window_scaling) {
 __u8 snd_wscale = *(__u8 *)ptr;
 opt_rx->wscale_ok = 1;
 if (snd_wscale > 14) {
@@ -3783,8 +3782,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 case TCPOPT_TIMESTAMP:
 if ((opsize == TCPOLEN_TIMESTAMP) &&
 ((estab && opt_rx->tstamp_ok) ||
-(!estab && sysctl_tcp_timestamps &&
-!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP)))) {
+(!estab && sysctl_tcp_timestamps))) {
 opt_rx->saw_tstamp = 1;
 opt_rx->rcv_tsval = get_unaligned_be32(ptr);
 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
@@ -3792,8 +3790,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 break;
 case TCPOPT_SACK_PERM:
 if (opsize == TCPOLEN_SACK_PERM && th->syn &&
-!estab && sysctl_tcp_sack &&
-!dst_feature(dst, RTAX_FEATURE_NO_SACK)) {
+!estab && sysctl_tcp_sack) {
 opt_rx->sack_ok = 1;
 tcp_sack_reset(opt_rx);
 }
@@ -3878,7 +3875,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
 if (tcp_parse_aligned_timestamp(tp, th))
 return 1;
 }
-tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
+tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
 return 1;
 }

@@ -4133,10 +4130,8 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 {
 struct tcp_sock *tp = tcp_sk(sk);
-struct dst_entry *dst = __sk_dst_get(sk);

-if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
-!dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
+if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
 int mib_idx;

 if (before(seq, tp->rcv_nxt))
@@ -4165,15 +4160,13 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
 static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 {
 struct tcp_sock *tp = tcp_sk(sk);
-struct dst_entry *dst = __sk_dst_get(sk);

 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 tcp_enter_quickack_mode(sk);

-if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
-!dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
+if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
 u32 end_seq = TCP_SKB_CB(skb)->end_seq;

 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
@@ -5428,11 +5421,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 u8 *hash_location;
 struct inet_connection_sock *icsk = inet_csk(sk);
 struct tcp_sock *tp = tcp_sk(sk);
-struct dst_entry *dst = __sk_dst_get(sk);
 struct tcp_cookie_values *cvp = tp->cookie_values;
 int saved_clamp = tp->rx_opt.mss_clamp;

-tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, dst);
+tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);

 if (th->ack) {
 /* rfc793:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 15e96030ce47..65b8ebfd078a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1262,20 +1262,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
 #endif

-ireq = inet_rsk(req);
-ireq->loc_addr = daddr;
-ireq->rmt_addr = saddr;
-ireq->no_srccheck = inet_sk(sk)->transparent;
-ireq->opt = tcp_v4_save_options(sk, skb);
-
-dst = inet_csk_route_req(sk, req);
-if(!dst)
-goto drop_and_free;
-
 tcp_clear_options(&tmp_opt);
 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
 tmp_opt.user_mss = tp->rx_opt.user_mss;
-tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
+tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

 if (tmp_opt.cookie_plus > 0 &&
 tmp_opt.saw_tstamp &&
@@ -1319,8 +1309,14 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
 tcp_openreq_init(req, &tmp_opt, skb);

+ireq = inet_rsk(req);
+ireq->loc_addr = daddr;
+ireq->rmt_addr = saddr;
+ireq->no_srccheck = inet_sk(sk)->transparent;
+ireq->opt = tcp_v4_save_options(sk, skb);
+
 if (security_inet_conn_request(sk, skb, req))
-goto drop_and_release;
+goto drop_and_free;

 if (!want_cookie)
 TCP_ECN_create_request(req, tcp_hdr(skb));
@@ -1345,6 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 */
 if (tmp_opt.saw_tstamp &&
 tcp_death_row.sysctl_tw_recycle &&
+(dst = inet_csk_route_req(sk, req)) != NULL &&
 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
 peer->v4daddr == saddr) {
 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 87accec8d097..f206ee5dda80 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -95,9 +95,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 int paws_reject = 0;

+tmp_opt.saw_tstamp = 0;
 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
-tmp_opt.tstamp_ok = 1;
-tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
+tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

 if (tmp_opt.saw_tstamp) {
 tmp_opt.ts_recent = tcptw->tw_ts_recent;
@@ -526,9 +526,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
 int paws_reject = 0;

-if ((th->doff > (sizeof(*th) >> 2)) && (req->ts_recent)) {
-tmp_opt.tstamp_ok = 1;
-tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
+tmp_opt.saw_tstamp = 0;
+if (th->doff > (sizeof(struct tcphdr)>>2)) {
+tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

 if (tmp_opt.saw_tstamp) {
 tmp_opt.ts_recent = req->ts_recent;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 93316a96d820..383ce237640f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -553,7 +553,6 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 struct tcp_md5sig_key **md5) {
 struct tcp_sock *tp = tcp_sk(sk);
 struct tcp_cookie_values *cvp = tp->cookie_values;
-struct dst_entry *dst = __sk_dst_get(sk);
 unsigned remaining = MAX_TCP_OPTION_SPACE;
 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
 tcp_cookie_size_check(cvp->cookie_desired) :
@@ -581,22 +580,18 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 opts->mss = tcp_advertise_mss(sk);
 remaining -= TCPOLEN_MSS_ALIGNED;

-if (likely(sysctl_tcp_timestamps &&
-!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) &&
-*md5 == NULL)) {
+if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
 opts->options |= OPTION_TS;
 opts->tsval = TCP_SKB_CB(skb)->when;
 opts->tsecr = tp->rx_opt.ts_recent;
 remaining -= TCPOLEN_TSTAMP_ALIGNED;
 }
-if (likely(sysctl_tcp_window_scaling &&
-!dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) {
+if (likely(sysctl_tcp_window_scaling)) {
 opts->ws = tp->rx_opt.rcv_wscale;
 opts->options |= OPTION_WSCALE;
 remaining -= TCPOLEN_WSCALE_ALIGNED;
 }
-if (likely(sysctl_tcp_sack &&
-!dst_feature(dst, RTAX_FEATURE_NO_SACK))) {
+if (likely(sysctl_tcp_sack)) {
 opts->options |= OPTION_SACK_ADVERTISE;
 if (unlikely(!(OPTION_TS & opts->options)))
 remaining -= TCPOLEN_SACKPERM_ALIGNED;
@@ -2527,9 +2522,7 @@ static void tcp_connect_init(struct sock *sk)
 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
 */
 tp->tcp_header_len = sizeof(struct tcphdr) +
-(sysctl_tcp_timestamps &&
-(!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) ?
-TCPOLEN_TSTAMP_ALIGNED : 0));
+(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);

 #ifdef CONFIG_TCP_MD5SIG
 if (tp->af_specific->md5_lookup(sk, sk) != NULL)
@@ -2555,8 +2548,7 @@ static void tcp_connect_init(struct sock *sk)
 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
 &tp->rcv_wnd,
 &tp->window_clamp,
-(sysctl_tcp_window_scaling &&
-!dst_feature(dst, RTAX_FEATURE_NO_WSCALE)),
+sysctl_tcp_window_scaling,
 &rcv_wscale);

 tp->rx_opt.rcv_wscale = rcv_wscale;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1f9534846ca9..f0126fdd7e04 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -216,9 +216,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
 * force rand to be an odd multiple of UDP_HTABLE_SIZE
 */
 rand = (rand | 1) * (udptable->mask + 1);
-for (last = first + udptable->mask + 1;
-first != last;
-first++) {
+last = first + udptable->mask + 1;
+do {
 hslot = udp_hashslot(udptable, net, first);
 bitmap_zero(bitmap, PORTS_PER_CHAIN);
 spin_lock_bh(&hslot->lock);
@@ -238,7 +237,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
 snum += rand;
 } while (snum != first);
 spin_unlock_bh(&hslot->lock);
-}
+} while (++first != last);
 goto fail;
 } else {
 hslot = udp_hashslot(udptable, net, snum);
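Note on the net/ipv4/udp.c hunk above: the pre-tested for loop over the hash slots becomes a post-tested do/while, so the body is guaranteed to run at least once even if first and last compare equal. The standalone sketch below only illustrates that general property with deliberately wrapping 8-bit counters; it is not a claim about the exact failure mode in udp_lib_get_port().

#include <stdio.h>

int main(void)
{
    unsigned char first = 37, last, i;
    unsigned int span = 256;        /* a full wrap of the 8-bit counter */
    unsigned int visited;

    last = (unsigned char)(first + span);   /* wraps around: last == first */

    /* Pre-tested loop: the condition is already false, so zero slots are seen. */
    visited = 0;
    for (i = first; i != last; i++)
        visited++;
    printf("for loop visited %u slots\n", visited);

    /* Post-tested loop: the body runs first, so all 256 slots are seen. */
    visited = 0;
    i = first;
    do {
        visited++;
    } while (++i != last);
    printf("do/while visited %u slots\n", visited);

    return 0;
}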
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 5f2ec208a8c3..0956ebabbff2 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -20,6 +20,7 @@
 #include <net/ipv6.h>
 #include <net/inet_frag.h>

+#include <linux/netfilter_bridge.h>
 #include <linux/netfilter_ipv6.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_helper.h>
@@ -187,6 +188,21 @@ out:
 return nf_conntrack_confirm(skb);
 }

+static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
+struct sk_buff *skb)
+{
+#ifdef CONFIG_BRIDGE_NETFILTER
+if (skb->nf_bridge &&
+skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
+return IP6_DEFRAG_CONNTRACK_BRIDGE_IN;
+#endif
+if (hooknum == NF_INET_PRE_ROUTING)
+return IP6_DEFRAG_CONNTRACK_IN;
+else
+return IP6_DEFRAG_CONNTRACK_OUT;
+
+}
+
 static unsigned int ipv6_defrag(unsigned int hooknum,
 struct sk_buff *skb,
 const struct net_device *in,
@@ -199,8 +215,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
 if (skb->nfct)
 return NF_ACCEPT;

-reasm = nf_ct_frag6_gather(skb);
-
+reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
 /* queued */
 if (reasm == NULL)
 return NF_STOLEN;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index e0b9424fa1b2..312c20adc83f 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -168,13 +168,14 @@ out:
 /* Creation primitives. */

 static __inline__ struct nf_ct_frag6_queue *
-fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
+fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
 {
 struct inet_frag_queue *q;
 struct ip6_create_arg arg;
 unsigned int hash;

 arg.id = id;
+arg.user = user;
 arg.src = src;
 arg.dst = dst;

@@ -559,7 +560,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
 return 0;
 }

-struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
+struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
 {
 struct sk_buff *clone;
 struct net_device *dev = skb->dev;
@@ -605,7 +606,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
 if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
 nf_ct_frag6_evictor();

-fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
+fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
 if (fq == NULL) {
 pr_debug("Can't find and can't create new queue\n");
 goto ret_orig;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 4d98549a6868..2cddea3bd6be 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -72,6 +72,7 @@ struct frag_queue
 struct inet_frag_queue q;

 __be32 id; /* fragment id */
+u32 user;
 struct in6_addr saddr;
 struct in6_addr daddr;

@@ -141,7 +142,7 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
 struct ip6_create_arg *arg = a;

 fq = container_of(q, struct frag_queue, q);
-return (fq->id == arg->id &&
+return (fq->id == arg->id && fq->user == arg->user &&
 ipv6_addr_equal(&fq->saddr, arg->src) &&
 ipv6_addr_equal(&fq->daddr, arg->dst));
 }
@@ -163,6 +164,7 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
 struct ip6_create_arg *arg = a;

 fq->id = arg->id;
+fq->user = arg->user;
 ipv6_addr_copy(&fq->saddr, arg->src);
 ipv6_addr_copy(&fq->daddr, arg->dst);
 }
@@ -243,6 +245,7 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
 unsigned int hash;

 arg.id = id;
+arg.user = IP6_DEFRAG_LOCAL_DELIVER;
 arg.src = src;
 arg.dst = dst;

@@ -705,7 +708,8 @@ static void ip6_frags_ns_sysctl_unregister(struct net *net)

 table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
 unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
-kfree(table);
+if (!net_eq(net, &init_net))
+kfree(table);
 }

 static struct ctl_table_header *ip6_ctl_header;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index db3b27303890..c2bd74c5f8d9 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2630,6 +2630,7 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net)
 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
+table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
 }

 return table;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5b9af508b8f2..7208a06576c6 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -185,6 +185,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)

 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);

+/* check for timestamp cookie support */
+memset(&tcp_opt, 0, sizeof(tcp_opt));
+tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+
+if (tcp_opt.saw_tstamp)
+cookie_check_timestamp(&tcp_opt);
+
 ret = NULL;
 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
 if (!req)
@@ -218,6 +225,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 req->expires = 0UL;
 req->retrans = 0;
 ireq->ecn_ok = 0;
+ireq->snd_wscale = tcp_opt.snd_wscale;
+ireq->rcv_wscale = tcp_opt.rcv_wscale;
+ireq->sack_ok = tcp_opt.sack_ok;
+ireq->wscale_ok = tcp_opt.wscale_ok;
+ireq->tstamp_ok = tcp_opt.saw_tstamp;
+req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
 treq->rcv_isn = ntohl(th->seq) - 1;
 treq->snt_isn = cookie;

@@ -253,21 +266,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 goto out_free;
 }

-/* check for timestamp cookie support */
-memset(&tcp_opt, 0, sizeof(tcp_opt));
-tcp_parse_options(skb, &tcp_opt, &hash_location, 0, dst);
-
-if (tcp_opt.saw_tstamp)
-cookie_check_timestamp(&tcp_opt);
-
-req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-
-ireq->snd_wscale = tcp_opt.snd_wscale;
-ireq->rcv_wscale = tcp_opt.rcv_wscale;
-ireq->sack_ok = tcp_opt.sack_ok;
-ireq->wscale_ok = tcp_opt.wscale_ok;
-ireq->tstamp_ok = tcp_opt.saw_tstamp;
-
 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
 tcp_select_initial_window(tcp_full_space(sk), req->mss,
 &req->rcv_wnd, &req->window_clamp,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index ee9cf62458d4..febfd595a40d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1169,7 +1169,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 struct inet6_request_sock *treq;
 struct ipv6_pinfo *np = inet6_sk(sk);
 struct tcp_sock *tp = tcp_sk(sk);
-struct dst_entry *dst = __sk_dst_get(sk);
 __u32 isn = TCP_SKB_CB(skb)->when;
 #ifdef CONFIG_SYN_COOKIES
 int want_cookie = 0;
@@ -1208,7 +1207,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 tcp_clear_options(&tmp_opt);
 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 tmp_opt.user_mss = tp->rx_opt.user_mss;
-tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
+tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

 if (tmp_opt.cookie_plus > 0 &&
 tmp_opt.saw_tstamp &&
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 356e65b1dc42..783c5f367d29 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -450,10 +450,10 @@ void irlap_disconnect_request(struct irlap_cb *self)

 /* Check if we are in the right state for disconnecting */
 switch (self->state) {
-case LAP_XMIT_P: /* FALLTROUGH */
-case LAP_XMIT_S: /* FALLTROUGH */
-case LAP_CONN: /* FALLTROUGH */
-case LAP_RESET_WAIT: /* FALLTROUGH */
+case LAP_XMIT_P: /* FALLTHROUGH */
+case LAP_XMIT_S: /* FALLTHROUGH */
+case LAP_CONN: /* FALLTHROUGH */
+case LAP_RESET_WAIT: /* FALLTHROUGH */
 case LAP_RESET_CHECK:
 irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
 break;
@@ -485,9 +485,9 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
 IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__);
 irlap_do_event(self, RESET_REQUEST, NULL, NULL);
 break;
-case LAP_NO_RESPONSE: /* FALLTROUGH */
-case LAP_DISC_INDICATION: /* FALLTROUGH */
-case LAP_FOUND_NONE: /* FALLTROUGH */
+case LAP_NO_RESPONSE: /* FALLTHROUGH */
+case LAP_DISC_INDICATION: /* FALLTHROUGH */
+case LAP_FOUND_NONE: /* FALLTHROUGH */
 case LAP_MEDIA_BUSY:
 irlmp_link_disconnect_indication(self->notify.instance, self,
 reason, NULL);
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index c5c51959e3ce..94a9884d7146 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -1741,7 +1741,7 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event,
 * Function irlap_state_xmit_s (event, skb, info)
 *
 * XMIT_S, The secondary station has been given the right to transmit,
-* and we therefor do not expect to receive any transmissions from other
+* and we therefore do not expect to receive any transmissions from other
 * stations.
 */
 static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 7bf5b913828b..0e7d8bde145d 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -105,7 +105,7 @@ int __init irlmp_init(void)

 init_timer(&irlmp->discovery_timer);

-/* Do discovery every 3 seconds, conditionaly */
+/* Do discovery every 3 seconds, conditionally */
 if (sysctl_discovery)
 irlmp_start_discovery_timer(irlmp,
 sysctl_discovery_timeout*HZ);
@@ -1842,7 +1842,7 @@ LM_REASON irlmp_convert_lap_reason( LAP_REASON lap_reason)
 reason = LM_CONNECT_FAILURE;
 break;
 default:
-IRDA_DEBUG(1, "%s(), Unknow IrLAP disconnect reason %d!\n",
+IRDA_DEBUG(1, "%s(), Unknown IrLAP disconnect reason %d!\n",
 __func__, lap_reason);
 reason = LM_LAP_DISCONNECT;
 break;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 84209fbbeb17..76fa6fef6473 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1193,6 +1193,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 x->aalg->alg_key_len = key->sadb_key_bits;
 memcpy(x->aalg->alg_key, key+1, keysize);
 }
+x->aalg->alg_trunc_len = a->uinfo.auth.icv_truncbits;
 x->props.aalgo = sa->sadb_sa_auth;
 /* x->algo.flags = sa->sadb_sa_flags; */
 }
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index a8da23905c70..0192cfdacae4 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -244,7 +244,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
 * @addr: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
-* Returns: 0 on sucess
+* Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
@@ -532,7 +532,7 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
-* Returns: 0 if succesful
+* Returns: 0 if successful
 */
 int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
 {
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b95699f00545..847ffca40184 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1366,6 +1366,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
 == sysctl_ip_vs_sync_threshold[0])) ||
 ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
 ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
+(cp->state == IP_VS_TCP_S_CLOSE) ||
 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
 (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
 ip_vs_sync_conn(cp);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index e55a6861d26f..6bde12da2fe0 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2714,6 +2714,8 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
 if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
 return -EINVAL;

+memset(usvc, 0, sizeof(*usvc));
+
 usvc->af = nla_get_u16(nla_af);
 #ifdef CONFIG_IP_VS_IPV6
 if (usvc->af != AF_INET && usvc->af != AF_INET6)
@@ -2901,6 +2903,8 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
 if (!(nla_addr && nla_port))
 return -EINVAL;

+memset(udest, 0, sizeof(*udest));
+
 nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
 udest->port = nla_get_u16(nla_port);

diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 7a10bbe02c13..c5d9f97ef217 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -682,7 +682,7 @@ struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
 * buckets and @skip_chain entries. For each entry in the table call
 * @callback, if @callback returns a negative value stop 'walking' through the
 * table and return. Updates the values in @skip_bkt and @skip_chain on
-* return. Returns zero on succcess, negative values on failure.
+* return. Returns zero on success, negative values on failure.
 *
 */
 int netlbl_domhsh_walk(u32 *skip_bkt,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 020562164b56..e0516a22be2e 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -415,7 +415,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
 {
 struct sock *sk = sock->sk;
 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
-struct sk_buff *skb;
+struct sk_buff *skb = NULL;
 struct net_device *dev;
 __be16 proto = 0;
 int err;
@@ -437,6 +437,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
 */

 saddr->spkt_device[13] = 0;
+retry:
 rcu_read_lock();
 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
 err = -ENODEV;
@@ -456,58 +457,48 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
 if (len > dev->mtu + dev->hard_header_len)
 goto out_unlock;

-err = -ENOBUFS;
-skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);
-
-/*
-* If the write buffer is full, then tough. At this level the user
-* gets to deal with the problem - do your own algorithmic backoffs.
-* That's far more flexible.
-*/
-
-if (skb == NULL)
-goto out_unlock;
-
-/*
-* Fill it in
-*/
-
-/* FIXME: Save some space for broken drivers that write a
-* hard header at transmission time by themselves. PPP is the
-* notable one here. This should really be fixed at the driver level.
-*/
-skb_reserve(skb, LL_RESERVED_SPACE(dev));
-skb_reset_network_header(skb);
-
-/* Try to align data part correctly */
-if (dev->header_ops) {
-skb->data -= dev->hard_header_len;
-skb->tail -= dev->hard_header_len;
-if (len < dev->hard_header_len)
-skb_reset_network_header(skb);
+if (!skb) {
+size_t reserved = LL_RESERVED_SPACE(dev);
+unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
+
+rcu_read_unlock();
+skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
+if (skb == NULL)
+return -ENOBUFS;
+/* FIXME: Save some space for broken drivers that write a hard
+* header at transmission time by themselves. PPP is the notable
+* one here. This should really be fixed at the driver level.
+*/
+skb_reserve(skb, reserved);
+skb_reset_network_header(skb);
+
+/* Try to align data part correctly */
+if (hhlen) {
+skb->data -= hhlen;
+skb->tail -= hhlen;
+if (len < hhlen)
+skb_reset_network_header(skb);
+}
+err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+if (err)
+goto out_free;
+goto retry;
 }

-/* Returns -EFAULT on error */
-err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+
 skb->protocol = proto;
 skb->dev = dev;
 skb->priority = sk->sk_priority;
 skb->mark = sk->sk_mark;
-if (err)
-goto out_free;
-
-/*
-* Now send it
-*/

 dev_queue_xmit(skb);
 rcu_read_unlock();
 return len;

-out_free:
-kfree_skb(skb);
 out_unlock:
 rcu_read_unlock();
+out_free:
+kfree_skb(skb);
 return err;
 }

diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 2a740035aa6b..64f5e328cee9 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -598,7 +598,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
 goto errout;

 /* compat_mode being true specifies a call that is supposed
-* to add additional backward compatiblity statistic TLVs.
+* to add additional backward compatibility statistic TLVs.
 */
 if (compat_mode) {
 if (a->type == TCA_OLD_COMPAT)
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d771cc1b777a..4e4ca65cd320 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -717,7 +717,7 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,

 if (sctp_style(sk, TCP)) {
 /* Change the sk->sk_state of a TCP-style socket that has
-* sucessfully completed a connect() call.
+* successfully completed a connect() call.
 */
 if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
 sk->sk_state = SCTP_SS_ESTABLISHED;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 1ef9de9bbae9..47bc20d3a85b 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3577,7 +3577,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
 * To do this properly, we'll set the destination address of the chunk
 * and at the transmit time, will try look up the transport to use.
 * Since ASCONFs may be bundled, the correct transport may not be
-* created untill we process the entire packet, thus this workaround.
+* created until we process the entire packet, thus this workaround.
 */
 asconf_ack->dest = chunk->source;
 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index f11be72a1a80..b15e1ebb2bfa 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -54,7 +54,7 @@
 * Assumptions:
 * - head[0] is physically contiguous.
 * - tail[0] is physically contiguous.
-* - pages[] is not physically or virtually contigous and consists of
+* - pages[] is not physically or virtually contiguous and consists of
 * PAGE_SIZE elements.
 *
 * Output:
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index ca269178c4d4..35f370091f4f 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -62,7 +62,7 @@
 * Called when wanting to reset the device for any reason. Device is
 * taken back to power on status.
 *
-* This call blocks; on succesful return, the device has completed the
+* This call blocks; on successful return, the device has completed the
 * reset process and is ready to operate.
 */
 int wimax_reset(struct wimax_dev *wimax_dev)