Diffstat (limited to 'net')
53 files changed, 342 insertions, 249 deletions
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index a081ce1c0514..cebaae7e148b 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -20,8 +20,8 @@
 #ifndef _NET_BATMAN_ADV_BITARRAY_H_
 #define _NET_BATMAN_ADV_BITARRAY_H_
 
-/* returns true if the corresponding bit in the given seq_bits indicates true
- * and curr_seqno is within range of last_seqno
+/* Returns 1 if the corresponding bit in the given seq_bits indicates true
+ * and curr_seqno is within range of last_seqno. Otherwise returns 0.
  */
 static inline int batadv_test_bit(const unsigned long *seq_bits,
                                   uint32_t last_seqno, uint32_t curr_seqno)
@@ -32,7 +32,7 @@ static inline int batadv_test_bit(const unsigned long *seq_bits,
         if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE)
                 return 0;
         else
-                return test_bit(diff, seq_bits);
+                return test_bit(diff, seq_bits) != 0;
 }
 
 /* turn corresponding bit on, so we can remember that we got the packet */
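
The "!= 0" above is not cosmetic: test_bit() only guarantees a zero/non-zero result, while the updated comment promises exactly 0 or 1. A small user-space sketch (illustration only, not part of the patch) of why the normalization matters:

    /* A test_bit()-style helper may return the raw masked word,
     * which is non-zero but not necessarily 1.
     */
    #include <stdio.h>

    static int raw_test_bit(int nr, const unsigned long *addr)
    {
            /* returns 0 or a power of two, e.g. 8 for bit 3 */
            return addr[nr / (8 * sizeof(long))] & (1UL << (nr % (8 * sizeof(long))));
    }

    int main(void)
    {
            unsigned long bits = 0x08;      /* bit 3 set */

            printf("raw: %d, normalized: %d\n",
                   raw_test_bit(3, &bits), raw_test_bit(3, &bits) != 0);
            return 0;
    }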
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 5e5f5b410e0b..1eaacf10d19d 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -58,7 +58,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
         switch (cmd) {
         case BNEPCONNADD:
                 if (!capable(CAP_NET_ADMIN))
-                        return -EACCES;
+                        return -EPERM;
 
                 if (copy_from_user(&ca, argp, sizeof(ca)))
                         return -EFAULT;
@@ -84,7 +84,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
 
         case BNEPCONNDEL:
                 if (!capable(CAP_NET_ADMIN))
-                        return -EACCES;
+                        return -EPERM;
 
                 if (copy_from_user(&cd, argp, sizeof(cd)))
                         return -EFAULT;
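
The -EACCES to -EPERM conversions here (and in the cmtp, hidp, hci_sock, devinet and netrom hunks below) follow the usual errno convention: EPERM when the caller simply lacks a required capability, EACCES when access is denied by the object being operated on. A hedged user-space sketch of the pattern, with a hypothetical capable_net_admin() standing in for the kernel's capable(CAP_NET_ADMIN):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the kernel capability check. */
    static bool capable_net_admin(void)
    {
            return false;
    }

    static int do_privileged_ioctl(void)
    {
            if (!capable_net_admin())
                    return -EPERM;  /* caller lacks privilege: EPERM, not EACCES */
            return 0;
    }

    int main(void)
    {
            printf("%d (expected %d)\n", do_privileged_ioctl(), -EPERM);
            return 0;
    }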
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 311668d14571..32dc83dcb6b2 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -72,7 +72,7 @@ static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
         switch (cmd) {
         case CMTPCONNADD:
                 if (!capable(CAP_NET_ADMIN))
-                        return -EACCES;
+                        return -EPERM;
 
                 if (copy_from_user(&ca, argp, sizeof(ca)))
                         return -EFAULT;
@@ -97,7 +97,7 @@ static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
 
         case CMTPCONNDEL:
                 if (!capable(CAP_NET_ADMIN))
-                        return -EACCES;
+                        return -EPERM;
 
                 if (copy_from_user(&cd, argp, sizeof(cd)))
                         return -EFAULT;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 5ad7da217474..3c094e78dde9 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -29,6 +29,7 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/a2mp.h>
+#include <net/bluetooth/smp.h>
 
 static void hci_le_connect(struct hci_conn *conn)
 {
@@ -619,6 +620,9 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 {
         BT_DBG("hcon %p", conn);
 
+        if (conn->type == LE_LINK)
+                return smp_conn_security(conn, sec_level);
+
         /* For sdp we don't need the link key. */
         if (sec_level == BT_SECURITY_SDP)
                 return 1;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 19fdac78e555..d5ace1eda3ed 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -490,7 +490,7 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
         switch (cmd) {
         case HCISETRAW:
                 if (!capable(CAP_NET_ADMIN))
-                        return -EACCES;
+                        return -EPERM;
 
                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                         return -EPERM;
@@ -510,12 +510,12 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 
         case HCIBLOCKADDR:
                 if (!capable(CAP_NET_ADMIN))
-                        return -EACCES;
+                        return -EPERM;
                 return hci_sock_blacklist_add(hdev, (void __user *) arg);
 
         case HCIUNBLOCKADDR:
                 if (!capable(CAP_NET_ADMIN))
-                        return -EACCES;
+                        return -EPERM;
                 return hci_sock_blacklist_del(hdev, (void __user *) arg);
 
         default:
@@ -546,22 +546,22 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 
         case HCIDEVUP:
                 if (!capable(CAP_NET_ADMIN))
-                        return -EACCES;
+                        return -EPERM;
                 return hci_dev_open(arg);
 
         case HCIDEVDOWN:
                 if (!capable(CAP_NET_ADMIN))
-                        return -EACCES;
+                        return -EPERM;
                 return hci_dev_close(arg);
 
         case HCIDEVRESET:
                 if (!capable(CAP_NET_ADMIN))
-                        return -EACCES;
+                        return -EPERM;
                 return hci_dev_reset(arg);
 
         case HCIDEVRESTAT:
                 if (!capable(CAP_NET_ADMIN))
-                        return -EACCES;
+                        return -EPERM;
                 return hci_dev_reset_stat(arg);
 
         case HCISETSCAN:
@@ -573,7 +573,7 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
         case HCISETACLMTU:
         case HCISETSCOMTU:
                 if (!capable(CAP_NET_ADMIN))
-                        return -EACCES;
+                        return -EPERM;
                 return hci_dev_cmd(cmd, argp);
 
         case HCIINQUIRY:
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 18b3f6892a36..b24fb3bd8625 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -56,7 +56,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
         switch (cmd) {
         case HIDPCONNADD:
                 if (!capable(CAP_NET_ADMIN))
-                        return -EACCES;
+                        return -EPERM;
 
                 if (copy_from_user(&ca, argp, sizeof(ca)))
                         return -EFAULT;
@@ -91,7 +91,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
 
         case HIDPCONNDEL:
                 if (!capable(CAP_NET_ADMIN))
-                        return -EACCES;
+                        return -EPERM;
 
                 if (copy_from_user(&cd, argp, sizeof(cd)))
                         return -EFAULT;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index daa149b7003c..4ea1710a4783 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1199,14 +1199,15 @@ clean:
 static void l2cap_conn_ready(struct l2cap_conn *conn)
 {
         struct l2cap_chan *chan;
+        struct hci_conn *hcon = conn->hcon;
 
         BT_DBG("conn %p", conn);
 
-        if (!conn->hcon->out && conn->hcon->type == LE_LINK)
+        if (!hcon->out && hcon->type == LE_LINK)
                 l2cap_le_conn_ready(conn);
 
-        if (conn->hcon->out && conn->hcon->type == LE_LINK)
-                smp_conn_security(conn, conn->hcon->pending_sec_level);
+        if (hcon->out && hcon->type == LE_LINK)
+                smp_conn_security(hcon, hcon->pending_sec_level);
 
         mutex_lock(&conn->chan_lock);
 
@@ -1219,8 +1220,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
                         continue;
                 }
 
-                if (conn->hcon->type == LE_LINK) {
-                        if (smp_conn_security(conn, chan->sec_level))
+                if (hcon->type == LE_LINK) {
+                        if (smp_conn_security(hcon, chan->sec_level))
                                 l2cap_chan_ready(chan);
 
                 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 1497edd191a2..34bbe1c5e389 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -616,7 +616,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
                         break;
                 }
 
-                if (smp_conn_security(conn, sec.level))
+                if (smp_conn_security(conn->hcon, sec.level))
                         break;
                 sk->sk_state = BT_CONFIG;
                 chan->state = BT_CONFIG;
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 901a616c8083..8c225ef349cd 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -267,10 +267,10 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
         mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
                          hcon->dst_type, reason);
 
-        if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
-                cancel_delayed_work_sync(&conn->security_timer);
+        cancel_delayed_work_sync(&conn->security_timer);
+
+        if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
                 smp_chan_destroy(conn);
-        }
 }
 
 #define JUST_WORKS 0x00
@@ -760,9 +760,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
         return 0;
 }
 
-int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
+int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 {
-        struct hci_conn *hcon = conn->hcon;
+        struct l2cap_conn *conn = hcon->l2cap_data;
         struct smp_chan *smp = conn->smp_chan;
         __u8 authreq;
 
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index f88ee537fb2b..92de5e5f9db2 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -80,7 +80,7 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum,
         unsigned int bitmask;
 
         spin_lock_bh(&ebt_log_lock);
-        printk("<%c>%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x",
+        printk(KERN_SOH "%c%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x",
                '0' + loginfo->u.log.level, prefix,
                in ? in->name : "", out ? out->name : "",
                eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index dd485f6128e8..ba217e90765e 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -211,9 +211,10 @@ void caif_client_register_refcnt(struct cflayer *adapt_layer,
                                  void (*put)(struct cflayer *lyr))
 {
         struct cfsrvl *service;
-        service = container_of(adapt_layer->dn, struct cfsrvl, layer);
 
-        WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL);
+        if (WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL))
+                return;
+        service = container_of(adapt_layer->dn, struct cfsrvl, layer);
         service->hold = hold;
         service->put = put;
 }
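
The caif change moves the container_of() computation behind the NULL check and relies on WARN_ON() returning the value of its condition, so the function can log and bail out before touching adapt_layer->dn. A user-space analogue of the check-before-use pattern (the WARN_ON macro below is a simplified stand-in, not the kernel implementation):

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for WARN_ON(): report and return the condition. */
    #define WARN_ON(cond) \
            ((cond) ? (fprintf(stderr, "warning: %s\n", #cond), 1) : 0)

    struct layer { struct layer *dn; };

    static void register_refcnt(struct layer *adapt_layer)
    {
            if (WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL))
                    return;                 /* bail out before dereferencing */
            /* safe to use adapt_layer->dn here */
            printf("registered %p\n", (void *)adapt_layer->dn);
    }

    int main(void)
    {
            register_refcnt(NULL);          /* warns, does not crash */
            return 0;
    }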
diff --git a/net/core/dev.c b/net/core/dev.c
index 83988362805e..89e33a5d4d93 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2134,7 +2134,8 @@ static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
 static netdev_features_t harmonize_features(struct sk_buff *skb,
         __be16 protocol, netdev_features_t features)
 {
-        if (!can_checksum_protocol(features, protocol)) {
+        if (skb->ip_summed != CHECKSUM_NONE &&
+            !can_checksum_protocol(features, protocol)) {
                 features &= ~NETIF_F_ALL_CSUM;
                 features &= ~NETIF_F_SG;
         } else if (illegal_highdma(skb->dev, skb)) {
@@ -2647,15 +2648,16 @@ void __skb_get_rxhash(struct sk_buff *skb)
         if (!skb_flow_dissect(skb, &keys))
                 return;
 
-        if (keys.ports) {
-                if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
-                        swap(keys.port16[0], keys.port16[1]);
+        if (keys.ports)
                 skb->l4_rxhash = 1;
-        }
 
         /* get a consistent hash (same value on both flow directions) */
-        if ((__force u32)keys.dst < (__force u32)keys.src)
+        if (((__force u32)keys.dst < (__force u32)keys.src) ||
+            (((__force u32)keys.dst == (__force u32)keys.src) &&
+             ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
                 swap(keys.dst, keys.src);
+                swap(keys.port16[0], keys.port16[1]);
+        }
 
         hash = jhash_3words((__force u32)keys.dst,
                             (__force u32)keys.src,
@@ -3321,7 +3323,7 @@ ncls:
 
         if (pt_prev) {
                 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
-                        ret = -ENOMEM;
+                        goto drop;
                 else
                         ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
         } else {
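
For context, the __skb_get_rxhash() hunk makes the hash direction-independent by putting addresses and ports into one canonical order before hashing, and only swapping the ports together with the addresses. A self-contained sketch of the same idea with a toy hash function (jhash_3words is not reproduced here; assume any mixing function):

    #include <stdint.h>
    #include <stdio.h>

    struct flow { uint32_t src, dst; uint16_t sport, dport; };

    /* Toy stand-in for jhash_3words(); only the symmetry matters here. */
    static uint32_t toy_hash(uint32_t a, uint32_t b, uint32_t c)
    {
            return a * 2654435761u ^ b * 40503u ^ c;
    }

    static uint32_t flow_hash(struct flow f)
    {
            /* canonical order: smaller address (then port) first */
            if (f.dst < f.src || (f.dst == f.src && f.dport < f.sport)) {
                    uint32_t a = f.src; f.src = f.dst; f.dst = a;
                    uint16_t p = f.sport; f.sport = f.dport; f.dport = p;
            }
            return toy_hash(f.dst, f.src, ((uint32_t)f.sport << 16) | f.dport);
    }

    int main(void)
    {
            struct flow a = { 0x0a000001, 0x0a000002, 1234, 80 };
            struct flow b = { 0x0a000002, 0x0a000001, 80, 1234 };

            printf("%u %u\n", flow_hash(a), flow_hash(b));  /* both directions hash alike */
            return 0;
    }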
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index cce9e53528b1..148e73d2c451 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2721,7 +2721,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
         /* Eth + IPh + UDPh + mpls */
         datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 -
                   pkt_dev->pkt_overhead;
-        if (datalen < sizeof(struct pktgen_hdr))
+        if (datalen < 0 || datalen < sizeof(struct pktgen_hdr))
                 datalen = sizeof(struct pktgen_hdr);
 
         udph->source = htons(pkt_dev->cur_udp_src);
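
The added "datalen < 0" test works around the usual signed/unsigned trap: datalen is a signed int, and comparing it directly with sizeof(...) promotes the negative value to a huge size_t, so the old clamp never triggered for undersized packets. A minimal demonstration (the 16-byte struct is just an example size):

    #include <stdio.h>

    struct pktgen_hdr { unsigned int a, b, c, d; };   /* 16 bytes as an example */

    int main(void)
    {
            int datalen = -10;      /* e.g. a tiny packet size minus the overheads */

            /* old check: datalen is promoted to unsigned, so -10 looks enormous */
            if (datalen < sizeof(struct pktgen_hdr))
                    printf("old check clamps\n");
            else
                    printf("old check misses the negative value\n");

            /* fixed check */
            if (datalen < 0 || (unsigned int)datalen < sizeof(struct pktgen_hdr))
                    printf("new check clamps\n");
            return 0;
    }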
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index fe00d1208167..e33ebae519c8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3502,7 +3502,9 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
         if (!skb_cloned(from))
                 skb_shinfo(from)->nr_frags = 0;
 
-        /* if the skb is cloned this does nothing since we set nr_frags to 0 */
+        /* if the skb is not cloned this does nothing
+         * since we set nr_frags to 0.
+         */
         for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
                 skb_frag_ref(from, i);
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 8f67ced8d6a8..305792076121 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1523,7 +1523,14 @@ EXPORT_SYMBOL(sock_rfree);
 
 void sock_edemux(struct sk_buff *skb)
 {
-        sock_put(skb->sk);
+        struct sock *sk = skb->sk;
+
+#ifdef CONFIG_INET
+        if (sk->sk_state == TCP_TIME_WAIT)
+                inet_twsk_put(inet_twsk(sk));
+        else
+#endif
+                sock_put(sk);
 }
 EXPORT_SYMBOL(sock_edemux);
 
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 77e87aff419a..47800459e4cb 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1225,7 +1225,7 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event,
         switch (event) {
         case NETDEV_CHANGEADDR:
                 neigh_changeaddr(&arp_tbl, dev);
-                rt_cache_flush(dev_net(dev), 0);
+                rt_cache_flush(dev_net(dev));
                 break;
         default:
                 break;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 44bf82e3aef7..e12fad773852 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -725,7 +725,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
                 break;
 
         case SIOCSIFFLAGS:
-                ret = -EACCES;
+                ret = -EPERM;
                 if (!capable(CAP_NET_ADMIN))
                         goto out;
                 break;
@@ -733,7 +733,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
         case SIOCSIFBRDADDR:    /* Set the broadcast address */
         case SIOCSIFDSTADDR:    /* Set the destination address */
         case SIOCSIFNETMASK:    /* Set the netmask for the interface */
-                ret = -EACCES;
+                ret = -EPERM;
                 if (!capable(CAP_NET_ADMIN))
                         goto out;
                 ret = -EINVAL;
@@ -1503,7 +1503,7 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
                 if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
                     i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
                         if ((new_value == 0) && (old_value != 0))
-                                rt_cache_flush(net, 0);
+                                rt_cache_flush(net);
         }
 
         return ret;
@@ -1537,7 +1537,7 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
                                 dev_disable_lro(idev->dev);
                         }
                         rtnl_unlock();
-                        rt_cache_flush(net, 0);
+                        rt_cache_flush(net);
                 }
         }
 
@@ -1554,7 +1554,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
         struct net *net = ctl->extra2;
 
         if (write && *valp != val)
-                rt_cache_flush(net, 0);
+                rt_cache_flush(net);
 
         return ret;
 }
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index c43ae3fba792..8e2b475da9fa 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -148,7 +148,7 @@ static void fib_flush(struct net *net)
         }
 
         if (flushed)
-                rt_cache_flush(net, -1);
+                rt_cache_flush(net);
 }
 
 /*
@@ -999,11 +999,11 @@ static void nl_fib_lookup_exit(struct net *net)
         net->ipv4.fibnl = NULL;
 }
 
-static void fib_disable_ip(struct net_device *dev, int force, int delay)
+static void fib_disable_ip(struct net_device *dev, int force)
 {
         if (fib_sync_down_dev(dev, force))
                 fib_flush(dev_net(dev));
-        rt_cache_flush(dev_net(dev), delay);
+        rt_cache_flush(dev_net(dev));
         arp_ifdown(dev);
 }
 
@@ -1020,7 +1020,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
                 fib_sync_up(dev);
 #endif
                 atomic_inc(&net->ipv4.dev_addr_genid);
-                rt_cache_flush(dev_net(dev), -1);
+                rt_cache_flush(dev_net(dev));
                 break;
         case NETDEV_DOWN:
                 fib_del_ifaddr(ifa, NULL);
@@ -1029,9 +1029,9 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
                         /* Last address was deleted from this interface.
                          * Disable IP.
                          */
-                        fib_disable_ip(dev, 1, 0);
+                        fib_disable_ip(dev, 1);
                 } else {
-                        rt_cache_flush(dev_net(dev), -1);
+                        rt_cache_flush(dev_net(dev));
                 }
                 break;
         }
@@ -1045,7 +1045,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
         struct net *net = dev_net(dev);
 
         if (event == NETDEV_UNREGISTER) {
-                fib_disable_ip(dev, 2, -1);
+                fib_disable_ip(dev, 2);
                 rt_flush_dev(dev);
                 return NOTIFY_DONE;
         }
@@ -1062,14 +1062,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
                 fib_sync_up(dev);
 #endif
                 atomic_inc(&net->ipv4.dev_addr_genid);
-                rt_cache_flush(dev_net(dev), -1);
+                rt_cache_flush(dev_net(dev));
                 break;
         case NETDEV_DOWN:
-                fib_disable_ip(dev, 0, 0);
+                fib_disable_ip(dev, 0);
                 break;
         case NETDEV_CHANGEMTU:
         case NETDEV_CHANGE:
-                rt_cache_flush(dev_net(dev), 0);
+                rt_cache_flush(dev_net(dev));
                 break;
         case NETDEV_UNREGISTER_BATCH:
                 break;
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index a83d74e498d2..274309d3aded 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -259,7 +259,7 @@ static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule)
 
 static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
 {
-        rt_cache_flush(ops->fro_net, -1);
+        rt_cache_flush(ops->fro_net);
 }
 
 static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = {
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 57bd978483e1..d1b93595b4a7 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1286,7 +1286,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 
                         fib_release_info(fi_drop);
                         if (state & FA_S_ACCESSED)
-                                rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
+                                rt_cache_flush(cfg->fc_nlinfo.nl_net);
                         rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
                                   tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
 
@@ -1333,7 +1333,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
         list_add_tail_rcu(&new_fa->fa_list,
                           (fa ? &fa->fa_list : fa_head));
 
-        rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
+        rt_cache_flush(cfg->fc_nlinfo.nl_net);
         rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
                   &cfg->fc_nlinfo, 0);
 succeeded:
@@ -1708,7 +1708,7 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
         trie_leaf_remove(t, l);
 
         if (fa->fa_state & FA_S_ACCESSED)
-                rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
+                rt_cache_flush(cfg->fc_nlinfo.nl_net);
 
         fib_release_info(fa->fa_info);
         alias_free_mem_rcu(fa);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 82cf2a722b23..fd9af60397b5 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -202,11 +202,6 @@ EXPORT_SYMBOL(ip_tos2prio);
 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
 #define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
 
-static inline int rt_genid(struct net *net)
-{
-        return atomic_read(&net->ipv4.rt_genid);
-}
-
 #ifdef CONFIG_PROC_FS
 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
 {
@@ -447,27 +442,9 @@ static inline bool rt_is_expired(const struct rtable *rth)
         return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
 }
 
-/*
- * Perturbation of rt_genid by a small quantity [1..256]
- * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
- * many times (2^24) without giving recent rt_genid.
- * Jenkins hash is strong enough that litle changes of rt_genid are OK.
- */
-static void rt_cache_invalidate(struct net *net)
-{
-        unsigned char shuffle;
-
-        get_random_bytes(&shuffle, sizeof(shuffle));
-        atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
-}
-
-/*
- * delay < 0 : invalidate cache (fast : entries will be deleted later)
- * delay >= 0 : invalidate & flush cache (can be long)
- */
-void rt_cache_flush(struct net *net, int delay)
-{
-        rt_cache_invalidate(net);
+void rt_cache_flush(struct net *net)
+{
+        rt_genid_bump(net);
 }
 
 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
@@ -2345,7 +2322,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
 void ip_rt_multicast_event(struct in_device *in_dev)
 {
-        rt_cache_flush(dev_net(in_dev->dev), 0);
+        rt_cache_flush(dev_net(in_dev->dev));
 }
 
 #ifdef CONFIG_SYSCTL
@@ -2354,16 +2331,7 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
                                      size_t *lenp, loff_t *ppos)
 {
         if (write) {
-                int flush_delay;
-                ctl_table ctl;
-                struct net *net;
-
-                memcpy(&ctl, __ctl, sizeof(ctl));
-                ctl.data = &flush_delay;
-                proc_dointvec(&ctl, write, buffer, lenp, ppos);
-
-                net = (struct net *)__ctl->extra1;
-                rt_cache_flush(net, flush_delay);
+                rt_cache_flush((struct net *)__ctl->extra1);
                 return 0;
         }
 
@@ -2533,8 +2501,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
 
 static __net_init int rt_genid_init(struct net *net)
 {
-        get_random_bytes(&net->ipv4.rt_genid,
-                         sizeof(net->ipv4.rt_genid));
+        atomic_set(&net->rt_genid, 0);
         get_random_bytes(&net->ipv4.dev_addr_genid,
                          sizeof(net->ipv4.dev_addr_genid));
         return 0;
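
For context, this rework replaces the randomized rt_genid perturbation with a plain per-namespace generation counter: rt_cache_flush() now just bumps the counter, and cached routes that remember an older generation are treated as expired (the same trick is applied to IPv6 via rt6i_genid further down). A user-space sketch of the scheme, with a plain int standing in for the kernel's atomic_t:

    #include <stdio.h>

    static int net_rt_genid;                /* per-namespace counter in the kernel */

    struct cached_route { int genid; const char *dst; };

    static void rt_genid_bump(void) { net_rt_genid++; }     /* what rt_cache_flush() does */

    static int rt_is_expired(const struct cached_route *rt)
    {
            return rt->genid != net_rt_genid;
    }

    int main(void)
    {
            struct cached_route rt = { net_rt_genid, "10.0.0.0/8" };

            printf("expired before flush: %d\n", rt_is_expired(&rt));   /* 0 */
            rt_genid_bump();                                            /* "flush" */
            printf("expired after flush: %d\n", rt_is_expired(&rt));    /* 1 */
            return 0;
    }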
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2109ff4a1daf..5f6419341821 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1762,8 +1762,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 }
 
 #ifdef CONFIG_NET_DMA
-                if (tp->ucopy.dma_chan)
-                        dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+                if (tp->ucopy.dma_chan) {
+                        if (tp->rcv_wnd == 0 &&
+                            !skb_queue_empty(&sk->sk_async_wait_queue)) {
+                                tcp_service_net_dma(sk, true);
+                                tcp_cleanup_rbuf(sk, copied);
+                        } else
+                                dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+                }
 #endif
                 if (copied >= target) {
                         /* Do not sleep, just process backlog. */
@@ -2325,10 +2331,17 @@ static int tcp_repair_options_est(struct tcp_sock *tp,
                 tp->rx_opt.mss_clamp = opt.opt_val;
                 break;
         case TCPOPT_WINDOW:
-                if (opt.opt_val > 14)
-                        return -EFBIG;
+                {
+                        u16 snd_wscale = opt.opt_val & 0xFFFF;
+                        u16 rcv_wscale = opt.opt_val >> 16;
+
+                        if (snd_wscale > 14 || rcv_wscale > 14)
+                                return -EFBIG;
 
-                tp->rx_opt.snd_wscale = opt.opt_val;
+                        tp->rx_opt.snd_wscale = snd_wscale;
+                        tp->rx_opt.rcv_wscale = rcv_wscale;
+                        tp->rx_opt.wscale_ok = 1;
+                }
                 break;
         case TCPOPT_SACK_PERM:
                 if (opt.opt_val != 0)
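
The TCPOPT_WINDOW repair case now treats opt_val as two 16-bit window-scale factors packed into one 32-bit value, send scale in the low half and receive scale in the high half, each capped at 14. A tiny sketch of that packing convention (shown from the user-space side for illustration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t snd_wscale = 7, rcv_wscale = 9;

            /* user space packs both scales into one option value ... */
            uint32_t opt_val = ((uint32_t)rcv_wscale << 16) | snd_wscale;

            /* ... and the receiver unpacks and validates them */
            uint16_t snd = opt_val & 0xFFFF;
            uint16_t rcv = opt_val >> 16;

            if (snd > 14 || rcv > 14)
                    printf("rejected: -EFBIG\n");
            else
                    printf("snd_wscale=%u rcv_wscale=%u\n", snd, rcv);
            return 0;
    }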
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6e38c6c23caa..d377f4854cb8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4661,7 +4661,7 @@ queue_and_out:
 
                 if (eaten > 0)
                         kfree_skb_partial(skb, fragstolen);
-                else if (!sock_flag(sk, SOCK_DEAD))
+                if (!sock_flag(sk, SOCK_DEAD))
                         sk->sk_data_ready(sk, 0);
                 return;
         }
@@ -5556,8 +5556,7 @@ no_ack:
 #endif
                         if (eaten)
                                 kfree_skb_partial(skb, fragstolen);
-                        else
-                                sk->sk_data_ready(sk, 0);
+                        sk->sk_data_ready(sk, 0);
                         return 0;
                 }
         }
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6f6d1aca3c3d..2814f66dac64 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1226,6 +1226,11 @@ try_again:
 
         if (unlikely(err)) {
                 trace_kfree_skb(skb, udp_recvmsg);
+                if (!peeked) {
+                        atomic_inc(&sk->sk_drops);
+                        UDP_INC_STATS_USER(sock_net(sk),
+                                           UDP_MIB_INERRORS, is_udplite);
+                }
                 goto out_free;
         }
 
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 0251a6005be8..c4f934176cab 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -175,33 +175,12 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
                            const struct in6_addr *saddr)
 {
         __ip6_dst_store(sk, dst, daddr, saddr);
-
-#ifdef CONFIG_XFRM
-        {
-                struct rt6_info *rt = (struct rt6_info *)dst;
-                rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
-        }
-#endif
 }
 
 static inline
 struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
 {
-        struct dst_entry *dst;
-
-        dst = __sk_dst_check(sk, cookie);
-
-#ifdef CONFIG_XFRM
-        if (dst) {
-                struct rt6_info *rt = (struct rt6_info *)dst;
-                if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
-                        __sk_dst_reset(sk);
-                        dst = NULL;
-                }
-        }
-#endif
-
-        return dst;
+        return __sk_dst_check(sk, cookie);
 }
 
 static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 13690d650c3e..286acfc21250 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -819,6 +819,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
                                 offsetof(struct rt6_info, rt6i_src),
                                 allow_create, replace_required);
 
+                if (IS_ERR(sn)) {
+                        err = PTR_ERR(sn);
+                        sn = NULL;
+                }
                 if (!sn) {
                         /* If it is failed, discard just allocated
                            root, and then (in st_failure) stale node
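
The added block is the standard pattern for functions that return either a valid pointer or an errno encoded in the pointer: IS_ERR() detects the encoding, PTR_ERR() extracts the code, and the pointer is cleared so the existing !sn path still runs. A compact user-space re-implementation of the idea (the real macros live in include/linux/err.h; the versions below are simplified):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Minimal ERR_PTR/IS_ERR/PTR_ERR lookalikes for illustration. */
    #define MAX_ERRNO 4095
    static void *ERR_PTR(long err) { return (void *)err; }
    static int IS_ERR(const void *p) { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
    static long PTR_ERR(const void *p) { return (long)(intptr_t)p; }

    /* Toy allocator: fails with -ENOMEM encoded in the pointer. */
    static void *add_subtree(int fail) { return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000; }

    int main(void)
    {
            void *sn = add_subtree(1);
            long err = 0;

            if (IS_ERR(sn)) {
                    err = PTR_ERR(sn);
                    sn = NULL;
            }
            if (!sn)
                    printf("allocation failed, err=%ld\n", err);
            return 0;
    }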
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8e80fd279100..854e4018d205 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -226,7 +226,7 @@ static struct rt6_info ip6_null_entry_template = {
         .dst = {
                 .__refcnt = ATOMIC_INIT(1),
                 .__use = 1,
-                .obsolete = -1,
+                .obsolete = DST_OBSOLETE_FORCE_CHK,
                 .error = -ENETUNREACH,
                 .input = ip6_pkt_discard,
                 .output = ip6_pkt_discard_out,
@@ -246,7 +246,7 @@ static struct rt6_info ip6_prohibit_entry_template = {
         .dst = {
                 .__refcnt = ATOMIC_INIT(1),
                 .__use = 1,
-                .obsolete = -1,
+                .obsolete = DST_OBSOLETE_FORCE_CHK,
                 .error = -EACCES,
                 .input = ip6_pkt_prohibit,
                 .output = ip6_pkt_prohibit_out,
@@ -261,7 +261,7 @@ static struct rt6_info ip6_blk_hole_entry_template = {
         .dst = {
                 .__refcnt = ATOMIC_INIT(1),
                 .__use = 1,
-                .obsolete = -1,
+                .obsolete = DST_OBSOLETE_FORCE_CHK,
                 .error = -EINVAL,
                 .input = dst_discard,
                 .output = dst_discard,
@@ -281,13 +281,14 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net,
                                              struct fib6_table *table)
 {
         struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
-                                        0, DST_OBSOLETE_NONE, flags);
+                                        0, DST_OBSOLETE_FORCE_CHK, flags);
 
         if (rt) {
                 struct dst_entry *dst = &rt->dst;
 
                 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
                 rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
+                rt->rt6i_genid = rt_genid(net);
         }
         return rt;
 }
@@ -1031,6 +1032,13 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
 
         rt = (struct rt6_info *) dst;
 
+        /* All IPV6 dsts are created with ->obsolete set to the value
+         * DST_OBSOLETE_FORCE_CHK which forces validation calls down
+         * into this function always.
+         */
+        if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev)))
+                return NULL;
+
         if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
                 if (rt->rt6i_peer_genid != rt6_peer_genid()) {
                         if (!rt6_has_peer(rt))
@@ -1397,8 +1405,6 @@ int ip6_route_add(struct fib6_config *cfg)
                 goto out;
         }
 
-        rt->dst.obsolete = -1;
-
         if (cfg->fc_flags & RTF_EXPIRES)
                 rt6_set_expires(rt, jiffies +
                                 clock_t_to_jiffies(cfg->fc_expires));
@@ -2080,7 +2086,6 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
         rt->dst.input = ip6_input;
         rt->dst.output = ip6_output;
         rt->rt6i_idev = idev;
-        rt->dst.obsolete = -1;
 
         rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
         if (anycast)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index a3e60cc04a8a..acd32e3f1b68 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -403,8 +403,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                 tp->mtu_info = ntohl(info);
                 if (!sock_owned_by_user(sk))
                         tcp_v6_mtu_reduced(sk);
-                else
-                        set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
+                else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
+                                           &tp->tsq_flags))
+                        sock_hold(sk);
                 goto out;
         }
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 99d0077b56b8..07e2bfef6845 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -394,6 +394,17 @@ try_again:
         }
         if (unlikely(err)) {
                 trace_kfree_skb(skb, udpv6_recvmsg);
+                if (!peeked) {
+                        atomic_inc(&sk->sk_drops);
+                        if (is_udp4)
+                                UDP_INC_STATS_USER(sock_net(sk),
+                                                   UDP_MIB_INERRORS,
+                                                   is_udplite);
+                        else
+                                UDP6_INC_STATS_USER(sock_net(sk),
+                                                    UDP_MIB_INERRORS,
+                                                    is_udplite);
+                }
                 goto out_free;
         }
         if (!peeked) {
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 513cab08a986..1a9f3723c13c 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1501,6 +1501,8 @@ out:
         return err;
 }
 
+static struct lock_class_key l2tp_socket_class;
+
 int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
 {
         struct l2tp_tunnel *tunnel = NULL;
@@ -1605,6 +1607,8 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
         tunnel->old_sk_destruct = sk->sk_destruct;
         sk->sk_destruct = &l2tp_tunnel_destruct;
         tunnel->sock = sk;
+        lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
+
         sk->sk_allocation = GFP_ATOMIC;
 
         /* Add tunnel to our list */
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index f9ee74deeac2..3bfb34aaee29 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -153,7 +153,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
                 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
         }
 
-        if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
+        if (!pskb_may_pull(skb, ETH_HLEN))
                 goto error;
 
         secpath_reset(skb);
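
The one-liner above fixes a classic sizeof slip: ETH_HLEN is the constant 14, so sizeof(ETH_HLEN) is sizeof(int), usually 4, and the old code only guaranteed 4 bytes of the Ethernet header were present. A short demonstration:

    #include <stdio.h>

    #define ETH_HLEN 14     /* same value as in <linux/if_ether.h> */

    int main(void)
    {
            printf("ETH_HLEN = %d, sizeof(ETH_HLEN) = %zu\n",
                   ETH_HLEN, sizeof(ETH_HLEN));   /* 14 vs. 4 on most ABIs */
            return 0;
    }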
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d41974aacf51..a58c0b649ba1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1378,6 +1378,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
         else
                 memset(next_hop, 0, ETH_ALEN);
 
+        memset(pinfo, 0, sizeof(*pinfo));
+
         pinfo->generation = mesh_paths_generation;
 
         pinfo->filled = MPATH_INFO_FRAME_QLEN |
@@ -1396,7 +1398,6 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
         pinfo->discovery_timeout =
                         jiffies_to_msecs(mpath->discovery_timeout);
         pinfo->discovery_retries = mpath->discovery_retries;
-        pinfo->flags = 0;
         if (mpath->flags & MESH_PATH_ACTIVE)
                 pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE;
         if (mpath->flags & MESH_PATH_RESOLVING)
@@ -1405,10 +1406,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
                 pinfo->flags |= NL80211_MPATH_FLAG_SN_VALID;
         if (mpath->flags & MESH_PATH_FIXED)
                 pinfo->flags |= NL80211_MPATH_FLAG_FIXED;
-        if (mpath->flags & MESH_PATH_RESOLVING)
-                pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING;
-
-        pinfo->flags = mpath->flags;
+        if (mpath->flags & MESH_PATH_RESOLVED)
+                pinfo->flags |= NL80211_MPATH_FLAG_RESOLVED;
 }
 
 static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a4a5acdbaa4d..f76b83341cf9 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3248,6 +3248,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
         goto out_unlock;
 
  err_clear:
+        memset(ifmgd->bssid, 0, ETH_ALEN);
+        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
         ifmgd->auth_data = NULL;
  err_free:
         kfree(auth_data);
@@ -3439,6 +3441,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
         err = 0;
         goto out;
  err_clear:
+        memset(ifmgd->bssid, 0, ETH_ALEN);
+        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
         ifmgd->assoc_data = NULL;
  err_free:
         kfree(assoc_data);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index a5ac11ebef33..e046b3756aab 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -158,21 +158,18 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
  * sCL -> sSS
  */
 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
-/*synack*/ { sIV, sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
+/*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
 /*
  * sNO -> sIV  Too late and no reason to do anything
  * sSS -> sIV  Client can't send SYN and then SYN/ACK
  * sS2 -> sSR  SYN/ACK sent to SYN2 in simultaneous open
- * sSR -> sIG
- * sES -> sIG  Error: SYNs in window outside the SYN_SENT state
- *             are errors. Receiver will reply with RST
- *             and close the connection.
- *             Or we are not in sync and hold a dead connection.
- * sFW -> sIG
- * sCW -> sIG
- * sLA -> sIG
- * sTW -> sIG
- * sCL -> sIG
+ * sSR -> sSR  Late retransmitted SYN/ACK in simultaneous open
+ * sES -> sIV  Invalid SYN/ACK packets sent by the client
+ * sFW -> sIV
+ * sCW -> sIV
+ * sLA -> sIV
+ * sTW -> sIV
+ * sCL -> sIV
  */
 /*           sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
 /*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
@@ -633,15 +630,9 @@ static bool tcp_in_window(const struct nf_conn *ct,
                 ack = sack = receiver->td_end;
         }
 
-        if (seq == end
-            && (!tcph->rst
-                || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)))
+        if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
                 /*
-                 * Packets contains no data: we assume it is valid
-                 * and check the ack value only.
-                 * However RST segments are always validated by their
-                 * SEQ number, except when seq == 0 (reset sent answering
-                 * SYN.
+                 * RST sent answering SYN.
                  */
                 seq = end = sender->td_end;
 
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 14e2f3903142..5cfb5bedb2b8 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -381,6 +381,7 @@ __build_packet_message(struct nfulnl_instance *inst,
         struct nlmsghdr *nlh;
         struct nfgenmsg *nfmsg;
         sk_buff_data_t old_tail = inst->skb->tail;
+        struct sock *sk;
 
         nlh = nlmsg_put(inst->skb, 0, 0,
                         NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
@@ -499,18 +500,19 @@ __build_packet_message(struct nfulnl_instance *inst,
         }
 
         /* UID */
-        if (skb->sk) {
-                read_lock_bh(&skb->sk->sk_callback_lock);
-                if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
-                        struct file *file = skb->sk->sk_socket->file;
+        sk = skb->sk;
+        if (sk && sk->sk_state != TCP_TIME_WAIT) {
+                read_lock_bh(&sk->sk_callback_lock);
+                if (sk->sk_socket && sk->sk_socket->file) {
+                        struct file *file = sk->sk_socket->file;
                         __be32 uid = htonl(file->f_cred->fsuid);
                         __be32 gid = htonl(file->f_cred->fsgid);
-                        read_unlock_bh(&skb->sk->sk_callback_lock);
+                        read_unlock_bh(&sk->sk_callback_lock);
                         if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
                             nla_put_be32(inst->skb, NFULA_GID, gid))
                                 goto nla_put_failure;
                 } else
-                        read_unlock_bh(&skb->sk->sk_callback_lock);
+                        read_unlock_bh(&sk->sk_callback_lock);
         }
 
         /* local sequence number */
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c index ff5f75fddb15..91e9af4d1f42 100644 --- a/net/netfilter/xt_LOG.c +++ b/net/netfilter/xt_LOG.c | |||
@@ -145,6 +145,19 @@ static int dump_tcp_header(struct sbuff *m, const struct sk_buff *skb, | |||
145 | return 0; | 145 | return 0; |
146 | } | 146 | } |
147 | 147 | ||
148 | static void dump_sk_uid_gid(struct sbuff *m, struct sock *sk) | ||
149 | { | ||
150 | if (!sk || sk->sk_state == TCP_TIME_WAIT) | ||
151 | return; | ||
152 | |||
153 | read_lock_bh(&sk->sk_callback_lock); | ||
154 | if (sk->sk_socket && sk->sk_socket->file) | ||
155 | sb_add(m, "UID=%u GID=%u ", | ||
156 | sk->sk_socket->file->f_cred->fsuid, | ||
157 | sk->sk_socket->file->f_cred->fsgid); | ||
158 | read_unlock_bh(&sk->sk_callback_lock); | ||
159 | } | ||
160 | |||
148 | /* One level of recursion won't kill us */ | 161 | /* One level of recursion won't kill us */ |
149 | static void dump_ipv4_packet(struct sbuff *m, | 162 | static void dump_ipv4_packet(struct sbuff *m, |
150 | const struct nf_loginfo *info, | 163 | const struct nf_loginfo *info, |
@@ -361,14 +374,8 @@ static void dump_ipv4_packet(struct sbuff *m, | |||
361 | } | 374 | } |
362 | 375 | ||
363 | /* Max length: 15 "UID=4294967295 " */ | 376 | /* Max length: 15 "UID=4294967295 " */ |
364 | if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) { | 377 | if ((logflags & XT_LOG_UID) && !iphoff) |
365 | read_lock_bh(&skb->sk->sk_callback_lock); | 378 | dump_sk_uid_gid(m, skb->sk); |
366 | if (skb->sk->sk_socket && skb->sk->sk_socket->file) | ||
367 | sb_add(m, "UID=%u GID=%u ", | ||
368 | skb->sk->sk_socket->file->f_cred->fsuid, | ||
369 | skb->sk->sk_socket->file->f_cred->fsgid); | ||
370 | read_unlock_bh(&skb->sk->sk_callback_lock); | ||
371 | } | ||
372 | 379 | ||
373 | /* Max length: 16 "MARK=0xFFFFFFFF " */ | 380 | /* Max length: 16 "MARK=0xFFFFFFFF " */ |
374 | if (!iphoff && skb->mark) | 381 | if (!iphoff && skb->mark) |
@@ -436,8 +443,8 @@ log_packet_common(struct sbuff *m, | |||
436 | const struct nf_loginfo *loginfo, | 443 | const struct nf_loginfo *loginfo, |
437 | const char *prefix) | 444 | const char *prefix) |
438 | { | 445 | { |
439 | sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level, | 446 | sb_add(m, KERN_SOH "%c%sIN=%s OUT=%s ", |
440 | prefix, | 447 | '0' + loginfo->u.log.level, prefix, |
441 | in ? in->name : "", | 448 | in ? in->name : "", |
442 | out ? out->name : ""); | 449 | out ? out->name : ""); |
443 | #ifdef CONFIG_BRIDGE_NETFILTER | 450 | #ifdef CONFIG_BRIDGE_NETFILTER |
@@ -717,14 +724,8 @@ static void dump_ipv6_packet(struct sbuff *m, | |||
717 | } | 724 | } |
718 | 725 | ||
719 | /* Max length: 15 "UID=4294967295 " */ | 726 | /* Max length: 15 "UID=4294967295 " */ |
720 | if ((logflags & XT_LOG_UID) && recurse && skb->sk) { | 727 | if ((logflags & XT_LOG_UID) && recurse) |
721 | read_lock_bh(&skb->sk->sk_callback_lock); | 728 | dump_sk_uid_gid(m, skb->sk); |
722 | if (skb->sk->sk_socket && skb->sk->sk_socket->file) | ||
723 | sb_add(m, "UID=%u GID=%u ", | ||
724 | skb->sk->sk_socket->file->f_cred->fsuid, | ||
725 | skb->sk->sk_socket->file->f_cred->fsgid); | ||
726 | read_unlock_bh(&skb->sk->sk_callback_lock); | ||
727 | } | ||
728 | 729 | ||
729 | /* Max length: 16 "MARK=0xFFFFFFFF " */ | 730 | /* Max length: 16 "MARK=0xFFFFFFFF " */ |
730 | if (!recurse && skb->mark) | 731 | if (!recurse && skb->mark) |
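
The nfnetlink_log and xt_LOG hunks above apply the same guard: a socket sitting in TCP_TIME_WAIT is only a minimal timewait structure, so its sk_socket/file fields must not be dereferenced, and the UID/GID lookup has to bail out before taking sk_callback_lock. The following is a minimal userspace sketch of that check order; the struct names and the main() driver are invented for illustration, only the sequence of tests mirrors dump_sk_uid_gid().

#include <stdio.h>
#include <stddef.h>

enum state { ST_ESTABLISHED, ST_TIME_WAIT };

struct cred    { unsigned int uid, gid; };
struct file_   { struct cred cred; };
struct socket_ { struct file_ *file; };
struct sock_   {
        enum state state;
        struct socket_ *socket;  /* not meaningful for timewait minisocks */
};

/* Mirrors the order of checks in dump_sk_uid_gid(): reject NULL and
 * TIME_WAIT sockets first, then verify socket and file before use. */
static void log_uid_gid(const struct sock_ *sk)
{
        if (!sk || sk->state == ST_TIME_WAIT)
                return;
        if (sk->socket && sk->socket->file)
                printf("UID=%u GID=%u\n",
                       sk->socket->file->cred.uid,
                       sk->socket->file->cred.gid);
}

int main(void)
{
        struct file_ f = { { 1000, 1000 } };
        struct socket_ so = { &f };
        struct sock_ full = { ST_ESTABLISHED, &so };
        struct sock_ tw   = { ST_TIME_WAIT, NULL };

        log_uid_gid(&full); /* prints UID=1000 GID=1000 */
        log_uid_gid(&tw);   /* skipped: timewait socket has no file */
        return 0;
}
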
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 06592d8b4a2b..7261eb81974f 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
@@ -601,7 +601,7 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
601 | if (!capable(CAP_NET_BIND_SERVICE)) { | 601 | if (!capable(CAP_NET_BIND_SERVICE)) { |
602 | dev_put(dev); | 602 | dev_put(dev); |
603 | release_sock(sk); | 603 | release_sock(sk); |
604 | return -EACCES; | 604 | return -EPERM; |
605 | } | 605 | } |
606 | nr->user_addr = addr->fsa_digipeater[0]; | 606 | nr->user_addr = addr->fsa_digipeater[0]; |
607 | nr->source_addr = addr->fsa_ax25.sax25_call; | 607 | nr->source_addr = addr->fsa_ax25.sax25_call; |
@@ -1169,7 +1169,12 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1169 | msg->msg_flags |= MSG_TRUNC; | 1169 | msg->msg_flags |= MSG_TRUNC; |
1170 | } | 1170 | } |
1171 | 1171 | ||
1172 | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | 1172 | er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
1173 | if (er < 0) { | ||
1174 | skb_free_datagram(sk, skb); | ||
1175 | release_sock(sk); | ||
1176 | return er; | ||
1177 | } | ||
1173 | 1178 | ||
1174 | if (sax != NULL) { | 1179 | if (sax != NULL) { |
1175 | sax->sax25_family = AF_NETROM; | 1180 | sax->sax25_family = AF_NETROM; |
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index f3f96badf5aa..954405ceae9e 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
@@ -45,7 +45,7 @@ static int make_writable(struct sk_buff *skb, int write_len) | |||
45 | return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 45 | return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
46 | } | 46 | } |
47 | 47 | ||
48 | /* remove VLAN header from packet and update csum accrodingly. */ | 48 | /* remove VLAN header from packet and update csum accordingly. */ |
49 | static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) | 49 | static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) |
50 | { | 50 | { |
51 | struct vlan_hdr *vhdr; | 51 | struct vlan_hdr *vhdr; |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index d8277d29e710..cf58cedad083 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -425,10 +425,10 @@ static int validate_sample(const struct nlattr *attr, | |||
425 | static int validate_tp_port(const struct sw_flow_key *flow_key) | 425 | static int validate_tp_port(const struct sw_flow_key *flow_key) |
426 | { | 426 | { |
427 | if (flow_key->eth.type == htons(ETH_P_IP)) { | 427 | if (flow_key->eth.type == htons(ETH_P_IP)) { |
428 | if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst) | 428 | if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst) |
429 | return 0; | 429 | return 0; |
430 | } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { | 430 | } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { |
431 | if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst) | 431 | if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst) |
432 | return 0; | 432 | return 0; |
433 | } | 433 | } |
434 | 434 | ||
@@ -460,7 +460,7 @@ static int validate_set(const struct nlattr *a, | |||
460 | if (flow_key->eth.type != htons(ETH_P_IP)) | 460 | if (flow_key->eth.type != htons(ETH_P_IP)) |
461 | return -EINVAL; | 461 | return -EINVAL; |
462 | 462 | ||
463 | if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst) | 463 | if (!flow_key->ip.proto) |
464 | return -EINVAL; | 464 | return -EINVAL; |
465 | 465 | ||
466 | ipv4_key = nla_data(ovs_key); | 466 | ipv4_key = nla_data(ovs_key); |
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index 9b75617ca4e0..c30df1a10c67 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h | |||
@@ -145,15 +145,17 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies); | |||
145 | * OVS_KEY_ATTR_PRIORITY 4 -- 4 8 | 145 | * OVS_KEY_ATTR_PRIORITY 4 -- 4 8 |
146 | * OVS_KEY_ATTR_IN_PORT 4 -- 4 8 | 146 | * OVS_KEY_ATTR_IN_PORT 4 -- 4 8 |
147 | * OVS_KEY_ATTR_ETHERNET 12 -- 4 16 | 147 | * OVS_KEY_ATTR_ETHERNET 12 -- 4 16 |
148 | * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 (outer VLAN ethertype) | ||
148 | * OVS_KEY_ATTR_8021Q 4 -- 4 8 | 149 | * OVS_KEY_ATTR_8021Q 4 -- 4 8 |
149 | * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 | 150 | * OVS_KEY_ATTR_ENCAP 0 -- 4 4 (VLAN encapsulation) |
151 | * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 (inner VLAN ethertype) | ||
150 | * OVS_KEY_ATTR_IPV6 40 -- 4 44 | 152 | * OVS_KEY_ATTR_IPV6 40 -- 4 44 |
151 | * OVS_KEY_ATTR_ICMPV6 2 2 4 8 | 153 | * OVS_KEY_ATTR_ICMPV6 2 2 4 8 |
152 | * OVS_KEY_ATTR_ND 28 -- 4 32 | 154 | * OVS_KEY_ATTR_ND 28 -- 4 32 |
153 | * ------------------------------------------------- | 155 | * ------------------------------------------------- |
154 | * total 132 | 156 | * total 144 |
155 | */ | 157 | */ |
156 | #define FLOW_BUFSIZE 132 | 158 | #define FLOW_BUFSIZE 144 |
157 | 159 | ||
158 | int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *); | 160 | int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *); |
159 | int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, | 161 | int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, |
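
The size table above follows the usual netlink accounting: each attribute costs a 4-byte header plus its payload, rounded up to a 4-byte boundary, and the revised total must also cover the extra outer ETHERTYPE and empty ENCAP attributes used for VLAN encapsulation. A small standalone check of that arithmetic; the payload sizes are copied from the comment, while NLA_HDRLEN/NLA_ALIGN are reimplemented locally rather than taken from kernel headers.

#include <stdio.h>

#define NLA_ALIGNTO     4
#define NLA_ALIGN(len)  (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN      4   /* aligned sizeof(struct nlattr): two u16 fields */

/* Size of one attribute on the wire: header plus padded payload. */
static int nla_total(int payload)
{
        return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
        /* payload sizes from the FLOW_BUFSIZE comment in net/openvswitch/flow.h */
        const int payload[] = {
                4,  /* OVS_KEY_ATTR_PRIORITY                   */
                4,  /* OVS_KEY_ATTR_IN_PORT                    */
                12, /* OVS_KEY_ATTR_ETHERNET                   */
                2,  /* OVS_KEY_ATTR_ETHERTYPE (outer VLAN)     */
                4,  /* OVS_KEY_ATTR_8021Q                      */
                0,  /* OVS_KEY_ATTR_ENCAP (VLAN encapsulation) */
                2,  /* OVS_KEY_ATTR_ETHERTYPE (inner VLAN)     */
                40, /* OVS_KEY_ATTR_IPV6                       */
                2,  /* OVS_KEY_ATTR_ICMPV6                     */
                28, /* OVS_KEY_ATTR_ND                         */
        };
        int i, total = 0;

        for (i = 0; i < (int)(sizeof(payload) / sizeof(payload[0])); i++)
                total += nla_total(payload[i]);

        printf("FLOW_BUFSIZE = %d\n", total);   /* prints 144 */
        return 0;
}
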
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 6aabd77d1cfd..564b9fc8efd3 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -250,10 +250,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
250 | else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL) | 250 | else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL) |
251 | cl = defmap[TC_PRIO_BESTEFFORT]; | 251 | cl = defmap[TC_PRIO_BESTEFFORT]; |
252 | 252 | ||
253 | if (cl == NULL || cl->level >= head->level) | 253 | if (cl == NULL) |
254 | goto fallback; | 254 | goto fallback; |
255 | } | 255 | } |
256 | 256 | if (cl->level >= head->level) | |
257 | goto fallback; | ||
257 | #ifdef CONFIG_NET_CLS_ACT | 258 | #ifdef CONFIG_NET_CLS_ACT |
258 | switch (result) { | 259 | switch (result) { |
259 | case TC_ACT_QUEUED: | 260 | case TC_ACT_QUEUED: |
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 9fc1c62ec80e..4e606fcb2534 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c | |||
@@ -191,7 +191,6 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
191 | 191 | ||
192 | if (list_empty(&flow->flowchain)) { | 192 | if (list_empty(&flow->flowchain)) { |
193 | list_add_tail(&flow->flowchain, &q->new_flows); | 193 | list_add_tail(&flow->flowchain, &q->new_flows); |
194 | codel_vars_init(&flow->cvars); | ||
195 | q->new_flow_count++; | 194 | q->new_flow_count++; |
196 | flow->deficit = q->quantum; | 195 | flow->deficit = q->quantum; |
197 | flow->dropped = 0; | 196 | flow->dropped = 0; |
@@ -418,6 +417,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) | |||
418 | struct fq_codel_flow *flow = q->flows + i; | 417 | struct fq_codel_flow *flow = q->flows + i; |
419 | 418 | ||
420 | INIT_LIST_HEAD(&flow->flowchain); | 419 | INIT_LIST_HEAD(&flow->flowchain); |
420 | codel_vars_init(&flow->cvars); | ||
421 | } | 421 | } |
422 | } | 422 | } |
423 | if (sch->limit >= 1) | 423 | if (sch->limit >= 1) |
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index e901583e4ea5..d42234c0f13b 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c | |||
@@ -102,9 +102,8 @@ static inline int gred_wred_mode_check(struct Qdisc *sch) | |||
102 | if (q == NULL) | 102 | if (q == NULL) |
103 | continue; | 103 | continue; |
104 | 104 | ||
105 | for (n = 0; n < table->DPs; n++) | 105 | for (n = i + 1; n < table->DPs; n++) |
106 | if (table->tab[n] && table->tab[n] != q && | 106 | if (table->tab[n] && table->tab[n]->prio == q->prio) |
107 | table->tab[n]->prio == q->prio) | ||
108 | return 1; | 107 | return 1; |
109 | } | 108 | } |
110 | 109 | ||
@@ -137,6 +136,7 @@ static inline void gred_store_wred_set(struct gred_sched *table, | |||
137 | struct gred_sched_data *q) | 136 | struct gred_sched_data *q) |
138 | { | 137 | { |
139 | table->wred_set.qavg = q->vars.qavg; | 138 | table->wred_set.qavg = q->vars.qavg; |
139 | table->wred_set.qidlestart = q->vars.qidlestart; | ||
140 | } | 140 | } |
141 | 141 | ||
142 | static inline int gred_use_ecn(struct gred_sched *t) | 142 | static inline int gred_use_ecn(struct gred_sched *t) |
@@ -176,7 +176,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
176 | skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp; | 176 | skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp; |
177 | } | 177 | } |
178 | 178 | ||
179 | /* sum up all the qaves of prios <= to ours to get the new qave */ | 179 | /* sum up all the qaves of prios < ours to get the new qave */ |
180 | if (!gred_wred_mode(t) && gred_rio_mode(t)) { | 180 | if (!gred_wred_mode(t) && gred_rio_mode(t)) { |
181 | int i; | 181 | int i; |
182 | 182 | ||
@@ -260,16 +260,18 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch) | |||
260 | } else { | 260 | } else { |
261 | q->backlog -= qdisc_pkt_len(skb); | 261 | q->backlog -= qdisc_pkt_len(skb); |
262 | 262 | ||
263 | if (!q->backlog && !gred_wred_mode(t)) | 263 | if (gred_wred_mode(t)) { |
264 | red_start_of_idle_period(&q->vars); | 264 | if (!sch->qstats.backlog) |
265 | red_start_of_idle_period(&t->wred_set); | ||
266 | } else { | ||
267 | if (!q->backlog) | ||
268 | red_start_of_idle_period(&q->vars); | ||
269 | } | ||
265 | } | 270 | } |
266 | 271 | ||
267 | return skb; | 272 | return skb; |
268 | } | 273 | } |
269 | 274 | ||
270 | if (gred_wred_mode(t) && !red_is_idling(&t->wred_set)) | ||
271 | red_start_of_idle_period(&t->wred_set); | ||
272 | |||
273 | return NULL; | 275 | return NULL; |
274 | } | 276 | } |
275 | 277 | ||
@@ -291,19 +293,20 @@ static unsigned int gred_drop(struct Qdisc *sch) | |||
291 | q->backlog -= len; | 293 | q->backlog -= len; |
292 | q->stats.other++; | 294 | q->stats.other++; |
293 | 295 | ||
294 | if (!q->backlog && !gred_wred_mode(t)) | 296 | if (gred_wred_mode(t)) { |
295 | red_start_of_idle_period(&q->vars); | 297 | if (!sch->qstats.backlog) |
298 | red_start_of_idle_period(&t->wred_set); | ||
299 | } else { | ||
300 | if (!q->backlog) | ||
301 | red_start_of_idle_period(&q->vars); | ||
302 | } | ||
296 | } | 303 | } |
297 | 304 | ||
298 | qdisc_drop(skb, sch); | 305 | qdisc_drop(skb, sch); |
299 | return len; | 306 | return len; |
300 | } | 307 | } |
301 | 308 | ||
302 | if (gred_wred_mode(t) && !red_is_idling(&t->wred_set)) | ||
303 | red_start_of_idle_period(&t->wred_set); | ||
304 | |||
305 | return 0; | 309 | return 0; |
306 | |||
307 | } | 310 | } |
308 | 311 | ||
309 | static void gred_reset(struct Qdisc *sch) | 312 | static void gred_reset(struct Qdisc *sch) |
@@ -535,6 +538,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
535 | for (i = 0; i < MAX_DPs; i++) { | 538 | for (i = 0; i < MAX_DPs; i++) { |
536 | struct gred_sched_data *q = table->tab[i]; | 539 | struct gred_sched_data *q = table->tab[i]; |
537 | struct tc_gred_qopt opt; | 540 | struct tc_gred_qopt opt; |
541 | unsigned long qavg; | ||
538 | 542 | ||
539 | memset(&opt, 0, sizeof(opt)); | 543 | memset(&opt, 0, sizeof(opt)); |
540 | 544 | ||
@@ -566,7 +570,9 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
566 | if (gred_wred_mode(table)) | 570 | if (gred_wred_mode(table)) |
567 | gred_load_wred_set(table, q); | 571 | gred_load_wred_set(table, q); |
568 | 572 | ||
569 | opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg); | 573 | qavg = red_calc_qavg(&q->parms, &q->vars, |
574 | q->vars.qavg >> q->parms.Wlog); | ||
575 | opt.qave = qavg >> q->parms.Wlog; | ||
570 | 576 | ||
571 | append_opt: | 577 | append_opt: |
572 | if (nla_append(skb, sizeof(opt), &opt) < 0) | 578 | if (nla_append(skb, sizeof(opt), &opt) < 0) |
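
The last gred hunk fixes how the average queue size is reported: RED keeps qavg scaled up by 2^Wlog (the EWMA weight is 1/2^Wlog), red_calc_qavg() expects an unscaled backlog sample, and the value copied into tc_gred_qopt has to be shifted back down. A small userspace sketch of that fixed-point EWMA, with made-up sample values, just to show where the two ">> Wlog" shifts come from.

#include <stdio.h>

/* EWMA with weight w = 1/2^Wlog, state kept scaled by 2^Wlog:
 *   avg_scaled' = avg_scaled - (avg_scaled >> Wlog) + sample
 * which equals 2^Wlog * ((1 - w) * avg + w * sample).
 * The update takes an unscaled sample, and reporting the average
 * needs a final shift down by Wlog, as gred_dump() now does. */
static unsigned long ewma_update(unsigned long avg_scaled,
                                 unsigned long sample, int wlog)
{
        return avg_scaled - (avg_scaled >> wlog) + sample;
}

int main(void)
{
        const int wlog = 9;              /* a typical RED Wlog value */
        unsigned long avg_scaled = 0;
        unsigned long backlog[] = { 1000, 3000, 2000, 4000, 2500 };
        unsigned int i;

        for (i = 0; i < sizeof(backlog) / sizeof(backlog[0]); i++)
                avg_scaled = ewma_update(avg_scaled, backlog[i], wlog);

        /* report in the same units as the samples, not 2^Wlog larger */
        printf("qave = %lu\n", avg_scaled >> wlog);
        return 0;
}
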
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index e4723d31fdd5..211a21217045 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c | |||
@@ -865,7 +865,10 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl) | |||
865 | if (mask) { | 865 | if (mask) { |
866 | struct qfq_group *next = qfq_ffs(q, mask); | 866 | struct qfq_group *next = qfq_ffs(q, mask); |
867 | if (qfq_gt(roundedF, next->F)) { | 867 | if (qfq_gt(roundedF, next->F)) { |
868 | cl->S = next->F; | 868 | if (qfq_gt(limit, next->F)) |
869 | cl->S = next->F; | ||
870 | else /* preserve timestamp correctness */ | ||
871 | cl->S = limit; | ||
869 | return; | 872 | return; |
870 | } | 873 | } |
871 | } | 874 | } |
diff --git a/net/sctp/output.c b/net/sctp/output.c index 838e18b4d7ea..be50aa234dcd 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -364,6 +364,25 @@ finish: | |||
364 | return retval; | 364 | return retval; |
365 | } | 365 | } |
366 | 366 | ||
367 | static void sctp_packet_release_owner(struct sk_buff *skb) | ||
368 | { | ||
369 | sk_free(skb->sk); | ||
370 | } | ||
371 | |||
372 | static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk) | ||
373 | { | ||
374 | skb_orphan(skb); | ||
375 | skb->sk = sk; | ||
376 | skb->destructor = sctp_packet_release_owner; | ||
377 | |||
378 | /* | ||
379 | * The data chunks have already been accounted for in sctp_sendmsg(), | ||
380 | * therefore only reserve a single byte to keep socket around until | ||
381 | * the packet has been transmitted. | ||
382 | */ | ||
383 | atomic_inc(&sk->sk_wmem_alloc); | ||
384 | } | ||
385 | |||
367 | /* All packets are sent to the network through this function from | 386 | /* All packets are sent to the network through this function from |
368 | * sctp_outq_tail(). | 387 | * sctp_outq_tail(). |
369 | * | 388 | * |
@@ -405,7 +424,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
405 | /* Set the owning socket so that we know where to get the | 424 | /* Set the owning socket so that we know where to get the |
406 | * destination IP address. | 425 | * destination IP address. |
407 | */ | 426 | */ |
408 | skb_set_owner_w(nskb, sk); | 427 | sctp_packet_set_owner_w(nskb, sk); |
409 | 428 | ||
410 | if (!sctp_transport_dst_check(tp)) { | 429 | if (!sctp_transport_dst_check(tp)) { |
411 | sctp_transport_route(tp, NULL, sctp_sk(sk)); | 430 | sctp_transport_route(tp, NULL, sctp_sk(sk)); |
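
sctp_packet_set_owner_w() replaces skb_set_owner_w() so the skb no longer charges its full truesize to sk_wmem_alloc (the data chunks were already accounted for in sctp_sendmsg()); it takes only a single-byte reference so the socket stays alive until the destructor runs. The userspace sketch below models just that lifetime idea, with a refcount standing in for sk_wmem_alloc; all names here are illustrative, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

struct owner {                      /* stands in for struct sock     */
        int refs;                   /* stands in for sk_wmem_alloc   */
};

struct packet {                     /* stands in for struct sk_buff  */
        struct owner *owner;
        void (*destructor)(struct packet *);
};

static void owner_put(struct owner *o)
{
        if (--o->refs == 0)
                printf("owner freed\n");
}

static void packet_release_owner(struct packet *p)
{
        owner_put(p->owner);        /* like sk_free() in the destructor */
}

/* Take the smallest possible charge: enough to pin the owner until the
 * packet is gone, without double-counting data already charged. */
static void packet_set_owner(struct packet *p, struct owner *o)
{
        p->owner = o;
        p->destructor = packet_release_owner;
        o->refs++;                  /* like atomic_inc(&sk->sk_wmem_alloc) */
}

static void packet_free(struct packet *p)
{
        if (p->destructor)
                p->destructor(p);
        free(p);
}

int main(void)
{
        struct owner sk = { .refs = 1 };
        struct packet *pkt = calloc(1, sizeof(*pkt));

        packet_set_owner(pkt, &sk);
        owner_put(&sk);             /* application closes the socket...  */
        packet_free(pkt);           /* ...but it is only released here   */
        return 0;
}
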
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index a5a402a7d21f..5d7f61d7559c 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -969,11 +969,11 @@ static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) | |||
969 | return false; | 969 | return false; |
970 | } | 970 | } |
971 | 971 | ||
972 | static void xprt_alloc_slot(struct rpc_task *task) | 972 | void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) |
973 | { | 973 | { |
974 | struct rpc_xprt *xprt = task->tk_xprt; | ||
975 | struct rpc_rqst *req; | 974 | struct rpc_rqst *req; |
976 | 975 | ||
976 | spin_lock(&xprt->reserve_lock); | ||
977 | if (!list_empty(&xprt->free)) { | 977 | if (!list_empty(&xprt->free)) { |
978 | req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); | 978 | req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); |
979 | list_del(&req->rq_list); | 979 | list_del(&req->rq_list); |
@@ -994,12 +994,29 @@ static void xprt_alloc_slot(struct rpc_task *task) | |||
994 | default: | 994 | default: |
995 | task->tk_status = -EAGAIN; | 995 | task->tk_status = -EAGAIN; |
996 | } | 996 | } |
997 | spin_unlock(&xprt->reserve_lock); | ||
997 | return; | 998 | return; |
998 | out_init_req: | 999 | out_init_req: |
999 | task->tk_status = 0; | 1000 | task->tk_status = 0; |
1000 | task->tk_rqstp = req; | 1001 | task->tk_rqstp = req; |
1001 | xprt_request_init(task, xprt); | 1002 | xprt_request_init(task, xprt); |
1003 | spin_unlock(&xprt->reserve_lock); | ||
1004 | } | ||
1005 | EXPORT_SYMBOL_GPL(xprt_alloc_slot); | ||
1006 | |||
1007 | void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) | ||
1008 | { | ||
1009 | /* Note: grabbing the xprt_lock_write() ensures that we throttle | ||
1010 | * new slot allocation if the transport is congested (i.e. when | ||
1011 | * reconnecting a stream transport or when out of socket write | ||
1012 | * buffer space). | ||
1013 | */ | ||
1014 | if (xprt_lock_write(xprt, task)) { | ||
1015 | xprt_alloc_slot(xprt, task); | ||
1016 | xprt_release_write(xprt, task); | ||
1017 | } | ||
1002 | } | 1018 | } |
1019 | EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot); | ||
1003 | 1020 | ||
1004 | static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) | 1021 | static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) |
1005 | { | 1022 | { |
@@ -1083,20 +1100,9 @@ void xprt_reserve(struct rpc_task *task) | |||
1083 | if (task->tk_rqstp != NULL) | 1100 | if (task->tk_rqstp != NULL) |
1084 | return; | 1101 | return; |
1085 | 1102 | ||
1086 | /* Note: grabbing the xprt_lock_write() here is not strictly needed, | ||
1087 | * but ensures that we throttle new slot allocation if the transport | ||
1088 | * is congested (e.g. if reconnecting or if we're out of socket | ||
1089 | * write buffer space). | ||
1090 | */ | ||
1091 | task->tk_timeout = 0; | 1103 | task->tk_timeout = 0; |
1092 | task->tk_status = -EAGAIN; | 1104 | task->tk_status = -EAGAIN; |
1093 | if (!xprt_lock_write(xprt, task)) | 1105 | xprt->ops->alloc_slot(xprt, task); |
1094 | return; | ||
1095 | |||
1096 | spin_lock(&xprt->reserve_lock); | ||
1097 | xprt_alloc_slot(task); | ||
1098 | spin_unlock(&xprt->reserve_lock); | ||
1099 | xprt_release_write(xprt, task); | ||
1100 | } | 1106 | } |
1101 | 1107 | ||
1102 | static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) | 1108 | static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) |
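
xprt_alloc_slot() now takes the transport explicitly, does its own reserve_lock locking, and is exported alongside the new xprt_lock_and_alloc_slot(), so each transport can choose a slot-allocation strategy through its rpc_xprt_ops table: plain allocation for datagram and RDMA transports, allocation throttled by the transport write lock for stream transports, as the xprtrdma and xprtsock hunks below show. A minimal function-pointer sketch of that pattern, with invented types in place of the SUNRPC ones.

#include <stdio.h>
#include <stdbool.h>

struct xprt;                                    /* illustrative, not SUNRPC */

struct xprt_ops {
        void (*alloc_slot)(struct xprt *x);     /* per-transport strategy */
};

struct xprt {
        const struct xprt_ops *ops;
        bool congested;
        int free_slots;
};

static void alloc_slot(struct xprt *x)          /* plain: UDP/RDMA-style */
{
        if (x->free_slots > 0) {
                x->free_slots--;
                printf("slot allocated, %d left\n", x->free_slots);
        }
}

static void lock_and_alloc_slot(struct xprt *x) /* throttled: TCP-style */
{
        if (x->congested) {                     /* models xprt_lock_write() failing */
                printf("transport busy, retry later\n");
                return;
        }
        alloc_slot(x);
}

int main(void)
{
        static const struct xprt_ops udp_ops = { .alloc_slot = alloc_slot };
        static const struct xprt_ops tcp_ops = { .alloc_slot = lock_and_alloc_slot };
        struct xprt udp = { &udp_ops, false, 2 };
        struct xprt tcp = { &tcp_ops, true,  2 };

        udp.ops->alloc_slot(&udp);              /* always tries the free list */
        tcp.ops->alloc_slot(&tcp);              /* backs off while congested  */
        return 0;
}
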
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 06cdbff79e4a..5d9202dc7cb1 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
@@ -713,6 +713,7 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) | |||
713 | static struct rpc_xprt_ops xprt_rdma_procs = { | 713 | static struct rpc_xprt_ops xprt_rdma_procs = { |
714 | .reserve_xprt = xprt_rdma_reserve_xprt, | 714 | .reserve_xprt = xprt_rdma_reserve_xprt, |
715 | .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */ | 715 | .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */ |
716 | .alloc_slot = xprt_alloc_slot, | ||
716 | .release_request = xprt_release_rqst_cong, /* ditto */ | 717 | .release_request = xprt_release_rqst_cong, /* ditto */ |
717 | .set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */ | 718 | .set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */ |
718 | .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */ | 719 | .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */ |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 400567243f84..a35b8e52e551 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -2473,6 +2473,7 @@ static void bc_destroy(struct rpc_xprt *xprt) | |||
2473 | static struct rpc_xprt_ops xs_local_ops = { | 2473 | static struct rpc_xprt_ops xs_local_ops = { |
2474 | .reserve_xprt = xprt_reserve_xprt, | 2474 | .reserve_xprt = xprt_reserve_xprt, |
2475 | .release_xprt = xs_tcp_release_xprt, | 2475 | .release_xprt = xs_tcp_release_xprt, |
2476 | .alloc_slot = xprt_alloc_slot, | ||
2476 | .rpcbind = xs_local_rpcbind, | 2477 | .rpcbind = xs_local_rpcbind, |
2477 | .set_port = xs_local_set_port, | 2478 | .set_port = xs_local_set_port, |
2478 | .connect = xs_connect, | 2479 | .connect = xs_connect, |
@@ -2489,6 +2490,7 @@ static struct rpc_xprt_ops xs_udp_ops = { | |||
2489 | .set_buffer_size = xs_udp_set_buffer_size, | 2490 | .set_buffer_size = xs_udp_set_buffer_size, |
2490 | .reserve_xprt = xprt_reserve_xprt_cong, | 2491 | .reserve_xprt = xprt_reserve_xprt_cong, |
2491 | .release_xprt = xprt_release_xprt_cong, | 2492 | .release_xprt = xprt_release_xprt_cong, |
2493 | .alloc_slot = xprt_alloc_slot, | ||
2492 | .rpcbind = rpcb_getport_async, | 2494 | .rpcbind = rpcb_getport_async, |
2493 | .set_port = xs_set_port, | 2495 | .set_port = xs_set_port, |
2494 | .connect = xs_connect, | 2496 | .connect = xs_connect, |
@@ -2506,6 +2508,7 @@ static struct rpc_xprt_ops xs_udp_ops = { | |||
2506 | static struct rpc_xprt_ops xs_tcp_ops = { | 2508 | static struct rpc_xprt_ops xs_tcp_ops = { |
2507 | .reserve_xprt = xprt_reserve_xprt, | 2509 | .reserve_xprt = xprt_reserve_xprt, |
2508 | .release_xprt = xs_tcp_release_xprt, | 2510 | .release_xprt = xs_tcp_release_xprt, |
2511 | .alloc_slot = xprt_lock_and_alloc_slot, | ||
2509 | .rpcbind = rpcb_getport_async, | 2512 | .rpcbind = rpcb_getport_async, |
2510 | .set_port = xs_set_port, | 2513 | .set_port = xs_set_port, |
2511 | .connect = xs_connect, | 2514 | .connect = xs_connect, |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 97026f3b215a..1e37dbf00cb3 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -5633,8 +5633,10 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) | |||
5633 | sizeof(connect.ht_capa_mask)); | 5633 | sizeof(connect.ht_capa_mask)); |
5634 | 5634 | ||
5635 | if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { | 5635 | if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { |
5636 | if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) | 5636 | if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) { |
5637 | kfree(connkeys); | ||
5637 | return -EINVAL; | 5638 | return -EINVAL; |
5639 | } | ||
5638 | memcpy(&connect.ht_capa, | 5640 | memcpy(&connect.ht_capa, |
5639 | nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]), | 5641 | nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]), |
5640 | sizeof(connect.ht_capa)); | 5642 | sizeof(connect.ht_capa)); |
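
The nl80211_connect() hunk plugs a leak: once connkeys has been allocated, every early return must free it, including the new -EINVAL path taken when HT capabilities are supplied without a mask. One common way to keep such error paths consistent is a single exit label; the sketch below shows that shape in plain C with invented names, it is not the nl80211 code itself.

#include <stdlib.h>
#include <errno.h>

struct keys { int dummy; };

static int do_connect(int have_ht_capa, int have_ht_capa_mask)
{
        struct keys *connkeys = malloc(sizeof(*connkeys));
        int err = 0;

        if (!connkeys)
                return -ENOMEM;

        if (have_ht_capa && !have_ht_capa_mask) {
                err = -EINVAL;          /* invalid attribute combination */
                goto out_free;          /* don't leak connkeys           */
        }

        /* ... would hand connkeys over to the driver on success ... */
        return 0;

out_free:
        free(connkeys);
        return err;
}

int main(void)
{
        return do_connect(1, 0) == -EINVAL ? 0 : 1;
}
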
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 54a0dc2e2f8d..ab2bb42fe094 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c | |||
@@ -212,7 +212,7 @@ resume: | |||
212 | /* only the first xfrm gets the encap type */ | 212 | /* only the first xfrm gets the encap type */ |
213 | encap_type = 0; | 213 | encap_type = 0; |
214 | 214 | ||
215 | if (async && x->repl->check(x, skb, seq)) { | 215 | if (async && x->repl->recheck(x, skb, seq)) { |
216 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); | 216 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); |
217 | goto drop_unlock; | 217 | goto drop_unlock; |
218 | } | 218 | } |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 5a2aa17e4d3c..387848e90078 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -585,6 +585,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) | |||
585 | xfrm_pol_hold(policy); | 585 | xfrm_pol_hold(policy); |
586 | net->xfrm.policy_count[dir]++; | 586 | net->xfrm.policy_count[dir]++; |
587 | atomic_inc(&flow_cache_genid); | 587 | atomic_inc(&flow_cache_genid); |
588 | rt_genid_bump(net); | ||
588 | if (delpol) | 589 | if (delpol) |
589 | __xfrm_policy_unlink(delpol, dir); | 590 | __xfrm_policy_unlink(delpol, dir); |
590 | policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir); | 591 | policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir); |
@@ -1763,7 +1764,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family, | |||
1763 | 1764 | ||
1764 | if (!afinfo) { | 1765 | if (!afinfo) { |
1765 | dst_release(dst_orig); | 1766 | dst_release(dst_orig); |
1766 | ret = ERR_PTR(-EINVAL); | 1767 | return ERR_PTR(-EINVAL); |
1767 | } else { | 1768 | } else { |
1768 | ret = afinfo->blackhole_route(net, dst_orig); | 1769 | ret = afinfo->blackhole_route(net, dst_orig); |
1769 | } | 1770 | } |
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 2f6d11d04a2b..3efb07d3eb27 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c | |||
@@ -420,6 +420,18 @@ err: | |||
420 | return -EINVAL; | 420 | return -EINVAL; |
421 | } | 421 | } |
422 | 422 | ||
423 | static int xfrm_replay_recheck_esn(struct xfrm_state *x, | ||
424 | struct sk_buff *skb, __be32 net_seq) | ||
425 | { | ||
426 | if (unlikely(XFRM_SKB_CB(skb)->seq.input.hi != | ||
427 | htonl(xfrm_replay_seqhi(x, net_seq)))) { | ||
428 | x->stats.replay_window++; | ||
429 | return -EINVAL; | ||
430 | } | ||
431 | |||
432 | return xfrm_replay_check_esn(x, skb, net_seq); | ||
433 | } | ||
434 | |||
423 | static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq) | 435 | static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq) |
424 | { | 436 | { |
425 | unsigned int bitnr, nr, i; | 437 | unsigned int bitnr, nr, i; |
@@ -479,6 +491,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq) | |||
479 | static struct xfrm_replay xfrm_replay_legacy = { | 491 | static struct xfrm_replay xfrm_replay_legacy = { |
480 | .advance = xfrm_replay_advance, | 492 | .advance = xfrm_replay_advance, |
481 | .check = xfrm_replay_check, | 493 | .check = xfrm_replay_check, |
494 | .recheck = xfrm_replay_check, | ||
482 | .notify = xfrm_replay_notify, | 495 | .notify = xfrm_replay_notify, |
483 | .overflow = xfrm_replay_overflow, | 496 | .overflow = xfrm_replay_overflow, |
484 | }; | 497 | }; |
@@ -486,6 +499,7 @@ static struct xfrm_replay xfrm_replay_legacy = { | |||
486 | static struct xfrm_replay xfrm_replay_bmp = { | 499 | static struct xfrm_replay xfrm_replay_bmp = { |
487 | .advance = xfrm_replay_advance_bmp, | 500 | .advance = xfrm_replay_advance_bmp, |
488 | .check = xfrm_replay_check_bmp, | 501 | .check = xfrm_replay_check_bmp, |
502 | .recheck = xfrm_replay_check_bmp, | ||
489 | .notify = xfrm_replay_notify_bmp, | 503 | .notify = xfrm_replay_notify_bmp, |
490 | .overflow = xfrm_replay_overflow_bmp, | 504 | .overflow = xfrm_replay_overflow_bmp, |
491 | }; | 505 | }; |
@@ -493,6 +507,7 @@ static struct xfrm_replay xfrm_replay_bmp = { | |||
493 | static struct xfrm_replay xfrm_replay_esn = { | 507 | static struct xfrm_replay xfrm_replay_esn = { |
494 | .advance = xfrm_replay_advance_esn, | 508 | .advance = xfrm_replay_advance_esn, |
495 | .check = xfrm_replay_check_esn, | 509 | .check = xfrm_replay_check_esn, |
510 | .recheck = xfrm_replay_recheck_esn, | ||
496 | .notify = xfrm_replay_notify_bmp, | 511 | .notify = xfrm_replay_notify_bmp, |
497 | .overflow = xfrm_replay_overflow_esn, | 512 | .overflow = xfrm_replay_overflow_esn, |
498 | }; | 513 | }; |
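
The new recheck hook exists because of asynchronous crypto: the upper 32 bits of an ESN sequence number are not carried on the wire, so they are inferred from the current replay window when the packet first arrives and stashed in the skb; by the time the async resume path runs, the window may have advanced, so xfrm_replay_recheck_esn() verifies the stashed high half still matches before repeating the normal window check (xfrm_input.c now calls ->recheck on resume). Below is a hedged, standalone rendering of that high-half inference and recheck, simplified along the lines of RFC 4303 Appendix A; it is not the kernel functions themselves.

#include <stdio.h>
#include <stdint.h>

struct esn_state {
        uint32_t seq;       /* low 32 bits of the highest seq seen  */
        uint32_t seq_hi;    /* high 32 bits of the highest seq seen */
        uint32_t window;    /* replay window size                   */
};

/* Infer the high 32 bits for an incoming low-32-bit sequence number. */
static uint32_t esn_seqhi(const struct esn_state *st, uint32_t seq)
{
        uint32_t seq_hi = st->seq_hi;
        uint32_t bottom = st->seq - st->window + 1;

        if (st->seq >= st->window - 1) {
                /* window lies within one 2^32 subspace */
                if (seq < bottom)
                        seq_hi++;
        } else {
                /* window wraps around a 2^32 boundary */
                if (seq >= bottom)
                        seq_hi--;
        }
        return seq_hi;
}

int main(void)
{
        struct esn_state st = { .seq = 10, .seq_hi = 3, .window = 128 };

        /* Value computed and cached when the packet was queued for
         * asynchronous processing... */
        uint32_t cached_hi = esn_seqhi(&st, 5);

        /* ...the window moves on while the crypto runs (exaggerated
         * jump, just to force a mismatch)... */
        st.seq = 4000000000u;

        /* ...so the resume path rechecks before accepting the packet. */
        if (cached_hi != esn_seqhi(&st, 5))
                printf("seq_hi changed, drop and count a replay error\n");
        return 0;
}
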
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index e75d8e47f35c..289f4bf18ff0 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -123,9 +123,21 @@ static inline int verify_replay(struct xfrm_usersa_info *p, | |||
123 | struct nlattr **attrs) | 123 | struct nlattr **attrs) |
124 | { | 124 | { |
125 | struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; | 125 | struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; |
126 | struct xfrm_replay_state_esn *rs; | ||
126 | 127 | ||
127 | if ((p->flags & XFRM_STATE_ESN) && !rt) | 128 | if (p->flags & XFRM_STATE_ESN) { |
128 | return -EINVAL; | 129 | if (!rt) |
130 | return -EINVAL; | ||
131 | |||
132 | rs = nla_data(rt); | ||
133 | |||
134 | if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) | ||
135 | return -EINVAL; | ||
136 | |||
137 | if (nla_len(rt) < xfrm_replay_state_esn_len(rs) && | ||
138 | nla_len(rt) != sizeof(*rs)) | ||
139 | return -EINVAL; | ||
140 | } | ||
129 | 141 | ||
130 | if (!rt) | 142 | if (!rt) |
131 | return 0; | 143 | return 0; |
@@ -370,14 +382,15 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es | |||
370 | struct nlattr *rp) | 382 | struct nlattr *rp) |
371 | { | 383 | { |
372 | struct xfrm_replay_state_esn *up; | 384 | struct xfrm_replay_state_esn *up; |
385 | int ulen; | ||
373 | 386 | ||
374 | if (!replay_esn || !rp) | 387 | if (!replay_esn || !rp) |
375 | return 0; | 388 | return 0; |
376 | 389 | ||
377 | up = nla_data(rp); | 390 | up = nla_data(rp); |
391 | ulen = xfrm_replay_state_esn_len(up); | ||
378 | 392 | ||
379 | if (xfrm_replay_state_esn_len(replay_esn) != | 393 | if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen) |
380 | xfrm_replay_state_esn_len(up)) | ||
381 | return -EINVAL; | 394 | return -EINVAL; |
382 | 395 | ||
383 | return 0; | 396 | return 0; |
@@ -388,22 +401,28 @@ static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn | |||
388 | struct nlattr *rta) | 401 | struct nlattr *rta) |
389 | { | 402 | { |
390 | struct xfrm_replay_state_esn *p, *pp, *up; | 403 | struct xfrm_replay_state_esn *p, *pp, *up; |
404 | int klen, ulen; | ||
391 | 405 | ||
392 | if (!rta) | 406 | if (!rta) |
393 | return 0; | 407 | return 0; |
394 | 408 | ||
395 | up = nla_data(rta); | 409 | up = nla_data(rta); |
410 | klen = xfrm_replay_state_esn_len(up); | ||
411 | ulen = nla_len(rta) >= klen ? klen : sizeof(*up); | ||
396 | 412 | ||
397 | p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); | 413 | p = kzalloc(klen, GFP_KERNEL); |
398 | if (!p) | 414 | if (!p) |
399 | return -ENOMEM; | 415 | return -ENOMEM; |
400 | 416 | ||
401 | pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); | 417 | pp = kzalloc(klen, GFP_KERNEL); |
402 | if (!pp) { | 418 | if (!pp) { |
403 | kfree(p); | 419 | kfree(p); |
404 | return -ENOMEM; | 420 | return -ENOMEM; |
405 | } | 421 | } |
406 | 422 | ||
423 | memcpy(p, up, ulen); | ||
424 | memcpy(pp, up, ulen); | ||
425 | |||
407 | *replay_esn = p; | 426 | *replay_esn = p; |
408 | *preplay_esn = pp; | 427 | *preplay_esn = pp; |
409 | 428 | ||
@@ -442,10 +461,11 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info * | |||
442 | * somehow made shareable and move it to xfrm_state.c - JHS | 461 | * somehow made shareable and move it to xfrm_state.c - JHS |
443 | * | 462 | * |
444 | */ | 463 | */ |
445 | static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs) | 464 | static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, |
465 | int update_esn) | ||
446 | { | 466 | { |
447 | struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; | 467 | struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; |
448 | struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL]; | 468 | struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL; |
449 | struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; | 469 | struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; |
450 | struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; | 470 | struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; |
451 | struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; | 471 | struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; |
@@ -555,7 +575,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, | |||
555 | goto error; | 575 | goto error; |
556 | 576 | ||
557 | /* override default values from above */ | 577 | /* override default values from above */ |
558 | xfrm_update_ae_params(x, attrs); | 578 | xfrm_update_ae_params(x, attrs, 0); |
559 | 579 | ||
560 | return x; | 580 | return x; |
561 | 581 | ||
@@ -689,6 +709,7 @@ out: | |||
689 | 709 | ||
690 | static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) | 710 | static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) |
691 | { | 711 | { |
712 | memset(p, 0, sizeof(*p)); | ||
692 | memcpy(&p->id, &x->id, sizeof(p->id)); | 713 | memcpy(&p->id, &x->id, sizeof(p->id)); |
693 | memcpy(&p->sel, &x->sel, sizeof(p->sel)); | 714 | memcpy(&p->sel, &x->sel, sizeof(p->sel)); |
694 | memcpy(&p->lft, &x->lft, sizeof(p->lft)); | 715 | memcpy(&p->lft, &x->lft, sizeof(p->lft)); |
@@ -742,7 +763,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb) | |||
742 | return -EMSGSIZE; | 763 | return -EMSGSIZE; |
743 | 764 | ||
744 | algo = nla_data(nla); | 765 | algo = nla_data(nla); |
745 | strcpy(algo->alg_name, auth->alg_name); | 766 | strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name)); |
746 | memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8); | 767 | memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8); |
747 | algo->alg_key_len = auth->alg_key_len; | 768 | algo->alg_key_len = auth->alg_key_len; |
748 | 769 | ||
@@ -878,6 +899,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, | |||
878 | { | 899 | { |
879 | struct xfrm_dump_info info; | 900 | struct xfrm_dump_info info; |
880 | struct sk_buff *skb; | 901 | struct sk_buff *skb; |
902 | int err; | ||
881 | 903 | ||
882 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | 904 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); |
883 | if (!skb) | 905 | if (!skb) |
@@ -888,9 +910,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, | |||
888 | info.nlmsg_seq = seq; | 910 | info.nlmsg_seq = seq; |
889 | info.nlmsg_flags = 0; | 911 | info.nlmsg_flags = 0; |
890 | 912 | ||
891 | if (dump_one_state(x, 0, &info)) { | 913 | err = dump_one_state(x, 0, &info); |
914 | if (err) { | ||
892 | kfree_skb(skb); | 915 | kfree_skb(skb); |
893 | return NULL; | 916 | return ERR_PTR(err); |
894 | } | 917 | } |
895 | 918 | ||
896 | return skb; | 919 | return skb; |
@@ -1317,6 +1340,7 @@ static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy | |||
1317 | 1340 | ||
1318 | static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir) | 1341 | static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir) |
1319 | { | 1342 | { |
1343 | memset(p, 0, sizeof(*p)); | ||
1320 | memcpy(&p->sel, &xp->selector, sizeof(p->sel)); | 1344 | memcpy(&p->sel, &xp->selector, sizeof(p->sel)); |
1321 | memcpy(&p->lft, &xp->lft, sizeof(p->lft)); | 1345 | memcpy(&p->lft, &xp->lft, sizeof(p->lft)); |
1322 | memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft)); | 1346 | memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft)); |
@@ -1421,6 +1445,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb) | |||
1421 | struct xfrm_user_tmpl *up = &vec[i]; | 1445 | struct xfrm_user_tmpl *up = &vec[i]; |
1422 | struct xfrm_tmpl *kp = &xp->xfrm_vec[i]; | 1446 | struct xfrm_tmpl *kp = &xp->xfrm_vec[i]; |
1423 | 1447 | ||
1448 | memset(up, 0, sizeof(*up)); | ||
1424 | memcpy(&up->id, &kp->id, sizeof(up->id)); | 1449 | memcpy(&up->id, &kp->id, sizeof(up->id)); |
1425 | up->family = kp->encap_family; | 1450 | up->family = kp->encap_family; |
1426 | memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr)); | 1451 | memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr)); |
@@ -1546,6 +1571,7 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, | |||
1546 | { | 1571 | { |
1547 | struct xfrm_dump_info info; | 1572 | struct xfrm_dump_info info; |
1548 | struct sk_buff *skb; | 1573 | struct sk_buff *skb; |
1574 | int err; | ||
1549 | 1575 | ||
1550 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 1576 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
1551 | if (!skb) | 1577 | if (!skb) |
@@ -1556,9 +1582,10 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, | |||
1556 | info.nlmsg_seq = seq; | 1582 | info.nlmsg_seq = seq; |
1557 | info.nlmsg_flags = 0; | 1583 | info.nlmsg_flags = 0; |
1558 | 1584 | ||
1559 | if (dump_one_policy(xp, dir, 0, &info) < 0) { | 1585 | err = dump_one_policy(xp, dir, 0, &info); |
1586 | if (err) { | ||
1560 | kfree_skb(skb); | 1587 | kfree_skb(skb); |
1561 | return NULL; | 1588 | return ERR_PTR(err); |
1562 | } | 1589 | } |
1563 | 1590 | ||
1564 | return skb; | 1591 | return skb; |
@@ -1822,7 +1849,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1822 | goto out; | 1849 | goto out; |
1823 | 1850 | ||
1824 | spin_lock_bh(&x->lock); | 1851 | spin_lock_bh(&x->lock); |
1825 | xfrm_update_ae_params(x, attrs); | 1852 | xfrm_update_ae_params(x, attrs, 1); |
1826 | spin_unlock_bh(&x->lock); | 1853 | spin_unlock_bh(&x->lock); |
1827 | 1854 | ||
1828 | c.event = nlh->nlmsg_type; | 1855 | c.event = nlh->nlmsg_type; |
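
Several of the xfrm_user hunks share one theme: structures copied out via netlink (copy_to_user_state(), copy_to_user_policy(), copy_to_user_tmpl()) are now zeroed first, copy_to_user_auth() bounds the algorithm name with strncpy(), and the replay-ESN attributes are length-checked against their declared bmp_len, because compiler-inserted padding and unchecked user-supplied lengths would otherwise leak or overrun kernel memory. A tiny standalone illustration of the padding problem and the memset cure; the struct here is made up, but any struct with mixed field sizes has such holes on common ABIs.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct report {             /* illustrative: has a 3-byte padding hole */
        uint8_t  flags;     /* offset 0                                */
                            /* bytes 1-3: padding the compiler inserts */
        uint32_t value;     /* offset 4                                */
};

static void fill_report(struct report *r, int sanitize)
{
        if (sanitize)
                memset(r, 0, sizeof(*r));   /* what the xfrm_user fix adds */
        r->flags = 1;
        r->value = 42;
        /* without the memset, the padding bytes keep whatever was in
         * memory before - that is the information leak */
}

int main(void)
{
        unsigned char raw[sizeof(struct report)];
        struct report r;
        unsigned int i;

        memset(&r, 0xAA, sizeof(r));        /* simulate stale memory */
        fill_report(&r, 0);
        memcpy(raw, &r, sizeof(r));
        for (i = 0; i < sizeof(raw); i++)   /* bytes 1-3 typically print as aa */
                printf("%02x ", raw[i]);
        printf("\n");

        memset(&r, 0xAA, sizeof(r));
        fill_report(&r, 1);
        memcpy(raw, &r, sizeof(r));
        for (i = 0; i < sizeof(raw); i++)   /* bytes 1-3 print as 00 */
                printf("%02x ", raw[i]);
        printf("\n");
        return 0;
}
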