author     Linus Torvalds <torvalds@linux-foundation.org>    2014-06-26 00:08:24 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-06-26 00:08:24 -0400
commit     f40ede392d49b14c23418c4802ad078379667f7d (patch)
tree       c8f230903ddbad68e0e7ebe71c253b9ac72e541a /net
parent     ec71feae064cf0e06ad89f9a2fc573de405ad49e (diff)
parent     de843723f9b989178762196fb24dd050cbe20ca3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix crash in ipvs tot_stats estimator, from Julian Anastasov.
 2) Fix OOPS in nf_nat on netns removal, from Florian Westphal.
 3) Really really really fix locking issues in slip and slcan tty write wakeups, from Tyler Hall.
 4) Fix checksum offloading in fec driver, from Fugang Duan.
 5) Off by one in BPF instruction limit test, from Kees Cook.
 6) Need to clear all TSO capability flags when doing software TSO in tg3 driver, from Prashant Sreedharan.
 7) Fix memory leak in vlan_reorder_header() error path, from Li RongQing.
 8) Fix various bugs in xen-netfront and xen-netback multiqueue support, from David Vrabel and Wei Liu.
 9) Fix deadlock in cxgb4 driver, from Li RongQing.
10) Prevent double free of no-cache DST entries, from Eric Dumazet.
11) Bad csum_start handling in skb_segment() leads to crashes when forwarding, from Tom Herbert.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (76 commits)
  net: fix setting csum_start in skb_segment()
  ipv4: fix dst race in sk_dst_get()
  net: filter: Use kcalloc/kmalloc_array to allocate arrays
  trivial: net: filter: Change kerneldoc parameter order
  trivial: net: filter: Fix typo in comment
  net: allwinner: emac: Add missing free_irq
  cxgb4: use dev_port to identify ports
  xen-netback: bookkeep number of active queues in our own module
  tg3: Change nvram command timeout value to 50ms
  cxgb4: Not need to hold the adap_rcu_lock lock when read adap_rcu_list
  be2net: fix qnq mode detection on VFs
  of: mdio: fixup of_phy_register_fixed_link parsing of new bindings
  at86rf230: fix irq setup
  net: phy: at803x: fix coccinelle warnings
  net/mlx4_core: Fix the error flow when probing with invalid VF configuration
  tulip: Poll link status more frequently for Comet chips
  net: huawei_cdc_ncm: increase command buffer size
  drivers: net: cpsw: fix dual EMAC stall when connected to same switch
  xen-netfront: recreate queues correctly when reconnecting
  xen-netfront: fix oops when disconnected from backend
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c                 |   5
-rw-r--r--  net/bluetooth/hci_conn.c              |   7
-rw-r--r--  net/bluetooth/hci_event.c             |  17
-rw-r--r--  net/bluetooth/l2cap_core.c            |   8
-rw-r--r--  net/bluetooth/l2cap_sock.c            |   5
-rw-r--r--  net/bluetooth/mgmt.c                  | 104
-rw-r--r--  net/bluetooth/smp.c                   |   9
-rw-r--r--  net/core/dst.c                        |  16
-rw-r--r--  net/core/filter.c                     |  10
-rw-r--r--  net/core/skbuff.c                     |   2
-rw-r--r--  net/ipv4/ip_tunnel.c                  |  14
-rw-r--r--  net/ipv4/tcp_fastopen.c               |   2
-rw-r--r--  net/ipv4/tcp_input.c                  |   2
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c        |   2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c  |  20
-rw-r--r--  net/netfilter/nf_nat_core.c           |  35
-rw-r--r--  net/netfilter/nf_tables_api.c         |  11
-rw-r--r--  net/netfilter/nft_compat.c            |  18
-rw-r--r--  net/netfilter/nft_nat.c               |  14
-rw-r--r--  net/sctp/sysctl.c                     |  46
20 files changed, 219 insertions, 128 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 9012b1c922b6..75d427763992 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -114,8 +114,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_proto);
 
 static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
 {
-	if (skb_cow(skb, skb_headroom(skb)) < 0)
+	if (skb_cow(skb, skb_headroom(skb)) < 0) {
+		kfree_skb(skb);
 		return NULL;
+	}
+
 	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
 	skb->mac_header += VLAN_HLEN;
 	return skb;
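
The vlan_reorder_header() change above closes a leak noted in the pull summary: the function takes ownership of the skb, so when skb_cow() fails it must free the buffer itself before returning NULL, because the caller only sees NULL and has nothing left to free. A minimal userspace sketch of the same ownership rule (hypothetical buf type and helper name, standard C, not kernel code):

#include <stdlib.h>
#include <string.h>

struct buf {
	char *data;
	size_t len;
};

/* Helper that consumes the buffer it is handed: on any failure it must
 * release the buffer before returning NULL, otherwise the caller leaks it.
 */
static struct buf *grow_header(struct buf *b, size_t extra)
{
	char *bigger = realloc(b->data, b->len + extra);

	if (!bigger) {
		free(b->data);	/* error path still owns the buffer */
		free(b);
		return NULL;
	}
	memmove(bigger + extra, bigger, b->len);	/* make room up front */
	b->data = bigger;
	b->len += extra;
	return b;
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->data = calloc(16, 1);
	if (!b->data) {
		free(b);
		return 1;
	}
	b->len = 16;

	b = grow_header(b, 4);	/* on failure there is nothing left to free here */
	if (b) {
		free(b->data);
		free(b);
	}
	return 0;
}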
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 8671bc79a35b..ca01d1861854 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -610,11 +610,6 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
 	if (hci_update_random_address(req, false, &own_addr_type))
 		return;
 
-	/* Save the address type used for this connnection attempt so we able
-	 * to retrieve this information if we need it.
-	 */
-	conn->src_type = own_addr_type;
-
 	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
 	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
 	bacpy(&cp.peer_addr, &conn->dst);
@@ -894,7 +889,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 	/* If we're already encrypted set the REAUTH_PEND flag,
 	 * otherwise set the ENCRYPT_PEND.
 	 */
-	if (conn->key_type != 0xff)
+	if (conn->link_mode & HCI_LM_ENCRYPT)
 		set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
 	else
 		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 21e5913d12e0..640c54ec1bd2 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -48,6 +48,10 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
 
+	hci_dev_lock(hdev);
+	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+	hci_dev_unlock(hdev);
+
 	hci_conn_check_pending(hdev);
 }
 
@@ -3537,7 +3541,11 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 	cp.authentication = conn->auth_type;
 
 	/* Request MITM protection if our IO caps allow it
-	 * except for the no-bonding case
+	 * except for the no-bonding case.
+	 * conn->auth_type is not updated here since
+	 * that might cause the user confirmation to be
+	 * rejected in case the remote doesn't have the
+	 * IO capabilities for MITM.
 	 */
 	if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
 	    cp.authentication != HCI_AT_NO_BONDING)
@@ -3628,8 +3636,11 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
 
 	/* If we're not the initiators request authorization to
 	 * proceed from user space (mgmt_user_confirm with
-	 * confirm_hint set to 1). */
-	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
+	 * confirm_hint set to 1). The exception is if neither
+	 * side had MITM in which case we do auto-accept.
+	 */
+	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
+	    (loc_mitm || rem_mitm)) {
 		BT_DBG("Confirming auto-accept as acceptor");
 		confirm_hint = 1;
 		goto confirm;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 6eabbe05fe54..323f23cd2c37 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1663,7 +1663,13 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
 	kfree_skb(conn->rx_skb);
 
 	skb_queue_purge(&conn->pending_rx);
-	flush_work(&conn->pending_rx_work);
+
+	/* We can not call flush_work(&conn->pending_rx_work) here since we
+	 * might block if we are running on a worker from the same workqueue
+	 * pending_rx_work is waiting on.
+	 */
+	if (work_pending(&conn->pending_rx_work))
+		cancel_work_sync(&conn->pending_rx_work);
 
 	l2cap_unregister_all_users(conn);
 
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index ade3fb4c23bc..e1378693cc90 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -787,11 +787,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
 
 	/*change security for LE channels */
 	if (chan->scid == L2CAP_CID_ATT) {
-		if (!conn->hcon->out) {
-			err = -EINVAL;
-			break;
-		}
-
 		if (smp_conn_security(conn->hcon, sec.level))
 			break;
 		sk->sk_state = BT_CONFIG;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 0fce54412ffd..af8e0a6243b7 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1047,6 +1047,43 @@ static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
 	}
 }
 
+static void hci_stop_discovery(struct hci_request *req)
+{
+	struct hci_dev *hdev = req->hdev;
+	struct hci_cp_remote_name_req_cancel cp;
+	struct inquiry_entry *e;
+
+	switch (hdev->discovery.state) {
+	case DISCOVERY_FINDING:
+		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+		} else {
+			cancel_delayed_work(&hdev->le_scan_disable);
+			hci_req_add_le_scan_disable(req);
+		}
+
+		break;
+
+	case DISCOVERY_RESOLVING:
+		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
+						     NAME_PENDING);
+		if (!e)
+			return;
+
+		bacpy(&cp.bdaddr, &e->data.bdaddr);
+		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
+			    &cp);
+
+		break;
+
+	default:
+		/* Passive scanning */
+		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+			hci_req_add_le_scan_disable(req);
+		break;
+	}
+}
+
 static int clean_up_hci_state(struct hci_dev *hdev)
 {
 	struct hci_request req;
@@ -1063,9 +1100,7 @@ static int clean_up_hci_state(struct hci_dev *hdev)
 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
 		disable_advertising(&req);
 
-	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
-		hci_req_add_le_scan_disable(&req);
-	}
+	hci_stop_discovery(&req);
 
 	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
 		struct hci_cp_disconnect dc;
@@ -2996,8 +3031,13 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
 	}
 
 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
-		/* Continue with pairing via SMP */
+		/* Continue with pairing via SMP. The hdev lock must be
+		 * released as SMP may try to recquire it for crypto
+		 * purposes.
+		 */
+		hci_dev_unlock(hdev);
 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
+		hci_dev_lock(hdev);
 
 		if (!err)
 			err = cmd_complete(sk, hdev->id, mgmt_op,
@@ -3574,8 +3614,6 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
 {
 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
 	struct pending_cmd *cmd;
-	struct hci_cp_remote_name_req_cancel cp;
-	struct inquiry_entry *e;
 	struct hci_request req;
 	int err;
 
@@ -3605,52 +3643,22 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
 
 	hci_req_init(&req, hdev);
 
-	switch (hdev->discovery.state) {
-	case DISCOVERY_FINDING:
-		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
-			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
-		} else {
-			cancel_delayed_work(&hdev->le_scan_disable);
-
-			hci_req_add_le_scan_disable(&req);
-		}
-
-		break;
+	hci_stop_discovery(&req);
 
-	case DISCOVERY_RESOLVING:
-		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
-						     NAME_PENDING);
-		if (!e) {
-			mgmt_pending_remove(cmd);
-			err = cmd_complete(sk, hdev->id,
-					   MGMT_OP_STOP_DISCOVERY, 0,
-					   &mgmt_cp->type,
-					   sizeof(mgmt_cp->type));
-			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-			goto unlock;
-		}
-
-		bacpy(&cp.bdaddr, &e->data.bdaddr);
-		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
-			    &cp);
-
-		break;
-
-	default:
-		BT_DBG("unknown discovery state %u", hdev->discovery.state);
-
-		mgmt_pending_remove(cmd);
-		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
-				   MGMT_STATUS_FAILED, &mgmt_cp->type,
-				   sizeof(mgmt_cp->type));
+	err = hci_req_run(&req, stop_discovery_complete);
+	if (!err) {
+		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
 		goto unlock;
 	}
 
-	err = hci_req_run(&req, stop_discovery_complete);
-	if (err < 0)
-		mgmt_pending_remove(cmd);
-	else
-		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+	mgmt_pending_remove(cmd);
+
+	/* If no HCI commands were sent we're done */
+	if (err == -ENODATA) {
+		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
+				   &mgmt_cp->type, sizeof(mgmt_cp->type));
+		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+	}
 
 unlock:
 	hci_dev_unlock(hdev);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 3d1cc164557d..f2829a7932e2 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -544,7 +544,7 @@ static u8 smp_random(struct smp_chan *smp)
 		hci_le_start_enc(hcon, ediv, rand, stk);
 		hcon->enc_key_size = smp->enc_key_size;
 	} else {
-		u8 stk[16];
+		u8 stk[16], auth;
 		__le64 rand = 0;
 		__le16 ediv = 0;
 
@@ -556,8 +556,13 @@ static u8 smp_random(struct smp_chan *smp)
 		memset(stk + smp->enc_key_size, 0,
 		       SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
 
+		if (hcon->pending_sec_level == BT_SECURITY_HIGH)
+			auth = 1;
+		else
+			auth = 0;
+
 		hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
-			    HCI_SMP_STK_SLAVE, 0, stk, smp->enc_key_size,
+			    HCI_SMP_STK_SLAVE, auth, stk, smp->enc_key_size,
 			    ediv, rand);
 	}
 
diff --git a/net/core/dst.c b/net/core/dst.c
index 80d6286c8b62..a028409ee438 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -269,6 +269,15 @@ again:
 }
 EXPORT_SYMBOL(dst_destroy);
 
+static void dst_destroy_rcu(struct rcu_head *head)
+{
+	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
+
+	dst = dst_destroy(dst);
+	if (dst)
+		__dst_free(dst);
+}
+
 void dst_release(struct dst_entry *dst)
 {
 	if (dst) {
@@ -276,11 +285,8 @@ void dst_release(struct dst_entry *dst)
 
 		newrefcnt = atomic_dec_return(&dst->__refcnt);
 		WARN_ON(newrefcnt < 0);
-		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
-			dst = dst_destroy(dst);
-			if (dst)
-				__dst_free(dst);
-		}
+		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
+			call_rcu(&dst->rcu_head, dst_destroy_rcu);
 	}
 }
 EXPORT_SYMBOL(dst_release);
diff --git a/net/core/filter.c b/net/core/filter.c
index 735fad897496..1dbf6462f766 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -840,11 +840,11 @@ int sk_convert_filter(struct sock_filter *prog, int len,
 	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
 
-	if (len <= 0 || len >= BPF_MAXINSNS)
+	if (len <= 0 || len > BPF_MAXINSNS)
 		return -EINVAL;
 
 	if (new_prog) {
-		addrs = kzalloc(len * sizeof(*addrs), GFP_KERNEL);
+		addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
 		if (!addrs)
 			return -ENOMEM;
 	}
@@ -1101,7 +1101,7 @@ static int check_load_and_stores(struct sock_filter *filter, int flen)
 
 	BUILD_BUG_ON(BPF_MEMWORDS > 16);
 
-	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
+	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
 	if (!masks)
 		return -ENOMEM;
 
@@ -1382,7 +1382,7 @@ static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
 	fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
 	if (fp_new) {
 		*fp_new = *fp;
-		/* As we're kepping orig_prog in fp_new along,
+		/* As we're keeping orig_prog in fp_new along,
 		 * we need to make sure we're not evicting it
 		 * from the old fp.
 		 */
@@ -1524,8 +1524,8 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
 
 /**
  * sk_unattached_filter_create - create an unattached filter
- * @fprog: the filter program
  * @pfp: the unattached filter that is created
+ * @fprog: the filter program
  *
  * Create a filter independent of any socket. We first run some
  * sanity checks on it to make sure it does not explode on us later.
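
The kzalloc()/kmalloc() to kcalloc()/kmalloc_array() conversions above are about the multiplication: a caller-computed len * sizeof(*addrs) can silently wrap, while the two-argument allocators check the product first. A rough userspace equivalent of that check in standard C (alloc_array is an illustrative name, not a kernel API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Refuse to allocate when n * size would overflow, instead of returning a
 * buffer that is smaller than the caller believes it is.
 */
static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;		/* product would wrap around size_t */
	return calloc(n, size);		/* calloc() repeats the same check */
}

int main(void)
{
	unsigned int *addrs = alloc_array(4096, sizeof(*addrs));

	if (!addrs) {
		fprintf(stderr, "allocation refused or failed\n");
		return 1;
	}
	addrs[0] = 42;
	printf("%u\n", addrs[0]);
	free(addrs);
	return 0;
}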
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9cd5344fad73..c1a33033cbe2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2993,7 +2993,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 							    skb_put(nskb, len),
 							    len, 0);
 			SKB_GSO_CB(nskb)->csum_start =
-			    skb_headroom(nskb) + offset;
+			    skb_headroom(nskb) + doffset;
 			continue;
 		}
 
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 097b3e7c1e8f..54b6731dab55 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -73,12 +73,7 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
 {
 	struct dst_entry *old_dst;
 
-	if (dst) {
-		if (dst->flags & DST_NOCACHE)
-			dst = NULL;
-		else
-			dst_clone(dst);
-	}
+	dst_clone(dst);
 	old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
 	dst_release(old_dst);
 }
@@ -108,13 +103,14 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
 
 	rcu_read_lock();
 	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
+	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+		dst = NULL;
 	if (dst) {
 		if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
-			rcu_read_unlock();
 			tunnel_dst_reset(t);
-			return NULL;
+			dst_release(dst);
+			dst = NULL;
 		}
-		dst_hold(dst);
 	}
 	rcu_read_unlock();
 	return (struct rtable *)dst;
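
The tunnel_rtable_get() change replaces an unconditional dst_hold() with atomic_inc_not_zero(): under RCU the cached dst may already have dropped to a zero refcount and be queued for freeing, so a reader may only take a reference while the count is still non-zero. A small C11 sketch of that "increment unless zero" idiom (userspace, illustrative names only, not the kernel helpers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the object is still live (refcount > 0).
 * Once the count has reached zero the object is on its way to being
 * freed and must not be resurrected.
 */
static bool ref_get_unless_zero(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	while (old != 0) {
		/* On failure 'old' is reloaded with the current value. */
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int live = 1;	/* still referenced elsewhere */
	atomic_int dying = 0;	/* last reference already dropped */

	printf("live : %s\n", ref_get_unless_zero(&live) ? "got ref" : "skipped");
	printf("dying: %s\n", ref_get_unless_zero(&dying) ? "got ref" : "skipped");
	return 0;
}

Together with the dst_release() change earlier in this series, which defers the actual destruction to an RCU callback, this closes the window where a lockless reader could pick up a dst that another CPU was already tearing down.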
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 62e48cf84e60..9771563ab564 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -131,7 +131,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
 					      struct dst_entry *dst,
 					      struct request_sock *req)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcp_sock *tp;
 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 	struct sock *child;
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 40661fc1e233..b5c23756965a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1162,7 +1162,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 		unsigned int new_len = (pkt_len / mss) * mss;
 		if (!in_sack && new_len < pkt_len) {
 			new_len += mss;
-			if (new_len > skb->len)
+			if (new_len >= skb->len)
 				return 0;
 		}
 		pkt_len = new_len;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c42e83d2751c..581a6584ed0c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3778,6 +3778,7 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
 	cancel_delayed_work_sync(&ipvs->defense_work);
 	cancel_work_sync(&ipvs->defense_work.work);
 	unregister_net_sysctl_table(ipvs->sysctl_hdr);
+	ip_vs_stop_estimator(net, &ipvs->tot_stats);
 }
 
 #else
@@ -3840,7 +3841,6 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	ip_vs_trash_cleanup(net);
-	ip_vs_stop_estimator(net, &ipvs->tot_stats);
 	ip_vs_control_net_cleanup_sysctl(net);
 	remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
 	remove_proc_entry("ip_vs_stats", net->proc_net);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 58579634427d..300ed1eec729 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -597,6 +597,9 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
 #ifdef CONFIG_NF_CONNTRACK_MARK
 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
 #endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+#endif
 	       + ctnetlink_proto_size(ct)
 	       + ctnetlink_label_size(ct)
 	       ;
@@ -1150,7 +1153,7 @@ static int ctnetlink_done_list(struct netlink_callback *cb)
 static int
 ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
 {
-	struct nf_conn *ct, *last = NULL;
+	struct nf_conn *ct, *last;
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
@@ -1163,8 +1166,7 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
 	if (cb->args[2])
 		return 0;
 
-	if (cb->args[0] == nr_cpu_ids)
-		return 0;
+	last = (struct nf_conn *)cb->args[1];
 
 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
 		struct ct_pcpu *pcpu;
@@ -1174,7 +1176,6 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
 
 		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
 		spin_lock_bh(&pcpu->lock);
-		last = (struct nf_conn *)cb->args[1];
 		list = dying ? &pcpu->dying : &pcpu->unconfirmed;
 restart:
 		hlist_nulls_for_each_entry(h, n, list, hnnode) {
@@ -1193,7 +1194,9 @@ restart:
 						  ct);
 			rcu_read_unlock();
 			if (res < 0) {
-				nf_conntrack_get(&ct->ct_general);
+				if (!atomic_inc_not_zero(&ct->ct_general.use))
+					continue;
+				cb->args[0] = cpu;
 				cb->args[1] = (unsigned long)ct;
 				spin_unlock_bh(&pcpu->lock);
 				goto out;
@@ -1202,10 +1205,10 @@ restart:
 		if (cb->args[1]) {
 			cb->args[1] = 0;
 			goto restart;
-		} else
-			cb->args[2] = 1;
+		}
 		spin_unlock_bh(&pcpu->lock);
 	}
+	cb->args[2] = 1;
 out:
 	if (last)
 		nf_ct_put(last);
@@ -2040,6 +2043,9 @@ ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
 #ifdef CONFIG_NF_CONNTRACK_MARK
 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
 #endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+#endif
 	       + ctnetlink_proto_size(ct)
 	       ;
 }
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 09096a670c45..a49907b1dabc 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -525,6 +525,39 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
 	return i->status & IPS_NAT_MASK ? 1 : 0;
 }
 
+static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
+{
+	struct nf_conn_nat *nat = nfct_nat(ct);
+
+	if (nf_nat_proto_remove(ct, data))
+		return 1;
+
+	if (!nat || !nat->ct)
+		return 0;
+
+	/* This netns is being destroyed, and conntrack has nat null binding.
+	 * Remove it from bysource hash, as the table will be freed soon.
+	 *
+	 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack()
+	 * will delete entry from already-freed table.
+	 */
+	if (!del_timer(&ct->timeout))
+		return 1;
+
+	spin_lock_bh(&nf_nat_lock);
+	hlist_del_rcu(&nat->bysource);
+	ct->status &= ~IPS_NAT_DONE_MASK;
+	nat->ct = NULL;
+	spin_unlock_bh(&nf_nat_lock);
+
+	add_timer(&ct->timeout);
+
+	/* don't delete conntrack. Although that would make things a lot
+	 * simpler, we'd end up flushing all conntracks on nat rmmod.
+	 */
+	return 0;
+}
+
 static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
 {
 	struct nf_nat_proto_clean clean = {
@@ -795,7 +828,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
 	struct nf_nat_proto_clean clean = {};
 
-	nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0);
+	nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
 	synchronize_rcu();
 	nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 624e083125b9..ab4566cfcbe4 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1730,6 +1730,9 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 		if (!create || nlh->nlmsg_flags & NLM_F_REPLACE)
 			return -EINVAL;
 		handle = nf_tables_alloc_handle(table);
+
+		if (chain->use == UINT_MAX)
+			return -EOVERFLOW;
 	}
 
 	if (nla[NFTA_RULE_POSITION]) {
@@ -1789,14 +1792,15 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 
 	if (nlh->nlmsg_flags & NLM_F_REPLACE) {
 		if (nft_rule_is_active_next(net, old_rule)) {
-			trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE,
+			trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
 						   old_rule);
 			if (trans == NULL) {
 				err = -ENOMEM;
 				goto err2;
 			}
 			nft_rule_disactivate_next(net, old_rule);
-			list_add_tail(&rule->list, &old_rule->list);
+			chain->use--;
+			list_add_tail_rcu(&rule->list, &old_rule->list);
 		} else {
 			err = -ENOENT;
 			goto err2;
@@ -1826,6 +1830,7 @@ err3:
 	list_del_rcu(&nft_trans_rule(trans)->list);
 	nft_rule_clear(net, nft_trans_rule(trans));
 	nft_trans_destroy(trans);
+	chain->use++;
 	}
 err2:
 	nf_tables_rule_destroy(&ctx, rule);
@@ -2845,7 +2850,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 		goto nla_put_failure;
 
 	nfmsg = nlmsg_data(nlh);
-	nfmsg->nfgen_family = NFPROTO_UNSPEC;
+	nfmsg->nfgen_family = ctx.afi->family;
 	nfmsg->version = NFNETLINK_V0;
 	nfmsg->res_id = 0;
 
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 8a779be832fb..1840989092ed 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -195,6 +195,15 @@ static void
 nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 {
 	struct xt_target *target = expr->ops->data;
+	void *info = nft_expr_priv(expr);
+	struct xt_tgdtor_param par;
+
+	par.net = ctx->net;
+	par.target = target;
+	par.targinfo = info;
+	par.family = ctx->afi->family;
+	if (par.target->destroy != NULL)
+		par.target->destroy(&par);
 
 	module_put(target->me);
 }
@@ -382,6 +391,15 @@ static void
 nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 {
 	struct xt_match *match = expr->ops->data;
+	void *info = nft_expr_priv(expr);
+	struct xt_mtdtor_param par;
+
+	par.net = ctx->net;
+	par.match = match;
+	par.matchinfo = info;
+	par.family = ctx->afi->family;
+	if (par.match->destroy != NULL)
+		par.match->destroy(&par);
 
 	module_put(match->me);
 }
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index a0195d28bcfc..79ff58cd36dc 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -175,12 +175,14 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
 	if (nla_put_be32(skb,
 			 NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max)))
 		goto nla_put_failure;
-	if (nla_put_be32(skb,
-			 NFTA_NAT_REG_PROTO_MIN, htonl(priv->sreg_proto_min)))
-		goto nla_put_failure;
-	if (nla_put_be32(skb,
-			 NFTA_NAT_REG_PROTO_MAX, htonl(priv->sreg_proto_max)))
-		goto nla_put_failure;
+	if (priv->sreg_proto_min) {
+		if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN,
+				 htonl(priv->sreg_proto_min)))
+			goto nla_put_failure;
+		if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX,
+				 htonl(priv->sreg_proto_max)))
+			goto nla_put_failure;
+	}
 	return 0;
 
 nla_put_failure:
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index dcb19592761e..12c7e01c2677 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -321,41 +321,40 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
 				 loff_t *ppos)
 {
 	struct net *net = current->nsproxy->net_ns;
-	char tmp[8];
 	struct ctl_table tbl;
-	int ret;
-	int changed = 0;
+	bool changed = false;
 	char *none = "none";
+	char tmp[8];
+	int ret;
 
 	memset(&tbl, 0, sizeof(struct ctl_table));
 
 	if (write) {
 		tbl.data = tmp;
-		tbl.maxlen = 8;
+		tbl.maxlen = sizeof(tmp);
 	} else {
 		tbl.data = net->sctp.sctp_hmac_alg ? : none;
 		tbl.maxlen = strlen(tbl.data);
 	}
-	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
 
-	if (write) {
+	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+	if (write && ret == 0) {
 #ifdef CONFIG_CRYPTO_MD5
 		if (!strncmp(tmp, "md5", 3)) {
 			net->sctp.sctp_hmac_alg = "md5";
-			changed = 1;
+			changed = true;
 		}
 #endif
 #ifdef CONFIG_CRYPTO_SHA1
 		if (!strncmp(tmp, "sha1", 4)) {
 			net->sctp.sctp_hmac_alg = "sha1";
-			changed = 1;
+			changed = true;
 		}
 #endif
 		if (!strncmp(tmp, "none", 4)) {
 			net->sctp.sctp_hmac_alg = NULL;
-			changed = 1;
+			changed = true;
 		}
-
 		if (!changed)
 			ret = -EINVAL;
 	}
@@ -368,11 +367,10 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
 				loff_t *ppos)
 {
 	struct net *net = current->nsproxy->net_ns;
-	int new_value;
-	struct ctl_table tbl;
 	unsigned int min = *(unsigned int *) ctl->extra1;
 	unsigned int max = *(unsigned int *) ctl->extra2;
-	int ret;
+	struct ctl_table tbl;
+	int ret, new_value;
 
 	memset(&tbl, 0, sizeof(struct ctl_table));
 	tbl.maxlen = sizeof(unsigned int);
@@ -381,12 +379,15 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
 		tbl.data = &new_value;
 	else
 		tbl.data = &net->sctp.rto_min;
+
 	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-	if (write) {
-		if (ret || new_value > max || new_value < min)
+	if (write && ret == 0) {
+		if (new_value > max || new_value < min)
 			return -EINVAL;
+
 		net->sctp.rto_min = new_value;
 	}
+
 	return ret;
 }
 
@@ -395,11 +396,10 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
 				loff_t *ppos)
 {
 	struct net *net = current->nsproxy->net_ns;
-	int new_value;
-	struct ctl_table tbl;
 	unsigned int min = *(unsigned int *) ctl->extra1;
 	unsigned int max = *(unsigned int *) ctl->extra2;
-	int ret;
+	struct ctl_table tbl;
+	int ret, new_value;
 
 	memset(&tbl, 0, sizeof(struct ctl_table));
 	tbl.maxlen = sizeof(unsigned int);
@@ -408,12 +408,15 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
 		tbl.data = &new_value;
 	else
 		tbl.data = &net->sctp.rto_max;
+
 	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-	if (write) {
-		if (ret || new_value > max || new_value < min)
+	if (write && ret == 0) {
+		if (new_value > max || new_value < min)
 			return -EINVAL;
+
 		net->sctp.rto_max = new_value;
 	}
+
 	return ret;
 }
 
@@ -444,8 +447,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
 		tbl.data = &net->sctp.auth_enable;
 
 	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-
-	if (write) {
+	if (write && ret == 0) {
 		struct sock *sk = net->sctp.ctl_sock;
 
 		net->sctp.auth_enable = new_value;