diff options
author | Pablo Neira Ayuso <pablo@netfilter.org> | 2015-04-08 11:40:17 -0400 |
---|---|---|
committer | Pablo Neira Ayuso <pablo@netfilter.org> | 2015-04-08 12:30:21 -0400 |
commit | aadd51aa71f8d013c818a312bb2a0c5714830dbc (patch) | |
tree | 28ca52d17183cb1d732b1324fce4f7b5d6b3dfc0 /net | |
parent | 68e942e88add0ac8576fc8397e86495edf3dcea7 (diff) | |
parent | ee90b81203a91d4e5385622811ee7872b5bcfe76 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Resolve conflicts between 5888b93 ("Merge branch 'nf-hook-compress'") and
Florian Westphal's br_netfilter work.
Conflicts:
net/bridge/br_netfilter.c
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Diffstat (limited to 'net')
171 files changed, 2047 insertions, 1639 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 8b5ab9033b41..01d7ba840df8 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -538,7 +538,6 @@ static int vlan_dev_init(struct net_device *dev) | |||
538 | /* IFF_BROADCAST|IFF_MULTICAST; ??? */ | 538 | /* IFF_BROADCAST|IFF_MULTICAST; ??? */ |
539 | dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | | 539 | dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | |
540 | IFF_MASTER | IFF_SLAVE); | 540 | IFF_MASTER | IFF_SLAVE); |
541 | dev->iflink = real_dev->ifindex; | ||
542 | dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) | | 541 | dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) | |
543 | (1<<__LINK_STATE_DORMANT))) | | 542 | (1<<__LINK_STATE_DORMANT))) | |
544 | (1<<__LINK_STATE_PRESENT); | 543 | (1<<__LINK_STATE_PRESENT); |
@@ -733,6 +732,13 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev) | |||
733 | } | 732 | } |
734 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | 733 | #endif /* CONFIG_NET_POLL_CONTROLLER */ |
735 | 734 | ||
735 | static int vlan_dev_get_iflink(const struct net_device *dev) | ||
736 | { | ||
737 | struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; | ||
738 | |||
739 | return real_dev->ifindex; | ||
740 | } | ||
741 | |||
736 | static const struct ethtool_ops vlan_ethtool_ops = { | 742 | static const struct ethtool_ops vlan_ethtool_ops = { |
737 | .get_settings = vlan_ethtool_get_settings, | 743 | .get_settings = vlan_ethtool_get_settings, |
738 | .get_drvinfo = vlan_ethtool_get_drvinfo, | 744 | .get_drvinfo = vlan_ethtool_get_drvinfo, |
@@ -769,6 +775,7 @@ static const struct net_device_ops vlan_netdev_ops = { | |||
769 | #endif | 775 | #endif |
770 | .ndo_fix_features = vlan_dev_fix_features, | 776 | .ndo_fix_features = vlan_dev_fix_features, |
771 | .ndo_get_lock_subclass = vlan_dev_get_lock_subclass, | 777 | .ndo_get_lock_subclass = vlan_dev_get_lock_subclass, |
778 | .ndo_get_iflink = vlan_dev_get_iflink, | ||
772 | }; | 779 | }; |
773 | 780 | ||
774 | static void vlan_dev_free(struct net_device *dev) | 781 | static void vlan_dev_free(struct net_device *dev) |
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index fbda6b54baff..baf1f9843f2c 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c | |||
@@ -83,11 +83,12 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev) | |||
83 | return true; | 83 | return true; |
84 | 84 | ||
85 | /* no more parents..stop recursion */ | 85 | /* no more parents..stop recursion */ |
86 | if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex) | 86 | if (dev_get_iflink(net_dev) == 0 || |
87 | dev_get_iflink(net_dev) == net_dev->ifindex) | ||
87 | return false; | 88 | return false; |
88 | 89 | ||
89 | /* recurse over the parent device */ | 90 | /* recurse over the parent device */ |
90 | parent_dev = __dev_get_by_index(&init_net, net_dev->iflink); | 91 | parent_dev = __dev_get_by_index(&init_net, dev_get_iflink(net_dev)); |
91 | /* if we got a NULL parent_dev there is something broken.. */ | 92 | /* if we got a NULL parent_dev there is something broken.. */ |
92 | if (WARN(!parent_dev, "Cannot find parent device")) | 93 | if (WARN(!parent_dev, "Cannot find parent device")) |
93 | return false; | 94 | return false; |
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h index 5a5b16f365e9..40854c99bc1e 100644 --- a/net/bluetooth/bnep/bnep.h +++ b/net/bluetooth/bnep/bnep.h | |||
@@ -111,6 +111,10 @@ struct bnep_ext_hdr { | |||
111 | #define BNEPCONNDEL _IOW('B', 201, int) | 111 | #define BNEPCONNDEL _IOW('B', 201, int) |
112 | #define BNEPGETCONNLIST _IOR('B', 210, int) | 112 | #define BNEPGETCONNLIST _IOR('B', 210, int) |
113 | #define BNEPGETCONNINFO _IOR('B', 211, int) | 113 | #define BNEPGETCONNINFO _IOR('B', 211, int) |
114 | #define BNEPGETSUPPFEAT _IOR('B', 212, int) | ||
115 | |||
116 | #define BNEP_SETUP_RESPONSE 0 | ||
117 | #define BNEP_SETUP_RSP_SENT 10 | ||
114 | 118 | ||
115 | struct bnep_connadd_req { | 119 | struct bnep_connadd_req { |
116 | int sock; /* Connected socket */ | 120 | int sock; /* Connected socket */ |
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index 05f57e491ccb..1641367e54ca 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c | |||
@@ -231,7 +231,14 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len) | |||
231 | break; | 231 | break; |
232 | 232 | ||
233 | case BNEP_SETUP_CONN_REQ: | 233 | case BNEP_SETUP_CONN_REQ: |
234 | err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP, BNEP_CONN_NOT_ALLOWED); | 234 | /* Successful response should be sent only once */ |
235 | if (test_bit(BNEP_SETUP_RESPONSE, &s->flags) && | ||
236 | !test_and_set_bit(BNEP_SETUP_RSP_SENT, &s->flags)) | ||
237 | err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP, | ||
238 | BNEP_SUCCESS); | ||
239 | else | ||
240 | err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP, | ||
241 | BNEP_CONN_NOT_ALLOWED); | ||
235 | break; | 242 | break; |
236 | 243 | ||
237 | default: { | 244 | default: { |
@@ -239,7 +246,7 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len) | |||
239 | pkt[0] = BNEP_CONTROL; | 246 | pkt[0] = BNEP_CONTROL; |
240 | pkt[1] = BNEP_CMD_NOT_UNDERSTOOD; | 247 | pkt[1] = BNEP_CMD_NOT_UNDERSTOOD; |
241 | pkt[2] = cmd; | 248 | pkt[2] = cmd; |
242 | bnep_send(s, pkt, sizeof(pkt)); | 249 | err = bnep_send(s, pkt, sizeof(pkt)); |
243 | } | 250 | } |
244 | break; | 251 | break; |
245 | } | 252 | } |
@@ -292,29 +299,55 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) | |||
292 | { | 299 | { |
293 | struct net_device *dev = s->dev; | 300 | struct net_device *dev = s->dev; |
294 | struct sk_buff *nskb; | 301 | struct sk_buff *nskb; |
295 | u8 type; | 302 | u8 type, ctrl_type; |
296 | 303 | ||
297 | dev->stats.rx_bytes += skb->len; | 304 | dev->stats.rx_bytes += skb->len; |
298 | 305 | ||
299 | type = *(u8 *) skb->data; | 306 | type = *(u8 *) skb->data; |
300 | skb_pull(skb, 1); | 307 | skb_pull(skb, 1); |
308 | ctrl_type = *(u8 *)skb->data; | ||
301 | 309 | ||
302 | if ((type & BNEP_TYPE_MASK) >= sizeof(__bnep_rx_hlen)) | 310 | if ((type & BNEP_TYPE_MASK) >= sizeof(__bnep_rx_hlen)) |
303 | goto badframe; | 311 | goto badframe; |
304 | 312 | ||
305 | if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) { | 313 | if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) { |
306 | bnep_rx_control(s, skb->data, skb->len); | 314 | if (bnep_rx_control(s, skb->data, skb->len) < 0) { |
307 | kfree_skb(skb); | 315 | dev->stats.tx_errors++; |
308 | return 0; | 316 | kfree_skb(skb); |
309 | } | 317 | return 0; |
318 | } | ||
310 | 319 | ||
311 | skb_reset_mac_header(skb); | 320 | if (!(type & BNEP_EXT_HEADER)) { |
321 | kfree_skb(skb); | ||
322 | return 0; | ||
323 | } | ||
312 | 324 | ||
313 | /* Verify and pull out header */ | 325 | /* Verify and pull ctrl message since it's already processed */ |
314 | if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK])) | 326 | switch (ctrl_type) { |
315 | goto badframe; | 327 | case BNEP_SETUP_CONN_REQ: |
328 | /* Pull: ctrl type (1 b), len (1 b), data (len bytes) */ | ||
329 | if (!skb_pull(skb, 2 + *(u8 *)(skb->data + 1) * 2)) | ||
330 | goto badframe; | ||
331 | break; | ||
332 | case BNEP_FILTER_MULTI_ADDR_SET: | ||
333 | case BNEP_FILTER_NET_TYPE_SET: | ||
334 | /* Pull: ctrl type (1 b), len (2 b), data (len bytes) */ | ||
335 | if (!skb_pull(skb, 3 + *(u16 *)(skb->data + 1) * 2)) | ||
336 | goto badframe; | ||
337 | break; | ||
338 | default: | ||
339 | kfree_skb(skb); | ||
340 | return 0; | ||
341 | } | ||
342 | } else { | ||
343 | skb_reset_mac_header(skb); | ||
316 | 344 | ||
317 | s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2)); | 345 | /* Verify and pull out header */ |
346 | if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK])) | ||
347 | goto badframe; | ||
348 | |||
349 | s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2)); | ||
350 | } | ||
318 | 351 | ||
319 | if (type & BNEP_EXT_HEADER) { | 352 | if (type & BNEP_EXT_HEADER) { |
320 | if (bnep_rx_extension(s, skb) < 0) | 353 | if (bnep_rx_extension(s, skb) < 0) |
@@ -525,6 +558,7 @@ static struct device_type bnep_type = { | |||
525 | 558 | ||
526 | int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock) | 559 | int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock) |
527 | { | 560 | { |
561 | u32 valid_flags = BIT(BNEP_SETUP_RESPONSE); | ||
528 | struct net_device *dev; | 562 | struct net_device *dev; |
529 | struct bnep_session *s, *ss; | 563 | struct bnep_session *s, *ss; |
530 | u8 dst[ETH_ALEN], src[ETH_ALEN]; | 564 | u8 dst[ETH_ALEN], src[ETH_ALEN]; |
@@ -535,6 +569,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock) | |||
535 | if (!l2cap_is_socket(sock)) | 569 | if (!l2cap_is_socket(sock)) |
536 | return -EBADFD; | 570 | return -EBADFD; |
537 | 571 | ||
572 | if (req->flags & ~valid_flags) | ||
573 | return -EINVAL; | ||
574 | |||
538 | baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst); | 575 | baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst); |
539 | baswap((void *) src, &l2cap_pi(sock->sk)->chan->src); | 576 | baswap((void *) src, &l2cap_pi(sock->sk)->chan->src); |
540 | 577 | ||
@@ -566,6 +603,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock) | |||
566 | s->sock = sock; | 603 | s->sock = sock; |
567 | s->role = req->role; | 604 | s->role = req->role; |
568 | s->state = BT_CONNECTED; | 605 | s->state = BT_CONNECTED; |
606 | s->flags = req->flags; | ||
569 | 607 | ||
570 | s->msg.msg_flags = MSG_NOSIGNAL; | 608 | s->msg.msg_flags = MSG_NOSIGNAL; |
571 | 609 | ||
@@ -611,11 +649,15 @@ failed: | |||
611 | 649 | ||
612 | int bnep_del_connection(struct bnep_conndel_req *req) | 650 | int bnep_del_connection(struct bnep_conndel_req *req) |
613 | { | 651 | { |
652 | u32 valid_flags = 0; | ||
614 | struct bnep_session *s; | 653 | struct bnep_session *s; |
615 | int err = 0; | 654 | int err = 0; |
616 | 655 | ||
617 | BT_DBG(""); | 656 | BT_DBG(""); |
618 | 657 | ||
658 | if (req->flags & ~valid_flags) | ||
659 | return -EINVAL; | ||
660 | |||
619 | down_read(&bnep_session_sem); | 661 | down_read(&bnep_session_sem); |
620 | 662 | ||
621 | s = __bnep_get_session(req->dst); | 663 | s = __bnep_get_session(req->dst); |
@@ -631,10 +673,12 @@ int bnep_del_connection(struct bnep_conndel_req *req) | |||
631 | 673 | ||
632 | static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s) | 674 | static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s) |
633 | { | 675 | { |
676 | u32 valid_flags = BIT(BNEP_SETUP_RESPONSE); | ||
677 | |||
634 | memset(ci, 0, sizeof(*ci)); | 678 | memset(ci, 0, sizeof(*ci)); |
635 | memcpy(ci->dst, s->eh.h_source, ETH_ALEN); | 679 | memcpy(ci->dst, s->eh.h_source, ETH_ALEN); |
636 | strcpy(ci->device, s->dev->name); | 680 | strcpy(ci->device, s->dev->name); |
637 | ci->flags = s->flags; | 681 | ci->flags = s->flags & valid_flags; |
638 | ci->state = s->state; | 682 | ci->state = s->state; |
639 | ci->role = s->role; | 683 | ci->role = s->role; |
640 | } | 684 | } |
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c index 5f051290daba..bde2bdd9e929 100644 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c | |||
@@ -57,6 +57,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long | |||
57 | struct bnep_conninfo ci; | 57 | struct bnep_conninfo ci; |
58 | struct socket *nsock; | 58 | struct socket *nsock; |
59 | void __user *argp = (void __user *)arg; | 59 | void __user *argp = (void __user *)arg; |
60 | __u32 supp_feat = BIT(BNEP_SETUP_RESPONSE); | ||
60 | int err; | 61 | int err; |
61 | 62 | ||
62 | BT_DBG("cmd %x arg %lx", cmd, arg); | 63 | BT_DBG("cmd %x arg %lx", cmd, arg); |
@@ -120,6 +121,12 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long | |||
120 | 121 | ||
121 | return err; | 122 | return err; |
122 | 123 | ||
124 | case BNEPGETSUPPFEAT: | ||
125 | if (copy_to_user(argp, &supp_feat, sizeof(supp_feat))) | ||
126 | return -EFAULT; | ||
127 | |||
128 | return 0; | ||
129 | |||
123 | default: | 130 | default: |
124 | return -EINVAL; | 131 | return -EINVAL; |
125 | } | 132 | } |
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c index 75bd2c42e3e7..b0c6c6af76ef 100644 --- a/net/bluetooth/cmtp/capi.c +++ b/net/bluetooth/cmtp/capi.c | |||
@@ -333,7 +333,7 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb) | |||
333 | return; | 333 | return; |
334 | } | 334 | } |
335 | 335 | ||
336 | if (session->flags & (1 << CMTP_LOOPBACK)) { | 336 | if (session->flags & BIT(CMTP_LOOPBACK)) { |
337 | kfree_skb(skb); | 337 | kfree_skb(skb); |
338 | return; | 338 | return; |
339 | } | 339 | } |
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 278a194e6af4..298ed37010e6 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c | |||
@@ -75,10 +75,11 @@ static void __cmtp_unlink_session(struct cmtp_session *session) | |||
75 | 75 | ||
76 | static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci) | 76 | static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci) |
77 | { | 77 | { |
78 | u32 valid_flags = BIT(CMTP_LOOPBACK); | ||
78 | memset(ci, 0, sizeof(*ci)); | 79 | memset(ci, 0, sizeof(*ci)); |
79 | bacpy(&ci->bdaddr, &session->bdaddr); | 80 | bacpy(&ci->bdaddr, &session->bdaddr); |
80 | 81 | ||
81 | ci->flags = session->flags; | 82 | ci->flags = session->flags & valid_flags; |
82 | ci->state = session->state; | 83 | ci->state = session->state; |
83 | 84 | ||
84 | ci->num = session->num; | 85 | ci->num = session->num; |
@@ -313,7 +314,7 @@ static int cmtp_session(void *arg) | |||
313 | 314 | ||
314 | down_write(&cmtp_session_sem); | 315 | down_write(&cmtp_session_sem); |
315 | 316 | ||
316 | if (!(session->flags & (1 << CMTP_LOOPBACK))) | 317 | if (!(session->flags & BIT(CMTP_LOOPBACK))) |
317 | cmtp_detach_device(session); | 318 | cmtp_detach_device(session); |
318 | 319 | ||
319 | fput(session->sock->file); | 320 | fput(session->sock->file); |
@@ -329,6 +330,7 @@ static int cmtp_session(void *arg) | |||
329 | 330 | ||
330 | int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) | 331 | int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) |
331 | { | 332 | { |
333 | u32 valid_flags = BIT(CMTP_LOOPBACK); | ||
332 | struct cmtp_session *session, *s; | 334 | struct cmtp_session *session, *s; |
333 | int i, err; | 335 | int i, err; |
334 | 336 | ||
@@ -337,6 +339,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) | |||
337 | if (!l2cap_is_socket(sock)) | 339 | if (!l2cap_is_socket(sock)) |
338 | return -EBADFD; | 340 | return -EBADFD; |
339 | 341 | ||
342 | if (req->flags & ~valid_flags) | ||
343 | return -EINVAL; | ||
344 | |||
340 | session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL); | 345 | session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL); |
341 | if (!session) | 346 | if (!session) |
342 | return -ENOMEM; | 347 | return -ENOMEM; |
@@ -385,7 +390,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) | |||
385 | goto unlink; | 390 | goto unlink; |
386 | } | 391 | } |
387 | 392 | ||
388 | if (!(session->flags & (1 << CMTP_LOOPBACK))) { | 393 | if (!(session->flags & BIT(CMTP_LOOPBACK))) { |
389 | err = cmtp_attach_device(session); | 394 | err = cmtp_attach_device(session); |
390 | if (err < 0) { | 395 | if (err < 0) { |
391 | atomic_inc(&session->terminate); | 396 | atomic_inc(&session->terminate); |
@@ -409,11 +414,15 @@ failed: | |||
409 | 414 | ||
410 | int cmtp_del_connection(struct cmtp_conndel_req *req) | 415 | int cmtp_del_connection(struct cmtp_conndel_req *req) |
411 | { | 416 | { |
417 | u32 valid_flags = 0; | ||
412 | struct cmtp_session *session; | 418 | struct cmtp_session *session; |
413 | int err = 0; | 419 | int err = 0; |
414 | 420 | ||
415 | BT_DBG(""); | 421 | BT_DBG(""); |
416 | 422 | ||
423 | if (req->flags & ~valid_flags) | ||
424 | return -EINVAL; | ||
425 | |||
417 | down_read(&cmtp_session_sem); | 426 | down_read(&cmtp_session_sem); |
418 | 427 | ||
419 | session = __cmtp_get_session(&req->bdaddr); | 428 | session = __cmtp_get_session(&req->bdaddr); |
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index e6bfeb7b4415..46b114c0140b 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c | |||
@@ -141,13 +141,16 @@ static const struct file_operations dut_mode_fops = { | |||
141 | 141 | ||
142 | /* ---- HCI requests ---- */ | 142 | /* ---- HCI requests ---- */ |
143 | 143 | ||
144 | static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode) | 144 | static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, |
145 | struct sk_buff *skb) | ||
145 | { | 146 | { |
146 | BT_DBG("%s result 0x%2.2x", hdev->name, result); | 147 | BT_DBG("%s result 0x%2.2x", hdev->name, result); |
147 | 148 | ||
148 | if (hdev->req_status == HCI_REQ_PEND) { | 149 | if (hdev->req_status == HCI_REQ_PEND) { |
149 | hdev->req_result = result; | 150 | hdev->req_result = result; |
150 | hdev->req_status = HCI_REQ_DONE; | 151 | hdev->req_status = HCI_REQ_DONE; |
152 | if (skb) | ||
153 | hdev->req_skb = skb_get(skb); | ||
151 | wake_up_interruptible(&hdev->req_wait_q); | 154 | wake_up_interruptible(&hdev->req_wait_q); |
152 | } | 155 | } |
153 | } | 156 | } |
@@ -163,66 +166,12 @@ static void hci_req_cancel(struct hci_dev *hdev, int err) | |||
163 | } | 166 | } |
164 | } | 167 | } |
165 | 168 | ||
166 | static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, | ||
167 | u8 event) | ||
168 | { | ||
169 | struct hci_ev_cmd_complete *ev; | ||
170 | struct hci_event_hdr *hdr; | ||
171 | struct sk_buff *skb; | ||
172 | |||
173 | hci_dev_lock(hdev); | ||
174 | |||
175 | skb = hdev->recv_evt; | ||
176 | hdev->recv_evt = NULL; | ||
177 | |||
178 | hci_dev_unlock(hdev); | ||
179 | |||
180 | if (!skb) | ||
181 | return ERR_PTR(-ENODATA); | ||
182 | |||
183 | if (skb->len < sizeof(*hdr)) { | ||
184 | BT_ERR("Too short HCI event"); | ||
185 | goto failed; | ||
186 | } | ||
187 | |||
188 | hdr = (void *) skb->data; | ||
189 | skb_pull(skb, HCI_EVENT_HDR_SIZE); | ||
190 | |||
191 | if (event) { | ||
192 | if (hdr->evt != event) | ||
193 | goto failed; | ||
194 | return skb; | ||
195 | } | ||
196 | |||
197 | if (hdr->evt != HCI_EV_CMD_COMPLETE) { | ||
198 | BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt); | ||
199 | goto failed; | ||
200 | } | ||
201 | |||
202 | if (skb->len < sizeof(*ev)) { | ||
203 | BT_ERR("Too short cmd_complete event"); | ||
204 | goto failed; | ||
205 | } | ||
206 | |||
207 | ev = (void *) skb->data; | ||
208 | skb_pull(skb, sizeof(*ev)); | ||
209 | |||
210 | if (opcode == __le16_to_cpu(ev->opcode)) | ||
211 | return skb; | ||
212 | |||
213 | BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode, | ||
214 | __le16_to_cpu(ev->opcode)); | ||
215 | |||
216 | failed: | ||
217 | kfree_skb(skb); | ||
218 | return ERR_PTR(-ENODATA); | ||
219 | } | ||
220 | |||
221 | struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, | 169 | struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, |
222 | const void *param, u8 event, u32 timeout) | 170 | const void *param, u8 event, u32 timeout) |
223 | { | 171 | { |
224 | DECLARE_WAITQUEUE(wait, current); | 172 | DECLARE_WAITQUEUE(wait, current); |
225 | struct hci_request req; | 173 | struct hci_request req; |
174 | struct sk_buff *skb; | ||
226 | int err = 0; | 175 | int err = 0; |
227 | 176 | ||
228 | BT_DBG("%s", hdev->name); | 177 | BT_DBG("%s", hdev->name); |
@@ -236,7 +185,7 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, | |||
236 | add_wait_queue(&hdev->req_wait_q, &wait); | 185 | add_wait_queue(&hdev->req_wait_q, &wait); |
237 | set_current_state(TASK_INTERRUPTIBLE); | 186 | set_current_state(TASK_INTERRUPTIBLE); |
238 | 187 | ||
239 | err = hci_req_run(&req, hci_req_sync_complete); | 188 | err = hci_req_run_skb(&req, hci_req_sync_complete); |
240 | if (err < 0) { | 189 | if (err < 0) { |
241 | remove_wait_queue(&hdev->req_wait_q, &wait); | 190 | remove_wait_queue(&hdev->req_wait_q, &wait); |
242 | set_current_state(TASK_RUNNING); | 191 | set_current_state(TASK_RUNNING); |
@@ -265,13 +214,20 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, | |||
265 | } | 214 | } |
266 | 215 | ||
267 | hdev->req_status = hdev->req_result = 0; | 216 | hdev->req_status = hdev->req_result = 0; |
217 | skb = hdev->req_skb; | ||
218 | hdev->req_skb = NULL; | ||
268 | 219 | ||
269 | BT_DBG("%s end: err %d", hdev->name, err); | 220 | BT_DBG("%s end: err %d", hdev->name, err); |
270 | 221 | ||
271 | if (err < 0) | 222 | if (err < 0) { |
223 | kfree_skb(skb); | ||
272 | return ERR_PTR(err); | 224 | return ERR_PTR(err); |
225 | } | ||
226 | |||
227 | if (!skb) | ||
228 | return ERR_PTR(-ENODATA); | ||
273 | 229 | ||
274 | return hci_get_cmd_complete(hdev, opcode, event); | 230 | return skb; |
275 | } | 231 | } |
276 | EXPORT_SYMBOL(__hci_cmd_sync_ev); | 232 | EXPORT_SYMBOL(__hci_cmd_sync_ev); |
277 | 233 | ||
@@ -303,7 +259,7 @@ static int __hci_req_sync(struct hci_dev *hdev, | |||
303 | add_wait_queue(&hdev->req_wait_q, &wait); | 259 | add_wait_queue(&hdev->req_wait_q, &wait); |
304 | set_current_state(TASK_INTERRUPTIBLE); | 260 | set_current_state(TASK_INTERRUPTIBLE); |
305 | 261 | ||
306 | err = hci_req_run(&req, hci_req_sync_complete); | 262 | err = hci_req_run_skb(&req, hci_req_sync_complete); |
307 | if (err < 0) { | 263 | if (err < 0) { |
308 | hdev->req_status = 0; | 264 | hdev->req_status = 0; |
309 | 265 | ||
@@ -1690,9 +1646,6 @@ static int hci_dev_do_close(struct hci_dev *hdev) | |||
1690 | hdev->sent_cmd = NULL; | 1646 | hdev->sent_cmd = NULL; |
1691 | } | 1647 | } |
1692 | 1648 | ||
1693 | kfree_skb(hdev->recv_evt); | ||
1694 | hdev->recv_evt = NULL; | ||
1695 | |||
1696 | /* After this point our queues are empty | 1649 | /* After this point our queues are empty |
1697 | * and no tasks are scheduled. */ | 1650 | * and no tasks are scheduled. */ |
1698 | hdev->close(hdev); | 1651 | hdev->close(hdev); |
@@ -3563,11 +3516,6 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) | |||
3563 | } | 3516 | } |
3564 | } | 3517 | } |
3565 | 3518 | ||
3566 | bool hci_req_pending(struct hci_dev *hdev) | ||
3567 | { | ||
3568 | return (hdev->req_status == HCI_REQ_PEND); | ||
3569 | } | ||
3570 | |||
3571 | /* Send HCI command */ | 3519 | /* Send HCI command */ |
3572 | int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, | 3520 | int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, |
3573 | const void *param) | 3521 | const void *param) |
@@ -3585,7 +3533,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, | |||
3585 | /* Stand-alone HCI commands must be flagged as | 3533 | /* Stand-alone HCI commands must be flagged as |
3586 | * single-command requests. | 3534 | * single-command requests. |
3587 | */ | 3535 | */ |
3588 | bt_cb(skb)->req_start = 1; | 3536 | bt_cb(skb)->req.start = true; |
3589 | 3537 | ||
3590 | skb_queue_tail(&hdev->cmd_q, skb); | 3538 | skb_queue_tail(&hdev->cmd_q, skb); |
3591 | queue_work(hdev->workqueue, &hdev->cmd_work); | 3539 | queue_work(hdev->workqueue, &hdev->cmd_work); |
@@ -4263,7 +4211,7 @@ static bool hci_req_is_complete(struct hci_dev *hdev) | |||
4263 | if (!skb) | 4211 | if (!skb) |
4264 | return true; | 4212 | return true; |
4265 | 4213 | ||
4266 | return bt_cb(skb)->req_start; | 4214 | return bt_cb(skb)->req.start; |
4267 | } | 4215 | } |
4268 | 4216 | ||
4269 | static void hci_resend_last(struct hci_dev *hdev) | 4217 | static void hci_resend_last(struct hci_dev *hdev) |
@@ -4288,9 +4236,10 @@ static void hci_resend_last(struct hci_dev *hdev) | |||
4288 | queue_work(hdev->workqueue, &hdev->cmd_work); | 4236 | queue_work(hdev->workqueue, &hdev->cmd_work); |
4289 | } | 4237 | } |
4290 | 4238 | ||
4291 | void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status) | 4239 | void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, |
4240 | hci_req_complete_t *req_complete, | ||
4241 | hci_req_complete_skb_t *req_complete_skb) | ||
4292 | { | 4242 | { |
4293 | hci_req_complete_t req_complete = NULL; | ||
4294 | struct sk_buff *skb; | 4243 | struct sk_buff *skb; |
4295 | unsigned long flags; | 4244 | unsigned long flags; |
4296 | 4245 | ||
@@ -4322,36 +4271,29 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status) | |||
4322 | * callback would be found in hdev->sent_cmd instead of the | 4271 | * callback would be found in hdev->sent_cmd instead of the |
4323 | * command queue (hdev->cmd_q). | 4272 | * command queue (hdev->cmd_q). |
4324 | */ | 4273 | */ |
4325 | if (hdev->sent_cmd) { | 4274 | if (bt_cb(hdev->sent_cmd)->req.complete) { |
4326 | req_complete = bt_cb(hdev->sent_cmd)->req_complete; | 4275 | *req_complete = bt_cb(hdev->sent_cmd)->req.complete; |
4327 | 4276 | return; | |
4328 | if (req_complete) { | 4277 | } |
4329 | /* We must set the complete callback to NULL to | ||
4330 | * avoid calling the callback more than once if | ||
4331 | * this function gets called again. | ||
4332 | */ | ||
4333 | bt_cb(hdev->sent_cmd)->req_complete = NULL; | ||
4334 | 4278 | ||
4335 | goto call_complete; | 4279 | if (bt_cb(hdev->sent_cmd)->req.complete_skb) { |
4336 | } | 4280 | *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb; |
4281 | return; | ||
4337 | } | 4282 | } |
4338 | 4283 | ||
4339 | /* Remove all pending commands belonging to this request */ | 4284 | /* Remove all pending commands belonging to this request */ |
4340 | spin_lock_irqsave(&hdev->cmd_q.lock, flags); | 4285 | spin_lock_irqsave(&hdev->cmd_q.lock, flags); |
4341 | while ((skb = __skb_dequeue(&hdev->cmd_q))) { | 4286 | while ((skb = __skb_dequeue(&hdev->cmd_q))) { |
4342 | if (bt_cb(skb)->req_start) { | 4287 | if (bt_cb(skb)->req.start) { |
4343 | __skb_queue_head(&hdev->cmd_q, skb); | 4288 | __skb_queue_head(&hdev->cmd_q, skb); |
4344 | break; | 4289 | break; |
4345 | } | 4290 | } |
4346 | 4291 | ||
4347 | req_complete = bt_cb(skb)->req_complete; | 4292 | *req_complete = bt_cb(skb)->req.complete; |
4293 | *req_complete_skb = bt_cb(skb)->req.complete_skb; | ||
4348 | kfree_skb(skb); | 4294 | kfree_skb(skb); |
4349 | } | 4295 | } |
4350 | spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); | 4296 | spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); |
4351 | |||
4352 | call_complete: | ||
4353 | if (req_complete) | ||
4354 | req_complete(hdev, status, status ? opcode : HCI_OP_NOP); | ||
4355 | } | 4297 | } |
4356 | 4298 | ||
4357 | static void hci_rx_work(struct work_struct *work) | 4299 | static void hci_rx_work(struct work_struct *work) |
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c index e6255833a258..7db4220941cc 100644 --- a/net/bluetooth/hci_debugfs.c +++ b/net/bluetooth/hci_debugfs.c | |||
@@ -114,6 +114,30 @@ static const struct file_operations features_fops = { | |||
114 | .release = single_release, | 114 | .release = single_release, |
115 | }; | 115 | }; |
116 | 116 | ||
117 | static int device_id_show(struct seq_file *f, void *ptr) | ||
118 | { | ||
119 | struct hci_dev *hdev = f->private; | ||
120 | |||
121 | hci_dev_lock(hdev); | ||
122 | seq_printf(f, "%4.4x:%4.4x:%4.4x:%4.4x\n", hdev->devid_source, | ||
123 | hdev->devid_vendor, hdev->devid_product, hdev->devid_version); | ||
124 | hci_dev_unlock(hdev); | ||
125 | |||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static int device_id_open(struct inode *inode, struct file *file) | ||
130 | { | ||
131 | return single_open(file, device_id_show, inode->i_private); | ||
132 | } | ||
133 | |||
134 | static const struct file_operations device_id_fops = { | ||
135 | .open = device_id_open, | ||
136 | .read = seq_read, | ||
137 | .llseek = seq_lseek, | ||
138 | .release = single_release, | ||
139 | }; | ||
140 | |||
117 | static int device_list_show(struct seq_file *f, void *ptr) | 141 | static int device_list_show(struct seq_file *f, void *ptr) |
118 | { | 142 | { |
119 | struct hci_dev *hdev = f->private; | 143 | struct hci_dev *hdev = f->private; |
@@ -335,6 +359,8 @@ void hci_debugfs_create_common(struct hci_dev *hdev) | |||
335 | debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev); | 359 | debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev); |
336 | debugfs_create_u8("hardware_error", 0444, hdev->debugfs, | 360 | debugfs_create_u8("hardware_error", 0444, hdev->debugfs, |
337 | &hdev->hw_error_code); | 361 | &hdev->hw_error_code); |
362 | debugfs_create_file("device_id", 0444, hdev->debugfs, hdev, | ||
363 | &device_id_fops); | ||
338 | 364 | ||
339 | debugfs_create_file("device_list", 0444, hdev->debugfs, hdev, | 365 | debugfs_create_file("device_list", 0444, hdev->debugfs, hdev, |
340 | &device_list_fops); | 366 | &device_list_fops); |
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 62f92a508961..01031038eb0e 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c | |||
@@ -1045,11 +1045,6 @@ static void hci_cc_read_local_oob_data(struct hci_dev *hdev, | |||
1045 | struct hci_rp_read_local_oob_data *rp = (void *) skb->data; | 1045 | struct hci_rp_read_local_oob_data *rp = (void *) skb->data; |
1046 | 1046 | ||
1047 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); | 1047 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); |
1048 | |||
1049 | hci_dev_lock(hdev); | ||
1050 | mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL, | ||
1051 | rp->status); | ||
1052 | hci_dev_unlock(hdev); | ||
1053 | } | 1048 | } |
1054 | 1049 | ||
1055 | static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, | 1050 | static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, |
@@ -1058,15 +1053,8 @@ static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, | |||
1058 | struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data; | 1053 | struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data; |
1059 | 1054 | ||
1060 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); | 1055 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); |
1061 | |||
1062 | hci_dev_lock(hdev); | ||
1063 | mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192, | ||
1064 | rp->hash256, rp->rand256, | ||
1065 | rp->status); | ||
1066 | hci_dev_unlock(hdev); | ||
1067 | } | 1056 | } |
1068 | 1057 | ||
1069 | |||
1070 | static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb) | 1058 | static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb) |
1071 | { | 1059 | { |
1072 | __u8 status = *((__u8 *) skb->data); | 1060 | __u8 status = *((__u8 *) skb->data); |
@@ -2732,17 +2720,19 @@ unlock: | |||
2732 | hci_dev_unlock(hdev); | 2720 | hci_dev_unlock(hdev); |
2733 | } | 2721 | } |
2734 | 2722 | ||
2735 | static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2723 | static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, |
2724 | u16 *opcode, u8 *status, | ||
2725 | hci_req_complete_t *req_complete, | ||
2726 | hci_req_complete_skb_t *req_complete_skb) | ||
2736 | { | 2727 | { |
2737 | struct hci_ev_cmd_complete *ev = (void *) skb->data; | 2728 | struct hci_ev_cmd_complete *ev = (void *) skb->data; |
2738 | u8 status = skb->data[sizeof(*ev)]; | ||
2739 | __u16 opcode; | ||
2740 | 2729 | ||
2741 | skb_pull(skb, sizeof(*ev)); | 2730 | *opcode = __le16_to_cpu(ev->opcode); |
2731 | *status = skb->data[sizeof(*ev)]; | ||
2742 | 2732 | ||
2743 | opcode = __le16_to_cpu(ev->opcode); | 2733 | skb_pull(skb, sizeof(*ev)); |
2744 | 2734 | ||
2745 | switch (opcode) { | 2735 | switch (*opcode) { |
2746 | case HCI_OP_INQUIRY_CANCEL: | 2736 | case HCI_OP_INQUIRY_CANCEL: |
2747 | hci_cc_inquiry_cancel(hdev, skb); | 2737 | hci_cc_inquiry_cancel(hdev, skb); |
2748 | break; | 2738 | break; |
@@ -3020,32 +3010,36 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
3020 | break; | 3010 | break; |
3021 | 3011 | ||
3022 | default: | 3012 | default: |
3023 | BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); | 3013 | BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); |
3024 | break; | 3014 | break; |
3025 | } | 3015 | } |
3026 | 3016 | ||
3027 | if (opcode != HCI_OP_NOP) | 3017 | if (*opcode != HCI_OP_NOP) |
3028 | cancel_delayed_work(&hdev->cmd_timer); | 3018 | cancel_delayed_work(&hdev->cmd_timer); |
3029 | 3019 | ||
3030 | hci_req_cmd_complete(hdev, opcode, status); | 3020 | if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) |
3031 | |||
3032 | if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { | ||
3033 | atomic_set(&hdev->cmd_cnt, 1); | 3021 | atomic_set(&hdev->cmd_cnt, 1); |
3034 | if (!skb_queue_empty(&hdev->cmd_q)) | 3022 | |
3035 | queue_work(hdev->workqueue, &hdev->cmd_work); | 3023 | hci_req_cmd_complete(hdev, *opcode, *status, req_complete, |
3036 | } | 3024 | req_complete_skb); |
3025 | |||
3026 | if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) | ||
3027 | queue_work(hdev->workqueue, &hdev->cmd_work); | ||
3037 | } | 3028 | } |
3038 | 3029 | ||
3039 | static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) | 3030 | static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb, |
3031 | u16 *opcode, u8 *status, | ||
3032 | hci_req_complete_t *req_complete, | ||
3033 | hci_req_complete_skb_t *req_complete_skb) | ||
3040 | { | 3034 | { |
3041 | struct hci_ev_cmd_status *ev = (void *) skb->data; | 3035 | struct hci_ev_cmd_status *ev = (void *) skb->data; |
3042 | __u16 opcode; | ||
3043 | 3036 | ||
3044 | skb_pull(skb, sizeof(*ev)); | 3037 | skb_pull(skb, sizeof(*ev)); |
3045 | 3038 | ||
3046 | opcode = __le16_to_cpu(ev->opcode); | 3039 | *opcode = __le16_to_cpu(ev->opcode); |
3040 | *status = ev->status; | ||
3047 | 3041 | ||
3048 | switch (opcode) { | 3042 | switch (*opcode) { |
3049 | case HCI_OP_INQUIRY: | 3043 | case HCI_OP_INQUIRY: |
3050 | hci_cs_inquiry(hdev, ev->status); | 3044 | hci_cs_inquiry(hdev, ev->status); |
3051 | break; | 3045 | break; |
@@ -3115,22 +3109,29 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
3115 | break; | 3109 | break; |
3116 | 3110 | ||
3117 | default: | 3111 | default: |
3118 | BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); | 3112 | BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); |
3119 | break; | 3113 | break; |
3120 | } | 3114 | } |
3121 | 3115 | ||
3122 | if (opcode != HCI_OP_NOP) | 3116 | if (*opcode != HCI_OP_NOP) |
3123 | cancel_delayed_work(&hdev->cmd_timer); | 3117 | cancel_delayed_work(&hdev->cmd_timer); |
3124 | 3118 | ||
3119 | if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) | ||
3120 | atomic_set(&hdev->cmd_cnt, 1); | ||
3121 | |||
3122 | /* Indicate request completion if the command failed. Also, if | ||
3123 | * we're not waiting for a special event and we get a success | ||
3124 | * command status we should try to flag the request as completed | ||
3125 | * (since for this kind of commands there will not be a command | ||
3126 | * complete event). | ||
3127 | */ | ||
3125 | if (ev->status || | 3128 | if (ev->status || |
3126 | (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req_event)) | 3129 | (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event)) |
3127 | hci_req_cmd_complete(hdev, opcode, ev->status); | 3130 | hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, |
3131 | req_complete_skb); | ||
3128 | 3132 | ||
3129 | if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { | 3133 | if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) |
3130 | atomic_set(&hdev->cmd_cnt, 1); | 3134 | queue_work(hdev->workqueue, &hdev->cmd_work); |
3131 | if (!skb_queue_empty(&hdev->cmd_q)) | ||
3132 | queue_work(hdev->workqueue, &hdev->cmd_work); | ||
3133 | } | ||
3134 | } | 3135 | } |
3135 | 3136 | ||
3136 | static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb) | 3137 | static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -5031,32 +5032,79 @@ static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
5031 | amp_read_loc_assoc_final_data(hdev, hcon); | 5032 | amp_read_loc_assoc_final_data(hdev, hcon); |
5032 | } | 5033 | } |
5033 | 5034 | ||
5034 | void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | 5035 | static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, |
5036 | u8 event, struct sk_buff *skb) | ||
5035 | { | 5037 | { |
5036 | struct hci_event_hdr *hdr = (void *) skb->data; | 5038 | struct hci_ev_cmd_complete *ev; |
5037 | __u8 event = hdr->evt; | 5039 | struct hci_event_hdr *hdr; |
5038 | 5040 | ||
5039 | hci_dev_lock(hdev); | 5041 | if (!skb) |
5042 | return false; | ||
5040 | 5043 | ||
5041 | /* Received events are (currently) only needed when a request is | 5044 | if (skb->len < sizeof(*hdr)) { |
5042 | * ongoing so avoid unnecessary memory allocation. | 5045 | BT_ERR("Too short HCI event"); |
5043 | */ | 5046 | return false; |
5044 | if (hci_req_pending(hdev)) { | ||
5045 | kfree_skb(hdev->recv_evt); | ||
5046 | hdev->recv_evt = skb_clone(skb, GFP_KERNEL); | ||
5047 | } | 5047 | } |
5048 | 5048 | ||
5049 | hci_dev_unlock(hdev); | 5049 | hdr = (void *) skb->data; |
5050 | |||
5051 | skb_pull(skb, HCI_EVENT_HDR_SIZE); | 5050 | skb_pull(skb, HCI_EVENT_HDR_SIZE); |
5052 | 5051 | ||
5053 | if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req_event == event) { | 5052 | if (event) { |
5054 | struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; | 5053 | if (hdr->evt != event) |
5055 | u16 opcode = __le16_to_cpu(cmd_hdr->opcode); | 5054 | return false; |
5055 | return true; | ||
5056 | } | ||
5057 | |||
5058 | if (hdr->evt != HCI_EV_CMD_COMPLETE) { | ||
5059 | BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt); | ||
5060 | return false; | ||
5061 | } | ||
5062 | |||
5063 | if (skb->len < sizeof(*ev)) { | ||
5064 | BT_ERR("Too short cmd_complete event"); | ||
5065 | return false; | ||
5066 | } | ||
5067 | |||
5068 | ev = (void *) skb->data; | ||
5069 | skb_pull(skb, sizeof(*ev)); | ||
5056 | 5070 | ||
5057 | hci_req_cmd_complete(hdev, opcode, 0); | 5071 | if (opcode != __le16_to_cpu(ev->opcode)) { |
5072 | BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode, | ||
5073 | __le16_to_cpu(ev->opcode)); | ||
5074 | return false; | ||
5058 | } | 5075 | } |
5059 | 5076 | ||
5077 | return true; | ||
5078 | } | ||
5079 | |||
5080 | void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | ||
5081 | { | ||
5082 | struct hci_event_hdr *hdr = (void *) skb->data; | ||
5083 | hci_req_complete_t req_complete = NULL; | ||
5084 | hci_req_complete_skb_t req_complete_skb = NULL; | ||
5085 | struct sk_buff *orig_skb = NULL; | ||
5086 | u8 status = 0, event = hdr->evt, req_evt = 0; | ||
5087 | u16 opcode = HCI_OP_NOP; | ||
5088 | |||
5089 | if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) { | ||
5090 | struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; | ||
5091 | opcode = __le16_to_cpu(cmd_hdr->opcode); | ||
5092 | hci_req_cmd_complete(hdev, opcode, status, &req_complete, | ||
5093 | &req_complete_skb); | ||
5094 | req_evt = event; | ||
5095 | } | ||
5096 | |||
5097 | /* If it looks like we might end up having to call | ||
5098 | * req_complete_skb, store a pristine copy of the skb since the | ||
5099 | * various handlers may modify the original one through | ||
5100 | * skb_pull() calls, etc. | ||
5101 | */ | ||
5102 | if (req_complete_skb || event == HCI_EV_CMD_STATUS || | ||
5103 | event == HCI_EV_CMD_COMPLETE) | ||
5104 | orig_skb = skb_clone(skb, GFP_KERNEL); | ||
5105 | |||
5106 | skb_pull(skb, HCI_EVENT_HDR_SIZE); | ||
5107 | |||
5060 | switch (event) { | 5108 | switch (event) { |
5061 | case HCI_EV_INQUIRY_COMPLETE: | 5109 | case HCI_EV_INQUIRY_COMPLETE: |
5062 | hci_inquiry_complete_evt(hdev, skb); | 5110 | hci_inquiry_complete_evt(hdev, skb); |
@@ -5099,11 +5147,13 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
5099 | break; | 5147 | break; |
5100 | 5148 | ||
5101 | case HCI_EV_CMD_COMPLETE: | 5149 | case HCI_EV_CMD_COMPLETE: |
5102 | hci_cmd_complete_evt(hdev, skb); | 5150 | hci_cmd_complete_evt(hdev, skb, &opcode, &status, |
5151 | &req_complete, &req_complete_skb); | ||
5103 | break; | 5152 | break; |
5104 | 5153 | ||
5105 | case HCI_EV_CMD_STATUS: | 5154 | case HCI_EV_CMD_STATUS: |
5106 | hci_cmd_status_evt(hdev, skb); | 5155 | hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete, |
5156 | &req_complete_skb); | ||
5107 | break; | 5157 | break; |
5108 | 5158 | ||
5109 | case HCI_EV_HARDWARE_ERROR: | 5159 | case HCI_EV_HARDWARE_ERROR: |
@@ -5235,6 +5285,17 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
5235 | break; | 5285 | break; |
5236 | } | 5286 | } |
5237 | 5287 | ||
5288 | if (req_complete) { | ||
5289 | req_complete(hdev, status, opcode); | ||
5290 | } else if (req_complete_skb) { | ||
5291 | if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) { | ||
5292 | kfree_skb(orig_skb); | ||
5293 | orig_skb = NULL; | ||
5294 | } | ||
5295 | req_complete_skb(hdev, status, opcode, orig_skb); | ||
5296 | } | ||
5297 | |||
5298 | kfree_skb(orig_skb); | ||
5238 | kfree_skb(skb); | 5299 | kfree_skb(skb); |
5239 | hdev->stat.evt_rx++; | 5300 | hdev->stat.evt_rx++; |
5240 | } | 5301 | } |
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 55e096d20a0f..d6025d6e6d59 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c | |||
@@ -34,7 +34,8 @@ void hci_req_init(struct hci_request *req, struct hci_dev *hdev) | |||
34 | req->err = 0; | 34 | req->err = 0; |
35 | } | 35 | } |
36 | 36 | ||
37 | int hci_req_run(struct hci_request *req, hci_req_complete_t complete) | 37 | static int req_run(struct hci_request *req, hci_req_complete_t complete, |
38 | hci_req_complete_skb_t complete_skb) | ||
38 | { | 39 | { |
39 | struct hci_dev *hdev = req->hdev; | 40 | struct hci_dev *hdev = req->hdev; |
40 | struct sk_buff *skb; | 41 | struct sk_buff *skb; |
@@ -55,7 +56,8 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete) | |||
55 | return -ENODATA; | 56 | return -ENODATA; |
56 | 57 | ||
57 | skb = skb_peek_tail(&req->cmd_q); | 58 | skb = skb_peek_tail(&req->cmd_q); |
58 | bt_cb(skb)->req_complete = complete; | 59 | bt_cb(skb)->req.complete = complete; |
60 | bt_cb(skb)->req.complete_skb = complete_skb; | ||
59 | 61 | ||
60 | spin_lock_irqsave(&hdev->cmd_q.lock, flags); | 62 | spin_lock_irqsave(&hdev->cmd_q.lock, flags); |
61 | skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); | 63 | skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); |
@@ -66,6 +68,16 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete) | |||
66 | return 0; | 68 | return 0; |
67 | } | 69 | } |
68 | 70 | ||
71 | int hci_req_run(struct hci_request *req, hci_req_complete_t complete) | ||
72 | { | ||
73 | return req_run(req, complete, NULL); | ||
74 | } | ||
75 | |||
76 | int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete) | ||
77 | { | ||
78 | return req_run(req, NULL, complete); | ||
79 | } | ||
80 | |||
69 | struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, | 81 | struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, |
70 | const void *param) | 82 | const void *param) |
71 | { | 83 | { |
@@ -116,9 +128,9 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, | |||
116 | } | 128 | } |
117 | 129 | ||
118 | if (skb_queue_empty(&req->cmd_q)) | 130 | if (skb_queue_empty(&req->cmd_q)) |
119 | bt_cb(skb)->req_start = 1; | 131 | bt_cb(skb)->req.start = true; |
120 | 132 | ||
121 | bt_cb(skb)->req_event = event; | 133 | bt_cb(skb)->req.event = event; |
122 | 134 | ||
123 | skb_queue_tail(&req->cmd_q, skb); | 135 | skb_queue_tail(&req->cmd_q, skb); |
124 | } | 136 | } |
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index adf074d33544..bf6df92f42db 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h | |||
@@ -32,11 +32,14 @@ struct hci_request { | |||
32 | 32 | ||
33 | void hci_req_init(struct hci_request *req, struct hci_dev *hdev); | 33 | void hci_req_init(struct hci_request *req, struct hci_dev *hdev); |
34 | int hci_req_run(struct hci_request *req, hci_req_complete_t complete); | 34 | int hci_req_run(struct hci_request *req, hci_req_complete_t complete); |
35 | int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete); | ||
35 | void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, | 36 | void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, |
36 | const void *param); | 37 | const void *param); |
37 | void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, | 38 | void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, |
38 | const void *param, u8 event); | 39 | const void *param, u8 event); |
39 | void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status); | 40 | void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, |
41 | hci_req_complete_t *req_complete, | ||
42 | hci_req_complete_skb_t *req_complete_skb); | ||
40 | 43 | ||
41 | struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, | 44 | struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, |
42 | const void *param); | 45 | const void *param); |
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 85a44a7dc150..56f9edbf3d05 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c | |||
@@ -1164,7 +1164,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, | |||
1164 | /* Stand-alone HCI commands must be flagged as | 1164 | /* Stand-alone HCI commands must be flagged as |
1165 | * single-command requests. | 1165 | * single-command requests. |
1166 | */ | 1166 | */ |
1167 | bt_cb(skb)->req_start = 1; | 1167 | bt_cb(skb)->req.start = true; |
1168 | 1168 | ||
1169 | skb_queue_tail(&hdev->cmd_q, skb); | 1169 | skb_queue_tail(&hdev->cmd_q, skb); |
1170 | queue_work(hdev->workqueue, &hdev->cmd_work); | 1170 | queue_work(hdev->workqueue, &hdev->cmd_work); |
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 07348e142f16..a05b9dbf14c9 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c | |||
@@ -70,10 +70,11 @@ static void hidp_session_terminate(struct hidp_session *s); | |||
70 | 70 | ||
71 | static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci) | 71 | static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci) |
72 | { | 72 | { |
73 | u32 valid_flags = 0; | ||
73 | memset(ci, 0, sizeof(*ci)); | 74 | memset(ci, 0, sizeof(*ci)); |
74 | bacpy(&ci->bdaddr, &session->bdaddr); | 75 | bacpy(&ci->bdaddr, &session->bdaddr); |
75 | 76 | ||
76 | ci->flags = session->flags; | 77 | ci->flags = session->flags & valid_flags; |
77 | ci->state = BT_CONNECTED; | 78 | ci->state = BT_CONNECTED; |
78 | 79 | ||
79 | if (session->input) { | 80 | if (session->input) { |
@@ -907,7 +908,7 @@ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr, | |||
907 | kref_init(&session->ref); | 908 | kref_init(&session->ref); |
908 | atomic_set(&session->state, HIDP_SESSION_IDLING); | 909 | atomic_set(&session->state, HIDP_SESSION_IDLING); |
909 | init_waitqueue_head(&session->state_queue); | 910 | init_waitqueue_head(&session->state_queue); |
910 | session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID); | 911 | session->flags = req->flags & BIT(HIDP_BLUETOOTH_VENDOR_ID); |
911 | 912 | ||
912 | /* connection management */ | 913 | /* connection management */ |
913 | bacpy(&session->bdaddr, bdaddr); | 914 | bacpy(&session->bdaddr, bdaddr); |
@@ -1312,6 +1313,7 @@ int hidp_connection_add(struct hidp_connadd_req *req, | |||
1312 | struct socket *ctrl_sock, | 1313 | struct socket *ctrl_sock, |
1313 | struct socket *intr_sock) | 1314 | struct socket *intr_sock) |
1314 | { | 1315 | { |
1316 | u32 valid_flags = 0; | ||
1315 | struct hidp_session *session; | 1317 | struct hidp_session *session; |
1316 | struct l2cap_conn *conn; | 1318 | struct l2cap_conn *conn; |
1317 | struct l2cap_chan *chan; | 1319 | struct l2cap_chan *chan; |
@@ -1321,6 +1323,9 @@ int hidp_connection_add(struct hidp_connadd_req *req, | |||
1321 | if (ret) | 1323 | if (ret) |
1322 | return ret; | 1324 | return ret; |
1323 | 1325 | ||
1326 | if (req->flags & ~valid_flags) | ||
1327 | return -EINVAL; | ||
1328 | |||
1324 | chan = l2cap_pi(ctrl_sock->sk)->chan; | 1329 | chan = l2cap_pi(ctrl_sock->sk)->chan; |
1325 | conn = NULL; | 1330 | conn = NULL; |
1326 | l2cap_chan_lock(chan); | 1331 | l2cap_chan_lock(chan); |
@@ -1351,13 +1356,17 @@ out_conn: | |||
1351 | 1356 | ||
1352 | int hidp_connection_del(struct hidp_conndel_req *req) | 1357 | int hidp_connection_del(struct hidp_conndel_req *req) |
1353 | { | 1358 | { |
1359 | u32 valid_flags = BIT(HIDP_VIRTUAL_CABLE_UNPLUG); | ||
1354 | struct hidp_session *session; | 1360 | struct hidp_session *session; |
1355 | 1361 | ||
1362 | if (req->flags & ~valid_flags) | ||
1363 | return -EINVAL; | ||
1364 | |||
1356 | session = hidp_session_find(&req->bdaddr); | 1365 | session = hidp_session_find(&req->bdaddr); |
1357 | if (!session) | 1366 | if (!session) |
1358 | return -ENOENT; | 1367 | return -ENOENT; |
1359 | 1368 | ||
1360 | if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG)) | 1369 | if (req->flags & BIT(HIDP_VIRTUAL_CABLE_UNPLUG)) |
1361 | hidp_send_ctrl_message(session, | 1370 | hidp_send_ctrl_message(session, |
1362 | HIDP_TRANS_HID_CONTROL | | 1371 | HIDP_TRANS_HID_CONTROL | |
1363 | HIDP_CTRL_VIRTUAL_CABLE_UNPLUG, | 1372 | HIDP_CTRL_VIRTUAL_CABLE_UNPLUG, |
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index d69861c89bb5..dad419782a12 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c | |||
@@ -292,7 +292,7 @@ static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head, | |||
292 | struct sk_buff *skb; | 292 | struct sk_buff *skb; |
293 | 293 | ||
294 | skb_queue_walk(head, skb) { | 294 | skb_queue_walk(head, skb) { |
295 | if (bt_cb(skb)->control.txseq == seq) | 295 | if (bt_cb(skb)->l2cap.txseq == seq) |
296 | return skb; | 296 | return skb; |
297 | } | 297 | } |
298 | 298 | ||
@@ -954,11 +954,11 @@ static inline void __unpack_control(struct l2cap_chan *chan, | |||
954 | { | 954 | { |
955 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { | 955 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { |
956 | __unpack_extended_control(get_unaligned_le32(skb->data), | 956 | __unpack_extended_control(get_unaligned_le32(skb->data), |
957 | &bt_cb(skb)->control); | 957 | &bt_cb(skb)->l2cap); |
958 | skb_pull(skb, L2CAP_EXT_CTRL_SIZE); | 958 | skb_pull(skb, L2CAP_EXT_CTRL_SIZE); |
959 | } else { | 959 | } else { |
960 | __unpack_enhanced_control(get_unaligned_le16(skb->data), | 960 | __unpack_enhanced_control(get_unaligned_le16(skb->data), |
961 | &bt_cb(skb)->control); | 961 | &bt_cb(skb)->l2cap); |
962 | skb_pull(skb, L2CAP_ENH_CTRL_SIZE); | 962 | skb_pull(skb, L2CAP_ENH_CTRL_SIZE); |
963 | } | 963 | } |
964 | } | 964 | } |
@@ -1200,8 +1200,8 @@ static void l2cap_move_setup(struct l2cap_chan *chan) | |||
1200 | 1200 | ||
1201 | chan->retry_count = 0; | 1201 | chan->retry_count = 0; |
1202 | skb_queue_walk(&chan->tx_q, skb) { | 1202 | skb_queue_walk(&chan->tx_q, skb) { |
1203 | if (bt_cb(skb)->control.retries) | 1203 | if (bt_cb(skb)->l2cap.retries) |
1204 | bt_cb(skb)->control.retries = 1; | 1204 | bt_cb(skb)->l2cap.retries = 1; |
1205 | else | 1205 | else |
1206 | break; | 1206 | break; |
1207 | } | 1207 | } |
@@ -1846,8 +1846,8 @@ static void l2cap_streaming_send(struct l2cap_chan *chan, | |||
1846 | 1846 | ||
1847 | skb = skb_dequeue(&chan->tx_q); | 1847 | skb = skb_dequeue(&chan->tx_q); |
1848 | 1848 | ||
1849 | bt_cb(skb)->control.retries = 1; | 1849 | bt_cb(skb)->l2cap.retries = 1; |
1850 | control = &bt_cb(skb)->control; | 1850 | control = &bt_cb(skb)->l2cap; |
1851 | 1851 | ||
1852 | control->reqseq = 0; | 1852 | control->reqseq = 0; |
1853 | control->txseq = chan->next_tx_seq; | 1853 | control->txseq = chan->next_tx_seq; |
@@ -1891,8 +1891,8 @@ static int l2cap_ertm_send(struct l2cap_chan *chan) | |||
1891 | 1891 | ||
1892 | skb = chan->tx_send_head; | 1892 | skb = chan->tx_send_head; |
1893 | 1893 | ||
1894 | bt_cb(skb)->control.retries = 1; | 1894 | bt_cb(skb)->l2cap.retries = 1; |
1895 | control = &bt_cb(skb)->control; | 1895 | control = &bt_cb(skb)->l2cap; |
1896 | 1896 | ||
1897 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) | 1897 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) |
1898 | control->final = 1; | 1898 | control->final = 1; |
@@ -1963,11 +1963,11 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan) | |||
1963 | continue; | 1963 | continue; |
1964 | } | 1964 | } |
1965 | 1965 | ||
1966 | bt_cb(skb)->control.retries++; | 1966 | bt_cb(skb)->l2cap.retries++; |
1967 | control = bt_cb(skb)->control; | 1967 | control = bt_cb(skb)->l2cap; |
1968 | 1968 | ||
1969 | if (chan->max_tx != 0 && | 1969 | if (chan->max_tx != 0 && |
1970 | bt_cb(skb)->control.retries > chan->max_tx) { | 1970 | bt_cb(skb)->l2cap.retries > chan->max_tx) { |
1971 | BT_DBG("Retry limit exceeded (%d)", chan->max_tx); | 1971 | BT_DBG("Retry limit exceeded (%d)", chan->max_tx); |
1972 | l2cap_send_disconn_req(chan, ECONNRESET); | 1972 | l2cap_send_disconn_req(chan, ECONNRESET); |
1973 | l2cap_seq_list_clear(&chan->retrans_list); | 1973 | l2cap_seq_list_clear(&chan->retrans_list); |
@@ -2045,7 +2045,7 @@ static void l2cap_retransmit_all(struct l2cap_chan *chan, | |||
2045 | 2045 | ||
2046 | if (chan->unacked_frames) { | 2046 | if (chan->unacked_frames) { |
2047 | skb_queue_walk(&chan->tx_q, skb) { | 2047 | skb_queue_walk(&chan->tx_q, skb) { |
2048 | if (bt_cb(skb)->control.txseq == control->reqseq || | 2048 | if (bt_cb(skb)->l2cap.txseq == control->reqseq || |
2049 | skb == chan->tx_send_head) | 2049 | skb == chan->tx_send_head) |
2050 | break; | 2050 | break; |
2051 | } | 2051 | } |
@@ -2055,7 +2055,7 @@ static void l2cap_retransmit_all(struct l2cap_chan *chan, | |||
2055 | break; | 2055 | break; |
2056 | 2056 | ||
2057 | l2cap_seq_list_append(&chan->retrans_list, | 2057 | l2cap_seq_list_append(&chan->retrans_list, |
2058 | bt_cb(skb)->control.txseq); | 2058 | bt_cb(skb)->l2cap.txseq); |
2059 | } | 2059 | } |
2060 | 2060 | ||
2061 | l2cap_ertm_resend(chan); | 2061 | l2cap_ertm_resend(chan); |
@@ -2267,8 +2267,8 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, | |||
2267 | return ERR_PTR(err); | 2267 | return ERR_PTR(err); |
2268 | } | 2268 | } |
2269 | 2269 | ||
2270 | bt_cb(skb)->control.fcs = chan->fcs; | 2270 | bt_cb(skb)->l2cap.fcs = chan->fcs; |
2271 | bt_cb(skb)->control.retries = 0; | 2271 | bt_cb(skb)->l2cap.retries = 0; |
2272 | return skb; | 2272 | return skb; |
2273 | } | 2273 | } |
2274 | 2274 | ||
@@ -2321,7 +2321,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, | |||
2321 | return PTR_ERR(skb); | 2321 | return PTR_ERR(skb); |
2322 | } | 2322 | } |
2323 | 2323 | ||
2324 | bt_cb(skb)->control.sar = sar; | 2324 | bt_cb(skb)->l2cap.sar = sar; |
2325 | __skb_queue_tail(seg_queue, skb); | 2325 | __skb_queue_tail(seg_queue, skb); |
2326 | 2326 | ||
2327 | len -= pdu_len; | 2327 | len -= pdu_len; |
@@ -2856,7 +2856,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) | |||
2856 | continue; | 2856 | continue; |
2857 | 2857 | ||
2858 | /* Don't send frame to the channel it came from */ | 2858 | /* Don't send frame to the channel it came from */ |
2859 | if (bt_cb(skb)->chan == chan) | 2859 | if (bt_cb(skb)->l2cap.chan == chan) |
2860 | continue; | 2860 | continue; |
2861 | 2861 | ||
2862 | nskb = skb_clone(skb, GFP_KERNEL); | 2862 | nskb = skb_clone(skb, GFP_KERNEL); |
@@ -5918,7 +5918,7 @@ static int l2cap_rx_queued_iframes(struct l2cap_chan *chan) | |||
5918 | 5918 | ||
5919 | skb_unlink(skb, &chan->srej_q); | 5919 | skb_unlink(skb, &chan->srej_q); |
5920 | chan->buffer_seq = __next_seq(chan, chan->buffer_seq); | 5920 | chan->buffer_seq = __next_seq(chan, chan->buffer_seq); |
5921 | err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control); | 5921 | err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap); |
5922 | if (err) | 5922 | if (err) |
5923 | break; | 5923 | break; |
5924 | } | 5924 | } |
@@ -5952,7 +5952,7 @@ static void l2cap_handle_srej(struct l2cap_chan *chan, | |||
5952 | return; | 5952 | return; |
5953 | } | 5953 | } |
5954 | 5954 | ||
5955 | if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) { | 5955 | if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) { |
5956 | BT_DBG("Retry limit exceeded (%d)", chan->max_tx); | 5956 | BT_DBG("Retry limit exceeded (%d)", chan->max_tx); |
5957 | l2cap_send_disconn_req(chan, ECONNRESET); | 5957 | l2cap_send_disconn_req(chan, ECONNRESET); |
5958 | return; | 5958 | return; |
@@ -6005,7 +6005,7 @@ static void l2cap_handle_rej(struct l2cap_chan *chan, | |||
6005 | skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); | 6005 | skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); |
6006 | 6006 | ||
6007 | if (chan->max_tx && skb && | 6007 | if (chan->max_tx && skb && |
6008 | bt_cb(skb)->control.retries >= chan->max_tx) { | 6008 | bt_cb(skb)->l2cap.retries >= chan->max_tx) { |
6009 | BT_DBG("Retry limit exceeded (%d)", chan->max_tx); | 6009 | BT_DBG("Retry limit exceeded (%d)", chan->max_tx); |
6010 | l2cap_send_disconn_req(chan, ECONNRESET); | 6010 | l2cap_send_disconn_req(chan, ECONNRESET); |
6011 | return; | 6011 | return; |
@@ -6565,7 +6565,7 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, | |||
6565 | 6565 | ||
6566 | static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) | 6566 | static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) |
6567 | { | 6567 | { |
6568 | struct l2cap_ctrl *control = &bt_cb(skb)->control; | 6568 | struct l2cap_ctrl *control = &bt_cb(skb)->l2cap; |
6569 | u16 len; | 6569 | u16 len; |
6570 | u8 event; | 6570 | u8 event; |
6571 | 6571 | ||
@@ -6864,8 +6864,8 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, | |||
6864 | goto drop; | 6864 | goto drop; |
6865 | 6865 | ||
6866 | /* Store remote BD_ADDR and PSM for msg_name */ | 6866 | /* Store remote BD_ADDR and PSM for msg_name */ |
6867 | bacpy(&bt_cb(skb)->bdaddr, &hcon->dst); | 6867 | bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst); |
6868 | bt_cb(skb)->psm = psm; | 6868 | bt_cb(skb)->l2cap.psm = psm; |
6869 | 6869 | ||
6870 | if (!chan->ops->recv(chan, skb)) { | 6870 | if (!chan->ops->recv(chan, skb)) { |
6871 | l2cap_chan_put(chan); | 6871 | l2cap_chan_put(chan); |
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 9070720eedc8..a7278f05eafb 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c | |||
@@ -1330,7 +1330,7 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan, | |||
1330 | 1330 | ||
1331 | skb->priority = sk->sk_priority; | 1331 | skb->priority = sk->sk_priority; |
1332 | 1332 | ||
1333 | bt_cb(skb)->chan = chan; | 1333 | bt_cb(skb)->l2cap.chan = chan; |
1334 | 1334 | ||
1335 | return skb; | 1335 | return skb; |
1336 | } | 1336 | } |
@@ -1444,8 +1444,8 @@ static void l2cap_skb_msg_name(struct sk_buff *skb, void *msg_name, | |||
1444 | 1444 | ||
1445 | memset(la, 0, sizeof(struct sockaddr_l2)); | 1445 | memset(la, 0, sizeof(struct sockaddr_l2)); |
1446 | la->l2_family = AF_BLUETOOTH; | 1446 | la->l2_family = AF_BLUETOOTH; |
1447 | la->l2_psm = bt_cb(skb)->psm; | 1447 | la->l2_psm = bt_cb(skb)->l2cap.psm; |
1448 | bacpy(&la->l2_bdaddr, &bt_cb(skb)->bdaddr); | 1448 | bacpy(&la->l2_bdaddr, &bt_cb(skb)->l2cap.bdaddr); |
1449 | 1449 | ||
1450 | *msg_namelen = sizeof(struct sockaddr_l2); | 1450 | *msg_namelen = sizeof(struct sockaddr_l2); |
1451 | } | 1451 | } |
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index fb2e764c6211..845dfcc43a20 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c | |||
@@ -985,14 +985,27 @@ static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance) | |||
985 | /* Instance 0 always manages the "Tx Power" and "Flags" fields */ | 985 | /* Instance 0 always manages the "Tx Power" and "Flags" fields */ |
986 | flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; | 986 | flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; |
987 | 987 | ||
988 | /* For instance 0, assemble the flags from global settings */ | 988 | /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds |
989 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE) || | 989 | * to the "connectable" instance flag. |
990 | get_connectable(hdev)) | 990 | */ |
991 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) | ||
991 | flags |= MGMT_ADV_FLAG_CONNECTABLE; | 992 | flags |= MGMT_ADV_FLAG_CONNECTABLE; |
992 | 993 | ||
993 | return flags; | 994 | return flags; |
994 | } | 995 | } |
995 | 996 | ||
997 | static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance) | ||
998 | { | ||
999 | /* Ignore instance 0 and other unsupported instances */ | ||
1000 | if (instance != 0x01) | ||
1001 | return 0; | ||
1002 | |||
1003 | /* TODO: Take into account the "appearance" and "local-name" flags here. | ||
1004 | * These are currently being ignored as they are not supported. | ||
1005 | */ | ||
1006 | return hdev->adv_instance.scan_rsp_len; | ||
1007 | } | ||
1008 | |||
996 | static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) | 1009 | static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) |
997 | { | 1010 | { |
998 | u8 ad_len = 0, flags = 0; | 1011 | u8 ad_len = 0, flags = 0; |
@@ -1030,6 +1043,14 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) | |||
1030 | } | 1043 | } |
1031 | } | 1044 | } |
1032 | 1045 | ||
1046 | if (instance) { | ||
1047 | memcpy(ptr, hdev->adv_instance.adv_data, | ||
1048 | hdev->adv_instance.adv_data_len); | ||
1049 | |||
1050 | ad_len += hdev->adv_instance.adv_data_len; | ||
1051 | ptr += hdev->adv_instance.adv_data_len; | ||
1052 | } | ||
1053 | |||
1033 | /* Provide Tx Power only if we can provide a valid value for it */ | 1054 | /* Provide Tx Power only if we can provide a valid value for it */ |
1034 | if (hdev->adv_tx_power != HCI_TX_POWER_INVALID && | 1055 | if (hdev->adv_tx_power != HCI_TX_POWER_INVALID && |
1035 | (instance_flags & MGMT_ADV_FLAG_TX_POWER)) { | 1056 | (instance_flags & MGMT_ADV_FLAG_TX_POWER)) { |
@@ -1041,12 +1062,6 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) | |||
1041 | ptr += 3; | 1062 | ptr += 3; |
1042 | } | 1063 | } |
1043 | 1064 | ||
1044 | if (instance) { | ||
1045 | memcpy(ptr, hdev->adv_instance.adv_data, | ||
1046 | hdev->adv_instance.adv_data_len); | ||
1047 | ad_len += hdev->adv_instance.adv_data_len; | ||
1048 | } | ||
1049 | |||
1050 | return ad_len; | 1065 | return ad_len; |
1051 | } | 1066 | } |
1052 | 1067 | ||
@@ -1242,7 +1257,12 @@ static void enable_advertising(struct hci_request *req) | |||
1242 | 1257 | ||
1243 | instance = get_current_adv_instance(hdev); | 1258 | instance = get_current_adv_instance(hdev); |
1244 | flags = get_adv_instance_flags(hdev, instance); | 1259 | flags = get_adv_instance_flags(hdev, instance); |
1245 | connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE); | 1260 | |
1261 | /* If the "connectable" instance flag was not set, then choose between | ||
1262 | * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. | ||
1263 | */ | ||
1264 | connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || | ||
1265 | get_connectable(hdev); | ||
1246 | 1266 | ||
1247 | /* Set require_privacy to true only when non-connectable | 1267 | /* Set require_privacy to true only when non-connectable |
1248 | * advertising is used. In that case it is fine to use a | 1268 | * advertising is used. In that case it is fine to use a |
@@ -1254,7 +1274,14 @@ static void enable_advertising(struct hci_request *req) | |||
1254 | memset(&cp, 0, sizeof(cp)); | 1274 | memset(&cp, 0, sizeof(cp)); |
1255 | cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval); | 1275 | cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval); |
1256 | cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval); | 1276 | cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval); |
1257 | cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND; | 1277 | |
1278 | if (connectable) | ||
1279 | cp.type = LE_ADV_IND; | ||
1280 | else if (get_adv_instance_scan_rsp_len(hdev, instance)) | ||
1281 | cp.type = LE_ADV_SCAN_IND; | ||
1282 | else | ||
1283 | cp.type = LE_ADV_NONCONN_IND; | ||
1284 | |||
1258 | cp.own_address_type = own_addr_type; | 1285 | cp.own_address_type = own_addr_type; |
1259 | cp.channel_map = hdev->le_adv_channel_map; | 1286 | cp.channel_map = hdev->le_adv_channel_map; |
1260 | 1287 | ||
@@ -2088,7 +2115,8 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, | |||
2088 | 2115 | ||
2089 | no_scan_update: | 2116 | no_scan_update: |
2090 | /* Update the advertising parameters if necessary */ | 2117 | /* Update the advertising parameters if necessary */ |
2091 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) | 2118 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || |
2119 | hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) | ||
2092 | enable_advertising(&req); | 2120 | enable_advertising(&req); |
2093 | 2121 | ||
2094 | err = hci_req_run(&req, set_connectable_complete); | 2122 | err = hci_req_run(&req, set_connectable_complete); |
@@ -3757,10 +3785,70 @@ failed: | |||
3757 | return err; | 3785 | return err; |
3758 | } | 3786 | } |
3759 | 3787 | ||
3788 | static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, | ||
3789 | u16 opcode, struct sk_buff *skb) | ||
3790 | { | ||
3791 | struct mgmt_rp_read_local_oob_data mgmt_rp; | ||
3792 | size_t rp_size = sizeof(mgmt_rp); | ||
3793 | struct mgmt_pending_cmd *cmd; | ||
3794 | |||
3795 | BT_DBG("%s status %u", hdev->name, status); | ||
3796 | |||
3797 | cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev); | ||
3798 | if (!cmd) | ||
3799 | return; | ||
3800 | |||
3801 | if (status || !skb) { | ||
3802 | mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, | ||
3803 | status ? mgmt_status(status) : MGMT_STATUS_FAILED); | ||
3804 | goto remove; | ||
3805 | } | ||
3806 | |||
3807 | memset(&mgmt_rp, 0, sizeof(mgmt_rp)); | ||
3808 | |||
3809 | if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) { | ||
3810 | struct hci_rp_read_local_oob_data *rp = (void *) skb->data; | ||
3811 | |||
3812 | if (skb->len < sizeof(*rp)) { | ||
3813 | mgmt_cmd_status(cmd->sk, hdev->id, | ||
3814 | MGMT_OP_READ_LOCAL_OOB_DATA, | ||
3815 | MGMT_STATUS_FAILED); | ||
3816 | goto remove; | ||
3817 | } | ||
3818 | |||
3819 | memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash)); | ||
3820 | memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand)); | ||
3821 | |||
3822 | rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256); | ||
3823 | } else { | ||
3824 | struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data; | ||
3825 | |||
3826 | if (skb->len < sizeof(*rp)) { | ||
3827 | mgmt_cmd_status(cmd->sk, hdev->id, | ||
3828 | MGMT_OP_READ_LOCAL_OOB_DATA, | ||
3829 | MGMT_STATUS_FAILED); | ||
3830 | goto remove; | ||
3831 | } | ||
3832 | |||
3833 | memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192)); | ||
3834 | memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192)); | ||
3835 | |||
3836 | memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256)); | ||
3837 | memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256)); | ||
3838 | } | ||
3839 | |||
3840 | mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, | ||
3841 | MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size); | ||
3842 | |||
3843 | remove: | ||
3844 | mgmt_pending_remove(cmd); | ||
3845 | } | ||
3846 | |||
3760 | static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, | 3847 | static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, |
3761 | void *data, u16 data_len) | 3848 | void *data, u16 data_len) |
3762 | { | 3849 | { |
3763 | struct mgmt_pending_cmd *cmd; | 3850 | struct mgmt_pending_cmd *cmd; |
3851 | struct hci_request req; | ||
3764 | int err; | 3852 | int err; |
3765 | 3853 | ||
3766 | BT_DBG("%s", hdev->name); | 3854 | BT_DBG("%s", hdev->name); |
@@ -3791,12 +3879,14 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, | |||
3791 | goto unlock; | 3879 | goto unlock; |
3792 | } | 3880 | } |
3793 | 3881 | ||
3882 | hci_req_init(&req, hdev); | ||
3883 | |||
3794 | if (bredr_sc_enabled(hdev)) | 3884 | if (bredr_sc_enabled(hdev)) |
3795 | err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA, | 3885 | hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL); |
3796 | 0, NULL); | ||
3797 | else | 3886 | else |
3798 | err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL); | 3887 | hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL); |
3799 | 3888 | ||
3889 | err = hci_req_run_skb(&req, read_local_oob_data_complete); | ||
3800 | if (err < 0) | 3890 | if (err < 0) |
3801 | mgmt_pending_remove(cmd); | 3891 | mgmt_pending_remove(cmd); |
3802 | 3892 | ||
@@ -6388,46 +6478,41 @@ static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev, | |||
6388 | 6478 | ||
6389 | BT_DBG("%s", hdev->name); | 6479 | BT_DBG("%s", hdev->name); |
6390 | 6480 | ||
6391 | if (!hdev_is_powered(hdev)) | 6481 | if (hdev_is_powered(hdev)) { |
6392 | return mgmt_cmd_complete(sk, hdev->id, | 6482 | switch (cp->type) { |
6393 | MGMT_OP_READ_LOCAL_OOB_EXT_DATA, | 6483 | case BIT(BDADDR_BREDR): |
6394 | MGMT_STATUS_NOT_POWERED, | 6484 | status = mgmt_bredr_support(hdev); |
6395 | &cp->type, sizeof(cp->type)); | 6485 | if (status) |
6396 | 6486 | eir_len = 0; | |
6397 | switch (cp->type) { | 6487 | else |
6398 | case BIT(BDADDR_BREDR): | 6488 | eir_len = 5; |
6399 | status = mgmt_bredr_support(hdev); | 6489 | break; |
6400 | if (status) | 6490 | case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)): |
6401 | return mgmt_cmd_complete(sk, hdev->id, | 6491 | status = mgmt_le_support(hdev); |
6402 | MGMT_OP_READ_LOCAL_OOB_EXT_DATA, | 6492 | if (status) |
6403 | status, &cp->type, | 6493 | eir_len = 0; |
6404 | sizeof(cp->type)); | 6494 | else |
6405 | eir_len = 5; | 6495 | eir_len = 9 + 3 + 18 + 18 + 3; |
6406 | break; | 6496 | break; |
6407 | case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)): | 6497 | default: |
6408 | status = mgmt_le_support(hdev); | 6498 | status = MGMT_STATUS_INVALID_PARAMS; |
6409 | if (status) | 6499 | eir_len = 0; |
6410 | return mgmt_cmd_complete(sk, hdev->id, | 6500 | break; |
6411 | MGMT_OP_READ_LOCAL_OOB_EXT_DATA, | 6501 | } |
6412 | status, &cp->type, | 6502 | } else { |
6413 | sizeof(cp->type)); | 6503 | status = MGMT_STATUS_NOT_POWERED; |
6414 | eir_len = 9 + 3 + 18 + 18 + 3; | 6504 | eir_len = 0; |
6415 | break; | ||
6416 | default: | ||
6417 | return mgmt_cmd_complete(sk, hdev->id, | ||
6418 | MGMT_OP_READ_LOCAL_OOB_EXT_DATA, | ||
6419 | MGMT_STATUS_INVALID_PARAMS, | ||
6420 | &cp->type, sizeof(cp->type)); | ||
6421 | } | 6505 | } |
6422 | 6506 | ||
6423 | hci_dev_lock(hdev); | ||
6424 | |||
6425 | rp_len = sizeof(*rp) + eir_len; | 6507 | rp_len = sizeof(*rp) + eir_len; |
6426 | rp = kmalloc(rp_len, GFP_ATOMIC); | 6508 | rp = kmalloc(rp_len, GFP_ATOMIC); |
6427 | if (!rp) { | 6509 | if (!rp) |
6428 | hci_dev_unlock(hdev); | ||
6429 | return -ENOMEM; | 6510 | return -ENOMEM; |
6430 | } | 6511 | |
6512 | if (status) | ||
6513 | goto complete; | ||
6514 | |||
6515 | hci_dev_lock(hdev); | ||
6431 | 6516 | ||
6432 | eir_len = 0; | 6517 | eir_len = 0; |
6433 | switch (cp->type) { | 6518 | switch (cp->type) { |
@@ -6439,20 +6524,30 @@ static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev, | |||
6439 | if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) && | 6524 | if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) && |
6440 | smp_generate_oob(hdev, hash, rand) < 0) { | 6525 | smp_generate_oob(hdev, hash, rand) < 0) { |
6441 | hci_dev_unlock(hdev); | 6526 | hci_dev_unlock(hdev); |
6442 | err = mgmt_cmd_complete(sk, hdev->id, | 6527 | status = MGMT_STATUS_FAILED; |
6443 | MGMT_OP_READ_LOCAL_OOB_EXT_DATA, | 6528 | goto complete; |
6444 | MGMT_STATUS_FAILED, | ||
6445 | &cp->type, sizeof(cp->type)); | ||
6446 | goto done; | ||
6447 | } | 6529 | } |
6448 | 6530 | ||
6531 | /* This should return the active RPA, but since the RPA | ||
6532 | * is only programmed on demand, it is really hard to fill | ||
6533 | * this in at the moment. For now disallow retrieving | ||
6534 | * local out-of-band data when privacy is in use. | ||
6535 | * | ||
6536 | * Returning the identity address will not help here since | ||
6537 | * pairing happens before the identity resolving key is | ||
6538 | * known and thus the connection establishment happens | ||
6539 | * based on the RPA and not the identity address. | ||
6540 | */ | ||
6449 | if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { | 6541 | if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { |
6450 | memcpy(addr, &hdev->rpa, 6); | 6542 | hci_dev_unlock(hdev); |
6451 | addr[6] = 0x01; | 6543 | status = MGMT_STATUS_REJECTED; |
6452 | } else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || | 6544 | goto complete; |
6453 | !bacmp(&hdev->bdaddr, BDADDR_ANY) || | 6545 | } |
6454 | (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && | 6546 | |
6455 | bacmp(&hdev->static_addr, BDADDR_ANY))) { | 6547 | if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || |
6548 | !bacmp(&hdev->bdaddr, BDADDR_ANY) || | ||
6549 | (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && | ||
6550 | bacmp(&hdev->static_addr, BDADDR_ANY))) { | ||
6456 | memcpy(addr, &hdev->static_addr, 6); | 6551 | memcpy(addr, &hdev->static_addr, 6); |
6457 | addr[6] = 0x01; | 6552 | addr[6] = 0x01; |
6458 | } else { | 6553 | } else { |
@@ -6491,16 +6586,19 @@ static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev, | |||
6491 | break; | 6586 | break; |
6492 | } | 6587 | } |
6493 | 6588 | ||
6494 | rp->type = cp->type; | ||
6495 | rp->eir_len = cpu_to_le16(eir_len); | ||
6496 | |||
6497 | hci_dev_unlock(hdev); | 6589 | hci_dev_unlock(hdev); |
6498 | 6590 | ||
6499 | hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS); | 6591 | hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS); |
6500 | 6592 | ||
6593 | status = MGMT_STATUS_SUCCESS; | ||
6594 | |||
6595 | complete: | ||
6596 | rp->type = cp->type; | ||
6597 | rp->eir_len = cpu_to_le16(eir_len); | ||
6598 | |||
6501 | err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, | 6599 | err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, |
6502 | MGMT_STATUS_SUCCESS, rp, sizeof(*rp) + eir_len); | 6600 | status, rp, sizeof(*rp) + eir_len); |
6503 | if (err < 0) | 6601 | if (err < 0 || status) |
6504 | goto done; | 6602 | goto done; |
6505 | 6603 | ||
6506 | err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev, | 6604 | err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev, |
@@ -7899,43 +7997,6 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) | |||
7899 | cmd ? cmd->sk : NULL); | 7997 | cmd ? cmd->sk : NULL); |
7900 | } | 7998 | } |
7901 | 7999 | ||
7902 | void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192, | ||
7903 | u8 *rand192, u8 *hash256, u8 *rand256, | ||
7904 | u8 status) | ||
7905 | { | ||
7906 | struct mgmt_pending_cmd *cmd; | ||
7907 | |||
7908 | BT_DBG("%s status %u", hdev->name, status); | ||
7909 | |||
7910 | cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev); | ||
7911 | if (!cmd) | ||
7912 | return; | ||
7913 | |||
7914 | if (status) { | ||
7915 | mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, | ||
7916 | mgmt_status(status)); | ||
7917 | } else { | ||
7918 | struct mgmt_rp_read_local_oob_data rp; | ||
7919 | size_t rp_size = sizeof(rp); | ||
7920 | |||
7921 | memcpy(rp.hash192, hash192, sizeof(rp.hash192)); | ||
7922 | memcpy(rp.rand192, rand192, sizeof(rp.rand192)); | ||
7923 | |||
7924 | if (bredr_sc_enabled(hdev) && hash256 && rand256) { | ||
7925 | memcpy(rp.hash256, hash256, sizeof(rp.hash256)); | ||
7926 | memcpy(rp.rand256, rand256, sizeof(rp.rand256)); | ||
7927 | } else { | ||
7928 | rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256); | ||
7929 | } | ||
7930 | |||
7931 | mgmt_cmd_complete(cmd->sk, hdev->id, | ||
7932 | MGMT_OP_READ_LOCAL_OOB_DATA, 0, | ||
7933 | &rp, rp_size); | ||
7934 | } | ||
7935 | |||
7936 | mgmt_pending_remove(cmd); | ||
7937 | } | ||
7938 | |||
7939 | static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16]) | 8000 | static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16]) |
7940 | { | 8001 | { |
7941 | int i; | 8002 | int i; |
diff --git a/net/bluetooth/selftest.c b/net/bluetooth/selftest.c index 378f4064952c..dc688f13e496 100644 --- a/net/bluetooth/selftest.c +++ b/net/bluetooth/selftest.c | |||
@@ -21,6 +21,8 @@ | |||
21 | SOFTWARE IS DISCLAIMED. | 21 | SOFTWARE IS DISCLAIMED. |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/debugfs.h> | ||
25 | |||
24 | #include <net/bluetooth/bluetooth.h> | 26 | #include <net/bluetooth/bluetooth.h> |
25 | #include <net/bluetooth/hci_core.h> | 27 | #include <net/bluetooth/hci_core.h> |
26 | 28 | ||
@@ -154,6 +156,21 @@ static int __init test_ecdh_sample(const u8 priv_a[32], const u8 priv_b[32], | |||
154 | return 0; | 156 | return 0; |
155 | } | 157 | } |
156 | 158 | ||
159 | static char test_ecdh_buffer[32]; | ||
160 | |||
161 | static ssize_t test_ecdh_read(struct file *file, char __user *user_buf, | ||
162 | size_t count, loff_t *ppos) | ||
163 | { | ||
164 | return simple_read_from_buffer(user_buf, count, ppos, test_ecdh_buffer, | ||
165 | strlen(test_ecdh_buffer)); | ||
166 | } | ||
167 | |||
168 | static const struct file_operations test_ecdh_fops = { | ||
169 | .open = simple_open, | ||
170 | .read = test_ecdh_read, | ||
171 | .llseek = default_llseek, | ||
172 | }; | ||
173 | |||
157 | static int __init test_ecdh(void) | 174 | static int __init test_ecdh(void) |
158 | { | 175 | { |
159 | ktime_t calltime, delta, rettime; | 176 | ktime_t calltime, delta, rettime; |
@@ -165,19 +182,19 @@ static int __init test_ecdh(void) | |||
165 | err = test_ecdh_sample(priv_a_1, priv_b_1, pub_a_1, pub_b_1, dhkey_1); | 182 | err = test_ecdh_sample(priv_a_1, priv_b_1, pub_a_1, pub_b_1, dhkey_1); |
166 | if (err) { | 183 | if (err) { |
167 | BT_ERR("ECDH sample 1 failed"); | 184 | BT_ERR("ECDH sample 1 failed"); |
168 | return err; | 185 | goto done; |
169 | } | 186 | } |
170 | 187 | ||
171 | err = test_ecdh_sample(priv_a_2, priv_b_2, pub_a_2, pub_b_2, dhkey_2); | 188 | err = test_ecdh_sample(priv_a_2, priv_b_2, pub_a_2, pub_b_2, dhkey_2); |
172 | if (err) { | 189 | if (err) { |
173 | BT_ERR("ECDH sample 2 failed"); | 190 | BT_ERR("ECDH sample 2 failed"); |
174 | return err; | 191 | goto done; |
175 | } | 192 | } |
176 | 193 | ||
177 | err = test_ecdh_sample(priv_a_3, priv_a_3, pub_a_3, pub_a_3, dhkey_3); | 194 | err = test_ecdh_sample(priv_a_3, priv_a_3, pub_a_3, pub_a_3, dhkey_3); |
178 | if (err) { | 195 | if (err) { |
179 | BT_ERR("ECDH sample 3 failed"); | 196 | BT_ERR("ECDH sample 3 failed"); |
180 | return err; | 197 | goto done; |
181 | } | 198 | } |
182 | 199 | ||
183 | rettime = ktime_get(); | 200 | rettime = ktime_get(); |
@@ -186,7 +203,17 @@ static int __init test_ecdh(void) | |||
186 | 203 | ||
187 | BT_INFO("ECDH test passed in %llu usecs", duration); | 204 | BT_INFO("ECDH test passed in %llu usecs", duration); |
188 | 205 | ||
189 | return 0; | 206 | done: |
207 | if (!err) | ||
208 | snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer), | ||
209 | "PASS (%llu usecs)\n", duration); | ||
210 | else | ||
211 | snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer), "FAIL\n"); | ||
212 | |||
213 | debugfs_create_file("selftest_ecdh", 0444, bt_debugfs, NULL, | ||
214 | &test_ecdh_fops); | ||
215 | |||
216 | return err; | ||
190 | } | 217 | } |
191 | 218 | ||
192 | #else | 219 | #else |
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 1ec3f66b5a74..1ab3dc9c8f99 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c | |||
@@ -3017,7 +3017,7 @@ static struct sk_buff *smp_alloc_skb_cb(struct l2cap_chan *chan, | |||
3017 | return ERR_PTR(-ENOMEM); | 3017 | return ERR_PTR(-ENOMEM); |
3018 | 3018 | ||
3019 | skb->priority = HCI_PRIO_MAX; | 3019 | skb->priority = HCI_PRIO_MAX; |
3020 | bt_cb(skb)->chan = chan; | 3020 | bt_cb(skb)->l2cap.chan = chan; |
3021 | 3021 | ||
3022 | return skb; | 3022 | return skb; |
3023 | } | 3023 | } |
@@ -3549,6 +3549,21 @@ static int __init test_h6(struct crypto_hash *tfm_cmac) | |||
3549 | return 0; | 3549 | return 0; |
3550 | } | 3550 | } |
3551 | 3551 | ||
3552 | static char test_smp_buffer[32]; | ||
3553 | |||
3554 | static ssize_t test_smp_read(struct file *file, char __user *user_buf, | ||
3555 | size_t count, loff_t *ppos) | ||
3556 | { | ||
3557 | return simple_read_from_buffer(user_buf, count, ppos, test_smp_buffer, | ||
3558 | strlen(test_smp_buffer)); | ||
3559 | } | ||
3560 | |||
3561 | static const struct file_operations test_smp_fops = { | ||
3562 | .open = simple_open, | ||
3563 | .read = test_smp_read, | ||
3564 | .llseek = default_llseek, | ||
3565 | }; | ||
3566 | |||
3552 | static int __init run_selftests(struct crypto_blkcipher *tfm_aes, | 3567 | static int __init run_selftests(struct crypto_blkcipher *tfm_aes, |
3553 | struct crypto_hash *tfm_cmac) | 3568 | struct crypto_hash *tfm_cmac) |
3554 | { | 3569 | { |
@@ -3561,49 +3576,49 @@ static int __init run_selftests(struct crypto_blkcipher *tfm_aes, | |||
3561 | err = test_ah(tfm_aes); | 3576 | err = test_ah(tfm_aes); |
3562 | if (err) { | 3577 | if (err) { |
3563 | BT_ERR("smp_ah test failed"); | 3578 | BT_ERR("smp_ah test failed"); |
3564 | return err; | 3579 | goto done; |
3565 | } | 3580 | } |
3566 | 3581 | ||
3567 | err = test_c1(tfm_aes); | 3582 | err = test_c1(tfm_aes); |
3568 | if (err) { | 3583 | if (err) { |
3569 | BT_ERR("smp_c1 test failed"); | 3584 | BT_ERR("smp_c1 test failed"); |
3570 | return err; | 3585 | goto done; |
3571 | } | 3586 | } |
3572 | 3587 | ||
3573 | err = test_s1(tfm_aes); | 3588 | err = test_s1(tfm_aes); |
3574 | if (err) { | 3589 | if (err) { |
3575 | BT_ERR("smp_s1 test failed"); | 3590 | BT_ERR("smp_s1 test failed"); |
3576 | return err; | 3591 | goto done; |
3577 | } | 3592 | } |
3578 | 3593 | ||
3579 | err = test_f4(tfm_cmac); | 3594 | err = test_f4(tfm_cmac); |
3580 | if (err) { | 3595 | if (err) { |
3581 | BT_ERR("smp_f4 test failed"); | 3596 | BT_ERR("smp_f4 test failed"); |
3582 | return err; | 3597 | goto done; |
3583 | } | 3598 | } |
3584 | 3599 | ||
3585 | err = test_f5(tfm_cmac); | 3600 | err = test_f5(tfm_cmac); |
3586 | if (err) { | 3601 | if (err) { |
3587 | BT_ERR("smp_f5 test failed"); | 3602 | BT_ERR("smp_f5 test failed"); |
3588 | return err; | 3603 | goto done; |
3589 | } | 3604 | } |
3590 | 3605 | ||
3591 | err = test_f6(tfm_cmac); | 3606 | err = test_f6(tfm_cmac); |
3592 | if (err) { | 3607 | if (err) { |
3593 | BT_ERR("smp_f6 test failed"); | 3608 | BT_ERR("smp_f6 test failed"); |
3594 | return err; | 3609 | goto done; |
3595 | } | 3610 | } |
3596 | 3611 | ||
3597 | err = test_g2(tfm_cmac); | 3612 | err = test_g2(tfm_cmac); |
3598 | if (err) { | 3613 | if (err) { |
3599 | BT_ERR("smp_g2 test failed"); | 3614 | BT_ERR("smp_g2 test failed"); |
3600 | return err; | 3615 | goto done; |
3601 | } | 3616 | } |
3602 | 3617 | ||
3603 | err = test_h6(tfm_cmac); | 3618 | err = test_h6(tfm_cmac); |
3604 | if (err) { | 3619 | if (err) { |
3605 | BT_ERR("smp_h6 test failed"); | 3620 | BT_ERR("smp_h6 test failed"); |
3606 | return err; | 3621 | goto done; |
3607 | } | 3622 | } |
3608 | 3623 | ||
3609 | rettime = ktime_get(); | 3624 | rettime = ktime_get(); |
@@ -3612,7 +3627,17 @@ static int __init run_selftests(struct crypto_blkcipher *tfm_aes, | |||
3612 | 3627 | ||
3613 | BT_INFO("SMP test passed in %llu usecs", duration); | 3628 | BT_INFO("SMP test passed in %llu usecs", duration); |
3614 | 3629 | ||
3615 | return 0; | 3630 | done: |
3631 | if (!err) | ||
3632 | snprintf(test_smp_buffer, sizeof(test_smp_buffer), | ||
3633 | "PASS (%llu usecs)\n", duration); | ||
3634 | else | ||
3635 | snprintf(test_smp_buffer, sizeof(test_smp_buffer), "FAIL\n"); | ||
3636 | |||
3637 | debugfs_create_file("selftest_smp", 0444, bt_debugfs, NULL, | ||
3638 | &test_smp_fops); | ||
3639 | |||
3640 | return err; | ||
3616 | } | 3641 | } |
3617 | 3642 | ||
3618 | int __init bt_selftest_smp(void) | 3643 | int __init bt_selftest_smp(void) |
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 3304a5442331..e97572b5d2cc 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c | |||
@@ -35,7 +35,7 @@ static inline int should_deliver(const struct net_bridge_port *p, | |||
35 | p->state == BR_STATE_FORWARDING; | 35 | p->state == BR_STATE_FORWARDING; |
36 | } | 36 | } |
37 | 37 | ||
38 | int br_dev_queue_push_xmit(struct sk_buff *skb) | 38 | int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb) |
39 | { | 39 | { |
40 | if (!is_skb_forwardable(skb->dev, skb)) { | 40 | if (!is_skb_forwardable(skb->dev, skb)) { |
41 | kfree_skb(skb); | 41 | kfree_skb(skb); |
@@ -49,9 +49,10 @@ int br_dev_queue_push_xmit(struct sk_buff *skb) | |||
49 | } | 49 | } |
50 | EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); | 50 | EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); |
51 | 51 | ||
52 | int br_forward_finish(struct sk_buff *skb) | 52 | int br_forward_finish(struct sock *sk, struct sk_buff *skb) |
53 | { | 53 | { |
54 | return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev, | 54 | return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, sk, skb, |
55 | NULL, skb->dev, | ||
55 | br_dev_queue_push_xmit); | 56 | br_dev_queue_push_xmit); |
56 | 57 | ||
57 | } | 58 | } |
@@ -75,7 +76,8 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) | |||
75 | return; | 76 | return; |
76 | } | 77 | } |
77 | 78 | ||
78 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, | 79 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb, |
80 | NULL, skb->dev, | ||
79 | br_forward_finish); | 81 | br_forward_finish); |
80 | } | 82 | } |
81 | 83 | ||
@@ -96,7 +98,8 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb) | |||
96 | skb->dev = to->dev; | 98 | skb->dev = to->dev; |
97 | skb_forward_csum(skb); | 99 | skb_forward_csum(skb); |
98 | 100 | ||
99 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev, | 101 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, NULL, skb, |
102 | indev, skb->dev, | ||
100 | br_forward_finish); | 103 | br_forward_finish); |
101 | } | 104 | } |
102 | 105 | ||
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 052c5ebbc947..f921a5dce22d 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -55,8 +55,9 @@ static int br_pass_frame_up(struct sk_buff *skb) | |||
55 | if (!skb) | 55 | if (!skb) |
56 | return NET_RX_DROP; | 56 | return NET_RX_DROP; |
57 | 57 | ||
58 | return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, | 58 | return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, NULL, skb, |
59 | netif_receive_skb); | 59 | indev, NULL, |
60 | netif_receive_skb_sk); | ||
60 | } | 61 | } |
61 | 62 | ||
62 | static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br, | 63 | static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br, |
@@ -119,7 +120,7 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br, | |||
119 | } | 120 | } |
120 | 121 | ||
121 | /* note: already called with rcu_read_lock */ | 122 | /* note: already called with rcu_read_lock */ |
122 | int br_handle_frame_finish(struct sk_buff *skb) | 123 | int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb) |
123 | { | 124 | { |
124 | const unsigned char *dest = eth_hdr(skb)->h_dest; | 125 | const unsigned char *dest = eth_hdr(skb)->h_dest; |
125 | struct net_bridge_port *p = br_port_get_rcu(skb->dev); | 126 | struct net_bridge_port *p = br_port_get_rcu(skb->dev); |
@@ -207,7 +208,7 @@ drop: | |||
207 | EXPORT_SYMBOL_GPL(br_handle_frame_finish); | 208 | EXPORT_SYMBOL_GPL(br_handle_frame_finish); |
208 | 209 | ||
209 | /* note: already called with rcu_read_lock */ | 210 | /* note: already called with rcu_read_lock */ |
210 | static int br_handle_local_finish(struct sk_buff *skb) | 211 | static int br_handle_local_finish(struct sock *sk, struct sk_buff *skb) |
211 | { | 212 | { |
212 | struct net_bridge_port *p = br_port_get_rcu(skb->dev); | 213 | struct net_bridge_port *p = br_port_get_rcu(skb->dev); |
213 | u16 vid = 0; | 214 | u16 vid = 0; |
@@ -277,8 +278,8 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb) | |||
277 | } | 278 | } |
278 | 279 | ||
279 | /* Deliver packet to local host only */ | 280 | /* Deliver packet to local host only */ |
280 | if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, | 281 | if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, NULL, skb, |
281 | NULL, br_handle_local_finish)) { | 282 | skb->dev, NULL, br_handle_local_finish)) { |
282 | return RX_HANDLER_CONSUMED; /* consumed by filter */ | 283 | return RX_HANDLER_CONSUMED; /* consumed by filter */ |
283 | } else { | 284 | } else { |
284 | *pskb = skb; | 285 | *pskb = skb; |
@@ -302,7 +303,8 @@ forward: | |||
302 | if (ether_addr_equal(p->br->dev->dev_addr, dest)) | 303 | if (ether_addr_equal(p->br->dev->dev_addr, dest)) |
303 | skb->pkt_type = PACKET_HOST; | 304 | skb->pkt_type = PACKET_HOST; |
304 | 305 | ||
305 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, | 306 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, NULL, skb, |
307 | skb->dev, NULL, | ||
306 | br_handle_frame_finish); | 308 | br_handle_frame_finish); |
307 | break; | 309 | break; |
308 | default: | 310 | default: |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index c465876c7861..4b6722f8f179 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -814,7 +814,8 @@ static void __br_multicast_send_query(struct net_bridge *br, | |||
814 | 814 | ||
815 | if (port) { | 815 | if (port) { |
816 | skb->dev = port->dev; | 816 | skb->dev = port->dev; |
817 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, | 817 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb, |
818 | NULL, skb->dev, | ||
818 | br_dev_queue_push_xmit); | 819 | br_dev_queue_push_xmit); |
819 | } else { | 820 | } else { |
820 | br_multicast_select_own_querier(br, ip, skb); | 821 | br_multicast_select_own_querier(br, ip, skb); |
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index e8ac7432acb6..ab55e2472beb 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c | |||
@@ -277,7 +277,7 @@ static void nf_bridge_update_protocol(struct sk_buff *skb) | |||
277 | /* PF_BRIDGE/PRE_ROUTING *********************************************/ | 277 | /* PF_BRIDGE/PRE_ROUTING *********************************************/ |
278 | /* Undo the changes made for ip6tables PREROUTING and continue the | 278 | /* Undo the changes made for ip6tables PREROUTING and continue the |
279 | * bridge PRE_ROUTING hook. */ | 279 | * bridge PRE_ROUTING hook. */ |
280 | static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb) | 280 | static int br_nf_pre_routing_finish_ipv6(struct sock *sk, struct sk_buff *skb) |
281 | { | 281 | { |
282 | struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); | 282 | struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); |
283 | struct rtable *rt; | 283 | struct rtable *rt; |
@@ -298,7 +298,8 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb) | |||
298 | skb->dev = nf_bridge->physindev; | 298 | skb->dev = nf_bridge->physindev; |
299 | nf_bridge_update_protocol(skb); | 299 | nf_bridge_update_protocol(skb); |
300 | nf_bridge_push_encap_header(skb); | 300 | nf_bridge_push_encap_header(skb); |
301 | NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, | 301 | NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, sk, skb, |
302 | skb->dev, NULL, | ||
302 | br_handle_frame_finish, 1); | 303 | br_handle_frame_finish, 1); |
303 | 304 | ||
304 | return 0; | 305 | return 0; |
@@ -309,7 +310,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb) | |||
309 | * don't, we use the neighbour framework to find out. In both cases, we make | 310 | * don't, we use the neighbour framework to find out. In both cases, we make |
310 | * sure that br_handle_frame_finish() is called afterwards. | 311 | * sure that br_handle_frame_finish() is called afterwards. |
311 | */ | 312 | */ |
312 | static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) | 313 | static int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb) |
313 | { | 314 | { |
314 | struct neighbour *neigh; | 315 | struct neighbour *neigh; |
315 | struct dst_entry *dst; | 316 | struct dst_entry *dst; |
@@ -326,7 +327,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) | |||
326 | if (neigh->hh.hh_len) { | 327 | if (neigh->hh.hh_len) { |
327 | neigh_hh_bridge(&neigh->hh, skb); | 328 | neigh_hh_bridge(&neigh->hh, skb); |
328 | skb->dev = nf_bridge->physindev; | 329 | skb->dev = nf_bridge->physindev; |
329 | ret = br_handle_frame_finish(skb); | 330 | ret = br_handle_frame_finish(sk, skb); |
330 | } else { | 331 | } else { |
331 | /* the neighbour function below overwrites the complete | 332 | /* the neighbour function below overwrites the complete |
332 | * MAC header, so we save the Ethernet source address and | 333 | * MAC header, so we save the Ethernet source address and |
@@ -403,7 +404,7 @@ static bool dnat_took_place(const struct sk_buff *skb) | |||
403 | * device, we proceed as if ip_route_input() succeeded. If it differs from the | 404 | * device, we proceed as if ip_route_input() succeeded. If it differs from the |
404 | * logical bridge port or if ip_route_output_key() fails we drop the packet. | 405 | * logical bridge port or if ip_route_output_key() fails we drop the packet. |
405 | */ | 406 | */ |
406 | static int br_nf_pre_routing_finish(struct sk_buff *skb) | 407 | static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb) |
407 | { | 408 | { |
408 | struct net_device *dev = skb->dev; | 409 | struct net_device *dev = skb->dev; |
409 | struct iphdr *iph = ip_hdr(skb); | 410 | struct iphdr *iph = ip_hdr(skb); |
@@ -456,7 +457,7 @@ bridged_dnat: | |||
456 | nf_bridge_push_encap_header(skb); | 457 | nf_bridge_push_encap_header(skb); |
457 | NF_HOOK_THRESH(NFPROTO_BRIDGE, | 458 | NF_HOOK_THRESH(NFPROTO_BRIDGE, |
458 | NF_BR_PRE_ROUTING, | 459 | NF_BR_PRE_ROUTING, |
459 | skb, skb->dev, NULL, | 460 | sk, skb, skb->dev, NULL, |
460 | br_nf_pre_routing_finish_bridge, | 461 | br_nf_pre_routing_finish_bridge, |
461 | 1); | 462 | 1); |
462 | return 0; | 463 | return 0; |
@@ -476,7 +477,8 @@ bridged_dnat: | |||
476 | skb->dev = nf_bridge->physindev; | 477 | skb->dev = nf_bridge->physindev; |
477 | nf_bridge_update_protocol(skb); | 478 | nf_bridge_update_protocol(skb); |
478 | nf_bridge_push_encap_header(skb); | 479 | nf_bridge_push_encap_header(skb); |
479 | NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, | 480 | NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, sk, skb, |
481 | skb->dev, NULL, | ||
480 | br_handle_frame_finish, 1); | 482 | br_handle_frame_finish, 1); |
481 | 483 | ||
482 | return 0; | 484 | return 0; |
@@ -579,9 +581,7 @@ bad: | |||
579 | * to ip6tables, which doesn't support NAT, so things are fairly simple. */ | 581 | * to ip6tables, which doesn't support NAT, so things are fairly simple. */ |
580 | static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, | 582 | static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, |
581 | struct sk_buff *skb, | 583 | struct sk_buff *skb, |
582 | const struct net_device *in, | 584 | const struct nf_hook_state *state) |
583 | const struct net_device *out, | ||
584 | int (*okfn)(struct sk_buff *)) | ||
585 | { | 585 | { |
586 | const struct ipv6hdr *hdr; | 586 | const struct ipv6hdr *hdr; |
587 | u32 pkt_len; | 587 | u32 pkt_len; |
@@ -615,7 +615,8 @@ static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, | |||
615 | return NF_DROP; | 615 | return NF_DROP; |
616 | 616 | ||
617 | skb->protocol = htons(ETH_P_IPV6); | 617 | skb->protocol = htons(ETH_P_IPV6); |
618 | NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, | 618 | NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->sk, skb, |
619 | skb->dev, NULL, | ||
619 | br_nf_pre_routing_finish_ipv6); | 620 | br_nf_pre_routing_finish_ipv6); |
620 | 621 | ||
621 | return NF_STOLEN; | 622 | return NF_STOLEN; |
@@ -629,9 +630,7 @@ static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, | |||
629 | * address to be able to detect DNAT afterwards. */ | 630 | * address to be able to detect DNAT afterwards. */ |
630 | static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops, | 631 | static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops, |
631 | struct sk_buff *skb, | 632 | struct sk_buff *skb, |
632 | const struct net_device *in, | 633 | const struct nf_hook_state *state) |
633 | const struct net_device *out, | ||
634 | int (*okfn)(struct sk_buff *)) | ||
635 | { | 634 | { |
636 | struct net_bridge_port *p; | 635 | struct net_bridge_port *p; |
637 | struct net_bridge *br; | 636 | struct net_bridge *br; |
@@ -640,7 +639,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops, | |||
640 | if (unlikely(!pskb_may_pull(skb, len))) | 639 | if (unlikely(!pskb_may_pull(skb, len))) |
641 | return NF_DROP; | 640 | return NF_DROP; |
642 | 641 | ||
643 | p = br_port_get_rcu(in); | 642 | p = br_port_get_rcu(state->in); |
644 | if (p == NULL) | 643 | if (p == NULL) |
645 | return NF_DROP; | 644 | return NF_DROP; |
646 | br = p->br; | 645 | br = p->br; |
@@ -650,7 +649,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops, | |||
650 | return NF_ACCEPT; | 649 | return NF_ACCEPT; |
651 | 650 | ||
652 | nf_bridge_pull_encap_header_rcsum(skb); | 651 | nf_bridge_pull_encap_header_rcsum(skb); |
653 | return br_nf_pre_routing_ipv6(ops, skb, in, out, okfn); | 652 | return br_nf_pre_routing_ipv6(ops, skb, state); |
654 | } | 653 | } |
655 | 654 | ||
656 | if (!brnf_call_iptables && !br->nf_call_iptables) | 655 | if (!brnf_call_iptables && !br->nf_call_iptables) |
@@ -672,7 +671,8 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops, | |||
672 | 671 | ||
673 | skb->protocol = htons(ETH_P_IP); | 672 | skb->protocol = htons(ETH_P_IP); |
674 | 673 | ||
675 | NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, | 674 | NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->sk, skb, |
675 | skb->dev, NULL, | ||
676 | br_nf_pre_routing_finish); | 676 | br_nf_pre_routing_finish); |
677 | 677 | ||
678 | return NF_STOLEN; | 678 | return NF_STOLEN; |
@@ -688,16 +688,14 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops, | |||
688 | * prevent this from happening. */ | 688 | * prevent this from happening. */ |
689 | static unsigned int br_nf_local_in(const struct nf_hook_ops *ops, | 689 | static unsigned int br_nf_local_in(const struct nf_hook_ops *ops, |
690 | struct sk_buff *skb, | 690 | struct sk_buff *skb, |
691 | const struct net_device *in, | 691 | const struct nf_hook_state *state) |
692 | const struct net_device *out, | ||
693 | int (*okfn)(struct sk_buff *)) | ||
694 | { | 692 | { |
695 | br_drop_fake_rtable(skb); | 693 | br_drop_fake_rtable(skb); |
696 | return NF_ACCEPT; | 694 | return NF_ACCEPT; |
697 | } | 695 | } |
698 | 696 | ||
699 | /* PF_BRIDGE/FORWARD *************************************************/ | 697 | /* PF_BRIDGE/FORWARD *************************************************/ |
700 | static int br_nf_forward_finish(struct sk_buff *skb) | 698 | static int br_nf_forward_finish(struct sock *sk, struct sk_buff *skb) |
701 | { | 699 | { |
702 | struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); | 700 | struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); |
703 | struct net_device *in; | 701 | struct net_device *in; |
@@ -721,8 +719,8 @@ static int br_nf_forward_finish(struct sk_buff *skb) | |||
721 | } | 719 | } |
722 | nf_bridge_push_encap_header(skb); | 720 | nf_bridge_push_encap_header(skb); |
723 | 721 | ||
724 | NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in, | 722 | NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, sk, skb, |
725 | skb->dev, br_forward_finish, 1); | 723 | in, skb->dev, br_forward_finish, 1); |
726 | return 0; | 724 | return 0; |
727 | } | 725 | } |
728 | 726 | ||
@@ -734,9 +732,7 @@ static int br_nf_forward_finish(struct sk_buff *skb) | |||
734 | * bridge ports. */ | 732 | * bridge ports. */ |
735 | static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops, | 733 | static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops, |
736 | struct sk_buff *skb, | 734 | struct sk_buff *skb, |
737 | const struct net_device *in, | 735 | const struct nf_hook_state *state) |
738 | const struct net_device *out, | ||
739 | int (*okfn)(struct sk_buff *)) | ||
740 | { | 736 | { |
741 | struct nf_bridge_info *nf_bridge; | 737 | struct nf_bridge_info *nf_bridge; |
742 | struct net_device *parent; | 738 | struct net_device *parent; |
@@ -754,7 +750,7 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops, | |||
754 | if (!nf_bridge) | 750 | if (!nf_bridge) |
755 | return NF_DROP; | 751 | return NF_DROP; |
756 | 752 | ||
757 | parent = bridge_parent(out); | 753 | parent = bridge_parent(state->out); |
758 | if (!parent) | 754 | if (!parent) |
759 | return NF_DROP; | 755 | return NF_DROP; |
760 | 756 | ||
@@ -787,23 +783,22 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops, | |||
787 | else | 783 | else |
788 | skb->protocol = htons(ETH_P_IPV6); | 784 | skb->protocol = htons(ETH_P_IPV6); |
789 | 785 | ||
790 | NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, in), parent, | 786 | NF_HOOK(pf, NF_INET_FORWARD, NULL, skb, |
791 | br_nf_forward_finish); | 787 | brnf_get_logical_dev(skb, state->in), |
788 | parent, br_nf_forward_finish); | ||
792 | 789 | ||
793 | return NF_STOLEN; | 790 | return NF_STOLEN; |
794 | } | 791 | } |
795 | 792 | ||
796 | static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops, | 793 | static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops, |
797 | struct sk_buff *skb, | 794 | struct sk_buff *skb, |
798 | const struct net_device *in, | 795 | const struct nf_hook_state *state) |
799 | const struct net_device *out, | ||
800 | int (*okfn)(struct sk_buff *)) | ||
801 | { | 796 | { |
802 | struct net_bridge_port *p; | 797 | struct net_bridge_port *p; |
803 | struct net_bridge *br; | 798 | struct net_bridge *br; |
804 | struct net_device **d = (struct net_device **)(skb->cb); | 799 | struct net_device **d = (struct net_device **)(skb->cb); |
805 | 800 | ||
806 | p = br_port_get_rcu(out); | 801 | p = br_port_get_rcu(state->out); |
807 | if (p == NULL) | 802 | if (p == NULL) |
808 | return NF_ACCEPT; | 803 | return NF_ACCEPT; |
809 | br = p->br; | 804 | br = p->br; |
@@ -822,15 +817,15 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops, | |||
822 | nf_bridge_push_encap_header(skb); | 817 | nf_bridge_push_encap_header(skb); |
823 | return NF_ACCEPT; | 818 | return NF_ACCEPT; |
824 | } | 819 | } |
825 | *d = (struct net_device *)in; | 820 | *d = state->in; |
826 | NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in, | 821 | NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->sk, skb, |
827 | (struct net_device *)out, br_nf_forward_finish); | 822 | state->in, state->out, br_nf_forward_finish); |
828 | 823 | ||
829 | return NF_STOLEN; | 824 | return NF_STOLEN; |
830 | } | 825 | } |
831 | 826 | ||
832 | #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) | 827 | #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) |
833 | static int br_nf_push_frag_xmit(struct sk_buff *skb) | 828 | static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb) |
834 | { | 829 | { |
835 | struct brnf_frag_data *data; | 830 | struct brnf_frag_data *data; |
836 | int err; | 831 | int err; |
@@ -846,17 +841,17 @@ static int br_nf_push_frag_xmit(struct sk_buff *skb) | |||
846 | skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size); | 841 | skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size); |
847 | __skb_push(skb, data->encap_size); | 842 | __skb_push(skb, data->encap_size); |
848 | 843 | ||
849 | return br_dev_queue_push_xmit(skb); | 844 | return br_dev_queue_push_xmit(sk, skb); |
850 | } | 845 | } |
851 | 846 | ||
852 | static int br_nf_dev_queue_xmit(struct sk_buff *skb) | 847 | static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb) |
853 | { | 848 | { |
854 | int ret; | 849 | int ret; |
855 | int frag_max_size; | 850 | int frag_max_size; |
856 | unsigned int mtu_reserved; | 851 | unsigned int mtu_reserved; |
857 | 852 | ||
858 | if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP)) | 853 | if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP)) |
859 | return br_dev_queue_push_xmit(skb); | 854 | return br_dev_queue_push_xmit(sk, skb); |
860 | 855 | ||
861 | mtu_reserved = nf_bridge_mtu_reduction(skb); | 856 | mtu_reserved = nf_bridge_mtu_reduction(skb); |
862 | /* This is wrong! We should preserve the original fragment | 857 | /* This is wrong! We should preserve the original fragment |
@@ -880,26 +875,24 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb) | |||
880 | skb_copy_from_linear_data_offset(skb, -data->size, data->mac, | 875 | skb_copy_from_linear_data_offset(skb, -data->size, data->mac, |
881 | data->size); | 876 | data->size); |
882 | 877 | ||
883 | ret = ip_fragment(skb, br_nf_push_frag_xmit); | 878 | ret = ip_fragment(sk, skb, br_nf_push_frag_xmit); |
884 | } else { | 879 | } else { |
885 | ret = br_dev_queue_push_xmit(skb); | 880 | ret = br_dev_queue_push_xmit(sk, skb); |
886 | } | 881 | } |
887 | 882 | ||
888 | return ret; | 883 | return ret; |
889 | } | 884 | } |
890 | #else | 885 | #else |
891 | static int br_nf_dev_queue_xmit(struct sk_buff *skb) | 886 | static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb) |
892 | { | 887 | { |
893 | return br_dev_queue_push_xmit(skb); | 888 | return br_dev_queue_push_xmit(sk, skb); |
894 | } | 889 | } |
895 | #endif | 890 | #endif |
896 | 891 | ||
897 | /* PF_BRIDGE/POST_ROUTING ********************************************/ | 892 | /* PF_BRIDGE/POST_ROUTING ********************************************/ |
898 | static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops, | 893 | static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops, |
899 | struct sk_buff *skb, | 894 | struct sk_buff *skb, |
900 | const struct net_device *in, | 895 | const struct nf_hook_state *state) |
901 | const struct net_device *out, | ||
902 | int (*okfn)(struct sk_buff *)) | ||
903 | { | 896 | { |
904 | struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); | 897 | struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); |
905 | struct net_device *realoutdev = bridge_parent(skb->dev); | 898 | struct net_device *realoutdev = bridge_parent(skb->dev); |
@@ -936,7 +929,8 @@ static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops, | |||
936 | else | 929 | else |
937 | skb->protocol = htons(ETH_P_IPV6); | 930 | skb->protocol = htons(ETH_P_IPV6); |
938 | 931 | ||
939 | NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev, | 932 | NF_HOOK(pf, NF_INET_POST_ROUTING, state->sk, skb, |
933 | NULL, realoutdev, | ||
940 | br_nf_dev_queue_xmit); | 934 | br_nf_dev_queue_xmit); |
941 | 935 | ||
942 | return NF_STOLEN; | 936 | return NF_STOLEN; |
@@ -947,9 +941,7 @@ static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops, | |||
947 | * for the second time. */ | 941 | * for the second time. */ |
948 | static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops, | 942 | static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops, |
949 | struct sk_buff *skb, | 943 | struct sk_buff *skb, |
950 | const struct net_device *in, | 944 | const struct nf_hook_state *state) |
951 | const struct net_device *out, | ||
952 | int (*okfn)(struct sk_buff *)) | ||
953 | { | 945 | { |
954 | if (skb->nf_bridge && | 946 | if (skb->nf_bridge && |
955 | !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) { | 947 | !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) { |
@@ -981,7 +973,7 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb) | |||
981 | nf_bridge->neigh_header, | 973 | nf_bridge->neigh_header, |
982 | ETH_HLEN - ETH_ALEN); | 974 | ETH_HLEN - ETH_ALEN); |
983 | skb->dev = nf_bridge->physindev; | 975 | skb->dev = nf_bridge->physindev; |
984 | br_handle_frame_finish(skb); | 976 | br_handle_frame_finish(NULL, skb); |
985 | } | 977 | } |
986 | 978 | ||
987 | static int br_nf_dev_xmit(struct sk_buff *skb) | 979 | static int br_nf_dev_xmit(struct sk_buff *skb) |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index e1115a224a95..0e4ddb81610d 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -305,8 +305,8 @@ static int br_fill_ifinfo(struct sk_buff *skb, | |||
305 | nla_put_u8(skb, IFLA_OPERSTATE, operstate) || | 305 | nla_put_u8(skb, IFLA_OPERSTATE, operstate) || |
306 | (dev->addr_len && | 306 | (dev->addr_len && |
307 | nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || | 307 | nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || |
308 | (dev->ifindex != dev->iflink && | 308 | (dev->ifindex != dev_get_iflink(dev) && |
309 | nla_put_u32(skb, IFLA_LINK, dev->iflink))) | 309 | nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) |
310 | goto nla_put_failure; | 310 | goto nla_put_failure; |
311 | 311 | ||
312 | if (event == RTM_NEWLINK && port) { | 312 | if (event == RTM_NEWLINK && port) { |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index b46fa0c5b8ec..6ca0251cb478 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -410,10 +410,10 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, | |||
410 | 410 | ||
411 | /* br_forward.c */ | 411 | /* br_forward.c */ |
412 | void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb); | 412 | void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb); |
413 | int br_dev_queue_push_xmit(struct sk_buff *skb); | 413 | int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb); |
414 | void br_forward(const struct net_bridge_port *to, | 414 | void br_forward(const struct net_bridge_port *to, |
415 | struct sk_buff *skb, struct sk_buff *skb0); | 415 | struct sk_buff *skb, struct sk_buff *skb0); |
416 | int br_forward_finish(struct sk_buff *skb); | 416 | int br_forward_finish(struct sock *sk, struct sk_buff *skb); |
417 | void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast); | 417 | void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast); |
418 | void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, | 418 | void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, |
419 | struct sk_buff *skb2, bool unicast); | 419 | struct sk_buff *skb2, bool unicast); |
@@ -431,7 +431,7 @@ void br_port_flags_change(struct net_bridge_port *port, unsigned long mask); | |||
431 | void br_manage_promisc(struct net_bridge *br); | 431 | void br_manage_promisc(struct net_bridge *br); |
432 | 432 | ||
433 | /* br_input.c */ | 433 | /* br_input.c */ |
434 | int br_handle_frame_finish(struct sk_buff *skb); | 434 | int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb); |
435 | rx_handler_result_t br_handle_frame(struct sk_buff **pskb); | 435 | rx_handler_result_t br_handle_frame(struct sk_buff **pskb); |
436 | 436 | ||
437 | static inline bool br_rx_handler_check_rcu(const struct net_device *dev) | 437 | static inline bool br_rx_handler_check_rcu(const struct net_device *dev) |
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index bdb459d21ad8..534fc4cd263e 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c | |||
@@ -54,8 +54,9 @@ static void br_send_bpdu(struct net_bridge_port *p, | |||
54 | 54 | ||
55 | skb_reset_mac_header(skb); | 55 | skb_reset_mac_header(skb); |
56 | 56 | ||
57 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, | 57 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb, |
58 | dev_queue_xmit); | 58 | NULL, skb->dev, |
59 | dev_queue_xmit_sk); | ||
59 | } | 60 | } |
60 | 61 | ||
61 | static inline void br_set_ticks(unsigned char *dest, int j) | 62 | static inline void br_set_ticks(unsigned char *dest, int j) |
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c index ce205aabf9c5..8a3f63b2e807 100644 --- a/net/bridge/netfilter/ebtable_filter.c +++ b/net/bridge/netfilter/ebtable_filter.c | |||
@@ -58,20 +58,18 @@ static const struct ebt_table frame_filter = { | |||
58 | 58 | ||
59 | static unsigned int | 59 | static unsigned int |
60 | ebt_in_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, | 60 | ebt_in_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, |
61 | const struct net_device *in, const struct net_device *out, | 61 | const struct nf_hook_state *state) |
62 | int (*okfn)(struct sk_buff *)) | ||
63 | { | 62 | { |
64 | return ebt_do_table(ops->hooknum, skb, in, out, | 63 | return ebt_do_table(ops->hooknum, skb, state->in, state->out, |
65 | dev_net(in)->xt.frame_filter); | 64 | dev_net(state->in)->xt.frame_filter); |
66 | } | 65 | } |
67 | 66 | ||
68 | static unsigned int | 67 | static unsigned int |
69 | ebt_out_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, | 68 | ebt_out_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, |
70 | const struct net_device *in, const struct net_device *out, | 69 | const struct nf_hook_state *state) |
71 | int (*okfn)(struct sk_buff *)) | ||
72 | { | 70 | { |
73 | return ebt_do_table(ops->hooknum, skb, in, out, | 71 | return ebt_do_table(ops->hooknum, skb, state->in, state->out, |
74 | dev_net(out)->xt.frame_filter); | 72 | dev_net(state->out)->xt.frame_filter); |
75 | } | 73 | } |
76 | 74 | ||
77 | static struct nf_hook_ops ebt_ops_filter[] __read_mostly = { | 75 | static struct nf_hook_ops ebt_ops_filter[] __read_mostly = { |
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c index a0ac2984fb6c..c5ef5b1ab678 100644 --- a/net/bridge/netfilter/ebtable_nat.c +++ b/net/bridge/netfilter/ebtable_nat.c | |||
@@ -58,20 +58,18 @@ static struct ebt_table frame_nat = { | |||
58 | 58 | ||
59 | static unsigned int | 59 | static unsigned int |
60 | ebt_nat_in(const struct nf_hook_ops *ops, struct sk_buff *skb, | 60 | ebt_nat_in(const struct nf_hook_ops *ops, struct sk_buff *skb, |
61 | const struct net_device *in, const struct net_device *out, | 61 | const struct nf_hook_state *state) |
62 | int (*okfn)(struct sk_buff *)) | ||
63 | { | 62 | { |
64 | return ebt_do_table(ops->hooknum, skb, in, out, | 63 | return ebt_do_table(ops->hooknum, skb, state->in, state->out, |
65 | dev_net(in)->xt.frame_nat); | 64 | dev_net(state->in)->xt.frame_nat); |
66 | } | 65 | } |
67 | 66 | ||
68 | static unsigned int | 67 | static unsigned int |
69 | ebt_nat_out(const struct nf_hook_ops *ops, struct sk_buff *skb, | 68 | ebt_nat_out(const struct nf_hook_ops *ops, struct sk_buff *skb, |
70 | const struct net_device *in, const struct net_device *out, | 69 | const struct nf_hook_state *state) |
71 | int (*okfn)(struct sk_buff *)) | ||
72 | { | 70 | { |
73 | return ebt_do_table(ops->hooknum, skb, in, out, | 71 | return ebt_do_table(ops->hooknum, skb, state->in, state->out, |
74 | dev_net(out)->xt.frame_nat); | 72 | dev_net(state->out)->xt.frame_nat); |
75 | } | 73 | } |
76 | 74 | ||
77 | static struct nf_hook_ops ebt_ops_nat[] __read_mostly = { | 75 | static struct nf_hook_ops ebt_ops_nat[] __read_mostly = { |
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c index 19473a9371b8..a343e62442b1 100644 --- a/net/bridge/netfilter/nf_tables_bridge.c +++ b/net/bridge/netfilter/nf_tables_bridge.c | |||
@@ -67,47 +67,43 @@ EXPORT_SYMBOL_GPL(nft_bridge_ip6hdr_validate); | |||
67 | static inline void nft_bridge_set_pktinfo_ipv4(struct nft_pktinfo *pkt, | 67 | static inline void nft_bridge_set_pktinfo_ipv4(struct nft_pktinfo *pkt, |
68 | const struct nf_hook_ops *ops, | 68 | const struct nf_hook_ops *ops, |
69 | struct sk_buff *skb, | 69 | struct sk_buff *skb, |
70 | const struct net_device *in, | 70 | const struct nf_hook_state *state) |
71 | const struct net_device *out) | ||
72 | { | 71 | { |
73 | if (nft_bridge_iphdr_validate(skb)) | 72 | if (nft_bridge_iphdr_validate(skb)) |
74 | nft_set_pktinfo_ipv4(pkt, ops, skb, in, out); | 73 | nft_set_pktinfo_ipv4(pkt, ops, skb, state); |
75 | else | 74 | else |
76 | nft_set_pktinfo(pkt, ops, skb, in, out); | 75 | nft_set_pktinfo(pkt, ops, skb, state); |
77 | } | 76 | } |
78 | 77 | ||
79 | static inline void nft_bridge_set_pktinfo_ipv6(struct nft_pktinfo *pkt, | 78 | static inline void nft_bridge_set_pktinfo_ipv6(struct nft_pktinfo *pkt, |
80 | const struct nf_hook_ops *ops, | 79 | const struct nf_hook_ops *ops, |
81 | struct sk_buff *skb, | 80 | struct sk_buff *skb, |
82 | const struct net_device *in, | 81 | const struct nf_hook_state *state) |
83 | const struct net_device *out) | ||
84 | { | 82 | { |
85 | #if IS_ENABLED(CONFIG_IPV6) | 83 | #if IS_ENABLED(CONFIG_IPV6) |
86 | if (nft_bridge_ip6hdr_validate(skb) && | 84 | if (nft_bridge_ip6hdr_validate(skb) && |
87 | nft_set_pktinfo_ipv6(pkt, ops, skb, in, out) == 0) | 85 | nft_set_pktinfo_ipv6(pkt, ops, skb, state) == 0) |
88 | return; | 86 | return; |
89 | #endif | 87 | #endif |
90 | nft_set_pktinfo(pkt, ops, skb, in, out); | 88 | nft_set_pktinfo(pkt, ops, skb, state); |
91 | } | 89 | } |
92 | 90 | ||
93 | static unsigned int | 91 | static unsigned int |
94 | nft_do_chain_bridge(const struct nf_hook_ops *ops, | 92 | nft_do_chain_bridge(const struct nf_hook_ops *ops, |
95 | struct sk_buff *skb, | 93 | struct sk_buff *skb, |
96 | const struct net_device *in, | 94 | const struct nf_hook_state *state) |
97 | const struct net_device *out, | ||
98 | int (*okfn)(struct sk_buff *)) | ||
99 | { | 95 | { |
100 | struct nft_pktinfo pkt; | 96 | struct nft_pktinfo pkt; |
101 | 97 | ||
102 | switch (eth_hdr(skb)->h_proto) { | 98 | switch (eth_hdr(skb)->h_proto) { |
103 | case htons(ETH_P_IP): | 99 | case htons(ETH_P_IP): |
104 | nft_bridge_set_pktinfo_ipv4(&pkt, ops, skb, in, out); | 100 | nft_bridge_set_pktinfo_ipv4(&pkt, ops, skb, state); |
105 | break; | 101 | break; |
106 | case htons(ETH_P_IPV6): | 102 | case htons(ETH_P_IPV6): |
107 | nft_bridge_set_pktinfo_ipv6(&pkt, ops, skb, in, out); | 103 | nft_bridge_set_pktinfo_ipv6(&pkt, ops, skb, state); |
108 | break; | 104 | break; |
109 | default: | 105 | default: |
110 | nft_set_pktinfo(&pkt, ops, skb, in, out); | 106 | nft_set_pktinfo(&pkt, ops, skb, state); |
111 | break; | 107 | break; |
112 | } | 108 | } |
113 | 109 | ||
diff --git a/net/can/raw.c b/net/can/raw.c index 63ffdb0f3a23..31b9748cbb4e 100644 --- a/net/can/raw.c +++ b/net/can/raw.c | |||
@@ -74,6 +74,12 @@ MODULE_ALIAS("can-proto-1"); | |||
74 | * storing the single filter in dfilter, to avoid using dynamic memory. | 74 | * storing the single filter in dfilter, to avoid using dynamic memory. |
75 | */ | 75 | */ |
76 | 76 | ||
77 | struct uniqframe { | ||
78 | ktime_t tstamp; | ||
79 | const struct sk_buff *skb; | ||
80 | unsigned int join_rx_count; | ||
81 | }; | ||
82 | |||
77 | struct raw_sock { | 83 | struct raw_sock { |
78 | struct sock sk; | 84 | struct sock sk; |
79 | int bound; | 85 | int bound; |
@@ -82,10 +88,12 @@ struct raw_sock { | |||
82 | int loopback; | 88 | int loopback; |
83 | int recv_own_msgs; | 89 | int recv_own_msgs; |
84 | int fd_frames; | 90 | int fd_frames; |
91 | int join_filters; | ||
85 | int count; /* number of active filters */ | 92 | int count; /* number of active filters */ |
86 | struct can_filter dfilter; /* default/single filter */ | 93 | struct can_filter dfilter; /* default/single filter */ |
87 | struct can_filter *filter; /* pointer to filter(s) */ | 94 | struct can_filter *filter; /* pointer to filter(s) */ |
88 | can_err_mask_t err_mask; | 95 | can_err_mask_t err_mask; |
96 | struct uniqframe __percpu *uniq; | ||
89 | }; | 97 | }; |
90 | 98 | ||
91 | /* | 99 | /* |
@@ -123,6 +131,26 @@ static void raw_rcv(struct sk_buff *oskb, void *data) | |||
123 | if (!ro->fd_frames && oskb->len != CAN_MTU) | 131 | if (!ro->fd_frames && oskb->len != CAN_MTU) |
124 | return; | 132 | return; |
125 | 133 | ||
134 | /* eliminate multiple filter matches for the same skb */ | ||
135 | if (this_cpu_ptr(ro->uniq)->skb == oskb && | ||
136 | ktime_equal(this_cpu_ptr(ro->uniq)->tstamp, oskb->tstamp)) { | ||
137 | if (ro->join_filters) { | ||
138 | this_cpu_inc(ro->uniq->join_rx_count); | ||
139 | /* drop frame until all enabled filters matched */ | ||
140 | if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count) | ||
141 | return; | ||
142 | } else { | ||
143 | return; | ||
144 | } | ||
145 | } else { | ||
146 | this_cpu_ptr(ro->uniq)->skb = oskb; | ||
147 | this_cpu_ptr(ro->uniq)->tstamp = oskb->tstamp; | ||
148 | this_cpu_ptr(ro->uniq)->join_rx_count = 1; | ||
149 | /* drop first frame to check all enabled filters? */ | ||
150 | if (ro->join_filters && ro->count > 1) | ||
151 | return; | ||
152 | } | ||
153 | |||
126 | /* clone the given skb to be able to enqueue it into the rcv queue */ | 154 | /* clone the given skb to be able to enqueue it into the rcv queue */ |
127 | skb = skb_clone(oskb, GFP_ATOMIC); | 155 | skb = skb_clone(oskb, GFP_ATOMIC); |
128 | if (!skb) | 156 | if (!skb) |
@@ -296,6 +324,12 @@ static int raw_init(struct sock *sk) | |||
296 | ro->loopback = 1; | 324 | ro->loopback = 1; |
297 | ro->recv_own_msgs = 0; | 325 | ro->recv_own_msgs = 0; |
298 | ro->fd_frames = 0; | 326 | ro->fd_frames = 0; |
327 | ro->join_filters = 0; | ||
328 | |||
329 | /* alloc_percpu provides zero'ed memory */ | ||
330 | ro->uniq = alloc_percpu(struct uniqframe); | ||
331 | if (unlikely(!ro->uniq)) | ||
332 | return -ENOMEM; | ||
299 | 333 | ||
300 | /* set notifier */ | 334 | /* set notifier */ |
301 | ro->notifier.notifier_call = raw_notifier; | 335 | ro->notifier.notifier_call = raw_notifier; |
@@ -339,6 +373,7 @@ static int raw_release(struct socket *sock) | |||
339 | ro->ifindex = 0; | 373 | ro->ifindex = 0; |
340 | ro->bound = 0; | 374 | ro->bound = 0; |
341 | ro->count = 0; | 375 | ro->count = 0; |
376 | free_percpu(ro->uniq); | ||
342 | 377 | ||
343 | sock_orphan(sk); | 378 | sock_orphan(sk); |
344 | sock->sk = NULL; | 379 | sock->sk = NULL; |
@@ -583,6 +618,15 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, | |||
583 | 618 | ||
584 | break; | 619 | break; |
585 | 620 | ||
621 | case CAN_RAW_JOIN_FILTERS: | ||
622 | if (optlen != sizeof(ro->join_filters)) | ||
623 | return -EINVAL; | ||
624 | |||
625 | if (copy_from_user(&ro->join_filters, optval, optlen)) | ||
626 | return -EFAULT; | ||
627 | |||
628 | break; | ||
629 | |||
586 | default: | 630 | default: |
587 | return -ENOPROTOOPT; | 631 | return -ENOPROTOOPT; |
588 | } | 632 | } |
@@ -647,6 +691,12 @@ static int raw_getsockopt(struct socket *sock, int level, int optname, | |||
647 | val = &ro->fd_frames; | 691 | val = &ro->fd_frames; |
648 | break; | 692 | break; |
649 | 693 | ||
694 | case CAN_RAW_JOIN_FILTERS: | ||
695 | if (len > sizeof(int)) | ||
696 | len = sizeof(int); | ||
697 | val = &ro->join_filters; | ||
698 | break; | ||
699 | |||
650 | default: | 700 | default: |
651 | return -ENOPROTOOPT; | 701 | return -ENOPROTOOPT; |
652 | } | 702 | } |
diff --git a/net/core/dev.c b/net/core/dev.c index 65492b0354c0..b2775f06c710 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -660,6 +660,27 @@ __setup("netdev=", netdev_boot_setup); | |||
660 | *******************************************************************************/ | 660 | *******************************************************************************/ |
661 | 661 | ||
662 | /** | 662 | /** |
663 | * dev_get_iflink - get 'iflink' value of a interface | ||
664 | * @dev: targeted interface | ||
665 | * | ||
666 | * Indicates the ifindex the interface is linked to. | ||
667 | * Physical interfaces have the same 'ifindex' and 'iflink' values. | ||
668 | */ | ||
669 | |||
670 | int dev_get_iflink(const struct net_device *dev) | ||
671 | { | ||
672 | if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) | ||
673 | return dev->netdev_ops->ndo_get_iflink(dev); | ||
674 | |||
675 | /* If dev->rtnl_link_ops is set, it's a virtual interface. */ | ||
676 | if (dev->rtnl_link_ops) | ||
677 | return 0; | ||
678 | |||
679 | return dev->ifindex; | ||
680 | } | ||
681 | EXPORT_SYMBOL(dev_get_iflink); | ||
682 | |||
683 | /** | ||
663 | * __dev_get_by_name - find a device by its name | 684 | * __dev_get_by_name - find a device by its name |
664 | * @net: the applicable net namespace | 685 | * @net: the applicable net namespace |
665 | * @name: name to find | 686 | * @name: name to find |
@@ -2849,14 +2870,16 @@ static void skb_update_prio(struct sk_buff *skb) | |||
2849 | #define skb_update_prio(skb) | 2870 | #define skb_update_prio(skb) |
2850 | #endif | 2871 | #endif |
2851 | 2872 | ||
2852 | static DEFINE_PER_CPU(int, xmit_recursion); | 2873 | DEFINE_PER_CPU(int, xmit_recursion); |
2874 | EXPORT_SYMBOL(xmit_recursion); | ||
2875 | |||
2853 | #define RECURSION_LIMIT 10 | 2876 | #define RECURSION_LIMIT 10 |
2854 | 2877 | ||
2855 | /** | 2878 | /** |
2856 | * dev_loopback_xmit - loop back @skb | 2879 | * dev_loopback_xmit - loop back @skb |
2857 | * @skb: buffer to transmit | 2880 | * @skb: buffer to transmit |
2858 | */ | 2881 | */ |
2859 | int dev_loopback_xmit(struct sk_buff *skb) | 2882 | int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb) |
2860 | { | 2883 | { |
2861 | skb_reset_mac_header(skb); | 2884 | skb_reset_mac_header(skb); |
2862 | __skb_pull(skb, skb_network_offset(skb)); | 2885 | __skb_pull(skb, skb_network_offset(skb)); |
@@ -2994,11 +3017,11 @@ out: | |||
2994 | return rc; | 3017 | return rc; |
2995 | } | 3018 | } |
2996 | 3019 | ||
2997 | int dev_queue_xmit(struct sk_buff *skb) | 3020 | int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb) |
2998 | { | 3021 | { |
2999 | return __dev_queue_xmit(skb, NULL); | 3022 | return __dev_queue_xmit(skb, NULL); |
3000 | } | 3023 | } |
3001 | EXPORT_SYMBOL(dev_queue_xmit); | 3024 | EXPORT_SYMBOL(dev_queue_xmit_sk); |
3002 | 3025 | ||
3003 | int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv) | 3026 | int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv) |
3004 | { | 3027 | { |
@@ -3830,13 +3853,13 @@ static int netif_receive_skb_internal(struct sk_buff *skb) | |||
3830 | * NET_RX_SUCCESS: no congestion | 3853 | * NET_RX_SUCCESS: no congestion |
3831 | * NET_RX_DROP: packet was dropped | 3854 | * NET_RX_DROP: packet was dropped |
3832 | */ | 3855 | */ |
3833 | int netif_receive_skb(struct sk_buff *skb) | 3856 | int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb) |
3834 | { | 3857 | { |
3835 | trace_netif_receive_skb_entry(skb); | 3858 | trace_netif_receive_skb_entry(skb); |
3836 | 3859 | ||
3837 | return netif_receive_skb_internal(skb); | 3860 | return netif_receive_skb_internal(skb); |
3838 | } | 3861 | } |
3839 | EXPORT_SYMBOL(netif_receive_skb); | 3862 | EXPORT_SYMBOL(netif_receive_skb_sk); |
3840 | 3863 | ||
3841 | /* Network device is going away, flush any packets still pending | 3864 | /* Network device is going away, flush any packets still pending |
3842 | * Called with irqs disabled. | 3865 | * Called with irqs disabled. |
@@ -6314,8 +6337,6 @@ int register_netdevice(struct net_device *dev) | |||
6314 | spin_lock_init(&dev->addr_list_lock); | 6337 | spin_lock_init(&dev->addr_list_lock); |
6315 | netdev_set_addr_lockdep_class(dev); | 6338 | netdev_set_addr_lockdep_class(dev); |
6316 | 6339 | ||
6317 | dev->iflink = -1; | ||
6318 | |||
6319 | ret = dev_get_valid_name(net, dev, dev->name); | 6340 | ret = dev_get_valid_name(net, dev, dev->name); |
6320 | if (ret < 0) | 6341 | if (ret < 0) |
6321 | goto out; | 6342 | goto out; |
@@ -6345,9 +6366,6 @@ int register_netdevice(struct net_device *dev) | |||
6345 | else if (__dev_get_by_index(net, dev->ifindex)) | 6366 | else if (__dev_get_by_index(net, dev->ifindex)) |
6346 | goto err_uninit; | 6367 | goto err_uninit; |
6347 | 6368 | ||
6348 | if (dev->iflink == -1) | ||
6349 | dev->iflink = dev->ifindex; | ||
6350 | |||
6351 | /* Transfer changeable features to wanted_features and enable | 6369 | /* Transfer changeable features to wanted_features and enable |
6352 | * software offloads (GSO and GRO). | 6370 | * software offloads (GSO and GRO). |
6353 | */ | 6371 | */ |
@@ -7060,12 +7078,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
7060 | dev_net_set(dev, net); | 7078 | dev_net_set(dev, net); |
7061 | 7079 | ||
7062 | /* If there is an ifindex conflict assign a new one */ | 7080 | /* If there is an ifindex conflict assign a new one */ |
7063 | if (__dev_get_by_index(net, dev->ifindex)) { | 7081 | if (__dev_get_by_index(net, dev->ifindex)) |
7064 | int iflink = (dev->iflink == dev->ifindex); | ||
7065 | dev->ifindex = dev_new_index(net); | 7082 | dev->ifindex = dev_new_index(net); |
7066 | if (iflink) | ||
7067 | dev->iflink = dev->ifindex; | ||
7068 | } | ||
7069 | 7083 | ||
7070 | /* Send a netdev-add uevent to the new namespace */ | 7084 | /* Send a netdev-add uevent to the new namespace */ |
7071 | kobject_uevent(&dev->dev.kobj, KOBJ_ADD); | 7085 | kobject_uevent(&dev->dev.kobj, KOBJ_ADD); |
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 68ea6950cad1..9a12668f7d62 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c | |||
@@ -165,9 +165,9 @@ void fib_rules_unregister(struct fib_rules_ops *ops) | |||
165 | 165 | ||
166 | spin_lock(&net->rules_mod_lock); | 166 | spin_lock(&net->rules_mod_lock); |
167 | list_del_rcu(&ops->list); | 167 | list_del_rcu(&ops->list); |
168 | fib_rules_cleanup_ops(ops); | ||
169 | spin_unlock(&net->rules_mod_lock); | 168 | spin_unlock(&net->rules_mod_lock); |
170 | 169 | ||
170 | fib_rules_cleanup_ops(ops); | ||
171 | kfree_rcu(ops, rcu); | 171 | kfree_rcu(ops, rcu); |
172 | } | 172 | } |
173 | EXPORT_SYMBOL_GPL(fib_rules_unregister); | 173 | EXPORT_SYMBOL_GPL(fib_rules_unregister); |
diff --git a/net/core/filter.c b/net/core/filter.c index 444a07e4f68d..b669e75d2b36 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -1175,7 +1175,9 @@ int sk_attach_bpf(u32 ufd, struct sock *sk) | |||
1175 | return 0; | 1175 | return 0; |
1176 | } | 1176 | } |
1177 | 1177 | ||
1178 | static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) | 1178 | #define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1) |
1179 | |||
1180 | static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) | ||
1179 | { | 1181 | { |
1180 | struct sk_buff *skb = (struct sk_buff *) (long) r1; | 1182 | struct sk_buff *skb = (struct sk_buff *) (long) r1; |
1181 | unsigned int offset = (unsigned int) r2; | 1183 | unsigned int offset = (unsigned int) r2; |
@@ -1192,7 +1194,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) | |||
1192 | * | 1194 | * |
1193 | * so check for invalid 'offset' and too large 'len' | 1195 | * so check for invalid 'offset' and too large 'len' |
1194 | */ | 1196 | */ |
1195 | if (offset > 0xffff || len > sizeof(buf)) | 1197 | if (unlikely(offset > 0xffff || len > sizeof(buf))) |
1196 | return -EFAULT; | 1198 | return -EFAULT; |
1197 | 1199 | ||
1198 | if (skb_cloned(skb) && !skb_clone_writable(skb, offset + len)) | 1200 | if (skb_cloned(skb) && !skb_clone_writable(skb, offset + len)) |
@@ -1202,7 +1204,8 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) | |||
1202 | if (unlikely(!ptr)) | 1204 | if (unlikely(!ptr)) |
1203 | return -EFAULT; | 1205 | return -EFAULT; |
1204 | 1206 | ||
1205 | skb_postpull_rcsum(skb, ptr, len); | 1207 | if (BPF_RECOMPUTE_CSUM(flags)) |
1208 | skb_postpull_rcsum(skb, ptr, len); | ||
1206 | 1209 | ||
1207 | memcpy(ptr, from, len); | 1210 | memcpy(ptr, from, len); |
1208 | 1211 | ||
@@ -1210,7 +1213,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) | |||
1210 | /* skb_store_bits cannot return -EFAULT here */ | 1213 | /* skb_store_bits cannot return -EFAULT here */ |
1211 | skb_store_bits(skb, offset, ptr, len); | 1214 | skb_store_bits(skb, offset, ptr, len); |
1212 | 1215 | ||
1213 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 1216 | if (BPF_RECOMPUTE_CSUM(flags) && skb->ip_summed == CHECKSUM_COMPLETE) |
1214 | skb->csum = csum_add(skb->csum, csum_partial(ptr, len, 0)); | 1217 | skb->csum = csum_add(skb->csum, csum_partial(ptr, len, 0)); |
1215 | return 0; | 1218 | return 0; |
1216 | } | 1219 | } |
@@ -1223,6 +1226,99 @@ const struct bpf_func_proto bpf_skb_store_bytes_proto = { | |||
1223 | .arg2_type = ARG_ANYTHING, | 1226 | .arg2_type = ARG_ANYTHING, |
1224 | .arg3_type = ARG_PTR_TO_STACK, | 1227 | .arg3_type = ARG_PTR_TO_STACK, |
1225 | .arg4_type = ARG_CONST_STACK_SIZE, | 1228 | .arg4_type = ARG_CONST_STACK_SIZE, |
1229 | .arg5_type = ARG_ANYTHING, | ||
1230 | }; | ||
1231 | |||
1232 | #define BPF_HEADER_FIELD_SIZE(flags) ((flags) & 0x0f) | ||
1233 | #define BPF_IS_PSEUDO_HEADER(flags) ((flags) & 0x10) | ||
1234 | |||
1235 | static u64 bpf_l3_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags) | ||
1236 | { | ||
1237 | struct sk_buff *skb = (struct sk_buff *) (long) r1; | ||
1238 | __sum16 sum, *ptr; | ||
1239 | |||
1240 | if (unlikely(offset > 0xffff)) | ||
1241 | return -EFAULT; | ||
1242 | |||
1243 | if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum))) | ||
1244 | return -EFAULT; | ||
1245 | |||
1246 | ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); | ||
1247 | if (unlikely(!ptr)) | ||
1248 | return -EFAULT; | ||
1249 | |||
1250 | switch (BPF_HEADER_FIELD_SIZE(flags)) { | ||
1251 | case 2: | ||
1252 | csum_replace2(ptr, from, to); | ||
1253 | break; | ||
1254 | case 4: | ||
1255 | csum_replace4(ptr, from, to); | ||
1256 | break; | ||
1257 | default: | ||
1258 | return -EINVAL; | ||
1259 | } | ||
1260 | |||
1261 | if (ptr == &sum) | ||
1262 | /* skb_store_bits guaranteed to not return -EFAULT here */ | ||
1263 | skb_store_bits(skb, offset, ptr, sizeof(sum)); | ||
1264 | |||
1265 | return 0; | ||
1266 | } | ||
1267 | |||
1268 | const struct bpf_func_proto bpf_l3_csum_replace_proto = { | ||
1269 | .func = bpf_l3_csum_replace, | ||
1270 | .gpl_only = false, | ||
1271 | .ret_type = RET_INTEGER, | ||
1272 | .arg1_type = ARG_PTR_TO_CTX, | ||
1273 | .arg2_type = ARG_ANYTHING, | ||
1274 | .arg3_type = ARG_ANYTHING, | ||
1275 | .arg4_type = ARG_ANYTHING, | ||
1276 | .arg5_type = ARG_ANYTHING, | ||
1277 | }; | ||
1278 | |||
1279 | static u64 bpf_l4_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags) | ||
1280 | { | ||
1281 | struct sk_buff *skb = (struct sk_buff *) (long) r1; | ||
1282 | u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags); | ||
1283 | __sum16 sum, *ptr; | ||
1284 | |||
1285 | if (unlikely(offset > 0xffff)) | ||
1286 | return -EFAULT; | ||
1287 | |||
1288 | if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum))) | ||
1289 | return -EFAULT; | ||
1290 | |||
1291 | ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); | ||
1292 | if (unlikely(!ptr)) | ||
1293 | return -EFAULT; | ||
1294 | |||
1295 | switch (BPF_HEADER_FIELD_SIZE(flags)) { | ||
1296 | case 2: | ||
1297 | inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo); | ||
1298 | break; | ||
1299 | case 4: | ||
1300 | inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo); | ||
1301 | break; | ||
1302 | default: | ||
1303 | return -EINVAL; | ||
1304 | } | ||
1305 | |||
1306 | if (ptr == &sum) | ||
1307 | /* skb_store_bits guaranteed to not return -EFAULT here */ | ||
1308 | skb_store_bits(skb, offset, ptr, sizeof(sum)); | ||
1309 | |||
1310 | return 0; | ||
1311 | } | ||
1312 | |||
1313 | const struct bpf_func_proto bpf_l4_csum_replace_proto = { | ||
1314 | .func = bpf_l4_csum_replace, | ||
1315 | .gpl_only = false, | ||
1316 | .ret_type = RET_INTEGER, | ||
1317 | .arg1_type = ARG_PTR_TO_CTX, | ||
1318 | .arg2_type = ARG_ANYTHING, | ||
1319 | .arg3_type = ARG_ANYTHING, | ||
1320 | .arg4_type = ARG_ANYTHING, | ||
1321 | .arg5_type = ARG_ANYTHING, | ||
1226 | }; | 1322 | }; |
1227 | 1323 | ||
1228 | static const struct bpf_func_proto * | 1324 | static const struct bpf_func_proto * |
@@ -1250,6 +1346,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id) | |||
1250 | switch (func_id) { | 1346 | switch (func_id) { |
1251 | case BPF_FUNC_skb_store_bytes: | 1347 | case BPF_FUNC_skb_store_bytes: |
1252 | return &bpf_skb_store_bytes_proto; | 1348 | return &bpf_skb_store_bytes_proto; |
1349 | case BPF_FUNC_l3_csum_replace: | ||
1350 | return &bpf_l3_csum_replace_proto; | ||
1351 | case BPF_FUNC_l4_csum_replace: | ||
1352 | return &bpf_l4_csum_replace_proto; | ||
1253 | default: | 1353 | default: |
1254 | return sk_filter_func_proto(func_id); | 1354 | return sk_filter_func_proto(func_id); |
1255 | } | 1355 | } |
@@ -1304,6 +1404,13 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off, | |||
1304 | offsetof(struct sk_buff, vlan_proto)); | 1404 | offsetof(struct sk_buff, vlan_proto)); |
1305 | break; | 1405 | break; |
1306 | 1406 | ||
1407 | case offsetof(struct __sk_buff, priority): | ||
1408 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4); | ||
1409 | |||
1410 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, | ||
1411 | offsetof(struct sk_buff, priority)); | ||
1412 | break; | ||
1413 | |||
1307 | case offsetof(struct __sk_buff, mark): | 1414 | case offsetof(struct __sk_buff, mark): |
1308 | return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn); | 1415 | return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn); |
1309 | 1416 | ||
diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 49a9e3e06c08..982861607f88 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c | |||
@@ -40,7 +40,7 @@ static DEFINE_SPINLOCK(lweventlist_lock); | |||
40 | static unsigned char default_operstate(const struct net_device *dev) | 40 | static unsigned char default_operstate(const struct net_device *dev) |
41 | { | 41 | { |
42 | if (!netif_carrier_ok(dev)) | 42 | if (!netif_carrier_ok(dev)) |
43 | return (dev->ifindex != dev->iflink ? | 43 | return (dev->ifindex != dev_get_iflink(dev) ? |
44 | IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN); | 44 | IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN); |
45 | 45 | ||
46 | if (netif_dormant(dev)) | 46 | if (netif_dormant(dev)) |
@@ -89,7 +89,7 @@ static bool linkwatch_urgent_event(struct net_device *dev) | |||
89 | if (!netif_running(dev)) | 89 | if (!netif_running(dev)) |
90 | return false; | 90 | return false; |
91 | 91 | ||
92 | if (dev->ifindex != dev->iflink) | 92 | if (dev->ifindex != dev_get_iflink(dev)) |
93 | return true; | 93 | return true; |
94 | 94 | ||
95 | if (dev->priv_flags & IFF_TEAM_PORT) | 95 | if (dev->priv_flags & IFF_TEAM_PORT) |
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index cc5cf689809c..4238d6da5c60 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -109,11 +109,19 @@ NETDEVICE_SHOW_RO(dev_id, fmt_hex); | |||
109 | NETDEVICE_SHOW_RO(dev_port, fmt_dec); | 109 | NETDEVICE_SHOW_RO(dev_port, fmt_dec); |
110 | NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec); | 110 | NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec); |
111 | NETDEVICE_SHOW_RO(addr_len, fmt_dec); | 111 | NETDEVICE_SHOW_RO(addr_len, fmt_dec); |
112 | NETDEVICE_SHOW_RO(iflink, fmt_dec); | ||
113 | NETDEVICE_SHOW_RO(ifindex, fmt_dec); | 112 | NETDEVICE_SHOW_RO(ifindex, fmt_dec); |
114 | NETDEVICE_SHOW_RO(type, fmt_dec); | 113 | NETDEVICE_SHOW_RO(type, fmt_dec); |
115 | NETDEVICE_SHOW_RO(link_mode, fmt_dec); | 114 | NETDEVICE_SHOW_RO(link_mode, fmt_dec); |
116 | 115 | ||
116 | static ssize_t iflink_show(struct device *dev, struct device_attribute *attr, | ||
117 | char *buf) | ||
118 | { | ||
119 | struct net_device *ndev = to_net_dev(dev); | ||
120 | |||
121 | return sprintf(buf, fmt_dec, dev_get_iflink(ndev)); | ||
122 | } | ||
123 | static DEVICE_ATTR_RO(iflink); | ||
124 | |||
117 | static ssize_t format_name_assign_type(const struct net_device *dev, char *buf) | 125 | static ssize_t format_name_assign_type(const struct net_device *dev, char *buf) |
118 | { | 126 | { |
119 | return sprintf(buf, fmt_dec, dev->name_assign_type); | 127 | return sprintf(buf, fmt_dec, dev->name_assign_type); |
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index e5e96b0f6717..a3abb719221f 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -148,9 +148,11 @@ static void ops_free_list(const struct pernet_operations *ops, | |||
148 | } | 148 | } |
149 | } | 149 | } |
150 | 150 | ||
151 | static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd, | ||
152 | int id); | ||
151 | static int alloc_netid(struct net *net, struct net *peer, int reqid) | 153 | static int alloc_netid(struct net *net, struct net *peer, int reqid) |
152 | { | 154 | { |
153 | int min = 0, max = 0; | 155 | int min = 0, max = 0, id; |
154 | 156 | ||
155 | ASSERT_RTNL(); | 157 | ASSERT_RTNL(); |
156 | 158 | ||
@@ -159,7 +161,11 @@ static int alloc_netid(struct net *net, struct net *peer, int reqid) | |||
159 | max = reqid + 1; | 161 | max = reqid + 1; |
160 | } | 162 | } |
161 | 163 | ||
162 | return idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL); | 164 | id = idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL); |
165 | if (id >= 0) | ||
166 | rtnl_net_notifyid(net, peer, RTM_NEWNSID, id); | ||
167 | |||
168 | return id; | ||
163 | } | 169 | } |
164 | 170 | ||
165 | /* This function is used by idr_for_each(). If net is equal to peer, the | 171 | /* This function is used by idr_for_each(). If net is equal to peer, the |
@@ -198,8 +204,10 @@ static int __peernet2id(struct net *net, struct net *peer, bool alloc) | |||
198 | */ | 204 | */ |
199 | int peernet2id(struct net *net, struct net *peer) | 205 | int peernet2id(struct net *net, struct net *peer) |
200 | { | 206 | { |
201 | int id = __peernet2id(net, peer, true); | 207 | bool alloc = atomic_read(&peer->count) == 0 ? false : true; |
208 | int id; | ||
202 | 209 | ||
210 | id = __peernet2id(net, peer, alloc); | ||
203 | return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED; | 211 | return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED; |
204 | } | 212 | } |
205 | EXPORT_SYMBOL(peernet2id); | 213 | EXPORT_SYMBOL(peernet2id); |
@@ -357,8 +365,10 @@ static void cleanup_net(struct work_struct *work) | |||
357 | for_each_net(tmp) { | 365 | for_each_net(tmp) { |
358 | int id = __peernet2id(tmp, net, false); | 366 | int id = __peernet2id(tmp, net, false); |
359 | 367 | ||
360 | if (id >= 0) | 368 | if (id >= 0) { |
369 | rtnl_net_notifyid(tmp, net, RTM_DELNSID, id); | ||
361 | idr_remove(&tmp->netns_ids, id); | 370 | idr_remove(&tmp->netns_ids, id); |
371 | } | ||
362 | } | 372 | } |
363 | idr_destroy(&net->netns_ids); | 373 | idr_destroy(&net->netns_ids); |
364 | 374 | ||
@@ -529,7 +539,8 @@ static int rtnl_net_get_size(void) | |||
529 | } | 539 | } |
530 | 540 | ||
531 | static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags, | 541 | static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags, |
532 | int cmd, struct net *net, struct net *peer) | 542 | int cmd, struct net *net, struct net *peer, |
543 | int nsid) | ||
533 | { | 544 | { |
534 | struct nlmsghdr *nlh; | 545 | struct nlmsghdr *nlh; |
535 | struct rtgenmsg *rth; | 546 | struct rtgenmsg *rth; |
@@ -544,9 +555,13 @@ static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags, | |||
544 | rth = nlmsg_data(nlh); | 555 | rth = nlmsg_data(nlh); |
545 | rth->rtgen_family = AF_UNSPEC; | 556 | rth->rtgen_family = AF_UNSPEC; |
546 | 557 | ||
547 | id = __peernet2id(net, peer, false); | 558 | if (nsid >= 0) { |
548 | if (id < 0) | 559 | id = nsid; |
549 | id = NETNSA_NSID_NOT_ASSIGNED; | 560 | } else { |
561 | id = __peernet2id(net, peer, false); | ||
562 | if (id < 0) | ||
563 | id = NETNSA_NSID_NOT_ASSIGNED; | ||
564 | } | ||
550 | if (nla_put_s32(skb, NETNSA_NSID, id)) | 565 | if (nla_put_s32(skb, NETNSA_NSID, id)) |
551 | goto nla_put_failure; | 566 | goto nla_put_failure; |
552 | 567 | ||
@@ -563,8 +578,8 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
563 | struct net *net = sock_net(skb->sk); | 578 | struct net *net = sock_net(skb->sk); |
564 | struct nlattr *tb[NETNSA_MAX + 1]; | 579 | struct nlattr *tb[NETNSA_MAX + 1]; |
565 | struct sk_buff *msg; | 580 | struct sk_buff *msg; |
566 | int err = -ENOBUFS; | ||
567 | struct net *peer; | 581 | struct net *peer; |
582 | int err; | ||
568 | 583 | ||
569 | err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, | 584 | err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, |
570 | rtnl_net_policy); | 585 | rtnl_net_policy); |
@@ -587,7 +602,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
587 | } | 602 | } |
588 | 603 | ||
589 | err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, | 604 | err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, |
590 | RTM_GETNSID, net, peer); | 605 | RTM_GETNSID, net, peer, -1); |
591 | if (err < 0) | 606 | if (err < 0) |
592 | goto err_out; | 607 | goto err_out; |
593 | 608 | ||
@@ -601,6 +616,75 @@ out: | |||
601 | return err; | 616 | return err; |
602 | } | 617 | } |
603 | 618 | ||
619 | struct rtnl_net_dump_cb { | ||
620 | struct net *net; | ||
621 | struct sk_buff *skb; | ||
622 | struct netlink_callback *cb; | ||
623 | int idx; | ||
624 | int s_idx; | ||
625 | }; | ||
626 | |||
627 | static int rtnl_net_dumpid_one(int id, void *peer, void *data) | ||
628 | { | ||
629 | struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data; | ||
630 | int ret; | ||
631 | |||
632 | if (net_cb->idx < net_cb->s_idx) | ||
633 | goto cont; | ||
634 | |||
635 | ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid, | ||
636 | net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI, | ||
637 | RTM_NEWNSID, net_cb->net, peer, id); | ||
638 | if (ret < 0) | ||
639 | return ret; | ||
640 | |||
641 | cont: | ||
642 | net_cb->idx++; | ||
643 | return 0; | ||
644 | } | ||
645 | |||
646 | static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb) | ||
647 | { | ||
648 | struct net *net = sock_net(skb->sk); | ||
649 | struct rtnl_net_dump_cb net_cb = { | ||
650 | .net = net, | ||
651 | .skb = skb, | ||
652 | .cb = cb, | ||
653 | .idx = 0, | ||
654 | .s_idx = cb->args[0], | ||
655 | }; | ||
656 | |||
657 | ASSERT_RTNL(); | ||
658 | |||
659 | idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb); | ||
660 | |||
661 | cb->args[0] = net_cb.idx; | ||
662 | return skb->len; | ||
663 | } | ||
664 | |||
665 | static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd, | ||
666 | int id) | ||
667 | { | ||
668 | struct sk_buff *msg; | ||
669 | int err = -ENOMEM; | ||
670 | |||
671 | msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL); | ||
672 | if (!msg) | ||
673 | goto out; | ||
674 | |||
675 | err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, peer, id); | ||
676 | if (err < 0) | ||
677 | goto err_out; | ||
678 | |||
679 | rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0); | ||
680 | return; | ||
681 | |||
682 | err_out: | ||
683 | nlmsg_free(msg); | ||
684 | out: | ||
685 | rtnl_set_sk_err(net, RTNLGRP_NSID, err); | ||
686 | } | ||
687 | |||
604 | static int __init net_ns_init(void) | 688 | static int __init net_ns_init(void) |
605 | { | 689 | { |
606 | struct net_generic *ng; | 690 | struct net_generic *ng; |
@@ -635,7 +719,8 @@ static int __init net_ns_init(void) | |||
635 | register_pernet_subsys(&net_ns_ops); | 719 | register_pernet_subsys(&net_ns_ops); |
636 | 720 | ||
637 | rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL); | 721 | rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL); |
638 | rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, NULL, NULL); | 722 | rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid, |
723 | NULL); | ||
639 | 724 | ||
640 | return 0; | 725 | return 0; |
641 | } | 726 | } |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b96ac2109c82..5e02260b087f 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -1055,8 +1055,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
1055 | #ifdef CONFIG_RPS | 1055 | #ifdef CONFIG_RPS |
1056 | nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) || | 1056 | nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) || |
1057 | #endif | 1057 | #endif |
1058 | (dev->ifindex != dev->iflink && | 1058 | (dev->ifindex != dev_get_iflink(dev) && |
1059 | nla_put_u32(skb, IFLA_LINK, dev->iflink)) || | 1059 | nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) || |
1060 | (upper_dev && | 1060 | (upper_dev && |
1061 | nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) || | 1061 | nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) || |
1062 | nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) || | 1062 | nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) || |
@@ -1991,10 +1991,10 @@ static int rtnl_group_changelink(const struct sk_buff *skb, | |||
1991 | struct ifinfomsg *ifm, | 1991 | struct ifinfomsg *ifm, |
1992 | struct nlattr **tb) | 1992 | struct nlattr **tb) |
1993 | { | 1993 | { |
1994 | struct net_device *dev; | 1994 | struct net_device *dev, *aux; |
1995 | int err; | 1995 | int err; |
1996 | 1996 | ||
1997 | for_each_netdev(net, dev) { | 1997 | for_each_netdev_safe(net, dev, aux) { |
1998 | if (dev->group == group) { | 1998 | if (dev->group == group) { |
1999 | err = do_setlink(skb, dev, ifm, tb, NULL, 0); | 1999 | err = do_setlink(skb, dev, ifm, tb, NULL, 0); |
2000 | if (err < 0) | 2000 | if (err < 0) |
@@ -2863,8 +2863,8 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | |||
2863 | nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) || | 2863 | nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) || |
2864 | (dev->addr_len && | 2864 | (dev->addr_len && |
2865 | nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || | 2865 | nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || |
2866 | (dev->ifindex != dev->iflink && | 2866 | (dev->ifindex != dev_get_iflink(dev) && |
2867 | nla_put_u32(skb, IFLA_LINK, dev->iflink))) | 2867 | nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) |
2868 | goto nla_put_failure; | 2868 | goto nla_put_failure; |
2869 | 2869 | ||
2870 | br_afspec = nla_nest_start(skb, IFLA_AF_SPEC); | 2870 | br_afspec = nla_nest_start(skb, IFLA_AF_SPEC); |
diff --git a/net/core/sock.c b/net/core/sock.c index 119ae464b44a..654e38a99759 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -653,6 +653,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) | |||
653 | sock_reset_flag(sk, bit); | 653 | sock_reset_flag(sk, bit); |
654 | } | 654 | } |
655 | 655 | ||
656 | bool sk_mc_loop(struct sock *sk) | ||
657 | { | ||
658 | if (dev_recursion_level()) | ||
659 | return false; | ||
660 | if (!sk) | ||
661 | return true; | ||
662 | switch (sk->sk_family) { | ||
663 | case AF_INET: | ||
664 | return inet_sk(sk)->mc_loop; | ||
665 | #if IS_ENABLED(CONFIG_IPV6) | ||
666 | case AF_INET6: | ||
667 | return inet6_sk(sk)->mc_loop; | ||
668 | #endif | ||
669 | } | ||
670 | WARN_ON(1); | ||
671 | return true; | ||
672 | } | ||
673 | EXPORT_SYMBOL(sk_mc_loop); | ||
674 | |||
656 | /* | 675 | /* |
657 | * This is meant for all protocols to use and covers goings on | 676 | * This is meant for all protocols to use and covers goings on |
658 | * at the socket level. Everything here is generic. | 677 | * at the socket level. Everything here is generic. |
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c index be1f08cdad29..4507b188fc51 100644 --- a/net/decnet/dn_neigh.c +++ b/net/decnet/dn_neigh.c | |||
@@ -194,7 +194,7 @@ static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb) | |||
194 | return err; | 194 | return err; |
195 | } | 195 | } |
196 | 196 | ||
197 | static int dn_neigh_output_packet(struct sk_buff *skb) | 197 | static int dn_neigh_output_packet(struct sock *sk, struct sk_buff *skb) |
198 | { | 198 | { |
199 | struct dst_entry *dst = skb_dst(skb); | 199 | struct dst_entry *dst = skb_dst(skb); |
200 | struct dn_route *rt = (struct dn_route *)dst; | 200 | struct dn_route *rt = (struct dn_route *)dst; |
@@ -206,7 +206,8 @@ static int dn_neigh_output_packet(struct sk_buff *skb) | |||
206 | /* | 206 | /* |
207 | * For talking to broadcast devices: Ethernet & PPP | 207 | * For talking to broadcast devices: Ethernet & PPP |
208 | */ | 208 | */ |
209 | static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb) | 209 | static int dn_long_output(struct neighbour *neigh, struct sock *sk, |
210 | struct sk_buff *skb) | ||
210 | { | 211 | { |
211 | struct net_device *dev = neigh->dev; | 212 | struct net_device *dev = neigh->dev; |
212 | int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3; | 213 | int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3; |
@@ -245,14 +246,15 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb) | |||
245 | 246 | ||
246 | skb_reset_network_header(skb); | 247 | skb_reset_network_header(skb); |
247 | 248 | ||
248 | return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL, | 249 | return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, sk, skb, |
249 | neigh->dev, dn_neigh_output_packet); | 250 | NULL, neigh->dev, dn_neigh_output_packet); |
250 | } | 251 | } |
251 | 252 | ||
252 | /* | 253 | /* |
253 | * For talking to pointopoint and multidrop devices: DDCMP and X.25 | 254 | * For talking to pointopoint and multidrop devices: DDCMP and X.25 |
254 | */ | 255 | */ |
255 | static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb) | 256 | static int dn_short_output(struct neighbour *neigh, struct sock *sk, |
257 | struct sk_buff *skb) | ||
256 | { | 258 | { |
257 | struct net_device *dev = neigh->dev; | 259 | struct net_device *dev = neigh->dev; |
258 | int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2; | 260 | int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2; |
@@ -284,8 +286,8 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb) | |||
284 | 286 | ||
285 | skb_reset_network_header(skb); | 287 | skb_reset_network_header(skb); |
286 | 288 | ||
287 | return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL, | 289 | return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, sk, skb, |
288 | neigh->dev, dn_neigh_output_packet); | 290 | NULL, neigh->dev, dn_neigh_output_packet); |
289 | } | 291 | } |
290 | 292 | ||
291 | /* | 293 | /* |
@@ -293,7 +295,8 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb) | |||
293 | * Phase 3 output is the same as short output, execpt that | 295 | * Phase 3 output is the same as short output, execpt that |
294 | * it clears the area bits before transmission. | 296 | * it clears the area bits before transmission. |
295 | */ | 297 | */ |
296 | static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb) | 298 | static int dn_phase3_output(struct neighbour *neigh, struct sock *sk, |
299 | struct sk_buff *skb) | ||
297 | { | 300 | { |
298 | struct net_device *dev = neigh->dev; | 301 | struct net_device *dev = neigh->dev; |
299 | int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2; | 302 | int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2; |
@@ -324,11 +327,11 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb) | |||
324 | 327 | ||
325 | skb_reset_network_header(skb); | 328 | skb_reset_network_header(skb); |
326 | 329 | ||
327 | return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL, | 330 | return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, sk, skb, |
328 | neigh->dev, dn_neigh_output_packet); | 331 | NULL, neigh->dev, dn_neigh_output_packet); |
329 | } | 332 | } |
330 | 333 | ||
331 | int dn_to_neigh_output(struct sk_buff *skb) | 334 | int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb) |
332 | { | 335 | { |
333 | struct dst_entry *dst = skb_dst(skb); | 336 | struct dst_entry *dst = skb_dst(skb); |
334 | struct dn_route *rt = (struct dn_route *) dst; | 337 | struct dn_route *rt = (struct dn_route *) dst; |
@@ -347,11 +350,11 @@ int dn_to_neigh_output(struct sk_buff *skb) | |||
347 | rcu_read_unlock(); | 350 | rcu_read_unlock(); |
348 | 351 | ||
349 | if (dn->flags & DN_NDFLAG_P3) | 352 | if (dn->flags & DN_NDFLAG_P3) |
350 | return dn_phase3_output(neigh, skb); | 353 | return dn_phase3_output(neigh, sk, skb); |
351 | if (use_long) | 354 | if (use_long) |
352 | return dn_long_output(neigh, skb); | 355 | return dn_long_output(neigh, sk, skb); |
353 | else | 356 | else |
354 | return dn_short_output(neigh, skb); | 357 | return dn_short_output(neigh, sk, skb); |
355 | } | 358 | } |
356 | 359 | ||
357 | /* | 360 | /* |
@@ -372,7 +375,7 @@ void dn_neigh_pointopoint_hello(struct sk_buff *skb) | |||
372 | /* | 375 | /* |
373 | * Ethernet router hello message received | 376 | * Ethernet router hello message received |
374 | */ | 377 | */ |
375 | int dn_neigh_router_hello(struct sk_buff *skb) | 378 | int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb) |
376 | { | 379 | { |
377 | struct rtnode_hello_message *msg = (struct rtnode_hello_message *)skb->data; | 380 | struct rtnode_hello_message *msg = (struct rtnode_hello_message *)skb->data; |
378 | 381 | ||
@@ -434,7 +437,7 @@ int dn_neigh_router_hello(struct sk_buff *skb) | |||
434 | /* | 437 | /* |
435 | * Endnode hello message received | 438 | * Endnode hello message received |
436 | */ | 439 | */ |
437 | int dn_neigh_endnode_hello(struct sk_buff *skb) | 440 | int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb) |
438 | { | 441 | { |
439 | struct endnode_hello_message *msg = (struct endnode_hello_message *)skb->data; | 442 | struct endnode_hello_message *msg = (struct endnode_hello_message *)skb->data; |
440 | struct neighbour *neigh; | 443 | struct neighbour *neigh; |
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c index fe5f01485d33..a321eac9fd0c 100644 --- a/net/decnet/dn_nsp_in.c +++ b/net/decnet/dn_nsp_in.c | |||
@@ -714,7 +714,7 @@ out: | |||
714 | return ret; | 714 | return ret; |
715 | } | 715 | } |
716 | 716 | ||
717 | static int dn_nsp_rx_packet(struct sk_buff *skb) | 717 | static int dn_nsp_rx_packet(struct sock *sk2, struct sk_buff *skb) |
718 | { | 718 | { |
719 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | 719 | struct dn_skb_cb *cb = DN_SKB_CB(skb); |
720 | struct sock *sk = NULL; | 720 | struct sock *sk = NULL; |
@@ -814,7 +814,8 @@ free_out: | |||
814 | 814 | ||
815 | int dn_nsp_rx(struct sk_buff *skb) | 815 | int dn_nsp_rx(struct sk_buff *skb) |
816 | { | 816 | { |
817 | return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, skb, skb->dev, NULL, | 817 | return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, NULL, skb, |
818 | skb->dev, NULL, | ||
818 | dn_nsp_rx_packet); | 819 | dn_nsp_rx_packet); |
819 | } | 820 | } |
820 | 821 | ||
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 9ab0c4ba297f..03227ffd19ce 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c | |||
@@ -512,7 +512,7 @@ static int dn_return_long(struct sk_buff *skb) | |||
512 | * | 512 | * |
513 | * Returns: result of input function if route is found, error code otherwise | 513 | * Returns: result of input function if route is found, error code otherwise |
514 | */ | 514 | */ |
515 | static int dn_route_rx_packet(struct sk_buff *skb) | 515 | static int dn_route_rx_packet(struct sock *sk, struct sk_buff *skb) |
516 | { | 516 | { |
517 | struct dn_skb_cb *cb; | 517 | struct dn_skb_cb *cb; |
518 | int err; | 518 | int err; |
@@ -573,7 +573,8 @@ static int dn_route_rx_long(struct sk_buff *skb) | |||
573 | ptr++; | 573 | ptr++; |
574 | cb->hops = *ptr++; /* Visit Count */ | 574 | cb->hops = *ptr++; /* Visit Count */ |
575 | 575 | ||
576 | return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, | 576 | return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, NULL, skb, |
577 | skb->dev, NULL, | ||
577 | dn_route_rx_packet); | 578 | dn_route_rx_packet); |
578 | 579 | ||
579 | drop_it: | 580 | drop_it: |
@@ -600,7 +601,8 @@ static int dn_route_rx_short(struct sk_buff *skb) | |||
600 | ptr += 2; | 601 | ptr += 2; |
601 | cb->hops = *ptr & 0x3f; | 602 | cb->hops = *ptr & 0x3f; |
602 | 603 | ||
603 | return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, | 604 | return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, NULL, skb, |
605 | skb->dev, NULL, | ||
604 | dn_route_rx_packet); | 606 | dn_route_rx_packet); |
605 | 607 | ||
606 | drop_it: | 608 | drop_it: |
@@ -608,7 +610,7 @@ drop_it: | |||
608 | return NET_RX_DROP; | 610 | return NET_RX_DROP; |
609 | } | 611 | } |
610 | 612 | ||
611 | static int dn_route_discard(struct sk_buff *skb) | 613 | static int dn_route_discard(struct sock *sk, struct sk_buff *skb) |
612 | { | 614 | { |
613 | /* | 615 | /* |
614 | * I know we drop the packet here, but thats considered success in | 616 | * I know we drop the packet here, but thats considered success in |
@@ -618,7 +620,7 @@ static int dn_route_discard(struct sk_buff *skb) | |||
618 | return NET_RX_SUCCESS; | 620 | return NET_RX_SUCCESS; |
619 | } | 621 | } |
620 | 622 | ||
621 | static int dn_route_ptp_hello(struct sk_buff *skb) | 623 | static int dn_route_ptp_hello(struct sock *sk, struct sk_buff *skb) |
622 | { | 624 | { |
623 | dn_dev_hello(skb); | 625 | dn_dev_hello(skb); |
624 | dn_neigh_pointopoint_hello(skb); | 626 | dn_neigh_pointopoint_hello(skb); |
@@ -704,22 +706,22 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type | |||
704 | switch (flags & DN_RT_CNTL_MSK) { | 706 | switch (flags & DN_RT_CNTL_MSK) { |
705 | case DN_RT_PKT_HELO: | 707 | case DN_RT_PKT_HELO: |
706 | return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, | 708 | return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, |
707 | skb, skb->dev, NULL, | 709 | NULL, skb, skb->dev, NULL, |
708 | dn_route_ptp_hello); | 710 | dn_route_ptp_hello); |
709 | 711 | ||
710 | case DN_RT_PKT_L1RT: | 712 | case DN_RT_PKT_L1RT: |
711 | case DN_RT_PKT_L2RT: | 713 | case DN_RT_PKT_L2RT: |
712 | return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE, | 714 | return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE, |
713 | skb, skb->dev, NULL, | 715 | NULL, skb, skb->dev, NULL, |
714 | dn_route_discard); | 716 | dn_route_discard); |
715 | case DN_RT_PKT_ERTH: | 717 | case DN_RT_PKT_ERTH: |
716 | return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, | 718 | return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, |
717 | skb, skb->dev, NULL, | 719 | NULL, skb, skb->dev, NULL, |
718 | dn_neigh_router_hello); | 720 | dn_neigh_router_hello); |
719 | 721 | ||
720 | case DN_RT_PKT_EEDH: | 722 | case DN_RT_PKT_EEDH: |
721 | return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, | 723 | return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, |
722 | skb, skb->dev, NULL, | 724 | NULL, skb, skb->dev, NULL, |
723 | dn_neigh_endnode_hello); | 725 | dn_neigh_endnode_hello); |
724 | } | 726 | } |
725 | } else { | 727 | } else { |
@@ -768,7 +770,8 @@ static int dn_output(struct sock *sk, struct sk_buff *skb) | |||
768 | cb->rt_flags |= DN_RT_F_IE; | 770 | cb->rt_flags |= DN_RT_F_IE; |
769 | cb->hops = 0; | 771 | cb->hops = 0; |
770 | 772 | ||
771 | return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev, | 773 | return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, sk, skb, |
774 | NULL, dev, | ||
772 | dn_to_neigh_output); | 775 | dn_to_neigh_output); |
773 | 776 | ||
774 | error: | 777 | error: |
@@ -816,7 +819,8 @@ static int dn_forward(struct sk_buff *skb) | |||
816 | if (rt->rt_flags & RTCF_DOREDIRECT) | 819 | if (rt->rt_flags & RTCF_DOREDIRECT) |
817 | cb->rt_flags |= DN_RT_F_IE; | 820 | cb->rt_flags |= DN_RT_F_IE; |
818 | 821 | ||
819 | return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev, | 822 | return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, NULL, skb, |
823 | dev, skb->dev, | ||
820 | dn_to_neigh_output); | 824 | dn_to_neigh_output); |
821 | 825 | ||
822 | drop: | 826 | drop: |
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index faf7cc3483fe..9d66a0f72f90 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c | |||
@@ -248,7 +248,9 @@ void __init dn_fib_rules_init(void) | |||
248 | 248 | ||
249 | void __exit dn_fib_rules_cleanup(void) | 249 | void __exit dn_fib_rules_cleanup(void) |
250 | { | 250 | { |
251 | rtnl_lock(); | ||
251 | fib_rules_unregister(dn_fib_rules_ops); | 252 | fib_rules_unregister(dn_fib_rules_ops); |
253 | rtnl_unlock(); | ||
252 | rcu_barrier(); | 254 | rcu_barrier(); |
253 | } | 255 | } |
254 | 256 | ||
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index e4d9560a910b..af34fc9bdf69 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c | |||
@@ -89,9 +89,7 @@ static void dnrmg_send_peer(struct sk_buff *skb) | |||
89 | 89 | ||
90 | static unsigned int dnrmg_hook(const struct nf_hook_ops *ops, | 90 | static unsigned int dnrmg_hook(const struct nf_hook_ops *ops, |
91 | struct sk_buff *skb, | 91 | struct sk_buff *skb, |
92 | const struct net_device *in, | 92 | const struct nf_hook_state *state) |
93 | const struct net_device *out, | ||
94 | int (*okfn)(struct sk_buff *)) | ||
95 | { | 93 | { |
96 | dnrmg_send_peer(skb); | 94 | dnrmg_send_peer(skb); |
97 | return NF_ACCEPT; | 95 | return NF_ACCEPT; |
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 899772108ee3..5eaadabe23a1 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c | |||
@@ -513,12 +513,10 @@ static struct net_device *dev_to_net_device(struct device *dev) | |||
513 | #ifdef CONFIG_OF | 513 | #ifdef CONFIG_OF |
514 | static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, | 514 | static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, |
515 | struct dsa_chip_data *cd, | 515 | struct dsa_chip_data *cd, |
516 | int chip_index, | 516 | int chip_index, int port_index, |
517 | struct device_node *link) | 517 | struct device_node *link) |
518 | { | 518 | { |
519 | int ret; | ||
520 | const __be32 *reg; | 519 | const __be32 *reg; |
521 | int link_port_addr; | ||
522 | int link_sw_addr; | 520 | int link_sw_addr; |
523 | struct device_node *parent_sw; | 521 | struct device_node *parent_sw; |
524 | int len; | 522 | int len; |
@@ -531,6 +529,10 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, | |||
531 | if (!reg || (len != sizeof(*reg) * 2)) | 529 | if (!reg || (len != sizeof(*reg) * 2)) |
532 | return -EINVAL; | 530 | return -EINVAL; |
533 | 531 | ||
532 | /* | ||
533 | * Get the destination switch number from the second field of its 'reg' | ||
534 | * property, i.e. for "reg = <0x19 1>" sw_addr is '1'. | ||
535 | */ | ||
534 | link_sw_addr = be32_to_cpup(reg + 1); | 536 | link_sw_addr = be32_to_cpup(reg + 1); |
535 | 537 | ||
536 | if (link_sw_addr >= pd->nr_chips) | 538 | if (link_sw_addr >= pd->nr_chips) |
@@ -547,20 +549,9 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, | |||
547 | memset(cd->rtable, -1, pd->nr_chips * sizeof(s8)); | 549 | memset(cd->rtable, -1, pd->nr_chips * sizeof(s8)); |
548 | } | 550 | } |
549 | 551 | ||
550 | reg = of_get_property(link, "reg", NULL); | 552 | cd->rtable[link_sw_addr] = port_index; |
551 | if (!reg) { | ||
552 | ret = -EINVAL; | ||
553 | goto out; | ||
554 | } | ||
555 | |||
556 | link_port_addr = be32_to_cpup(reg); | ||
557 | |||
558 | cd->rtable[link_sw_addr] = link_port_addr; | ||
559 | 553 | ||
560 | return 0; | 554 | return 0; |
561 | out: | ||
562 | kfree(cd->rtable); | ||
563 | return ret; | ||
564 | } | 555 | } |
565 | 556 | ||
566 | static void dsa_of_free_platform_data(struct dsa_platform_data *pd) | 557 | static void dsa_of_free_platform_data(struct dsa_platform_data *pd) |
@@ -670,7 +661,7 @@ static int dsa_of_probe(struct device *dev) | |||
670 | if (!strcmp(port_name, "dsa") && link && | 661 | if (!strcmp(port_name, "dsa") && link && |
671 | pd->nr_chips > 1) { | 662 | pd->nr_chips > 1) { |
672 | ret = dsa_of_setup_routing_table(pd, cd, | 663 | ret = dsa_of_setup_routing_table(pd, cd, |
673 | chip_index, link); | 664 | chip_index, port_index, link); |
674 | if (ret) | 665 | if (ret) |
675 | goto out_free_chip; | 666 | goto out_free_chip; |
676 | } | 667 | } |
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 3597724ec3d8..827cda560a55 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -55,13 +55,11 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds) | |||
55 | 55 | ||
56 | 56 | ||
57 | /* slave device handling ****************************************************/ | 57 | /* slave device handling ****************************************************/ |
58 | static int dsa_slave_init(struct net_device *dev) | 58 | static int dsa_slave_get_iflink(const struct net_device *dev) |
59 | { | 59 | { |
60 | struct dsa_slave_priv *p = netdev_priv(dev); | 60 | struct dsa_slave_priv *p = netdev_priv(dev); |
61 | 61 | ||
62 | dev->iflink = p->parent->dst->master_netdev->ifindex; | 62 | return p->parent->dst->master_netdev->ifindex; |
63 | |||
64 | return 0; | ||
65 | } | 63 | } |
66 | 64 | ||
67 | static inline bool dsa_port_is_bridged(struct dsa_slave_priv *p) | 65 | static inline bool dsa_port_is_bridged(struct dsa_slave_priv *p) |
@@ -664,7 +662,6 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = { | |||
664 | }; | 662 | }; |
665 | 663 | ||
666 | static const struct net_device_ops dsa_slave_netdev_ops = { | 664 | static const struct net_device_ops dsa_slave_netdev_ops = { |
667 | .ndo_init = dsa_slave_init, | ||
668 | .ndo_open = dsa_slave_open, | 665 | .ndo_open = dsa_slave_open, |
669 | .ndo_stop = dsa_slave_close, | 666 | .ndo_stop = dsa_slave_close, |
670 | .ndo_start_xmit = dsa_slave_xmit, | 667 | .ndo_start_xmit = dsa_slave_xmit, |
@@ -675,6 +672,7 @@ static const struct net_device_ops dsa_slave_netdev_ops = { | |||
675 | .ndo_fdb_del = dsa_slave_fdb_del, | 672 | .ndo_fdb_del = dsa_slave_fdb_del, |
676 | .ndo_fdb_dump = dsa_slave_fdb_dump, | 673 | .ndo_fdb_dump = dsa_slave_fdb_dump, |
677 | .ndo_do_ioctl = dsa_slave_ioctl, | 674 | .ndo_do_ioctl = dsa_slave_ioctl, |
675 | .ndo_get_iflink = dsa_slave_get_iflink, | ||
678 | }; | 676 | }; |
679 | 677 | ||
680 | static const struct swdev_ops dsa_slave_swdev_ops = { | 678 | static const struct swdev_ops dsa_slave_swdev_ops = { |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 64a9c0fdc4aa..8b47a4d79d04 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -217,7 +217,7 @@ int inet_listen(struct socket *sock, int backlog) | |||
217 | * shutdown() (rather than close()). | 217 | * shutdown() (rather than close()). |
218 | */ | 218 | */ |
219 | if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 && | 219 | if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 && |
220 | inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) { | 220 | !inet_csk(sk)->icsk_accept_queue.fastopenq) { |
221 | if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0) | 221 | if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0) |
222 | err = fastopen_init_queue(sk, backlog); | 222 | err = fastopen_init_queue(sk, backlog); |
223 | else if ((sysctl_tcp_fastopen & | 223 | else if ((sysctl_tcp_fastopen & |
@@ -314,11 +314,11 @@ lookup_protocol: | |||
314 | answer_flags = answer->flags; | 314 | answer_flags = answer->flags; |
315 | rcu_read_unlock(); | 315 | rcu_read_unlock(); |
316 | 316 | ||
317 | WARN_ON(answer_prot->slab == NULL); | 317 | WARN_ON(!answer_prot->slab); |
318 | 318 | ||
319 | err = -ENOBUFS; | 319 | err = -ENOBUFS; |
320 | sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot); | 320 | sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot); |
321 | if (sk == NULL) | 321 | if (!sk) |
322 | goto out; | 322 | goto out; |
323 | 323 | ||
324 | err = 0; | 324 | err = 0; |
@@ -1269,7 +1269,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, | |||
1269 | if (udpfrag) { | 1269 | if (udpfrag) { |
1270 | iph->id = htons(id); | 1270 | iph->id = htons(id); |
1271 | iph->frag_off = htons(offset >> 3); | 1271 | iph->frag_off = htons(offset >> 3); |
1272 | if (skb->next != NULL) | 1272 | if (skb->next) |
1273 | iph->frag_off |= htons(IP_MF); | 1273 | iph->frag_off |= htons(IP_MF); |
1274 | offset += skb->len - nhoff - ihl; | 1274 | offset += skb->len - nhoff - ihl; |
1275 | } else { | 1275 | } else { |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 5f5c674e130a..933a92820d26 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -228,7 +228,7 @@ static int arp_constructor(struct neighbour *neigh) | |||
228 | 228 | ||
229 | rcu_read_lock(); | 229 | rcu_read_lock(); |
230 | in_dev = __in_dev_get_rcu(dev); | 230 | in_dev = __in_dev_get_rcu(dev); |
231 | if (in_dev == NULL) { | 231 | if (!in_dev) { |
232 | rcu_read_unlock(); | 232 | rcu_read_unlock(); |
233 | return -EINVAL; | 233 | return -EINVAL; |
234 | } | 234 | } |
@@ -475,7 +475,7 @@ static inline int arp_fwd_pvlan(struct in_device *in_dev, | |||
475 | */ | 475 | */ |
476 | 476 | ||
477 | /* | 477 | /* |
478 | * Create an arp packet. If (dest_hw == NULL), we create a broadcast | 478 | * Create an arp packet. If dest_hw is not set, we create a broadcast |
479 | * message. | 479 | * message. |
480 | */ | 480 | */ |
481 | struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, | 481 | struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, |
@@ -495,7 +495,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, | |||
495 | */ | 495 | */ |
496 | 496 | ||
497 | skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC); | 497 | skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC); |
498 | if (skb == NULL) | 498 | if (!skb) |
499 | return NULL; | 499 | return NULL; |
500 | 500 | ||
501 | skb_reserve(skb, hlen); | 501 | skb_reserve(skb, hlen); |
@@ -503,9 +503,9 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, | |||
503 | arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev)); | 503 | arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev)); |
504 | skb->dev = dev; | 504 | skb->dev = dev; |
505 | skb->protocol = htons(ETH_P_ARP); | 505 | skb->protocol = htons(ETH_P_ARP); |
506 | if (src_hw == NULL) | 506 | if (!src_hw) |
507 | src_hw = dev->dev_addr; | 507 | src_hw = dev->dev_addr; |
508 | if (dest_hw == NULL) | 508 | if (!dest_hw) |
509 | dest_hw = dev->broadcast; | 509 | dest_hw = dev->broadcast; |
510 | 510 | ||
511 | /* | 511 | /* |
@@ -569,7 +569,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, | |||
569 | break; | 569 | break; |
570 | #endif | 570 | #endif |
571 | default: | 571 | default: |
572 | if (target_hw != NULL) | 572 | if (target_hw) |
573 | memcpy(arp_ptr, target_hw, dev->addr_len); | 573 | memcpy(arp_ptr, target_hw, dev->addr_len); |
574 | else | 574 | else |
575 | memset(arp_ptr, 0, dev->addr_len); | 575 | memset(arp_ptr, 0, dev->addr_len); |
@@ -591,7 +591,8 @@ EXPORT_SYMBOL(arp_create); | |||
591 | void arp_xmit(struct sk_buff *skb) | 591 | void arp_xmit(struct sk_buff *skb) |
592 | { | 592 | { |
593 | /* Send it off, maybe filter it using firewalling first. */ | 593 | /* Send it off, maybe filter it using firewalling first. */ |
594 | NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, skb, NULL, skb->dev, dev_queue_xmit); | 594 | NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, NULL, skb, |
595 | NULL, skb->dev, dev_queue_xmit_sk); | ||
595 | } | 596 | } |
596 | EXPORT_SYMBOL(arp_xmit); | 597 | EXPORT_SYMBOL(arp_xmit); |
597 | 598 | ||
@@ -614,7 +615,7 @@ void arp_send(int type, int ptype, __be32 dest_ip, | |||
614 | 615 | ||
615 | skb = arp_create(type, ptype, dest_ip, dev, src_ip, | 616 | skb = arp_create(type, ptype, dest_ip, dev, src_ip, |
616 | dest_hw, src_hw, target_hw); | 617 | dest_hw, src_hw, target_hw); |
617 | if (skb == NULL) | 618 | if (!skb) |
618 | return; | 619 | return; |
619 | 620 | ||
620 | arp_xmit(skb); | 621 | arp_xmit(skb); |
@@ -625,7 +626,7 @@ EXPORT_SYMBOL(arp_send); | |||
625 | * Process an arp request. | 626 | * Process an arp request. |
626 | */ | 627 | */ |
627 | 628 | ||
628 | static int arp_process(struct sk_buff *skb) | 629 | static int arp_process(struct sock *sk, struct sk_buff *skb) |
629 | { | 630 | { |
630 | struct net_device *dev = skb->dev; | 631 | struct net_device *dev = skb->dev; |
631 | struct in_device *in_dev = __in_dev_get_rcu(dev); | 632 | struct in_device *in_dev = __in_dev_get_rcu(dev); |
@@ -644,7 +645,7 @@ static int arp_process(struct sk_buff *skb) | |||
644 | * is ARP'able. | 645 | * is ARP'able. |
645 | */ | 646 | */ |
646 | 647 | ||
647 | if (in_dev == NULL) | 648 | if (!in_dev) |
648 | goto out; | 649 | goto out; |
649 | 650 | ||
650 | arp = arp_hdr(skb); | 651 | arp = arp_hdr(skb); |
@@ -808,7 +809,7 @@ static int arp_process(struct sk_buff *skb) | |||
808 | is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip && | 809 | is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip && |
809 | inet_addr_type(net, sip) == RTN_UNICAST; | 810 | inet_addr_type(net, sip) == RTN_UNICAST; |
810 | 811 | ||
811 | if (n == NULL && | 812 | if (!n && |
812 | ((arp->ar_op == htons(ARPOP_REPLY) && | 813 | ((arp->ar_op == htons(ARPOP_REPLY) && |
813 | inet_addr_type(net, sip) == RTN_UNICAST) || is_garp)) | 814 | inet_addr_type(net, sip) == RTN_UNICAST) || is_garp)) |
814 | n = __neigh_lookup(&arp_tbl, &sip, dev, 1); | 815 | n = __neigh_lookup(&arp_tbl, &sip, dev, 1); |
@@ -846,7 +847,7 @@ out: | |||
846 | 847 | ||
847 | static void parp_redo(struct sk_buff *skb) | 848 | static void parp_redo(struct sk_buff *skb) |
848 | { | 849 | { |
849 | arp_process(skb); | 850 | arp_process(NULL, skb); |
850 | } | 851 | } |
851 | 852 | ||
852 | 853 | ||
@@ -879,7 +880,8 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev, | |||
879 | 880 | ||
880 | memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb)); | 881 | memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb)); |
881 | 882 | ||
882 | return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process); | 883 | return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, NULL, skb, |
884 | dev, NULL, arp_process); | ||
883 | 885 | ||
884 | consumeskb: | 886 | consumeskb: |
885 | consume_skb(skb); | 887 | consume_skb(skb); |
@@ -900,7 +902,7 @@ out_of_mem: | |||
900 | 902 | ||
901 | static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on) | 903 | static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on) |
902 | { | 904 | { |
903 | if (dev == NULL) { | 905 | if (!dev) { |
904 | IPV4_DEVCONF_ALL(net, PROXY_ARP) = on; | 906 | IPV4_DEVCONF_ALL(net, PROXY_ARP) = on; |
905 | return 0; | 907 | return 0; |
906 | } | 908 | } |
@@ -926,7 +928,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r, | |||
926 | return -ENODEV; | 928 | return -ENODEV; |
927 | } | 929 | } |
928 | if (mask) { | 930 | if (mask) { |
929 | if (pneigh_lookup(&arp_tbl, net, &ip, dev, 1) == NULL) | 931 | if (!pneigh_lookup(&arp_tbl, net, &ip, dev, 1)) |
930 | return -ENOBUFS; | 932 | return -ENOBUFS; |
931 | return 0; | 933 | return 0; |
932 | } | 934 | } |
@@ -947,7 +949,7 @@ static int arp_req_set(struct net *net, struct arpreq *r, | |||
947 | ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr; | 949 | ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr; |
948 | if (r->arp_flags & ATF_PERM) | 950 | if (r->arp_flags & ATF_PERM) |
949 | r->arp_flags |= ATF_COM; | 951 | r->arp_flags |= ATF_COM; |
950 | if (dev == NULL) { | 952 | if (!dev) { |
951 | struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0); | 953 | struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0); |
952 | 954 | ||
953 | if (IS_ERR(rt)) | 955 | if (IS_ERR(rt)) |
@@ -1067,7 +1069,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r, | |||
1067 | return arp_req_delete_public(net, r, dev); | 1069 | return arp_req_delete_public(net, r, dev); |
1068 | 1070 | ||
1069 | ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr; | 1071 | ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr; |
1070 | if (dev == NULL) { | 1072 | if (!dev) { |
1071 | struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0); | 1073 | struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0); |
1072 | if (IS_ERR(rt)) | 1074 | if (IS_ERR(rt)) |
1073 | return PTR_ERR(rt); | 1075 | return PTR_ERR(rt); |
@@ -1116,7 +1118,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
1116 | if (r.arp_dev[0]) { | 1118 | if (r.arp_dev[0]) { |
1117 | err = -ENODEV; | 1119 | err = -ENODEV; |
1118 | dev = __dev_get_by_name(net, r.arp_dev); | 1120 | dev = __dev_get_by_name(net, r.arp_dev); |
1119 | if (dev == NULL) | 1121 | if (!dev) |
1120 | goto out; | 1122 | goto out; |
1121 | 1123 | ||
1122 | /* Mmmm... It is wrong... ARPHRD_NETROM==0 */ | 1124 | /* Mmmm... It is wrong... ARPHRD_NETROM==0 */ |
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index e361ea6f3fc8..bdb2a07ec363 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c | |||
@@ -255,7 +255,7 @@ static int __init cipso_v4_cache_init(void) | |||
255 | cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, | 255 | cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, |
256 | sizeof(struct cipso_v4_map_cache_bkt), | 256 | sizeof(struct cipso_v4_map_cache_bkt), |
257 | GFP_KERNEL); | 257 | GFP_KERNEL); |
258 | if (cipso_v4_cache == NULL) | 258 | if (!cipso_v4_cache) |
259 | return -ENOMEM; | 259 | return -ENOMEM; |
260 | 260 | ||
261 | for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { | 261 | for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { |
@@ -339,7 +339,7 @@ static int cipso_v4_cache_check(const unsigned char *key, | |||
339 | secattr->cache = entry->lsm_data; | 339 | secattr->cache = entry->lsm_data; |
340 | secattr->flags |= NETLBL_SECATTR_CACHE; | 340 | secattr->flags |= NETLBL_SECATTR_CACHE; |
341 | secattr->type = NETLBL_NLTYPE_CIPSOV4; | 341 | secattr->type = NETLBL_NLTYPE_CIPSOV4; |
342 | if (prev_entry == NULL) { | 342 | if (!prev_entry) { |
343 | spin_unlock_bh(&cipso_v4_cache[bkt].lock); | 343 | spin_unlock_bh(&cipso_v4_cache[bkt].lock); |
344 | return 0; | 344 | return 0; |
345 | } | 345 | } |
@@ -393,10 +393,10 @@ int cipso_v4_cache_add(const unsigned char *cipso_ptr, | |||
393 | cipso_ptr_len = cipso_ptr[1]; | 393 | cipso_ptr_len = cipso_ptr[1]; |
394 | 394 | ||
395 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); | 395 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); |
396 | if (entry == NULL) | 396 | if (!entry) |
397 | return -ENOMEM; | 397 | return -ENOMEM; |
398 | entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); | 398 | entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); |
399 | if (entry->key == NULL) { | 399 | if (!entry->key) { |
400 | ret_val = -ENOMEM; | 400 | ret_val = -ENOMEM; |
401 | goto cache_add_failure; | 401 | goto cache_add_failure; |
402 | } | 402 | } |
@@ -502,7 +502,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, | |||
502 | atomic_set(&doi_def->refcount, 1); | 502 | atomic_set(&doi_def->refcount, 1); |
503 | 503 | ||
504 | spin_lock(&cipso_v4_doi_list_lock); | 504 | spin_lock(&cipso_v4_doi_list_lock); |
505 | if (cipso_v4_doi_search(doi_def->doi) != NULL) { | 505 | if (cipso_v4_doi_search(doi_def->doi)) { |
506 | spin_unlock(&cipso_v4_doi_list_lock); | 506 | spin_unlock(&cipso_v4_doi_list_lock); |
507 | ret_val = -EEXIST; | 507 | ret_val = -EEXIST; |
508 | goto doi_add_return; | 508 | goto doi_add_return; |
@@ -513,7 +513,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, | |||
513 | 513 | ||
514 | doi_add_return: | 514 | doi_add_return: |
515 | audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); | 515 | audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); |
516 | if (audit_buf != NULL) { | 516 | if (audit_buf) { |
517 | const char *type_str; | 517 | const char *type_str; |
518 | switch (doi_type) { | 518 | switch (doi_type) { |
519 | case CIPSO_V4_MAP_TRANS: | 519 | case CIPSO_V4_MAP_TRANS: |
@@ -547,7 +547,7 @@ doi_add_return: | |||
547 | */ | 547 | */ |
548 | void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) | 548 | void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) |
549 | { | 549 | { |
550 | if (doi_def == NULL) | 550 | if (!doi_def) |
551 | return; | 551 | return; |
552 | 552 | ||
553 | switch (doi_def->type) { | 553 | switch (doi_def->type) { |
@@ -598,7 +598,7 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) | |||
598 | 598 | ||
599 | spin_lock(&cipso_v4_doi_list_lock); | 599 | spin_lock(&cipso_v4_doi_list_lock); |
600 | doi_def = cipso_v4_doi_search(doi); | 600 | doi_def = cipso_v4_doi_search(doi); |
601 | if (doi_def == NULL) { | 601 | if (!doi_def) { |
602 | spin_unlock(&cipso_v4_doi_list_lock); | 602 | spin_unlock(&cipso_v4_doi_list_lock); |
603 | ret_val = -ENOENT; | 603 | ret_val = -ENOENT; |
604 | goto doi_remove_return; | 604 | goto doi_remove_return; |
@@ -617,7 +617,7 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) | |||
617 | 617 | ||
618 | doi_remove_return: | 618 | doi_remove_return: |
619 | audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); | 619 | audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); |
620 | if (audit_buf != NULL) { | 620 | if (audit_buf) { |
621 | audit_log_format(audit_buf, | 621 | audit_log_format(audit_buf, |
622 | " cipso_doi=%u res=%u", | 622 | " cipso_doi=%u res=%u", |
623 | doi, ret_val == 0 ? 1 : 0); | 623 | doi, ret_val == 0 ? 1 : 0); |
@@ -644,7 +644,7 @@ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) | |||
644 | 644 | ||
645 | rcu_read_lock(); | 645 | rcu_read_lock(); |
646 | doi_def = cipso_v4_doi_search(doi); | 646 | doi_def = cipso_v4_doi_search(doi); |
647 | if (doi_def == NULL) | 647 | if (!doi_def) |
648 | goto doi_getdef_return; | 648 | goto doi_getdef_return; |
649 | if (!atomic_inc_not_zero(&doi_def->refcount)) | 649 | if (!atomic_inc_not_zero(&doi_def->refcount)) |
650 | doi_def = NULL; | 650 | doi_def = NULL; |
@@ -664,7 +664,7 @@ doi_getdef_return: | |||
664 | */ | 664 | */ |
665 | void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) | 665 | void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) |
666 | { | 666 | { |
667 | if (doi_def == NULL) | 667 | if (!doi_def) |
668 | return; | 668 | return; |
669 | 669 | ||
670 | if (!atomic_dec_and_test(&doi_def->refcount)) | 670 | if (!atomic_dec_and_test(&doi_def->refcount)) |
@@ -1642,7 +1642,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) | |||
1642 | 1642 | ||
1643 | rcu_read_lock(); | 1643 | rcu_read_lock(); |
1644 | doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); | 1644 | doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); |
1645 | if (doi_def == NULL) { | 1645 | if (!doi_def) { |
1646 | err_offset = 2; | 1646 | err_offset = 2; |
1647 | goto validate_return_locked; | 1647 | goto validate_return_locked; |
1648 | } | 1648 | } |
@@ -1736,7 +1736,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) | |||
1736 | * not the loopback device drop the packet. Further, | 1736 | * not the loopback device drop the packet. Further, |
1737 | * there is no legitimate reason for setting this from | 1737 | * there is no legitimate reason for setting this from |
1738 | * userspace so reject it if skb is NULL. */ | 1738 | * userspace so reject it if skb is NULL. */ |
1739 | if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) { | 1739 | if (!skb || !(skb->dev->flags & IFF_LOOPBACK)) { |
1740 | err_offset = opt_iter; | 1740 | err_offset = opt_iter; |
1741 | goto validate_return_locked; | 1741 | goto validate_return_locked; |
1742 | } | 1742 | } |
@@ -1897,7 +1897,7 @@ int cipso_v4_sock_setattr(struct sock *sk, | |||
1897 | * defined yet but it is not a problem as the only users of these | 1897 | * defined yet but it is not a problem as the only users of these |
1898 | * "lite" PF_INET sockets are functions which do an accept() call | 1898 | * "lite" PF_INET sockets are functions which do an accept() call |
1899 | * afterwards so we will label the socket as part of the accept(). */ | 1899 | * afterwards so we will label the socket as part of the accept(). */ |
1900 | if (sk == NULL) | 1900 | if (!sk) |
1901 | return 0; | 1901 | return 0; |
1902 | 1902 | ||
1903 | /* We allocate the maximum CIPSO option size here so we are probably | 1903 | /* We allocate the maximum CIPSO option size here so we are probably |
@@ -1905,7 +1905,7 @@ int cipso_v4_sock_setattr(struct sock *sk, | |||
1905 | * on and after all we are only talking about 40 bytes. */ | 1905 | * on and after all we are only talking about 40 bytes. */ |
1906 | buf_len = CIPSO_V4_OPT_LEN_MAX; | 1906 | buf_len = CIPSO_V4_OPT_LEN_MAX; |
1907 | buf = kmalloc(buf_len, GFP_ATOMIC); | 1907 | buf = kmalloc(buf_len, GFP_ATOMIC); |
1908 | if (buf == NULL) { | 1908 | if (!buf) { |
1909 | ret_val = -ENOMEM; | 1909 | ret_val = -ENOMEM; |
1910 | goto socket_setattr_failure; | 1910 | goto socket_setattr_failure; |
1911 | } | 1911 | } |
@@ -1921,7 +1921,7 @@ int cipso_v4_sock_setattr(struct sock *sk, | |||
1921 | * set the IPOPT_CIPSO option. */ | 1921 | * set the IPOPT_CIPSO option. */ |
1922 | opt_len = (buf_len + 3) & ~3; | 1922 | opt_len = (buf_len + 3) & ~3; |
1923 | opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); | 1923 | opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); |
1924 | if (opt == NULL) { | 1924 | if (!opt) { |
1925 | ret_val = -ENOMEM; | 1925 | ret_val = -ENOMEM; |
1926 | goto socket_setattr_failure; | 1926 | goto socket_setattr_failure; |
1927 | } | 1927 | } |
@@ -1981,7 +1981,7 @@ int cipso_v4_req_setattr(struct request_sock *req, | |||
1981 | * on and after all we are only talking about 40 bytes. */ | 1981 | * on and after all we are only talking about 40 bytes. */ |
1982 | buf_len = CIPSO_V4_OPT_LEN_MAX; | 1982 | buf_len = CIPSO_V4_OPT_LEN_MAX; |
1983 | buf = kmalloc(buf_len, GFP_ATOMIC); | 1983 | buf = kmalloc(buf_len, GFP_ATOMIC); |
1984 | if (buf == NULL) { | 1984 | if (!buf) { |
1985 | ret_val = -ENOMEM; | 1985 | ret_val = -ENOMEM; |
1986 | goto req_setattr_failure; | 1986 | goto req_setattr_failure; |
1987 | } | 1987 | } |
@@ -1997,7 +1997,7 @@ int cipso_v4_req_setattr(struct request_sock *req, | |||
1997 | * set the IPOPT_CIPSO option. */ | 1997 | * set the IPOPT_CIPSO option. */ |
1998 | opt_len = (buf_len + 3) & ~3; | 1998 | opt_len = (buf_len + 3) & ~3; |
1999 | opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); | 1999 | opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); |
2000 | if (opt == NULL) { | 2000 | if (!opt) { |
2001 | ret_val = -ENOMEM; | 2001 | ret_val = -ENOMEM; |
2002 | goto req_setattr_failure; | 2002 | goto req_setattr_failure; |
2003 | } | 2003 | } |
@@ -2102,7 +2102,7 @@ void cipso_v4_sock_delattr(struct sock *sk) | |||
2102 | 2102 | ||
2103 | sk_inet = inet_sk(sk); | 2103 | sk_inet = inet_sk(sk); |
2104 | opt = rcu_dereference_protected(sk_inet->inet_opt, 1); | 2104 | opt = rcu_dereference_protected(sk_inet->inet_opt, 1); |
2105 | if (opt == NULL || opt->opt.cipso == 0) | 2105 | if (!opt || opt->opt.cipso == 0) |
2106 | return; | 2106 | return; |
2107 | 2107 | ||
2108 | hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); | 2108 | hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); |
@@ -2128,7 +2128,7 @@ void cipso_v4_req_delattr(struct request_sock *req) | |||
2128 | 2128 | ||
2129 | req_inet = inet_rsk(req); | 2129 | req_inet = inet_rsk(req); |
2130 | opt = req_inet->opt; | 2130 | opt = req_inet->opt; |
2131 | if (opt == NULL || opt->opt.cipso == 0) | 2131 | if (!opt || opt->opt.cipso == 0) |
2132 | return; | 2132 | return; |
2133 | 2133 | ||
2134 | cipso_v4_delopt(&req_inet->opt); | 2134 | cipso_v4_delopt(&req_inet->opt); |
@@ -2157,7 +2157,7 @@ int cipso_v4_getattr(const unsigned char *cipso, | |||
2157 | doi = get_unaligned_be32(&cipso[2]); | 2157 | doi = get_unaligned_be32(&cipso[2]); |
2158 | rcu_read_lock(); | 2158 | rcu_read_lock(); |
2159 | doi_def = cipso_v4_doi_search(doi); | 2159 | doi_def = cipso_v4_doi_search(doi); |
2160 | if (doi_def == NULL) | 2160 | if (!doi_def) |
2161 | goto getattr_return; | 2161 | goto getattr_return; |
2162 | /* XXX - This code assumes only one tag per CIPSO option which isn't | 2162 | /* XXX - This code assumes only one tag per CIPSO option which isn't |
2163 | * really a good assumption to make but since we only support the MAC | 2163 | * really a good assumption to make but since we only support the MAC |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index c6473f365ad1..419d23c53ec7 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -585,7 +585,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
585 | 585 | ||
586 | ifm = nlmsg_data(nlh); | 586 | ifm = nlmsg_data(nlh); |
587 | in_dev = inetdev_by_index(net, ifm->ifa_index); | 587 | in_dev = inetdev_by_index(net, ifm->ifa_index); |
588 | if (in_dev == NULL) { | 588 | if (!in_dev) { |
589 | err = -ENODEV; | 589 | err = -ENODEV; |
590 | goto errout; | 590 | goto errout; |
591 | } | 591 | } |
@@ -755,21 +755,21 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh, | |||
755 | 755 | ||
756 | ifm = nlmsg_data(nlh); | 756 | ifm = nlmsg_data(nlh); |
757 | err = -EINVAL; | 757 | err = -EINVAL; |
758 | if (ifm->ifa_prefixlen > 32 || tb[IFA_LOCAL] == NULL) | 758 | if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL]) |
759 | goto errout; | 759 | goto errout; |
760 | 760 | ||
761 | dev = __dev_get_by_index(net, ifm->ifa_index); | 761 | dev = __dev_get_by_index(net, ifm->ifa_index); |
762 | err = -ENODEV; | 762 | err = -ENODEV; |
763 | if (dev == NULL) | 763 | if (!dev) |
764 | goto errout; | 764 | goto errout; |
765 | 765 | ||
766 | in_dev = __in_dev_get_rtnl(dev); | 766 | in_dev = __in_dev_get_rtnl(dev); |
767 | err = -ENOBUFS; | 767 | err = -ENOBUFS; |
768 | if (in_dev == NULL) | 768 | if (!in_dev) |
769 | goto errout; | 769 | goto errout; |
770 | 770 | ||
771 | ifa = inet_alloc_ifa(); | 771 | ifa = inet_alloc_ifa(); |
772 | if (ifa == NULL) | 772 | if (!ifa) |
773 | /* | 773 | /* |
774 | * A potential indev allocation can be left alive, it stays | 774 | * A potential indev allocation can be left alive, it stays |
775 | * assigned to its device and is destroy with it. | 775 | * assigned to its device and is destroy with it. |
@@ -780,7 +780,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh, | |||
780 | neigh_parms_data_state_setall(in_dev->arp_parms); | 780 | neigh_parms_data_state_setall(in_dev->arp_parms); |
781 | in_dev_hold(in_dev); | 781 | in_dev_hold(in_dev); |
782 | 782 | ||
783 | if (tb[IFA_ADDRESS] == NULL) | 783 | if (!tb[IFA_ADDRESS]) |
784 | tb[IFA_ADDRESS] = tb[IFA_LOCAL]; | 784 | tb[IFA_ADDRESS] = tb[IFA_LOCAL]; |
785 | 785 | ||
786 | INIT_HLIST_NODE(&ifa->hash); | 786 | INIT_HLIST_NODE(&ifa->hash); |
@@ -1290,7 +1290,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, | |||
1290 | __be32 addr = 0; | 1290 | __be32 addr = 0; |
1291 | struct net_device *dev; | 1291 | struct net_device *dev; |
1292 | 1292 | ||
1293 | if (in_dev != NULL) | 1293 | if (in_dev) |
1294 | return confirm_addr_indev(in_dev, dst, local, scope); | 1294 | return confirm_addr_indev(in_dev, dst, local, scope); |
1295 | 1295 | ||
1296 | rcu_read_lock(); | 1296 | rcu_read_lock(); |
@@ -1340,7 +1340,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev) | |||
1340 | if (named++ == 0) | 1340 | if (named++ == 0) |
1341 | goto skip; | 1341 | goto skip; |
1342 | dot = strchr(old, ':'); | 1342 | dot = strchr(old, ':'); |
1343 | if (dot == NULL) { | 1343 | if (!dot) { |
1344 | sprintf(old, ":%d", named); | 1344 | sprintf(old, ":%d", named); |
1345 | dot = old; | 1345 | dot = old; |
1346 | } | 1346 | } |
@@ -1509,7 +1509,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, | |||
1509 | u32 preferred, valid; | 1509 | u32 preferred, valid; |
1510 | 1510 | ||
1511 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags); | 1511 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags); |
1512 | if (nlh == NULL) | 1512 | if (!nlh) |
1513 | return -EMSGSIZE; | 1513 | return -EMSGSIZE; |
1514 | 1514 | ||
1515 | ifm = nlmsg_data(nlh); | 1515 | ifm = nlmsg_data(nlh); |
@@ -1628,7 +1628,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, | |||
1628 | 1628 | ||
1629 | net = dev_net(ifa->ifa_dev->dev); | 1629 | net = dev_net(ifa->ifa_dev->dev); |
1630 | skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL); | 1630 | skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL); |
1631 | if (skb == NULL) | 1631 | if (!skb) |
1632 | goto errout; | 1632 | goto errout; |
1633 | 1633 | ||
1634 | err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0); | 1634 | err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0); |
@@ -1665,7 +1665,7 @@ static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev) | |||
1665 | return -ENODATA; | 1665 | return -ENODATA; |
1666 | 1666 | ||
1667 | nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4); | 1667 | nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4); |
1668 | if (nla == NULL) | 1668 | if (!nla) |
1669 | return -EMSGSIZE; | 1669 | return -EMSGSIZE; |
1670 | 1670 | ||
1671 | for (i = 0; i < IPV4_DEVCONF_MAX; i++) | 1671 | for (i = 0; i < IPV4_DEVCONF_MAX; i++) |
@@ -1754,7 +1754,7 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex, | |||
1754 | 1754 | ||
1755 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg), | 1755 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg), |
1756 | flags); | 1756 | flags); |
1757 | if (nlh == NULL) | 1757 | if (!nlh) |
1758 | return -EMSGSIZE; | 1758 | return -EMSGSIZE; |
1759 | 1759 | ||
1760 | ncm = nlmsg_data(nlh); | 1760 | ncm = nlmsg_data(nlh); |
@@ -1796,7 +1796,7 @@ void inet_netconf_notify_devconf(struct net *net, int type, int ifindex, | |||
1796 | int err = -ENOBUFS; | 1796 | int err = -ENOBUFS; |
1797 | 1797 | ||
1798 | skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC); | 1798 | skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC); |
1799 | if (skb == NULL) | 1799 | if (!skb) |
1800 | goto errout; | 1800 | goto errout; |
1801 | 1801 | ||
1802 | err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0, | 1802 | err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0, |
@@ -1853,10 +1853,10 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb, | |||
1853 | break; | 1853 | break; |
1854 | default: | 1854 | default: |
1855 | dev = __dev_get_by_index(net, ifindex); | 1855 | dev = __dev_get_by_index(net, ifindex); |
1856 | if (dev == NULL) | 1856 | if (!dev) |
1857 | goto errout; | 1857 | goto errout; |
1858 | in_dev = __in_dev_get_rtnl(dev); | 1858 | in_dev = __in_dev_get_rtnl(dev); |
1859 | if (in_dev == NULL) | 1859 | if (!in_dev) |
1860 | goto errout; | 1860 | goto errout; |
1861 | devconf = &in_dev->cnf; | 1861 | devconf = &in_dev->cnf; |
1862 | break; | 1862 | break; |
@@ -1864,7 +1864,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb, | |||
1864 | 1864 | ||
1865 | err = -ENOBUFS; | 1865 | err = -ENOBUFS; |
1866 | skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC); | 1866 | skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC); |
1867 | if (skb == NULL) | 1867 | if (!skb) |
1868 | goto errout; | 1868 | goto errout; |
1869 | 1869 | ||
1870 | err = inet_netconf_fill_devconf(skb, ifindex, devconf, | 1870 | err = inet_netconf_fill_devconf(skb, ifindex, devconf, |
@@ -2215,7 +2215,7 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf) | |||
2215 | { | 2215 | { |
2216 | struct devinet_sysctl_table *t = cnf->sysctl; | 2216 | struct devinet_sysctl_table *t = cnf->sysctl; |
2217 | 2217 | ||
2218 | if (t == NULL) | 2218 | if (!t) |
2219 | return; | 2219 | return; |
2220 | 2220 | ||
2221 | cnf->sysctl = NULL; | 2221 | cnf->sysctl = NULL; |
@@ -2276,16 +2276,16 @@ static __net_init int devinet_init_net(struct net *net) | |||
2276 | 2276 | ||
2277 | if (!net_eq(net, &init_net)) { | 2277 | if (!net_eq(net, &init_net)) { |
2278 | all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL); | 2278 | all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL); |
2279 | if (all == NULL) | 2279 | if (!all) |
2280 | goto err_alloc_all; | 2280 | goto err_alloc_all; |
2281 | 2281 | ||
2282 | dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL); | 2282 | dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL); |
2283 | if (dflt == NULL) | 2283 | if (!dflt) |
2284 | goto err_alloc_dflt; | 2284 | goto err_alloc_dflt; |
2285 | 2285 | ||
2286 | #ifdef CONFIG_SYSCTL | 2286 | #ifdef CONFIG_SYSCTL |
2287 | tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL); | 2287 | tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL); |
2288 | if (tbl == NULL) | 2288 | if (!tbl) |
2289 | goto err_alloc_ctl; | 2289 | goto err_alloc_ctl; |
2290 | 2290 | ||
2291 | tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1]; | 2291 | tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1]; |
@@ -2305,7 +2305,7 @@ static __net_init int devinet_init_net(struct net *net) | |||
2305 | 2305 | ||
2306 | err = -ENOMEM; | 2306 | err = -ENOMEM; |
2307 | forw_hdr = register_net_sysctl(net, "net/ipv4", tbl); | 2307 | forw_hdr = register_net_sysctl(net, "net/ipv4", tbl); |
2308 | if (forw_hdr == NULL) | 2308 | if (!forw_hdr) |
2309 | goto err_reg_ctl; | 2309 | goto err_reg_ctl; |
2310 | net->ipv4.forw_hdr = forw_hdr; | 2310 | net->ipv4.forw_hdr = forw_hdr; |
2311 | #endif | 2311 | #endif |
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 60173d4d3a0e..421a80b09b62 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c | |||
@@ -553,7 +553,7 @@ static int esp_init_authenc(struct xfrm_state *x) | |||
553 | int err; | 553 | int err; |
554 | 554 | ||
555 | err = -EINVAL; | 555 | err = -EINVAL; |
556 | if (x->ealg == NULL) | 556 | if (!x->ealg) |
557 | goto error; | 557 | goto error; |
558 | 558 | ||
559 | err = -ENAMETOOLONG; | 559 | err = -ENAMETOOLONG; |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 718b0a16ea40..872494e6e6eb 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -53,11 +53,11 @@ static int __net_init fib4_rules_init(struct net *net) | |||
53 | struct fib_table *local_table, *main_table; | 53 | struct fib_table *local_table, *main_table; |
54 | 54 | ||
55 | main_table = fib_trie_table(RT_TABLE_MAIN, NULL); | 55 | main_table = fib_trie_table(RT_TABLE_MAIN, NULL); |
56 | if (main_table == NULL) | 56 | if (!main_table) |
57 | return -ENOMEM; | 57 | return -ENOMEM; |
58 | 58 | ||
59 | local_table = fib_trie_table(RT_TABLE_LOCAL, main_table); | 59 | local_table = fib_trie_table(RT_TABLE_LOCAL, main_table); |
60 | if (local_table == NULL) | 60 | if (!local_table) |
61 | goto fail; | 61 | goto fail; |
62 | 62 | ||
63 | hlist_add_head_rcu(&local_table->tb_hlist, | 63 | hlist_add_head_rcu(&local_table->tb_hlist, |
@@ -486,7 +486,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt, | |||
486 | for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) | 486 | for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) |
487 | if (strcmp(ifa->ifa_label, devname) == 0) | 487 | if (strcmp(ifa->ifa_label, devname) == 0) |
488 | break; | 488 | break; |
489 | if (ifa == NULL) | 489 | if (!ifa) |
490 | return -ENODEV; | 490 | return -ENODEV; |
491 | cfg->fc_prefsrc = ifa->ifa_local; | 491 | cfg->fc_prefsrc = ifa->ifa_local; |
492 | } | 492 | } |
@@ -514,7 +514,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt, | |||
514 | int len = 0; | 514 | int len = 0; |
515 | 515 | ||
516 | mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL); | 516 | mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL); |
517 | if (mx == NULL) | 517 | if (!mx) |
518 | return -ENOMEM; | 518 | return -ENOMEM; |
519 | 519 | ||
520 | if (rt->rt_flags & RTF_MTU) | 520 | if (rt->rt_flags & RTF_MTU) |
@@ -676,7 +676,7 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
676 | goto errout; | 676 | goto errout; |
677 | 677 | ||
678 | tb = fib_get_table(net, cfg.fc_table); | 678 | tb = fib_get_table(net, cfg.fc_table); |
679 | if (tb == NULL) { | 679 | if (!tb) { |
680 | err = -ESRCH; | 680 | err = -ESRCH; |
681 | goto errout; | 681 | goto errout; |
682 | } | 682 | } |
@@ -698,7 +698,7 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
698 | goto errout; | 698 | goto errout; |
699 | 699 | ||
700 | tb = fib_new_table(net, cfg.fc_table); | 700 | tb = fib_new_table(net, cfg.fc_table); |
701 | if (tb == NULL) { | 701 | if (!tb) { |
702 | err = -ENOBUFS; | 702 | err = -ENOBUFS; |
703 | goto errout; | 703 | goto errout; |
704 | } | 704 | } |
@@ -779,7 +779,7 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad | |||
779 | else | 779 | else |
780 | tb = fib_new_table(net, RT_TABLE_LOCAL); | 780 | tb = fib_new_table(net, RT_TABLE_LOCAL); |
781 | 781 | ||
782 | if (tb == NULL) | 782 | if (!tb) |
783 | return; | 783 | return; |
784 | 784 | ||
785 | cfg.fc_table = tb->tb_id; | 785 | cfg.fc_table = tb->tb_id; |
@@ -806,7 +806,7 @@ void fib_add_ifaddr(struct in_ifaddr *ifa) | |||
806 | 806 | ||
807 | if (ifa->ifa_flags & IFA_F_SECONDARY) { | 807 | if (ifa->ifa_flags & IFA_F_SECONDARY) { |
808 | prim = inet_ifa_byprefix(in_dev, prefix, mask); | 808 | prim = inet_ifa_byprefix(in_dev, prefix, mask); |
809 | if (prim == NULL) { | 809 | if (!prim) { |
810 | pr_warn("%s: bug: prim == NULL\n", __func__); | 810 | pr_warn("%s: bug: prim == NULL\n", __func__); |
811 | return; | 811 | return; |
812 | } | 812 | } |
@@ -860,7 +860,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim) | |||
860 | 860 | ||
861 | if (ifa->ifa_flags & IFA_F_SECONDARY) { | 861 | if (ifa->ifa_flags & IFA_F_SECONDARY) { |
862 | prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); | 862 | prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); |
863 | if (prim == NULL) { | 863 | if (!prim) { |
864 | pr_warn("%s: bug: prim == NULL\n", __func__); | 864 | pr_warn("%s: bug: prim == NULL\n", __func__); |
865 | return; | 865 | return; |
866 | } | 866 | } |
@@ -1030,7 +1030,7 @@ static void nl_fib_input(struct sk_buff *skb) | |||
1030 | return; | 1030 | return; |
1031 | 1031 | ||
1032 | skb = netlink_skb_clone(skb, GFP_KERNEL); | 1032 | skb = netlink_skb_clone(skb, GFP_KERNEL); |
1033 | if (skb == NULL) | 1033 | if (!skb) |
1034 | return; | 1034 | return; |
1035 | nlh = nlmsg_hdr(skb); | 1035 | nlh = nlmsg_hdr(skb); |
1036 | 1036 | ||
@@ -1051,7 +1051,7 @@ static int __net_init nl_fib_lookup_init(struct net *net) | |||
1051 | }; | 1051 | }; |
1052 | 1052 | ||
1053 | sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg); | 1053 | sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg); |
1054 | if (sk == NULL) | 1054 | if (!sk) |
1055 | return -EAFNOSUPPORT; | 1055 | return -EAFNOSUPPORT; |
1056 | net->ipv4.fibnl = sk; | 1056 | net->ipv4.fibnl = sk; |
1057 | return 0; | 1057 | return 0; |
@@ -1089,7 +1089,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
1089 | case NETDEV_DOWN: | 1089 | case NETDEV_DOWN: |
1090 | fib_del_ifaddr(ifa, NULL); | 1090 | fib_del_ifaddr(ifa, NULL); |
1091 | atomic_inc(&net->ipv4.dev_addr_genid); | 1091 | atomic_inc(&net->ipv4.dev_addr_genid); |
1092 | if (ifa->ifa_dev->ifa_list == NULL) { | 1092 | if (!ifa->ifa_dev->ifa_list) { |
1093 | /* Last address was deleted from this interface. | 1093 | /* Last address was deleted from this interface. |
1094 | * Disable IP. | 1094 | * Disable IP. |
1095 | */ | 1095 | */ |
@@ -1157,7 +1157,7 @@ static int __net_init ip_fib_net_init(struct net *net) | |||
1157 | size = max_t(size_t, size, L1_CACHE_BYTES); | 1157 | size = max_t(size_t, size, L1_CACHE_BYTES); |
1158 | 1158 | ||
1159 | net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL); | 1159 | net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL); |
1160 | if (net->ipv4.fib_table_hash == NULL) | 1160 | if (!net->ipv4.fib_table_hash) |
1161 | return -ENOMEM; | 1161 | return -ENOMEM; |
1162 | 1162 | ||
1163 | err = fib4_rules_init(net); | 1163 | err = fib4_rules_init(net); |
@@ -1175,13 +1175,11 @@ static void ip_fib_net_exit(struct net *net) | |||
1175 | unsigned int i; | 1175 | unsigned int i; |
1176 | 1176 | ||
1177 | rtnl_lock(); | 1177 | rtnl_lock(); |
1178 | |||
1179 | #ifdef CONFIG_IP_MULTIPLE_TABLES | 1178 | #ifdef CONFIG_IP_MULTIPLE_TABLES |
1180 | RCU_INIT_POINTER(net->ipv4.fib_local, NULL); | 1179 | RCU_INIT_POINTER(net->ipv4.fib_local, NULL); |
1181 | RCU_INIT_POINTER(net->ipv4.fib_main, NULL); | 1180 | RCU_INIT_POINTER(net->ipv4.fib_main, NULL); |
1182 | RCU_INIT_POINTER(net->ipv4.fib_default, NULL); | 1181 | RCU_INIT_POINTER(net->ipv4.fib_default, NULL); |
1183 | #endif | 1182 | #endif |
1184 | |||
1185 | for (i = 0; i < FIB_TABLE_HASHSZ; i++) { | 1183 | for (i = 0; i < FIB_TABLE_HASHSZ; i++) { |
1186 | struct hlist_head *head = &net->ipv4.fib_table_hash[i]; | 1184 | struct hlist_head *head = &net->ipv4.fib_table_hash[i]; |
1187 | struct hlist_node *tmp; | 1185 | struct hlist_node *tmp; |
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 8162dd8e86d7..56151982f74e 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c | |||
@@ -153,7 +153,7 @@ static struct fib_table *fib_empty_table(struct net *net) | |||
153 | u32 id; | 153 | u32 id; |
154 | 154 | ||
155 | for (id = 1; id <= RT_TABLE_MAX; id++) | 155 | for (id = 1; id <= RT_TABLE_MAX; id++) |
156 | if (fib_get_table(net, id) == NULL) | 156 | if (!fib_get_table(net, id)) |
157 | return fib_new_table(net, id); | 157 | return fib_new_table(net, id); |
158 | return NULL; | 158 | return NULL; |
159 | } | 159 | } |
@@ -184,7 +184,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, | |||
184 | struct fib_table *table; | 184 | struct fib_table *table; |
185 | 185 | ||
186 | table = fib_empty_table(net); | 186 | table = fib_empty_table(net); |
187 | if (table == NULL) { | 187 | if (!table) { |
188 | err = -ENOBUFS; | 188 | err = -ENOBUFS; |
189 | goto errout; | 189 | goto errout; |
190 | } | 190 | } |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index eac5aec7772a..8d695b6659c7 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c | |||
@@ -390,7 +390,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, | |||
390 | int err = -ENOBUFS; | 390 | int err = -ENOBUFS; |
391 | 391 | ||
392 | skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL); | 392 | skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL); |
393 | if (skb == NULL) | 393 | if (!skb) |
394 | goto errout; | 394 | goto errout; |
395 | 395 | ||
396 | err = fib_dump_info(skb, info->portid, seq, event, tb_id, | 396 | err = fib_dump_info(skb, info->portid, seq, event, tb_id, |
@@ -503,7 +503,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi) | |||
503 | } | 503 | } |
504 | 504 | ||
505 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 505 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
506 | if (cfg->fc_mp == NULL) | 506 | if (!cfg->fc_mp) |
507 | return 0; | 507 | return 0; |
508 | 508 | ||
509 | rtnh = cfg->fc_mp; | 509 | rtnh = cfg->fc_mp; |
@@ -646,7 +646,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi, | |||
646 | rcu_read_lock(); | 646 | rcu_read_lock(); |
647 | err = -ENODEV; | 647 | err = -ENODEV; |
648 | in_dev = inetdev_by_index(net, nh->nh_oif); | 648 | in_dev = inetdev_by_index(net, nh->nh_oif); |
649 | if (in_dev == NULL) | 649 | if (!in_dev) |
650 | goto out; | 650 | goto out; |
651 | err = -ENETDOWN; | 651 | err = -ENETDOWN; |
652 | if (!(in_dev->dev->flags & IFF_UP)) | 652 | if (!(in_dev->dev->flags & IFF_UP)) |
@@ -803,7 +803,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg) | |||
803 | } | 803 | } |
804 | 804 | ||
805 | fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); | 805 | fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); |
806 | if (fi == NULL) | 806 | if (!fi) |
807 | goto failure; | 807 | goto failure; |
808 | fib_info_cnt++; | 808 | fib_info_cnt++; |
809 | if (cfg->fc_mx) { | 809 | if (cfg->fc_mx) { |
@@ -921,7 +921,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg) | |||
921 | nh->nh_scope = RT_SCOPE_NOWHERE; | 921 | nh->nh_scope = RT_SCOPE_NOWHERE; |
922 | nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif); | 922 | nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif); |
923 | err = -ENODEV; | 923 | err = -ENODEV; |
924 | if (nh->nh_dev == NULL) | 924 | if (!nh->nh_dev) |
925 | goto failure; | 925 | goto failure; |
926 | } else { | 926 | } else { |
927 | change_nexthops(fi) { | 927 | change_nexthops(fi) { |
@@ -995,7 +995,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, | |||
995 | struct rtmsg *rtm; | 995 | struct rtmsg *rtm; |
996 | 996 | ||
997 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags); | 997 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags); |
998 | if (nlh == NULL) | 998 | if (!nlh) |
999 | return -EMSGSIZE; | 999 | return -EMSGSIZE; |
1000 | 1000 | ||
1001 | rtm = nlmsg_data(nlh); | 1001 | rtm = nlmsg_data(nlh); |
@@ -1045,12 +1045,12 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, | |||
1045 | struct nlattr *mp; | 1045 | struct nlattr *mp; |
1046 | 1046 | ||
1047 | mp = nla_nest_start(skb, RTA_MULTIPATH); | 1047 | mp = nla_nest_start(skb, RTA_MULTIPATH); |
1048 | if (mp == NULL) | 1048 | if (!mp) |
1049 | goto nla_put_failure; | 1049 | goto nla_put_failure; |
1050 | 1050 | ||
1051 | for_nexthops(fi) { | 1051 | for_nexthops(fi) { |
1052 | rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh)); | 1052 | rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh)); |
1053 | if (rtnh == NULL) | 1053 | if (!rtnh) |
1054 | goto nla_put_failure; | 1054 | goto nla_put_failure; |
1055 | 1055 | ||
1056 | rtnh->rtnh_flags = nh->nh_flags & 0xFF; | 1056 | rtnh->rtnh_flags = nh->nh_flags & 0xFF; |
@@ -1093,7 +1093,7 @@ int fib_sync_down_addr(struct net *net, __be32 local) | |||
1093 | struct hlist_head *head = &fib_info_laddrhash[hash]; | 1093 | struct hlist_head *head = &fib_info_laddrhash[hash]; |
1094 | struct fib_info *fi; | 1094 | struct fib_info *fi; |
1095 | 1095 | ||
1096 | if (fib_info_laddrhash == NULL || local == 0) | 1096 | if (!fib_info_laddrhash || local == 0) |
1097 | return 0; | 1097 | return 0; |
1098 | 1098 | ||
1099 | hlist_for_each_entry(fi, head, fib_lhash) { | 1099 | hlist_for_each_entry(fi, head, fib_lhash) { |
@@ -1182,7 +1182,7 @@ void fib_select_default(struct fib_result *res) | |||
1182 | 1182 | ||
1183 | fib_alias_accessed(fa); | 1183 | fib_alias_accessed(fa); |
1184 | 1184 | ||
1185 | if (fi == NULL) { | 1185 | if (!fi) { |
1186 | if (next_fi != res->fi) | 1186 | if (next_fi != res->fi) |
1187 | break; | 1187 | break; |
1188 | } else if (!fib_detect_death(fi, order, &last_resort, | 1188 | } else if (!fib_detect_death(fi, order, &last_resort, |
@@ -1195,7 +1195,7 @@ void fib_select_default(struct fib_result *res) | |||
1195 | order++; | 1195 | order++; |
1196 | } | 1196 | } |
1197 | 1197 | ||
1198 | if (order <= 0 || fi == NULL) { | 1198 | if (order <= 0 || !fi) { |
1199 | tb->tb_default = -1; | 1199 | tb->tb_default = -1; |
1200 | goto out; | 1200 | goto out; |
1201 | } | 1201 | } |
@@ -1251,7 +1251,7 @@ int fib_sync_up(struct net_device *dev) | |||
1251 | alive++; | 1251 | alive++; |
1252 | continue; | 1252 | continue; |
1253 | } | 1253 | } |
1254 | if (nexthop_nh->nh_dev == NULL || | 1254 | if (!nexthop_nh->nh_dev || |
1255 | !(nexthop_nh->nh_dev->flags & IFF_UP)) | 1255 | !(nexthop_nh->nh_dev->flags & IFF_UP)) |
1256 | continue; | 1256 | continue; |
1257 | if (nexthop_nh->nh_dev != dev || | 1257 | if (nexthop_nh->nh_dev != dev || |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 2c7c299ee2b9..e13fcc602da2 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -391,9 +391,9 @@ static void put_child(struct key_vector *tn, unsigned long i, | |||
391 | BUG_ON(i >= child_length(tn)); | 391 | BUG_ON(i >= child_length(tn)); |
392 | 392 | ||
393 | /* update emptyChildren, overflow into fullChildren */ | 393 | /* update emptyChildren, overflow into fullChildren */ |
394 | if (n == NULL && chi != NULL) | 394 | if (!n && chi) |
395 | empty_child_inc(tn); | 395 | empty_child_inc(tn); |
396 | if (n != NULL && chi == NULL) | 396 | if (n && !chi) |
397 | empty_child_dec(tn); | 397 | empty_child_dec(tn); |
398 | 398 | ||
399 | /* update fullChildren */ | 399 | /* update fullChildren */ |
@@ -528,7 +528,7 @@ static struct key_vector *inflate(struct trie *t, | |||
528 | unsigned long j, k; | 528 | unsigned long j, k; |
529 | 529 | ||
530 | /* An empty child */ | 530 | /* An empty child */ |
531 | if (inode == NULL) | 531 | if (!inode) |
532 | continue; | 532 | continue; |
533 | 533 | ||
534 | /* A leaf or an internal node with skipped bits */ | 534 | /* A leaf or an internal node with skipped bits */ |
@@ -1154,7 +1154,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) | |||
1154 | } | 1154 | } |
1155 | err = -ENOBUFS; | 1155 | err = -ENOBUFS; |
1156 | new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); | 1156 | new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); |
1157 | if (new_fa == NULL) | 1157 | if (!new_fa) |
1158 | goto out; | 1158 | goto out; |
1159 | 1159 | ||
1160 | fi_drop = fa->fa_info; | 1160 | fi_drop = fa->fa_info; |
@@ -1204,7 +1204,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) | |||
1204 | 1204 | ||
1205 | err = -ENOBUFS; | 1205 | err = -ENOBUFS; |
1206 | new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); | 1206 | new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); |
1207 | if (new_fa == NULL) | 1207 | if (!new_fa) |
1208 | goto out; | 1208 | goto out; |
1209 | 1209 | ||
1210 | new_fa->fa_info = fi; | 1210 | new_fa->fa_info = fi; |
@@ -1975,7 +1975,7 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias) | |||
1975 | sz += sizeof(struct trie); | 1975 | sz += sizeof(struct trie); |
1976 | 1976 | ||
1977 | tb = kzalloc(sz, GFP_KERNEL); | 1977 | tb = kzalloc(sz, GFP_KERNEL); |
1978 | if (tb == NULL) | 1978 | if (!tb) |
1979 | return NULL; | 1979 | return NULL; |
1980 | 1980 | ||
1981 | tb->tb_id = id; | 1981 | tb->tb_id = id; |
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c index 5a4828ba05ad..b77f5e84c623 100644 --- a/net/ipv4/geneve.c +++ b/net/ipv4/geneve.c | |||
@@ -136,7 +136,7 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt, | |||
136 | 136 | ||
137 | skb_set_inner_protocol(skb, htons(ETH_P_TEB)); | 137 | skb_set_inner_protocol(skb, htons(ETH_P_TEB)); |
138 | 138 | ||
139 | return udp_tunnel_xmit_skb(rt, skb, src, dst, | 139 | return udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, src, dst, |
140 | tos, ttl, df, src_port, dst_port, xnet, | 140 | tos, ttl, df, src_port, dst_port, xnet, |
141 | !csum); | 141 | !csum); |
142 | } | 142 | } |
@@ -196,7 +196,7 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head, | |||
196 | 196 | ||
197 | rcu_read_lock(); | 197 | rcu_read_lock(); |
198 | ptype = gro_find_receive_by_type(type); | 198 | ptype = gro_find_receive_by_type(type); |
199 | if (ptype == NULL) { | 199 | if (!ptype) { |
200 | flush = 1; | 200 | flush = 1; |
201 | goto out_unlock; | 201 | goto out_unlock; |
202 | } | 202 | } |
@@ -230,7 +230,7 @@ static int geneve_gro_complete(struct sk_buff *skb, int nhoff, | |||
230 | 230 | ||
231 | rcu_read_lock(); | 231 | rcu_read_lock(); |
232 | ptype = gro_find_complete_by_type(type); | 232 | ptype = gro_find_complete_by_type(type); |
233 | if (ptype != NULL) | 233 | if (ptype) |
234 | err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); | 234 | err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); |
235 | 235 | ||
236 | rcu_read_unlock(); | 236 | rcu_read_unlock(); |
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 51973ddc05a6..5aa46d4b44ef 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c | |||
@@ -149,7 +149,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, | |||
149 | 149 | ||
150 | rcu_read_lock(); | 150 | rcu_read_lock(); |
151 | ptype = gro_find_receive_by_type(type); | 151 | ptype = gro_find_receive_by_type(type); |
152 | if (ptype == NULL) | 152 | if (!ptype) |
153 | goto out_unlock; | 153 | goto out_unlock; |
154 | 154 | ||
155 | grehlen = GRE_HEADER_SECTION; | 155 | grehlen = GRE_HEADER_SECTION; |
@@ -243,7 +243,7 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff) | |||
243 | 243 | ||
244 | rcu_read_lock(); | 244 | rcu_read_lock(); |
245 | ptype = gro_find_complete_by_type(type); | 245 | ptype = gro_find_complete_by_type(type); |
246 | if (ptype != NULL) | 246 | if (ptype) |
247 | err = ptype->callbacks.gro_complete(skb, nhoff + grehlen); | 247 | err = ptype->callbacks.gro_complete(skb, nhoff + grehlen); |
248 | 248 | ||
249 | rcu_read_unlock(); | 249 | rcu_read_unlock(); |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 5e564014a0b7..f5203fba6236 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -399,7 +399,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) | |||
399 | return; | 399 | return; |
400 | 400 | ||
401 | sk = icmp_xmit_lock(net); | 401 | sk = icmp_xmit_lock(net); |
402 | if (sk == NULL) | 402 | if (!sk) |
403 | return; | 403 | return; |
404 | inet = inet_sk(sk); | 404 | inet = inet_sk(sk); |
405 | 405 | ||
@@ -609,7 +609,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
609 | skb_in->data, | 609 | skb_in->data, |
610 | sizeof(_inner_type), | 610 | sizeof(_inner_type), |
611 | &_inner_type); | 611 | &_inner_type); |
612 | if (itp == NULL) | 612 | if (!itp) |
613 | goto out; | 613 | goto out; |
614 | 614 | ||
615 | /* | 615 | /* |
@@ -627,7 +627,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
627 | return; | 627 | return; |
628 | 628 | ||
629 | sk = icmp_xmit_lock(net); | 629 | sk = icmp_xmit_lock(net); |
630 | if (sk == NULL) | 630 | if (!sk) |
631 | goto out_free; | 631 | goto out_free; |
632 | 632 | ||
633 | /* | 633 | /* |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index ad09213ac5b2..a3a697f5ffba 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -692,7 +692,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, | |||
692 | hlen = LL_RESERVED_SPACE(dev); | 692 | hlen = LL_RESERVED_SPACE(dev); |
693 | tlen = dev->needed_tailroom; | 693 | tlen = dev->needed_tailroom; |
694 | skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC); | 694 | skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC); |
695 | if (skb == NULL) { | 695 | if (!skb) { |
696 | ip_rt_put(rt); | 696 | ip_rt_put(rt); |
697 | return -1; | 697 | return -1; |
698 | } | 698 | } |
@@ -981,7 +981,7 @@ int igmp_rcv(struct sk_buff *skb) | |||
981 | int len = skb->len; | 981 | int len = skb->len; |
982 | bool dropped = true; | 982 | bool dropped = true; |
983 | 983 | ||
984 | if (in_dev == NULL) | 984 | if (!in_dev) |
985 | goto drop; | 985 | goto drop; |
986 | 986 | ||
987 | if (!pskb_may_pull(skb, sizeof(struct igmphdr))) | 987 | if (!pskb_may_pull(skb, sizeof(struct igmphdr))) |
@@ -1888,7 +1888,7 @@ int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr) | |||
1888 | if (count >= sysctl_igmp_max_memberships) | 1888 | if (count >= sysctl_igmp_max_memberships) |
1889 | goto done; | 1889 | goto done; |
1890 | iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL); | 1890 | iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL); |
1891 | if (iml == NULL) | 1891 | if (!iml) |
1892 | goto done; | 1892 | goto done; |
1893 | 1893 | ||
1894 | memcpy(&iml->multi, imr, sizeof(*imr)); | 1894 | memcpy(&iml->multi, imr, sizeof(*imr)); |
@@ -1909,7 +1909,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, | |||
1909 | struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist); | 1909 | struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist); |
1910 | int err; | 1910 | int err; |
1911 | 1911 | ||
1912 | if (psf == NULL) { | 1912 | if (!psf) { |
1913 | /* any-source empty exclude case */ | 1913 | /* any-source empty exclude case */ |
1914 | return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, | 1914 | return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, |
1915 | iml->sfmode, 0, NULL, 0); | 1915 | iml->sfmode, 0, NULL, 0); |
@@ -2360,7 +2360,7 @@ void ip_mc_drop_socket(struct sock *sk) | |||
2360 | struct ip_mc_socklist *iml; | 2360 | struct ip_mc_socklist *iml; |
2361 | struct net *net = sock_net(sk); | 2361 | struct net *net = sock_net(sk); |
2362 | 2362 | ||
2363 | if (inet->mc_list == NULL) | 2363 | if (!inet->mc_list) |
2364 | return; | 2364 | return; |
2365 | 2365 | ||
2366 | rtnl_lock(); | 2366 | rtnl_lock(); |
@@ -2370,7 +2370,7 @@ void ip_mc_drop_socket(struct sock *sk) | |||
2370 | inet->mc_list = iml->next_rcu; | 2370 | inet->mc_list = iml->next_rcu; |
2371 | in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); | 2371 | in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); |
2372 | (void) ip_mc_leave_src(sk, iml, in_dev); | 2372 | (void) ip_mc_leave_src(sk, iml, in_dev); |
2373 | if (in_dev != NULL) | 2373 | if (in_dev) |
2374 | ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); | 2374 | ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); |
2375 | /* decrease mem now to avoid the memleak warning */ | 2375 | /* decrease mem now to avoid the memleak warning */ |
2376 | atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); | 2376 | atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); |
@@ -2587,13 +2587,13 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq) | |||
2587 | for_each_netdev_rcu(net, state->dev) { | 2587 | for_each_netdev_rcu(net, state->dev) { |
2588 | struct in_device *idev; | 2588 | struct in_device *idev; |
2589 | idev = __in_dev_get_rcu(state->dev); | 2589 | idev = __in_dev_get_rcu(state->dev); |
2590 | if (unlikely(idev == NULL)) | 2590 | if (unlikely(!idev)) |
2591 | continue; | 2591 | continue; |
2592 | im = rcu_dereference(idev->mc_list); | 2592 | im = rcu_dereference(idev->mc_list); |
2593 | if (likely(im != NULL)) { | 2593 | if (likely(im)) { |
2594 | spin_lock_bh(&im->lock); | 2594 | spin_lock_bh(&im->lock); |
2595 | psf = im->sources; | 2595 | psf = im->sources; |
2596 | if (likely(psf != NULL)) { | 2596 | if (likely(psf)) { |
2597 | state->im = im; | 2597 | state->im = im; |
2598 | state->idev = idev; | 2598 | state->idev = idev; |
2599 | break; | 2599 | break; |
@@ -2663,7 +2663,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v) | |||
2663 | __releases(rcu) | 2663 | __releases(rcu) |
2664 | { | 2664 | { |
2665 | struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); | 2665 | struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); |
2666 | if (likely(state->im != NULL)) { | 2666 | if (likely(state->im)) { |
2667 | spin_unlock_bh(&state->im->lock); | 2667 | spin_unlock_bh(&state->im->lock); |
2668 | state->im = NULL; | 2668 | state->im = NULL; |
2669 | } | 2669 | } |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 79c0c9439fdc..5c3dd6267ed3 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -673,7 +673,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk, | |||
673 | { | 673 | { |
674 | struct sock *newsk = sk_clone_lock(sk, priority); | 674 | struct sock *newsk = sk_clone_lock(sk, priority); |
675 | 675 | ||
676 | if (newsk != NULL) { | 676 | if (newsk) { |
677 | struct inet_connection_sock *newicsk = inet_csk(newsk); | 677 | struct inet_connection_sock *newicsk = inet_csk(newsk); |
678 | 678 | ||
679 | newsk->sk_state = TCP_SYN_RECV; | 679 | newsk->sk_state = TCP_SYN_RECV; |
@@ -843,7 +843,7 @@ void inet_csk_listen_stop(struct sock *sk) | |||
843 | sk_acceptq_removed(sk); | 843 | sk_acceptq_removed(sk); |
844 | reqsk_put(req); | 844 | reqsk_put(req); |
845 | } | 845 | } |
846 | if (queue->fastopenq != NULL) { | 846 | if (queue->fastopenq) { |
847 | /* Free all the reqs queued in rskq_rst_head. */ | 847 | /* Free all the reqs queued in rskq_rst_head. */ |
848 | spin_lock_bh(&queue->fastopenq->lock); | 848 | spin_lock_bh(&queue->fastopenq->lock); |
849 | acc_req = queue->fastopenq->rskq_rst_head; | 849 | acc_req = queue->fastopenq->rskq_rst_head; |
@@ -875,7 +875,7 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, | |||
875 | { | 875 | { |
876 | const struct inet_connection_sock *icsk = inet_csk(sk); | 876 | const struct inet_connection_sock *icsk = inet_csk(sk); |
877 | 877 | ||
878 | if (icsk->icsk_af_ops->compat_getsockopt != NULL) | 878 | if (icsk->icsk_af_ops->compat_getsockopt) |
879 | return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname, | 879 | return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname, |
880 | optval, optlen); | 880 | optval, optlen); |
881 | return icsk->icsk_af_ops->getsockopt(sk, level, optname, | 881 | return icsk->icsk_af_ops->getsockopt(sk, level, optname, |
@@ -888,7 +888,7 @@ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, | |||
888 | { | 888 | { |
889 | const struct inet_connection_sock *icsk = inet_csk(sk); | 889 | const struct inet_connection_sock *icsk = inet_csk(sk); |
890 | 890 | ||
891 | if (icsk->icsk_af_ops->compat_setsockopt != NULL) | 891 | if (icsk->icsk_af_ops->compat_setsockopt) |
892 | return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname, | 892 | return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname, |
893 | optval, optlen); | 893 | optval, optlen); |
894 | return icsk->icsk_af_ops->setsockopt(sk, level, optname, | 894 | return icsk->icsk_af_ops->setsockopt(sk, level, optname, |
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index e7920352646a..5e346a082e5f 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
@@ -385,7 +385,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf, | |||
385 | } | 385 | } |
386 | 386 | ||
387 | q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC); | 387 | q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC); |
388 | if (q == NULL) | 388 | if (!q) |
389 | return NULL; | 389 | return NULL; |
390 | 390 | ||
391 | q->net = nf; | 391 | q->net = nf; |
@@ -406,7 +406,7 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf, | |||
406 | struct inet_frag_queue *q; | 406 | struct inet_frag_queue *q; |
407 | 407 | ||
408 | q = inet_frag_alloc(nf, f, arg); | 408 | q = inet_frag_alloc(nf, f, arg); |
409 | if (q == NULL) | 409 | if (!q) |
410 | return NULL; | 410 | return NULL; |
411 | 411 | ||
412 | return inet_frag_intern(nf, q, f, arg); | 412 | return inet_frag_intern(nf, q, f, arg); |
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 0fb841b9d834..d4630bf2d9aa 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
@@ -64,7 +64,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, | |||
64 | { | 64 | { |
65 | struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); | 65 | struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); |
66 | 66 | ||
67 | if (tb != NULL) { | 67 | if (tb) { |
68 | write_pnet(&tb->ib_net, net); | 68 | write_pnet(&tb->ib_net, net); |
69 | tb->port = snum; | 69 | tb->port = snum; |
70 | tb->fastreuse = 0; | 70 | tb->fastreuse = 0; |
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index f38e387448fb..118f0f195820 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
@@ -173,7 +173,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat | |||
173 | struct inet_timewait_sock *tw = | 173 | struct inet_timewait_sock *tw = |
174 | kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab, | 174 | kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab, |
175 | GFP_ATOMIC); | 175 | GFP_ATOMIC); |
176 | if (tw != NULL) { | 176 | if (tw) { |
177 | const struct inet_sock *inet = inet_sk(sk); | 177 | const struct inet_sock *inet = inet_sk(sk); |
178 | 178 | ||
179 | kmemcheck_annotate_bitfield(tw, flags); | 179 | kmemcheck_annotate_bitfield(tw, flags); |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index d9bc28ac5d1b..939992c456f3 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -57,7 +57,7 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) | |||
57 | } | 57 | } |
58 | 58 | ||
59 | 59 | ||
60 | static int ip_forward_finish(struct sk_buff *skb) | 60 | static int ip_forward_finish(struct sock *sk, struct sk_buff *skb) |
61 | { | 61 | { |
62 | struct ip_options *opt = &(IPCB(skb)->opt); | 62 | struct ip_options *opt = &(IPCB(skb)->opt); |
63 | 63 | ||
@@ -68,7 +68,7 @@ static int ip_forward_finish(struct sk_buff *skb) | |||
68 | ip_forward_options(skb); | 68 | ip_forward_options(skb); |
69 | 69 | ||
70 | skb_sender_cpu_clear(skb); | 70 | skb_sender_cpu_clear(skb); |
71 | return dst_output(skb); | 71 | return dst_output_sk(sk, skb); |
72 | } | 72 | } |
73 | 73 | ||
74 | int ip_forward(struct sk_buff *skb) | 74 | int ip_forward(struct sk_buff *skb) |
@@ -136,8 +136,8 @@ int ip_forward(struct sk_buff *skb) | |||
136 | 136 | ||
137 | skb->priority = rt_tos2priority(iph->tos); | 137 | skb->priority = rt_tos2priority(iph->tos); |
138 | 138 | ||
139 | return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, | 139 | return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, NULL, skb, |
140 | rt->dst.dev, ip_forward_finish); | 140 | skb->dev, rt->dst.dev, ip_forward_finish); |
141 | 141 | ||
142 | sr_failed: | 142 | sr_failed: |
143 | /* | 143 | /* |
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 145a50c4d566..cc1da6d9cb35 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -372,7 +372,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
372 | goto err; | 372 | goto err; |
373 | 373 | ||
374 | err = -ENOMEM; | 374 | err = -ENOMEM; |
375 | if (pskb_pull(skb, ihl) == NULL) | 375 | if (!pskb_pull(skb, ihl)) |
376 | goto err; | 376 | goto err; |
377 | 377 | ||
378 | err = pskb_trim_rcsum(skb, end - offset); | 378 | err = pskb_trim_rcsum(skb, end - offset); |
@@ -537,7 +537,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
537 | qp->q.fragments = head; | 537 | qp->q.fragments = head; |
538 | } | 538 | } |
539 | 539 | ||
540 | WARN_ON(head == NULL); | 540 | WARN_ON(!head); |
541 | WARN_ON(FRAG_CB(head)->offset != 0); | 541 | WARN_ON(FRAG_CB(head)->offset != 0); |
542 | 542 | ||
543 | /* Allocate a new buffer for the datagram. */ | 543 | /* Allocate a new buffer for the datagram. */ |
@@ -559,7 +559,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
559 | struct sk_buff *clone; | 559 | struct sk_buff *clone; |
560 | int i, plen = 0; | 560 | int i, plen = 0; |
561 | 561 | ||
562 | if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) | 562 | clone = alloc_skb(0, GFP_ATOMIC); |
563 | if (!clone) | ||
563 | goto out_nomem; | 564 | goto out_nomem; |
564 | clone->next = head->next; | 565 | clone->next = head->next; |
565 | head->next = clone; | 566 | head->next = clone; |
@@ -638,7 +639,8 @@ int ip_defrag(struct sk_buff *skb, u32 user) | |||
638 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); | 639 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); |
639 | 640 | ||
640 | /* Lookup (or create) queue header */ | 641 | /* Lookup (or create) queue header */ |
641 | if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) { | 642 | qp = ip_find(net, ip_hdr(skb), user); |
643 | if (qp) { | ||
642 | int ret; | 644 | int ret; |
643 | 645 | ||
644 | spin_lock(&qp->q.lock); | 646 | spin_lock(&qp->q.lock); |
@@ -754,7 +756,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net) | |||
754 | table = ip4_frags_ns_ctl_table; | 756 | table = ip4_frags_ns_ctl_table; |
755 | if (!net_eq(net, &init_net)) { | 757 | if (!net_eq(net, &init_net)) { |
756 | table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL); | 758 | table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL); |
757 | if (table == NULL) | 759 | if (!table) |
758 | goto err_alloc; | 760 | goto err_alloc; |
759 | 761 | ||
760 | table[0].data = &net->ipv4.frags.high_thresh; | 762 | table[0].data = &net->ipv4.frags.high_thresh; |
@@ -770,7 +772,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net) | |||
770 | } | 772 | } |
771 | 773 | ||
772 | hdr = register_net_sysctl(net, "net/ipv4", table); | 774 | hdr = register_net_sysctl(net, "net/ipv4", table); |
773 | if (hdr == NULL) | 775 | if (!hdr) |
774 | goto err_reg; | 776 | goto err_reg; |
775 | 777 | ||
776 | net->ipv4.frags_hdr = hdr; | 778 | net->ipv4.frags_hdr = hdr; |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 0eb2a040a830..5fd706473c73 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -182,7 +182,7 @@ static int ipgre_err(struct sk_buff *skb, u32 info, | |||
182 | t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags, | 182 | t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags, |
183 | iph->daddr, iph->saddr, tpi->key); | 183 | iph->daddr, iph->saddr, tpi->key); |
184 | 184 | ||
185 | if (t == NULL) | 185 | if (!t) |
186 | return PACKET_REJECT; | 186 | return PACKET_REJECT; |
187 | 187 | ||
188 | if (t->parms.iph.daddr == 0 || | 188 | if (t->parms.iph.daddr == 0 || |
@@ -423,7 +423,7 @@ static int ipgre_open(struct net_device *dev) | |||
423 | return -EADDRNOTAVAIL; | 423 | return -EADDRNOTAVAIL; |
424 | dev = rt->dst.dev; | 424 | dev = rt->dst.dev; |
425 | ip_rt_put(rt); | 425 | ip_rt_put(rt); |
426 | if (__in_dev_get_rtnl(dev) == NULL) | 426 | if (!__in_dev_get_rtnl(dev)) |
427 | return -EADDRNOTAVAIL; | 427 | return -EADDRNOTAVAIL; |
428 | t->mlink = dev->ifindex; | 428 | t->mlink = dev->ifindex; |
429 | ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr); | 429 | ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr); |
@@ -456,6 +456,7 @@ static const struct net_device_ops ipgre_netdev_ops = { | |||
456 | .ndo_do_ioctl = ipgre_tunnel_ioctl, | 456 | .ndo_do_ioctl = ipgre_tunnel_ioctl, |
457 | .ndo_change_mtu = ip_tunnel_change_mtu, | 457 | .ndo_change_mtu = ip_tunnel_change_mtu, |
458 | .ndo_get_stats64 = ip_tunnel_get_stats64, | 458 | .ndo_get_stats64 = ip_tunnel_get_stats64, |
459 | .ndo_get_iflink = ip_tunnel_get_iflink, | ||
459 | }; | 460 | }; |
460 | 461 | ||
461 | #define GRE_FEATURES (NETIF_F_SG | \ | 462 | #define GRE_FEATURES (NETIF_F_SG | \ |
@@ -686,6 +687,7 @@ static const struct net_device_ops gre_tap_netdev_ops = { | |||
686 | .ndo_validate_addr = eth_validate_addr, | 687 | .ndo_validate_addr = eth_validate_addr, |
687 | .ndo_change_mtu = ip_tunnel_change_mtu, | 688 | .ndo_change_mtu = ip_tunnel_change_mtu, |
688 | .ndo_get_stats64 = ip_tunnel_get_stats64, | 689 | .ndo_get_stats64 = ip_tunnel_get_stats64, |
690 | .ndo_get_iflink = ip_tunnel_get_iflink, | ||
689 | }; | 691 | }; |
690 | 692 | ||
691 | static void ipgre_tap_setup(struct net_device *dev) | 693 | static void ipgre_tap_setup(struct net_device *dev) |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 3d4da2c16b6a..2db4c8773c1b 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
@@ -187,7 +187,7 @@ bool ip_call_ra_chain(struct sk_buff *skb) | |||
187 | return false; | 187 | return false; |
188 | } | 188 | } |
189 | 189 | ||
190 | static int ip_local_deliver_finish(struct sk_buff *skb) | 190 | static int ip_local_deliver_finish(struct sock *sk, struct sk_buff *skb) |
191 | { | 191 | { |
192 | struct net *net = dev_net(skb->dev); | 192 | struct net *net = dev_net(skb->dev); |
193 | 193 | ||
@@ -203,7 +203,7 @@ static int ip_local_deliver_finish(struct sk_buff *skb) | |||
203 | raw = raw_local_deliver(skb, protocol); | 203 | raw = raw_local_deliver(skb, protocol); |
204 | 204 | ||
205 | ipprot = rcu_dereference(inet_protos[protocol]); | 205 | ipprot = rcu_dereference(inet_protos[protocol]); |
206 | if (ipprot != NULL) { | 206 | if (ipprot) { |
207 | int ret; | 207 | int ret; |
208 | 208 | ||
209 | if (!ipprot->no_policy) { | 209 | if (!ipprot->no_policy) { |
@@ -253,7 +253,8 @@ int ip_local_deliver(struct sk_buff *skb) | |||
253 | return 0; | 253 | return 0; |
254 | } | 254 | } |
255 | 255 | ||
256 | return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, skb, skb->dev, NULL, | 256 | return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, NULL, skb, |
257 | skb->dev, NULL, | ||
257 | ip_local_deliver_finish); | 258 | ip_local_deliver_finish); |
258 | } | 259 | } |
259 | 260 | ||
@@ -309,12 +310,12 @@ drop: | |||
309 | int sysctl_ip_early_demux __read_mostly = 1; | 310 | int sysctl_ip_early_demux __read_mostly = 1; |
310 | EXPORT_SYMBOL(sysctl_ip_early_demux); | 311 | EXPORT_SYMBOL(sysctl_ip_early_demux); |
311 | 312 | ||
312 | static int ip_rcv_finish(struct sk_buff *skb) | 313 | static int ip_rcv_finish(struct sock *sk, struct sk_buff *skb) |
313 | { | 314 | { |
314 | const struct iphdr *iph = ip_hdr(skb); | 315 | const struct iphdr *iph = ip_hdr(skb); |
315 | struct rtable *rt; | 316 | struct rtable *rt; |
316 | 317 | ||
317 | if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) { | 318 | if (sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) { |
318 | const struct net_protocol *ipprot; | 319 | const struct net_protocol *ipprot; |
319 | int protocol = iph->protocol; | 320 | int protocol = iph->protocol; |
320 | 321 | ||
@@ -387,7 +388,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, | |||
387 | 388 | ||
388 | IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len); | 389 | IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len); |
389 | 390 | ||
390 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { | 391 | skb = skb_share_check(skb, GFP_ATOMIC); |
392 | if (!skb) { | ||
391 | IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS); | 393 | IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS); |
392 | goto out; | 394 | goto out; |
393 | } | 395 | } |
@@ -450,7 +452,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, | |||
450 | /* Must drop socket now because of tproxy. */ | 452 | /* Must drop socket now because of tproxy. */ |
451 | skb_orphan(skb); | 453 | skb_orphan(skb); |
452 | 454 | ||
453 | return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, dev, NULL, | 455 | return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, NULL, skb, |
456 | dev, NULL, | ||
454 | ip_rcv_finish); | 457 | ip_rcv_finish); |
455 | 458 | ||
456 | csum_error: | 459 | csum_error: |
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 5b3d91be2db0..bd246792360b 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
@@ -264,7 +264,7 @@ int ip_options_compile(struct net *net, | |||
264 | unsigned char *iph; | 264 | unsigned char *iph; |
265 | int optlen, l; | 265 | int optlen, l; |
266 | 266 | ||
267 | if (skb != NULL) { | 267 | if (skb) { |
268 | rt = skb_rtable(skb); | 268 | rt = skb_rtable(skb); |
269 | optptr = (unsigned char *)&(ip_hdr(skb)[1]); | 269 | optptr = (unsigned char *)&(ip_hdr(skb)[1]); |
270 | } else | 270 | } else |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 8259e777b249..5da4d15262fd 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -91,14 +91,19 @@ void ip_send_check(struct iphdr *iph) | |||
91 | } | 91 | } |
92 | EXPORT_SYMBOL(ip_send_check); | 92 | EXPORT_SYMBOL(ip_send_check); |
93 | 93 | ||
94 | int __ip_local_out(struct sk_buff *skb) | 94 | int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb) |
95 | { | 95 | { |
96 | struct iphdr *iph = ip_hdr(skb); | 96 | struct iphdr *iph = ip_hdr(skb); |
97 | 97 | ||
98 | iph->tot_len = htons(skb->len); | 98 | iph->tot_len = htons(skb->len); |
99 | ip_send_check(iph); | 99 | ip_send_check(iph); |
100 | return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL, | 100 | return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb, NULL, |
101 | skb_dst(skb)->dev, dst_output); | 101 | skb_dst(skb)->dev, dst_output_sk); |
102 | } | ||
103 | |||
104 | int __ip_local_out(struct sk_buff *skb) | ||
105 | { | ||
106 | return __ip_local_out_sk(skb->sk, skb); | ||
102 | } | 107 | } |
103 | 108 | ||
104 | int ip_local_out_sk(struct sock *sk, struct sk_buff *skb) | 109 | int ip_local_out_sk(struct sock *sk, struct sk_buff *skb) |
@@ -163,7 +168,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, | |||
163 | } | 168 | } |
164 | EXPORT_SYMBOL_GPL(ip_build_and_send_pkt); | 169 | EXPORT_SYMBOL_GPL(ip_build_and_send_pkt); |
165 | 170 | ||
166 | static inline int ip_finish_output2(struct sk_buff *skb) | 171 | static inline int ip_finish_output2(struct sock *sk, struct sk_buff *skb) |
167 | { | 172 | { |
168 | struct dst_entry *dst = skb_dst(skb); | 173 | struct dst_entry *dst = skb_dst(skb); |
169 | struct rtable *rt = (struct rtable *)dst; | 174 | struct rtable *rt = (struct rtable *)dst; |
@@ -182,7 +187,7 @@ static inline int ip_finish_output2(struct sk_buff *skb) | |||
182 | struct sk_buff *skb2; | 187 | struct sk_buff *skb2; |
183 | 188 | ||
184 | skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev)); | 189 | skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev)); |
185 | if (skb2 == NULL) { | 190 | if (!skb2) { |
186 | kfree_skb(skb); | 191 | kfree_skb(skb); |
187 | return -ENOMEM; | 192 | return -ENOMEM; |
188 | } | 193 | } |
@@ -211,7 +216,7 @@ static inline int ip_finish_output2(struct sk_buff *skb) | |||
211 | return -EINVAL; | 216 | return -EINVAL; |
212 | } | 217 | } |
213 | 218 | ||
214 | static int ip_finish_output_gso(struct sk_buff *skb) | 219 | static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb) |
215 | { | 220 | { |
216 | netdev_features_t features; | 221 | netdev_features_t features; |
217 | struct sk_buff *segs; | 222 | struct sk_buff *segs; |
@@ -220,7 +225,7 @@ static int ip_finish_output_gso(struct sk_buff *skb) | |||
220 | /* common case: locally created skb or seglen is <= mtu */ | 225 | /* common case: locally created skb or seglen is <= mtu */ |
221 | if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) || | 226 | if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) || |
222 | skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb)) | 227 | skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb)) |
223 | return ip_finish_output2(skb); | 228 | return ip_finish_output2(sk, skb); |
224 | 229 | ||
225 | /* Slowpath - GSO segment length is exceeding the dst MTU. | 230 | /* Slowpath - GSO segment length is exceeding the dst MTU. |
226 | * | 231 | * |
@@ -243,7 +248,7 @@ static int ip_finish_output_gso(struct sk_buff *skb) | |||
243 | int err; | 248 | int err; |
244 | 249 | ||
245 | segs->next = NULL; | 250 | segs->next = NULL; |
246 | err = ip_fragment(segs, ip_finish_output2); | 251 | err = ip_fragment(sk, segs, ip_finish_output2); |
247 | 252 | ||
248 | if (err && ret == 0) | 253 | if (err && ret == 0) |
249 | ret = err; | 254 | ret = err; |
@@ -253,22 +258,22 @@ static int ip_finish_output_gso(struct sk_buff *skb) | |||
253 | return ret; | 258 | return ret; |
254 | } | 259 | } |
255 | 260 | ||
256 | static int ip_finish_output(struct sk_buff *skb) | 261 | static int ip_finish_output(struct sock *sk, struct sk_buff *skb) |
257 | { | 262 | { |
258 | #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) | 263 | #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) |
259 | /* Policy lookup after SNAT yielded a new policy */ | 264 | /* Policy lookup after SNAT yielded a new policy */ |
260 | if (skb_dst(skb)->xfrm != NULL) { | 265 | if (skb_dst(skb)->xfrm) { |
261 | IPCB(skb)->flags |= IPSKB_REROUTED; | 266 | IPCB(skb)->flags |= IPSKB_REROUTED; |
262 | return dst_output(skb); | 267 | return dst_output_sk(sk, skb); |
263 | } | 268 | } |
264 | #endif | 269 | #endif |
265 | if (skb_is_gso(skb)) | 270 | if (skb_is_gso(skb)) |
266 | return ip_finish_output_gso(skb); | 271 | return ip_finish_output_gso(sk, skb); |
267 | 272 | ||
268 | if (skb->len > ip_skb_dst_mtu(skb)) | 273 | if (skb->len > ip_skb_dst_mtu(skb)) |
269 | return ip_fragment(skb, ip_finish_output2); | 274 | return ip_fragment(sk, skb, ip_finish_output2); |
270 | 275 | ||
271 | return ip_finish_output2(skb); | 276 | return ip_finish_output2(sk, skb); |
272 | } | 277 | } |
273 | 278 | ||
274 | int ip_mc_output(struct sock *sk, struct sk_buff *skb) | 279 | int ip_mc_output(struct sock *sk, struct sk_buff *skb) |
@@ -307,7 +312,7 @@ int ip_mc_output(struct sock *sk, struct sk_buff *skb) | |||
307 | struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); | 312 | struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); |
308 | if (newskb) | 313 | if (newskb) |
309 | NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, | 314 | NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, |
310 | newskb, NULL, newskb->dev, | 315 | sk, newskb, NULL, newskb->dev, |
311 | dev_loopback_xmit); | 316 | dev_loopback_xmit); |
312 | } | 317 | } |
313 | 318 | ||
@@ -322,11 +327,11 @@ int ip_mc_output(struct sock *sk, struct sk_buff *skb) | |||
322 | if (rt->rt_flags&RTCF_BROADCAST) { | 327 | if (rt->rt_flags&RTCF_BROADCAST) { |
323 | struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); | 328 | struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); |
324 | if (newskb) | 329 | if (newskb) |
325 | NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb, | 330 | NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, newskb, |
326 | NULL, newskb->dev, dev_loopback_xmit); | 331 | NULL, newskb->dev, dev_loopback_xmit); |
327 | } | 332 | } |
328 | 333 | ||
329 | return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, | 334 | return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, NULL, |
330 | skb->dev, ip_finish_output, | 335 | skb->dev, ip_finish_output, |
331 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 336 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
332 | } | 337 | } |
@@ -340,7 +345,8 @@ int ip_output(struct sock *sk, struct sk_buff *skb) | |||
340 | skb->dev = dev; | 345 | skb->dev = dev; |
341 | skb->protocol = htons(ETH_P_IP); | 346 | skb->protocol = htons(ETH_P_IP); |
342 | 347 | ||
343 | return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev, | 348 | return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, |
349 | NULL, dev, | ||
344 | ip_finish_output, | 350 | ip_finish_output, |
345 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 351 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
346 | } | 352 | } |
@@ -376,12 +382,12 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) | |||
376 | inet_opt = rcu_dereference(inet->inet_opt); | 382 | inet_opt = rcu_dereference(inet->inet_opt); |
377 | fl4 = &fl->u.ip4; | 383 | fl4 = &fl->u.ip4; |
378 | rt = skb_rtable(skb); | 384 | rt = skb_rtable(skb); |
379 | if (rt != NULL) | 385 | if (rt) |
380 | goto packet_routed; | 386 | goto packet_routed; |
381 | 387 | ||
382 | /* Make sure we can route this packet. */ | 388 | /* Make sure we can route this packet. */ |
383 | rt = (struct rtable *)__sk_dst_check(sk, 0); | 389 | rt = (struct rtable *)__sk_dst_check(sk, 0); |
384 | if (rt == NULL) { | 390 | if (!rt) { |
385 | __be32 daddr; | 391 | __be32 daddr; |
386 | 392 | ||
387 | /* Use correct destination address if we have options. */ | 393 | /* Use correct destination address if we have options. */ |
@@ -480,7 +486,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) | |||
480 | * single device frame, and queue such a frame for sending. | 486 | * single device frame, and queue such a frame for sending. |
481 | */ | 487 | */ |
482 | 488 | ||
483 | int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | 489 | int ip_fragment(struct sock *sk, struct sk_buff *skb, |
490 | int (*output)(struct sock *, struct sk_buff *)) | ||
484 | { | 491 | { |
485 | struct iphdr *iph; | 492 | struct iphdr *iph; |
486 | int ptr; | 493 | int ptr; |
@@ -587,13 +594,13 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
587 | ip_options_fragment(frag); | 594 | ip_options_fragment(frag); |
588 | offset += skb->len - hlen; | 595 | offset += skb->len - hlen; |
589 | iph->frag_off = htons(offset>>3); | 596 | iph->frag_off = htons(offset>>3); |
590 | if (frag->next != NULL) | 597 | if (frag->next) |
591 | iph->frag_off |= htons(IP_MF); | 598 | iph->frag_off |= htons(IP_MF); |
592 | /* Ready, complete checksum */ | 599 | /* Ready, complete checksum */ |
593 | ip_send_check(iph); | 600 | ip_send_check(iph); |
594 | } | 601 | } |
595 | 602 | ||
596 | err = output(skb); | 603 | err = output(sk, skb); |
597 | 604 | ||
598 | if (!err) | 605 | if (!err) |
599 | IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES); | 606 | IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES); |
@@ -730,7 +737,7 @@ slow_path: | |||
730 | 737 | ||
731 | ip_send_check(iph); | 738 | ip_send_check(iph); |
732 | 739 | ||
733 | err = output(skb2); | 740 | err = output(sk, skb2); |
734 | if (err) | 741 | if (err) |
735 | goto fail; | 742 | goto fail; |
736 | 743 | ||
@@ -790,12 +797,13 @@ static inline int ip_ufo_append_data(struct sock *sk, | |||
790 | * device, so create one single skb packet containing complete | 797 | * device, so create one single skb packet containing complete |
791 | * udp datagram | 798 | * udp datagram |
792 | */ | 799 | */ |
793 | if ((skb = skb_peek_tail(queue)) == NULL) { | 800 | skb = skb_peek_tail(queue); |
801 | if (!skb) { | ||
794 | skb = sock_alloc_send_skb(sk, | 802 | skb = sock_alloc_send_skb(sk, |
795 | hh_len + fragheaderlen + transhdrlen + 20, | 803 | hh_len + fragheaderlen + transhdrlen + 20, |
796 | (flags & MSG_DONTWAIT), &err); | 804 | (flags & MSG_DONTWAIT), &err); |
797 | 805 | ||
798 | if (skb == NULL) | 806 | if (!skb) |
799 | return err; | 807 | return err; |
800 | 808 | ||
801 | /* reserve space for Hardware header */ | 809 | /* reserve space for Hardware header */ |
@@ -961,10 +969,10 @@ alloc_new_skb: | |||
961 | skb = sock_wmalloc(sk, | 969 | skb = sock_wmalloc(sk, |
962 | alloclen + hh_len + 15, 1, | 970 | alloclen + hh_len + 15, 1, |
963 | sk->sk_allocation); | 971 | sk->sk_allocation); |
964 | if (unlikely(skb == NULL)) | 972 | if (unlikely(!skb)) |
965 | err = -ENOBUFS; | 973 | err = -ENOBUFS; |
966 | } | 974 | } |
967 | if (skb == NULL) | 975 | if (!skb) |
968 | goto error; | 976 | goto error; |
969 | 977 | ||
970 | /* | 978 | /* |
@@ -1088,10 +1096,10 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, | |||
1088 | */ | 1096 | */ |
1089 | opt = ipc->opt; | 1097 | opt = ipc->opt; |
1090 | if (opt) { | 1098 | if (opt) { |
1091 | if (cork->opt == NULL) { | 1099 | if (!cork->opt) { |
1092 | cork->opt = kmalloc(sizeof(struct ip_options) + 40, | 1100 | cork->opt = kmalloc(sizeof(struct ip_options) + 40, |
1093 | sk->sk_allocation); | 1101 | sk->sk_allocation); |
1094 | if (unlikely(cork->opt == NULL)) | 1102 | if (unlikely(!cork->opt)) |
1095 | return -ENOBUFS; | 1103 | return -ENOBUFS; |
1096 | } | 1104 | } |
1097 | memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen); | 1105 | memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen); |
@@ -1198,7 +1206,8 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, | |||
1198 | return -EMSGSIZE; | 1206 | return -EMSGSIZE; |
1199 | } | 1207 | } |
1200 | 1208 | ||
1201 | if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) | 1209 | skb = skb_peek_tail(&sk->sk_write_queue); |
1210 | if (!skb) | ||
1202 | return -EINVAL; | 1211 | return -EINVAL; |
1203 | 1212 | ||
1204 | cork->length += size; | 1213 | cork->length += size; |
@@ -1329,7 +1338,8 @@ struct sk_buff *__ip_make_skb(struct sock *sk, | |||
1329 | __be16 df = 0; | 1338 | __be16 df = 0; |
1330 | __u8 ttl; | 1339 | __u8 ttl; |
1331 | 1340 | ||
1332 | if ((skb = __skb_dequeue(queue)) == NULL) | 1341 | skb = __skb_dequeue(queue); |
1342 | if (!skb) | ||
1333 | goto out; | 1343 | goto out; |
1334 | tail_skb = &(skb_shinfo(skb)->frag_list); | 1344 | tail_skb = &(skb_shinfo(skb)->frag_list); |
1335 | 1345 | ||
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index f6a0d54b308a..7cfb0893f263 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -351,7 +351,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, | |||
351 | return 0; | 351 | return 0; |
352 | } | 352 | } |
353 | } | 353 | } |
354 | if (new_ra == NULL) { | 354 | if (!new_ra) { |
355 | spin_unlock_bh(&ip_ra_lock); | 355 | spin_unlock_bh(&ip_ra_lock); |
356 | return -ENOBUFS; | 356 | return -ENOBUFS; |
357 | } | 357 | } |
@@ -387,7 +387,7 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, | |||
387 | skb_network_header(skb); | 387 | skb_network_header(skb); |
388 | serr->port = port; | 388 | serr->port = port; |
389 | 389 | ||
390 | if (skb_pull(skb, payload - skb->data) != NULL) { | 390 | if (skb_pull(skb, payload - skb->data)) { |
391 | skb_reset_transport_header(skb); | 391 | skb_reset_transport_header(skb); |
392 | if (sock_queue_err_skb(sk, skb) == 0) | 392 | if (sock_queue_err_skb(sk, skb) == 0) |
393 | return; | 393 | return; |
@@ -482,7 +482,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
482 | 482 | ||
483 | err = -EAGAIN; | 483 | err = -EAGAIN; |
484 | skb = sock_dequeue_err_skb(sk); | 484 | skb = sock_dequeue_err_skb(sk); |
485 | if (skb == NULL) | 485 | if (!skb) |
486 | goto out; | 486 | goto out; |
487 | 487 | ||
488 | copied = skb->len; | 488 | copied = skb->len; |
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 2cd08280c77b..4c2c3ba4ba65 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
@@ -389,7 +389,6 @@ static int ip_tunnel_bind_dev(struct net_device *dev) | |||
389 | hlen = tdev->hard_header_len + tdev->needed_headroom; | 389 | hlen = tdev->hard_header_len + tdev->needed_headroom; |
390 | mtu = tdev->mtu; | 390 | mtu = tdev->mtu; |
391 | } | 391 | } |
392 | dev->iflink = tunnel->parms.link; | ||
393 | 392 | ||
394 | dev->needed_headroom = t_hlen + hlen; | 393 | dev->needed_headroom = t_hlen + hlen; |
395 | mtu -= (dev->hard_header_len + t_hlen); | 394 | mtu -= (dev->hard_header_len + t_hlen); |
@@ -655,7 +654,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
655 | if (dst == 0) { | 654 | if (dst == 0) { |
656 | /* NBMA tunnel */ | 655 | /* NBMA tunnel */ |
657 | 656 | ||
658 | if (skb_dst(skb) == NULL) { | 657 | if (!skb_dst(skb)) { |
659 | dev->stats.tx_fifo_errors++; | 658 | dev->stats.tx_fifo_errors++; |
660 | goto tx_error; | 659 | goto tx_error; |
661 | } | 660 | } |
@@ -673,7 +672,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
673 | 672 | ||
674 | neigh = dst_neigh_lookup(skb_dst(skb), | 673 | neigh = dst_neigh_lookup(skb_dst(skb), |
675 | &ipv6_hdr(skb)->daddr); | 674 | &ipv6_hdr(skb)->daddr); |
676 | if (neigh == NULL) | 675 | if (!neigh) |
677 | goto tx_error; | 676 | goto tx_error; |
678 | 677 | ||
679 | addr6 = (const struct in6_addr *)&neigh->primary_key; | 678 | addr6 = (const struct in6_addr *)&neigh->primary_key; |
@@ -783,7 +782,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
783 | return; | 782 | return; |
784 | } | 783 | } |
785 | 784 | ||
786 | err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr, protocol, | 785 | err = iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, |
787 | tos, ttl, df, !net_eq(tunnel->net, dev_net(dev))); | 786 | tos, ttl, df, !net_eq(tunnel->net, dev_net(dev))); |
788 | iptunnel_xmit_stats(err, &dev->stats, dev->tstats); | 787 | iptunnel_xmit_stats(err, &dev->stats, dev->tstats); |
789 | 788 | ||
@@ -844,7 +843,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) | |||
844 | case SIOCGETTUNNEL: | 843 | case SIOCGETTUNNEL: |
845 | if (dev == itn->fb_tunnel_dev) { | 844 | if (dev == itn->fb_tunnel_dev) { |
846 | t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type); | 845 | t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type); |
847 | if (t == NULL) | 846 | if (!t) |
848 | t = netdev_priv(dev); | 847 | t = netdev_priv(dev); |
849 | } | 848 | } |
850 | memcpy(p, &t->parms, sizeof(*p)); | 849 | memcpy(p, &t->parms, sizeof(*p)); |
@@ -877,7 +876,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) | |||
877 | break; | 876 | break; |
878 | } | 877 | } |
879 | if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) { | 878 | if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) { |
880 | if (t != NULL) { | 879 | if (t) { |
881 | if (t->dev != dev) { | 880 | if (t->dev != dev) { |
882 | err = -EEXIST; | 881 | err = -EEXIST; |
883 | break; | 882 | break; |
@@ -915,7 +914,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) | |||
915 | if (dev == itn->fb_tunnel_dev) { | 914 | if (dev == itn->fb_tunnel_dev) { |
916 | err = -ENOENT; | 915 | err = -ENOENT; |
917 | t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type); | 916 | t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type); |
918 | if (t == NULL) | 917 | if (!t) |
919 | goto done; | 918 | goto done; |
920 | err = -EPERM; | 919 | err = -EPERM; |
921 | if (t == netdev_priv(itn->fb_tunnel_dev)) | 920 | if (t == netdev_priv(itn->fb_tunnel_dev)) |
@@ -980,6 +979,14 @@ struct net *ip_tunnel_get_link_net(const struct net_device *dev) | |||
980 | } | 979 | } |
981 | EXPORT_SYMBOL(ip_tunnel_get_link_net); | 980 | EXPORT_SYMBOL(ip_tunnel_get_link_net); |
982 | 981 | ||
982 | int ip_tunnel_get_iflink(const struct net_device *dev) | ||
983 | { | ||
984 | struct ip_tunnel *tunnel = netdev_priv(dev); | ||
985 | |||
986 | return tunnel->parms.link; | ||
987 | } | ||
988 | EXPORT_SYMBOL(ip_tunnel_get_iflink); | ||
989 | |||
983 | int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, | 990 | int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, |
984 | struct rtnl_link_ops *ops, char *devname) | 991 | struct rtnl_link_ops *ops, char *devname) |
985 | { | 992 | { |
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 5a6e27054f0a..9f7269f3c54a 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c | |||
@@ -60,7 +60,7 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi, | |||
60 | 60 | ||
61 | tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, | 61 | tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, |
62 | iph->saddr, iph->daddr, 0); | 62 | iph->saddr, iph->daddr, 0); |
63 | if (tunnel != NULL) { | 63 | if (tunnel) { |
64 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | 64 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
65 | goto drop; | 65 | goto drop; |
66 | 66 | ||
@@ -341,6 +341,7 @@ static const struct net_device_ops vti_netdev_ops = { | |||
341 | .ndo_do_ioctl = vti_tunnel_ioctl, | 341 | .ndo_do_ioctl = vti_tunnel_ioctl, |
342 | .ndo_change_mtu = ip_tunnel_change_mtu, | 342 | .ndo_change_mtu = ip_tunnel_change_mtu, |
343 | .ndo_get_stats64 = ip_tunnel_get_stats64, | 343 | .ndo_get_stats64 = ip_tunnel_get_stats64, |
344 | .ndo_get_iflink = ip_tunnel_get_iflink, | ||
344 | }; | 345 | }; |
345 | 346 | ||
346 | static void vti_tunnel_setup(struct net_device *dev) | 347 | static void vti_tunnel_setup(struct net_device *dev) |
@@ -361,7 +362,6 @@ static int vti_tunnel_init(struct net_device *dev) | |||
361 | dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); | 362 | dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); |
362 | dev->mtu = ETH_DATA_LEN; | 363 | dev->mtu = ETH_DATA_LEN; |
363 | dev->flags = IFF_NOARP; | 364 | dev->flags = IFF_NOARP; |
364 | dev->iflink = 0; | ||
365 | dev->addr_len = 4; | 365 | dev->addr_len = 4; |
366 | dev->features |= NETIF_F_LLTX; | 366 | dev->features |= NETIF_F_LLTX; |
367 | netif_keep_dst(dev); | 367 | netif_keep_dst(dev); |
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index c0855d50a3fa..d97f4f2787f5 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c | |||
@@ -63,7 +63,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) | |||
63 | struct xfrm_state *t; | 63 | struct xfrm_state *t; |
64 | 64 | ||
65 | t = xfrm_state_alloc(net); | 65 | t = xfrm_state_alloc(net); |
66 | if (t == NULL) | 66 | if (!t) |
67 | goto out; | 67 | goto out; |
68 | 68 | ||
69 | t->id.proto = IPPROTO_IPIP; | 69 | t->id.proto = IPPROTO_IPIP; |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index b26376ef87f6..8e7328c6a390 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
@@ -504,7 +504,8 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
504 | if (!net_eq(dev_net(dev), &init_net)) | 504 | if (!net_eq(dev_net(dev), &init_net)) |
505 | goto drop; | 505 | goto drop; |
506 | 506 | ||
507 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) | 507 | skb = skb_share_check(skb, GFP_ATOMIC); |
508 | if (!skb) | ||
508 | return NET_RX_DROP; | 509 | return NET_RX_DROP; |
509 | 510 | ||
510 | if (!pskb_may_pull(skb, sizeof(struct arphdr))) | 511 | if (!pskb_may_pull(skb, sizeof(struct arphdr))) |
@@ -958,7 +959,8 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str | |||
958 | if (skb->pkt_type == PACKET_OTHERHOST) | 959 | if (skb->pkt_type == PACKET_OTHERHOST) |
959 | goto drop; | 960 | goto drop; |
960 | 961 | ||
961 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) | 962 | skb = skb_share_check(skb, GFP_ATOMIC); |
963 | if (!skb) | ||
962 | return NET_RX_DROP; | 964 | return NET_RX_DROP; |
963 | 965 | ||
964 | if (!pskb_may_pull(skb, | 966 | if (!pskb_may_pull(skb, |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index bfbcc85c02ee..ff96396ebec5 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -144,7 +144,7 @@ static int ipip_err(struct sk_buff *skb, u32 info) | |||
144 | err = -ENOENT; | 144 | err = -ENOENT; |
145 | t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, | 145 | t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, |
146 | iph->daddr, iph->saddr, 0); | 146 | iph->daddr, iph->saddr, 0); |
147 | if (t == NULL) | 147 | if (!t) |
148 | goto out; | 148 | goto out; |
149 | 149 | ||
150 | if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { | 150 | if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { |
@@ -272,6 +272,7 @@ static const struct net_device_ops ipip_netdev_ops = { | |||
272 | .ndo_do_ioctl = ipip_tunnel_ioctl, | 272 | .ndo_do_ioctl = ipip_tunnel_ioctl, |
273 | .ndo_change_mtu = ip_tunnel_change_mtu, | 273 | .ndo_change_mtu = ip_tunnel_change_mtu, |
274 | .ndo_get_stats64 = ip_tunnel_get_stats64, | 274 | .ndo_get_stats64 = ip_tunnel_get_stats64, |
275 | .ndo_get_iflink = ip_tunnel_get_iflink, | ||
275 | }; | 276 | }; |
276 | 277 | ||
277 | #define IPIP_FEATURES (NETIF_F_SG | \ | 278 | #define IPIP_FEATURES (NETIF_F_SG | \ |
@@ -286,7 +287,6 @@ static void ipip_tunnel_setup(struct net_device *dev) | |||
286 | 287 | ||
287 | dev->type = ARPHRD_TUNNEL; | 288 | dev->type = ARPHRD_TUNNEL; |
288 | dev->flags = IFF_NOARP; | 289 | dev->flags = IFF_NOARP; |
289 | dev->iflink = 0; | ||
290 | dev->addr_len = 4; | 290 | dev->addr_len = 4; |
291 | dev->features |= NETIF_F_LLTX; | 291 | dev->features |= NETIF_F_LLTX; |
292 | netif_keep_dst(dev); | 292 | netif_keep_dst(dev); |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index b4a545d24adb..3a2c0162c3ba 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -189,7 +189,7 @@ static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp, | |||
189 | } | 189 | } |
190 | 190 | ||
191 | mrt = ipmr_get_table(rule->fr_net, rule->table); | 191 | mrt = ipmr_get_table(rule->fr_net, rule->table); |
192 | if (mrt == NULL) | 192 | if (!mrt) |
193 | return -EAGAIN; | 193 | return -EAGAIN; |
194 | res->mrt = mrt; | 194 | res->mrt = mrt; |
195 | return 0; | 195 | return 0; |
@@ -253,7 +253,7 @@ static int __net_init ipmr_rules_init(struct net *net) | |||
253 | INIT_LIST_HEAD(&net->ipv4.mr_tables); | 253 | INIT_LIST_HEAD(&net->ipv4.mr_tables); |
254 | 254 | ||
255 | mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); | 255 | mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); |
256 | if (mrt == NULL) { | 256 | if (!mrt) { |
257 | err = -ENOMEM; | 257 | err = -ENOMEM; |
258 | goto err1; | 258 | goto err1; |
259 | } | 259 | } |
@@ -266,7 +266,7 @@ static int __net_init ipmr_rules_init(struct net *net) | |||
266 | return 0; | 266 | return 0; |
267 | 267 | ||
268 | err2: | 268 | err2: |
269 | kfree(mrt); | 269 | ipmr_free_table(mrt); |
270 | err1: | 270 | err1: |
271 | fib_rules_unregister(ops); | 271 | fib_rules_unregister(ops); |
272 | return err; | 272 | return err; |
@@ -276,11 +276,13 @@ static void __net_exit ipmr_rules_exit(struct net *net) | |||
276 | { | 276 | { |
277 | struct mr_table *mrt, *next; | 277 | struct mr_table *mrt, *next; |
278 | 278 | ||
279 | rtnl_lock(); | ||
279 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { | 280 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { |
280 | list_del(&mrt->list); | 281 | list_del(&mrt->list); |
281 | ipmr_free_table(mrt); | 282 | ipmr_free_table(mrt); |
282 | } | 283 | } |
283 | fib_rules_unregister(net->ipv4.mr_rules_ops); | 284 | fib_rules_unregister(net->ipv4.mr_rules_ops); |
285 | rtnl_unlock(); | ||
284 | } | 286 | } |
285 | #else | 287 | #else |
286 | #define ipmr_for_each_table(mrt, net) \ | 288 | #define ipmr_for_each_table(mrt, net) \ |
@@ -306,7 +308,10 @@ static int __net_init ipmr_rules_init(struct net *net) | |||
306 | 308 | ||
307 | static void __net_exit ipmr_rules_exit(struct net *net) | 309 | static void __net_exit ipmr_rules_exit(struct net *net) |
308 | { | 310 | { |
311 | rtnl_lock(); | ||
309 | ipmr_free_table(net->ipv4.mrt); | 312 | ipmr_free_table(net->ipv4.mrt); |
313 | net->ipv4.mrt = NULL; | ||
314 | rtnl_unlock(); | ||
310 | } | 315 | } |
311 | #endif | 316 | #endif |
312 | 317 | ||
@@ -316,11 +321,11 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) | |||
316 | unsigned int i; | 321 | unsigned int i; |
317 | 322 | ||
318 | mrt = ipmr_get_table(net, id); | 323 | mrt = ipmr_get_table(net, id); |
319 | if (mrt != NULL) | 324 | if (mrt) |
320 | return mrt; | 325 | return mrt; |
321 | 326 | ||
322 | mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); | 327 | mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); |
323 | if (mrt == NULL) | 328 | if (!mrt) |
324 | return NULL; | 329 | return NULL; |
325 | write_pnet(&mrt->net, net); | 330 | write_pnet(&mrt->net, net); |
326 | mrt->id = id; | 331 | mrt->id = id; |
@@ -422,7 +427,7 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) | |||
422 | dev->flags |= IFF_MULTICAST; | 427 | dev->flags |= IFF_MULTICAST; |
423 | 428 | ||
424 | in_dev = __in_dev_get_rtnl(dev); | 429 | in_dev = __in_dev_get_rtnl(dev); |
425 | if (in_dev == NULL) | 430 | if (!in_dev) |
426 | goto failure; | 431 | goto failure; |
427 | 432 | ||
428 | ipv4_devconf_setall(in_dev); | 433 | ipv4_devconf_setall(in_dev); |
@@ -473,8 +478,14 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) | |||
473 | return NETDEV_TX_OK; | 478 | return NETDEV_TX_OK; |
474 | } | 479 | } |
475 | 480 | ||
481 | static int reg_vif_get_iflink(const struct net_device *dev) | ||
482 | { | ||
483 | return 0; | ||
484 | } | ||
485 | |||
476 | static const struct net_device_ops reg_vif_netdev_ops = { | 486 | static const struct net_device_ops reg_vif_netdev_ops = { |
477 | .ndo_start_xmit = reg_vif_xmit, | 487 | .ndo_start_xmit = reg_vif_xmit, |
488 | .ndo_get_iflink = reg_vif_get_iflink, | ||
478 | }; | 489 | }; |
479 | 490 | ||
480 | static void reg_vif_setup(struct net_device *dev) | 491 | static void reg_vif_setup(struct net_device *dev) |
@@ -500,7 +511,7 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) | |||
500 | 511 | ||
501 | dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup); | 512 | dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup); |
502 | 513 | ||
503 | if (dev == NULL) | 514 | if (!dev) |
504 | return NULL; | 515 | return NULL; |
505 | 516 | ||
506 | dev_net_set(dev, net); | 517 | dev_net_set(dev, net); |
@@ -509,7 +520,6 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) | |||
509 | free_netdev(dev); | 520 | free_netdev(dev); |
510 | return NULL; | 521 | return NULL; |
511 | } | 522 | } |
512 | dev->iflink = 0; | ||
513 | 523 | ||
514 | rcu_read_lock(); | 524 | rcu_read_lock(); |
515 | in_dev = __in_dev_get_rcu(dev); | 525 | in_dev = __in_dev_get_rcu(dev); |
@@ -757,7 +767,7 @@ static int vif_add(struct net *net, struct mr_table *mrt, | |||
757 | case 0: | 767 | case 0: |
758 | if (vifc->vifc_flags == VIFF_USE_IFINDEX) { | 768 | if (vifc->vifc_flags == VIFF_USE_IFINDEX) { |
759 | dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex); | 769 | dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex); |
760 | if (dev && __in_dev_get_rtnl(dev) == NULL) { | 770 | if (dev && !__in_dev_get_rtnl(dev)) { |
761 | dev_put(dev); | 771 | dev_put(dev); |
762 | return -EADDRNOTAVAIL; | 772 | return -EADDRNOTAVAIL; |
763 | } | 773 | } |
@@ -801,7 +811,7 @@ static int vif_add(struct net *net, struct mr_table *mrt, | |||
801 | v->pkt_out = 0; | 811 | v->pkt_out = 0; |
802 | v->link = dev->ifindex; | 812 | v->link = dev->ifindex; |
803 | if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER)) | 813 | if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER)) |
804 | v->link = dev->iflink; | 814 | v->link = dev_get_iflink(dev); |
805 | 815 | ||
806 | /* And finish update writing critical data */ | 816 | /* And finish update writing critical data */ |
807 | write_lock_bh(&mrt_lock); | 817 | write_lock_bh(&mrt_lock); |
@@ -1003,7 +1013,7 @@ static int ipmr_cache_report(struct mr_table *mrt, | |||
1003 | 1013 | ||
1004 | rcu_read_lock(); | 1014 | rcu_read_lock(); |
1005 | mroute_sk = rcu_dereference(mrt->mroute_sk); | 1015 | mroute_sk = rcu_dereference(mrt->mroute_sk); |
1006 | if (mroute_sk == NULL) { | 1016 | if (!mroute_sk) { |
1007 | rcu_read_unlock(); | 1017 | rcu_read_unlock(); |
1008 | kfree_skb(skb); | 1018 | kfree_skb(skb); |
1009 | return -EINVAL; | 1019 | return -EINVAL; |
@@ -1156,7 +1166,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, | |||
1156 | return -EINVAL; | 1166 | return -EINVAL; |
1157 | 1167 | ||
1158 | c = ipmr_cache_alloc(); | 1168 | c = ipmr_cache_alloc(); |
1159 | if (c == NULL) | 1169 | if (!c) |
1160 | return -ENOMEM; | 1170 | return -ENOMEM; |
1161 | 1171 | ||
1162 | c->mfc_origin = mfc->mfcc_origin.s_addr; | 1172 | c->mfc_origin = mfc->mfcc_origin.s_addr; |
@@ -1278,7 +1288,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
1278 | return -EOPNOTSUPP; | 1288 | return -EOPNOTSUPP; |
1279 | 1289 | ||
1280 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | 1290 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); |
1281 | if (mrt == NULL) | 1291 | if (!mrt) |
1282 | return -ENOENT; | 1292 | return -ENOENT; |
1283 | 1293 | ||
1284 | if (optname != MRT_INIT) { | 1294 | if (optname != MRT_INIT) { |
@@ -1441,7 +1451,7 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int | |||
1441 | return -EOPNOTSUPP; | 1451 | return -EOPNOTSUPP; |
1442 | 1452 | ||
1443 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | 1453 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); |
1444 | if (mrt == NULL) | 1454 | if (!mrt) |
1445 | return -ENOENT; | 1455 | return -ENOENT; |
1446 | 1456 | ||
1447 | if (optname != MRT_VERSION && | 1457 | if (optname != MRT_VERSION && |
@@ -1487,7 +1497,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1487 | struct mr_table *mrt; | 1497 | struct mr_table *mrt; |
1488 | 1498 | ||
1489 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | 1499 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); |
1490 | if (mrt == NULL) | 1500 | if (!mrt) |
1491 | return -ENOENT; | 1501 | return -ENOENT; |
1492 | 1502 | ||
1493 | switch (cmd) { | 1503 | switch (cmd) { |
@@ -1561,7 +1571,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) | |||
1561 | struct mr_table *mrt; | 1571 | struct mr_table *mrt; |
1562 | 1572 | ||
1563 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | 1573 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); |
1564 | if (mrt == NULL) | 1574 | if (!mrt) |
1565 | return -ENOENT; | 1575 | return -ENOENT; |
1566 | 1576 | ||
1567 | switch (cmd) { | 1577 | switch (cmd) { |
@@ -1669,7 +1679,7 @@ static void ip_encap(struct net *net, struct sk_buff *skb, | |||
1669 | nf_reset(skb); | 1679 | nf_reset(skb); |
1670 | } | 1680 | } |
1671 | 1681 | ||
1672 | static inline int ipmr_forward_finish(struct sk_buff *skb) | 1682 | static inline int ipmr_forward_finish(struct sock *sk, struct sk_buff *skb) |
1673 | { | 1683 | { |
1674 | struct ip_options *opt = &(IPCB(skb)->opt); | 1684 | struct ip_options *opt = &(IPCB(skb)->opt); |
1675 | 1685 | ||
@@ -1679,7 +1689,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb) | |||
1679 | if (unlikely(opt->optlen)) | 1689 | if (unlikely(opt->optlen)) |
1680 | ip_forward_options(skb); | 1690 | ip_forward_options(skb); |
1681 | 1691 | ||
1682 | return dst_output(skb); | 1692 | return dst_output_sk(sk, skb); |
1683 | } | 1693 | } |
1684 | 1694 | ||
1685 | /* | 1695 | /* |
@@ -1696,7 +1706,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, | |||
1696 | struct flowi4 fl4; | 1706 | struct flowi4 fl4; |
1697 | int encap = 0; | 1707 | int encap = 0; |
1698 | 1708 | ||
1699 | if (vif->dev == NULL) | 1709 | if (!vif->dev) |
1700 | goto out_free; | 1710 | goto out_free; |
1701 | 1711 | ||
1702 | #ifdef CONFIG_IP_PIMSM | 1712 | #ifdef CONFIG_IP_PIMSM |
@@ -1778,7 +1788,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, | |||
1778 | * not mrouter) cannot join to more than one interface - it will | 1788 | * not mrouter) cannot join to more than one interface - it will |
1779 | * result in receiving multiple packets. | 1789 | * result in receiving multiple packets. |
1780 | */ | 1790 | */ |
1781 | NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev, | 1791 | NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, NULL, skb, |
1792 | skb->dev, dev, | ||
1782 | ipmr_forward_finish); | 1793 | ipmr_forward_finish); |
1783 | return; | 1794 | return; |
1784 | 1795 | ||
@@ -1987,7 +1998,7 @@ int ip_mr_input(struct sk_buff *skb) | |||
1987 | 1998 | ||
1988 | /* already under rcu_read_lock() */ | 1999 | /* already under rcu_read_lock() */ |
1989 | cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); | 2000 | cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); |
1990 | if (cache == NULL) { | 2001 | if (!cache) { |
1991 | int vif = ipmr_find_vif(mrt, skb->dev); | 2002 | int vif = ipmr_find_vif(mrt, skb->dev); |
1992 | 2003 | ||
1993 | if (vif >= 0) | 2004 | if (vif >= 0) |
@@ -1998,13 +2009,13 @@ int ip_mr_input(struct sk_buff *skb) | |||
1998 | /* | 2009 | /* |
1999 | * No usable cache entry | 2010 | * No usable cache entry |
2000 | */ | 2011 | */ |
2001 | if (cache == NULL) { | 2012 | if (!cache) { |
2002 | int vif; | 2013 | int vif; |
2003 | 2014 | ||
2004 | if (local) { | 2015 | if (local) { |
2005 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); | 2016 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); |
2006 | ip_local_deliver(skb); | 2017 | ip_local_deliver(skb); |
2007 | if (skb2 == NULL) | 2018 | if (!skb2) |
2008 | return -ENOBUFS; | 2019 | return -ENOBUFS; |
2009 | skb = skb2; | 2020 | skb = skb2; |
2010 | } | 2021 | } |
@@ -2063,7 +2074,7 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb, | |||
2063 | reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev; | 2074 | reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev; |
2064 | read_unlock(&mrt_lock); | 2075 | read_unlock(&mrt_lock); |
2065 | 2076 | ||
2066 | if (reg_dev == NULL) | 2077 | if (!reg_dev) |
2067 | return 1; | 2078 | return 1; |
2068 | 2079 | ||
2069 | skb->mac_header = skb->network_header; | 2080 | skb->mac_header = skb->network_header; |
@@ -2193,18 +2204,18 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, | |||
2193 | int err; | 2204 | int err; |
2194 | 2205 | ||
2195 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); | 2206 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); |
2196 | if (mrt == NULL) | 2207 | if (!mrt) |
2197 | return -ENOENT; | 2208 | return -ENOENT; |
2198 | 2209 | ||
2199 | rcu_read_lock(); | 2210 | rcu_read_lock(); |
2200 | cache = ipmr_cache_find(mrt, saddr, daddr); | 2211 | cache = ipmr_cache_find(mrt, saddr, daddr); |
2201 | if (cache == NULL && skb->dev) { | 2212 | if (!cache && skb->dev) { |
2202 | int vif = ipmr_find_vif(mrt, skb->dev); | 2213 | int vif = ipmr_find_vif(mrt, skb->dev); |
2203 | 2214 | ||
2204 | if (vif >= 0) | 2215 | if (vif >= 0) |
2205 | cache = ipmr_cache_find_any(mrt, daddr, vif); | 2216 | cache = ipmr_cache_find_any(mrt, daddr, vif); |
2206 | } | 2217 | } |
2207 | if (cache == NULL) { | 2218 | if (!cache) { |
2208 | struct sk_buff *skb2; | 2219 | struct sk_buff *skb2; |
2209 | struct iphdr *iph; | 2220 | struct iphdr *iph; |
2210 | struct net_device *dev; | 2221 | struct net_device *dev; |
@@ -2262,7 +2273,7 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, | |||
2262 | int err; | 2273 | int err; |
2263 | 2274 | ||
2264 | nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags); | 2275 | nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags); |
2265 | if (nlh == NULL) | 2276 | if (!nlh) |
2266 | return -EMSGSIZE; | 2277 | return -EMSGSIZE; |
2267 | 2278 | ||
2268 | rtm = nlmsg_data(nlh); | 2279 | rtm = nlmsg_data(nlh); |
@@ -2327,7 +2338,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc, | |||
2327 | 2338 | ||
2328 | skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif), | 2339 | skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif), |
2329 | GFP_ATOMIC); | 2340 | GFP_ATOMIC); |
2330 | if (skb == NULL) | 2341 | if (!skb) |
2331 | goto errout; | 2342 | goto errout; |
2332 | 2343 | ||
2333 | err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0); | 2344 | err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0); |
@@ -2442,7 +2453,7 @@ static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) | |||
2442 | struct mr_table *mrt; | 2453 | struct mr_table *mrt; |
2443 | 2454 | ||
2444 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); | 2455 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); |
2445 | if (mrt == NULL) | 2456 | if (!mrt) |
2446 | return ERR_PTR(-ENOENT); | 2457 | return ERR_PTR(-ENOENT); |
2447 | 2458 | ||
2448 | iter->mrt = mrt; | 2459 | iter->mrt = mrt; |
@@ -2561,7 +2572,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) | |||
2561 | struct mr_table *mrt; | 2572 | struct mr_table *mrt; |
2562 | 2573 | ||
2563 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); | 2574 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); |
2564 | if (mrt == NULL) | 2575 | if (!mrt) |
2565 | return ERR_PTR(-ENOENT); | 2576 | return ERR_PTR(-ENOENT); |
2566 | 2577 | ||
2567 | it->mrt = mrt; | 2578 | it->mrt = mrt; |
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 7ebd6e37875c..65de0684e22a 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c | |||
@@ -94,7 +94,7 @@ static void nf_ip_saveroute(const struct sk_buff *skb, | |||
94 | { | 94 | { |
95 | struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry); | 95 | struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry); |
96 | 96 | ||
97 | if (entry->hook == NF_INET_LOCAL_OUT) { | 97 | if (entry->state.hook == NF_INET_LOCAL_OUT) { |
98 | const struct iphdr *iph = ip_hdr(skb); | 98 | const struct iphdr *iph = ip_hdr(skb); |
99 | 99 | ||
100 | rt_info->tos = iph->tos; | 100 | rt_info->tos = iph->tos; |
@@ -109,7 +109,7 @@ static int nf_ip_reroute(struct sk_buff *skb, | |||
109 | { | 109 | { |
110 | const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry); | 110 | const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry); |
111 | 111 | ||
112 | if (entry->hook == NF_INET_LOCAL_OUT) { | 112 | if (entry->state.hook == NF_INET_LOCAL_OUT) { |
113 | const struct iphdr *iph = ip_hdr(skb); | 113 | const struct iphdr *iph = ip_hdr(skb); |
114 | 114 | ||
115 | if (!(iph->tos == rt_info->tos && | 115 | if (!(iph->tos == rt_info->tos && |
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index f95b6f93814b..13bfe84bf3ca 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -248,8 +248,7 @@ struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry) | |||
248 | 248 | ||
249 | unsigned int arpt_do_table(struct sk_buff *skb, | 249 | unsigned int arpt_do_table(struct sk_buff *skb, |
250 | unsigned int hook, | 250 | unsigned int hook, |
251 | const struct net_device *in, | 251 | const struct nf_hook_state *state, |
252 | const struct net_device *out, | ||
253 | struct xt_table *table) | 252 | struct xt_table *table) |
254 | { | 253 | { |
255 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); | 254 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); |
@@ -265,8 +264,8 @@ unsigned int arpt_do_table(struct sk_buff *skb, | |||
265 | if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) | 264 | if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) |
266 | return NF_DROP; | 265 | return NF_DROP; |
267 | 266 | ||
268 | indev = in ? in->name : nulldevname; | 267 | indev = state->in ? state->in->name : nulldevname; |
269 | outdev = out ? out->name : nulldevname; | 268 | outdev = state->out ? state->out->name : nulldevname; |
270 | 269 | ||
271 | local_bh_disable(); | 270 | local_bh_disable(); |
272 | addend = xt_write_recseq_begin(); | 271 | addend = xt_write_recseq_begin(); |
@@ -281,8 +280,8 @@ unsigned int arpt_do_table(struct sk_buff *skb, | |||
281 | e = get_entry(table_base, private->hook_entry[hook]); | 280 | e = get_entry(table_base, private->hook_entry[hook]); |
282 | back = get_entry(table_base, private->underflow[hook]); | 281 | back = get_entry(table_base, private->underflow[hook]); |
283 | 282 | ||
284 | acpar.in = in; | 283 | acpar.in = state->in; |
285 | acpar.out = out; | 284 | acpar.out = state->out; |
286 | acpar.hooknum = hook; | 285 | acpar.hooknum = hook; |
287 | acpar.family = NFPROTO_ARP; | 286 | acpar.family = NFPROTO_ARP; |
288 | acpar.hotdrop = false; | 287 | acpar.hotdrop = false; |
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c index 802ddecb30b8..93876d03120c 100644 --- a/net/ipv4/netfilter/arptable_filter.c +++ b/net/ipv4/netfilter/arptable_filter.c | |||
@@ -28,12 +28,11 @@ static const struct xt_table packet_filter = { | |||
28 | /* The work comes in here from netfilter.c */ | 28 | /* The work comes in here from netfilter.c */ |
29 | static unsigned int | 29 | static unsigned int |
30 | arptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, | 30 | arptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, |
31 | const struct net_device *in, const struct net_device *out, | 31 | const struct nf_hook_state *state) |
32 | int (*okfn)(struct sk_buff *)) | ||
33 | { | 32 | { |
34 | const struct net *net = dev_net((in != NULL) ? in : out); | 33 | const struct net *net = dev_net(state->in ? state->in : state->out); |
35 | 34 | ||
36 | return arpt_do_table(skb, ops->hooknum, in, out, | 35 | return arpt_do_table(skb, ops->hooknum, state, |
37 | net->ipv4.arptable_filter); | 36 | net->ipv4.arptable_filter); |
38 | } | 37 | } |
39 | 38 | ||
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index cf5e82f39d3b..c69db7fa25ee 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -288,8 +288,7 @@ struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry) | |||
288 | unsigned int | 288 | unsigned int |
289 | ipt_do_table(struct sk_buff *skb, | 289 | ipt_do_table(struct sk_buff *skb, |
290 | unsigned int hook, | 290 | unsigned int hook, |
291 | const struct net_device *in, | 291 | const struct nf_hook_state *state, |
292 | const struct net_device *out, | ||
293 | struct xt_table *table) | 292 | struct xt_table *table) |
294 | { | 293 | { |
295 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); | 294 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); |
@@ -306,8 +305,8 @@ ipt_do_table(struct sk_buff *skb, | |||
306 | 305 | ||
307 | /* Initialization */ | 306 | /* Initialization */ |
308 | ip = ip_hdr(skb); | 307 | ip = ip_hdr(skb); |
309 | indev = in ? in->name : nulldevname; | 308 | indev = state->in ? state->in->name : nulldevname; |
310 | outdev = out ? out->name : nulldevname; | 309 | outdev = state->out ? state->out->name : nulldevname; |
311 | /* We handle fragments by dealing with the first fragment as | 310 | /* We handle fragments by dealing with the first fragment as |
312 | * if it was a normal packet. All other fragments are treated | 311 | * if it was a normal packet. All other fragments are treated |
313 | * normally, except that they will NEVER match rules that ask | 312 | * normally, except that they will NEVER match rules that ask |
@@ -317,8 +316,8 @@ ipt_do_table(struct sk_buff *skb, | |||
317 | acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET; | 316 | acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET; |
318 | acpar.thoff = ip_hdrlen(skb); | 317 | acpar.thoff = ip_hdrlen(skb); |
319 | acpar.hotdrop = false; | 318 | acpar.hotdrop = false; |
320 | acpar.in = in; | 319 | acpar.in = state->in; |
321 | acpar.out = out; | 320 | acpar.out = state->out; |
322 | acpar.family = NFPROTO_IPV4; | 321 | acpar.family = NFPROTO_IPV4; |
323 | acpar.hooknum = hook; | 322 | acpar.hooknum = hook; |
324 | 323 | ||
@@ -370,7 +369,7 @@ ipt_do_table(struct sk_buff *skb, | |||
370 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) | 369 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) |
371 | /* The packet is traced: log it */ | 370 | /* The packet is traced: log it */ |
372 | if (unlikely(skb->nf_trace)) | 371 | if (unlikely(skb->nf_trace)) |
373 | trace_packet(skb, hook, in, out, | 372 | trace_packet(skb, hook, state->in, state->out, |
374 | table->name, private, e); | 373 | table->name, private, e); |
375 | #endif | 374 | #endif |
376 | /* Standard target? */ | 375 | /* Standard target? */ |
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index f75e9df5e017..771ab3d01ad3 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -504,14 +504,12 @@ static void arp_print(struct arp_payload *payload) | |||
504 | static unsigned int | 504 | static unsigned int |
505 | arp_mangle(const struct nf_hook_ops *ops, | 505 | arp_mangle(const struct nf_hook_ops *ops, |
506 | struct sk_buff *skb, | 506 | struct sk_buff *skb, |
507 | const struct net_device *in, | 507 | const struct nf_hook_state *state) |
508 | const struct net_device *out, | ||
509 | int (*okfn)(struct sk_buff *)) | ||
510 | { | 508 | { |
511 | struct arphdr *arp = arp_hdr(skb); | 509 | struct arphdr *arp = arp_hdr(skb); |
512 | struct arp_payload *payload; | 510 | struct arp_payload *payload; |
513 | struct clusterip_config *c; | 511 | struct clusterip_config *c; |
514 | struct net *net = dev_net(in ? in : out); | 512 | struct net *net = dev_net(state->in ? state->in : state->out); |
515 | 513 | ||
516 | /* we don't care about non-ethernet and non-ipv4 ARP */ | 514 | /* we don't care about non-ethernet and non-ipv4 ARP */ |
517 | if (arp->ar_hrd != htons(ARPHRD_ETHER) || | 515 | if (arp->ar_hrd != htons(ARPHRD_ETHER) || |
@@ -536,10 +534,10 @@ arp_mangle(const struct nf_hook_ops *ops, | |||
536 | * addresses on different interfacs. However, in the CLUSTERIP case | 534 | * addresses on different interfacs. However, in the CLUSTERIP case |
537 | * this wouldn't work, since we didn't subscribe the mcast group on | 535 | * this wouldn't work, since we didn't subscribe the mcast group on |
538 | * other interfaces */ | 536 | * other interfaces */ |
539 | if (c->dev != out) { | 537 | if (c->dev != state->out) { |
540 | pr_debug("not mangling arp reply on different " | 538 | pr_debug("not mangling arp reply on different " |
541 | "interface: cip'%s'-skb'%s'\n", | 539 | "interface: cip'%s'-skb'%s'\n", |
542 | c->dev->name, out->name); | 540 | c->dev->name, state->out->name); |
543 | clusterip_config_put(c); | 541 | clusterip_config_put(c); |
544 | return NF_ACCEPT; | 542 | return NF_ACCEPT; |
545 | } | 543 | } |
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index a313c3fbeb46..e9e67793055f 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c | |||
@@ -300,11 +300,9 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) | |||
300 | 300 | ||
301 | static unsigned int ipv4_synproxy_hook(const struct nf_hook_ops *ops, | 301 | static unsigned int ipv4_synproxy_hook(const struct nf_hook_ops *ops, |
302 | struct sk_buff *skb, | 302 | struct sk_buff *skb, |
303 | const struct net_device *in, | 303 | const struct nf_hook_state *nhs) |
304 | const struct net_device *out, | ||
305 | int (*okfn)(struct sk_buff *)) | ||
306 | { | 304 | { |
307 | struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out)); | 305 | struct synproxy_net *snet = synproxy_pernet(dev_net(nhs->in ? : nhs->out)); |
308 | enum ip_conntrack_info ctinfo; | 306 | enum ip_conntrack_info ctinfo; |
309 | struct nf_conn *ct; | 307 | struct nf_conn *ct; |
310 | struct nf_conn_synproxy *synproxy; | 308 | struct nf_conn_synproxy *synproxy; |
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c index e08a74a243a8..a0f3beca52d2 100644 --- a/net/ipv4/netfilter/iptable_filter.c +++ b/net/ipv4/netfilter/iptable_filter.c | |||
@@ -34,8 +34,7 @@ static const struct xt_table packet_filter = { | |||
34 | 34 | ||
35 | static unsigned int | 35 | static unsigned int |
36 | iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, | 36 | iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, |
37 | const struct net_device *in, const struct net_device *out, | 37 | const struct nf_hook_state *state) |
38 | int (*okfn)(struct sk_buff *)) | ||
39 | { | 38 | { |
40 | const struct net *net; | 39 | const struct net *net; |
41 | 40 | ||
@@ -45,9 +44,8 @@ iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
45 | /* root is playing with raw sockets. */ | 44 | /* root is playing with raw sockets. */ |
46 | return NF_ACCEPT; | 45 | return NF_ACCEPT; |
47 | 46 | ||
48 | net = dev_net((in != NULL) ? in : out); | 47 | net = dev_net(state->in ? state->in : state->out); |
49 | return ipt_do_table(skb, ops->hooknum, in, out, | 48 | return ipt_do_table(skb, ops->hooknum, state, net->ipv4.iptable_filter); |
50 | net->ipv4.iptable_filter); | ||
51 | } | 49 | } |
52 | 50 | ||
53 | static struct nf_hook_ops *filter_ops __read_mostly; | 51 | static struct nf_hook_ops *filter_ops __read_mostly; |
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c index 6a5079c34bb3..62cbb8c5f4a8 100644 --- a/net/ipv4/netfilter/iptable_mangle.c +++ b/net/ipv4/netfilter/iptable_mangle.c | |||
@@ -37,8 +37,9 @@ static const struct xt_table packet_mangler = { | |||
37 | }; | 37 | }; |
38 | 38 | ||
39 | static unsigned int | 39 | static unsigned int |
40 | ipt_mangle_out(struct sk_buff *skb, const struct net_device *out) | 40 | ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state) |
41 | { | 41 | { |
42 | struct net_device *out = state->out; | ||
42 | unsigned int ret; | 43 | unsigned int ret; |
43 | const struct iphdr *iph; | 44 | const struct iphdr *iph; |
44 | u_int8_t tos; | 45 | u_int8_t tos; |
@@ -58,7 +59,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out) | |||
58 | daddr = iph->daddr; | 59 | daddr = iph->daddr; |
59 | tos = iph->tos; | 60 | tos = iph->tos; |
60 | 61 | ||
61 | ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out, | 62 | ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, state, |
62 | dev_net(out)->ipv4.iptable_mangle); | 63 | dev_net(out)->ipv4.iptable_mangle); |
63 | /* Reroute for ANY change. */ | 64 | /* Reroute for ANY change. */ |
64 | if (ret != NF_DROP && ret != NF_STOLEN) { | 65 | if (ret != NF_DROP && ret != NF_STOLEN) { |
@@ -81,18 +82,16 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out) | |||
81 | static unsigned int | 82 | static unsigned int |
82 | iptable_mangle_hook(const struct nf_hook_ops *ops, | 83 | iptable_mangle_hook(const struct nf_hook_ops *ops, |
83 | struct sk_buff *skb, | 84 | struct sk_buff *skb, |
84 | const struct net_device *in, | 85 | const struct nf_hook_state *state) |
85 | const struct net_device *out, | ||
86 | int (*okfn)(struct sk_buff *)) | ||
87 | { | 86 | { |
88 | if (ops->hooknum == NF_INET_LOCAL_OUT) | 87 | if (ops->hooknum == NF_INET_LOCAL_OUT) |
89 | return ipt_mangle_out(skb, out); | 88 | return ipt_mangle_out(skb, state); |
90 | if (ops->hooknum == NF_INET_POST_ROUTING) | 89 | if (ops->hooknum == NF_INET_POST_ROUTING) |
91 | return ipt_do_table(skb, ops->hooknum, in, out, | 90 | return ipt_do_table(skb, ops->hooknum, state, |
92 | dev_net(out)->ipv4.iptable_mangle); | 91 | dev_net(state->out)->ipv4.iptable_mangle); |
93 | /* PREROUTING/INPUT/FORWARD: */ | 92 | /* PREROUTING/INPUT/FORWARD: */ |
94 | return ipt_do_table(skb, ops->hooknum, in, out, | 93 | return ipt_do_table(skb, ops->hooknum, state, |
95 | dev_net(in)->ipv4.iptable_mangle); | 94 | dev_net(state->in)->ipv4.iptable_mangle); |
96 | } | 95 | } |
97 | 96 | ||
98 | static struct nf_hook_ops *mangle_ops __read_mostly; | 97 | static struct nf_hook_ops *mangle_ops __read_mostly; |
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c index 6b67d7e9a75d..0d4d9cdf98a4 100644 --- a/net/ipv4/netfilter/iptable_nat.c +++ b/net/ipv4/netfilter/iptable_nat.c | |||
@@ -30,49 +30,40 @@ static const struct xt_table nf_nat_ipv4_table = { | |||
30 | 30 | ||
31 | static unsigned int iptable_nat_do_chain(const struct nf_hook_ops *ops, | 31 | static unsigned int iptable_nat_do_chain(const struct nf_hook_ops *ops, |
32 | struct sk_buff *skb, | 32 | struct sk_buff *skb, |
33 | const struct net_device *in, | 33 | const struct nf_hook_state *state, |
34 | const struct net_device *out, | ||
35 | struct nf_conn *ct) | 34 | struct nf_conn *ct) |
36 | { | 35 | { |
37 | struct net *net = nf_ct_net(ct); | 36 | struct net *net = nf_ct_net(ct); |
38 | 37 | ||
39 | return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.nat_table); | 38 | return ipt_do_table(skb, ops->hooknum, state, net->ipv4.nat_table); |
40 | } | 39 | } |
41 | 40 | ||
42 | static unsigned int iptable_nat_ipv4_fn(const struct nf_hook_ops *ops, | 41 | static unsigned int iptable_nat_ipv4_fn(const struct nf_hook_ops *ops, |
43 | struct sk_buff *skb, | 42 | struct sk_buff *skb, |
44 | const struct net_device *in, | 43 | const struct nf_hook_state *state) |
45 | const struct net_device *out, | ||
46 | int (*okfn)(struct sk_buff *)) | ||
47 | { | 44 | { |
48 | return nf_nat_ipv4_fn(ops, skb, in, out, iptable_nat_do_chain); | 45 | return nf_nat_ipv4_fn(ops, skb, state, iptable_nat_do_chain); |
49 | } | 46 | } |
50 | 47 | ||
51 | static unsigned int iptable_nat_ipv4_in(const struct nf_hook_ops *ops, | 48 | static unsigned int iptable_nat_ipv4_in(const struct nf_hook_ops *ops, |
52 | struct sk_buff *skb, | 49 | struct sk_buff *skb, |
53 | const struct net_device *in, | 50 | const struct nf_hook_state *state) |
54 | const struct net_device *out, | ||
55 | int (*okfn)(struct sk_buff *)) | ||
56 | { | 51 | { |
57 | return nf_nat_ipv4_in(ops, skb, in, out, iptable_nat_do_chain); | 52 | return nf_nat_ipv4_in(ops, skb, state, iptable_nat_do_chain); |
58 | } | 53 | } |
59 | 54 | ||
60 | static unsigned int iptable_nat_ipv4_out(const struct nf_hook_ops *ops, | 55 | static unsigned int iptable_nat_ipv4_out(const struct nf_hook_ops *ops, |
61 | struct sk_buff *skb, | 56 | struct sk_buff *skb, |
62 | const struct net_device *in, | 57 | const struct nf_hook_state *state) |
63 | const struct net_device *out, | ||
64 | int (*okfn)(struct sk_buff *)) | ||
65 | { | 58 | { |
66 | return nf_nat_ipv4_out(ops, skb, in, out, iptable_nat_do_chain); | 59 | return nf_nat_ipv4_out(ops, skb, state, iptable_nat_do_chain); |
67 | } | 60 | } |
68 | 61 | ||
69 | static unsigned int iptable_nat_ipv4_local_fn(const struct nf_hook_ops *ops, | 62 | static unsigned int iptable_nat_ipv4_local_fn(const struct nf_hook_ops *ops, |
70 | struct sk_buff *skb, | 63 | struct sk_buff *skb, |
71 | const struct net_device *in, | 64 | const struct nf_hook_state *state) |
72 | const struct net_device *out, | ||
73 | int (*okfn)(struct sk_buff *)) | ||
74 | { | 65 | { |
75 | return nf_nat_ipv4_local_fn(ops, skb, in, out, iptable_nat_do_chain); | 66 | return nf_nat_ipv4_local_fn(ops, skb, state, iptable_nat_do_chain); |
76 | } | 67 | } |
77 | 68 | ||
78 | static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = { | 69 | static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = { |
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c index b2f7e8f98316..0356e6da4bb7 100644 --- a/net/ipv4/netfilter/iptable_raw.c +++ b/net/ipv4/netfilter/iptable_raw.c | |||
@@ -21,8 +21,7 @@ static const struct xt_table packet_raw = { | |||
21 | /* The work comes in here from netfilter.c. */ | 21 | /* The work comes in here from netfilter.c. */ |
22 | static unsigned int | 22 | static unsigned int |
23 | iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, | 23 | iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, |
24 | const struct net_device *in, const struct net_device *out, | 24 | const struct nf_hook_state *state) |
25 | int (*okfn)(struct sk_buff *)) | ||
26 | { | 25 | { |
27 | const struct net *net; | 26 | const struct net *net; |
28 | 27 | ||
@@ -32,8 +31,8 @@ iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
32 | /* root is playing with raw sockets. */ | 31 | /* root is playing with raw sockets. */ |
33 | return NF_ACCEPT; | 32 | return NF_ACCEPT; |
34 | 33 | ||
35 | net = dev_net((in != NULL) ? in : out); | 34 | net = dev_net(state->in ? state->in : state->out); |
36 | return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.iptable_raw); | 35 | return ipt_do_table(skb, ops->hooknum, state, net->ipv4.iptable_raw); |
37 | } | 36 | } |
38 | 37 | ||
39 | static struct nf_hook_ops *rawtable_ops __read_mostly; | 38 | static struct nf_hook_ops *rawtable_ops __read_mostly; |
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c index c86647ed2078..4bce3980ccd9 100644 --- a/net/ipv4/netfilter/iptable_security.c +++ b/net/ipv4/netfilter/iptable_security.c | |||
@@ -38,9 +38,7 @@ static const struct xt_table security_table = { | |||
38 | 38 | ||
39 | static unsigned int | 39 | static unsigned int |
40 | iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, | 40 | iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, |
41 | const struct net_device *in, | 41 | const struct nf_hook_state *state) |
42 | const struct net_device *out, | ||
43 | int (*okfn)(struct sk_buff *)) | ||
44 | { | 42 | { |
45 | const struct net *net; | 43 | const struct net *net; |
46 | 44 | ||
@@ -50,8 +48,8 @@ iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
50 | /* Somebody is playing with raw sockets. */ | 48 | /* Somebody is playing with raw sockets. */ |
51 | return NF_ACCEPT; | 49 | return NF_ACCEPT; |
52 | 50 | ||
53 | net = dev_net((in != NULL) ? in : out); | 51 | net = dev_net(state->in ? state->in : state->out); |
54 | return ipt_do_table(skb, ops->hooknum, in, out, | 52 | return ipt_do_table(skb, ops->hooknum, state, |
55 | net->ipv4.iptable_security); | 53 | net->ipv4.iptable_security); |
56 | } | 54 | } |
57 | 55 | ||
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 8c8d6642cbb0..30ad9554b5e9 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | |||
@@ -94,9 +94,7 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, | |||
94 | 94 | ||
95 | static unsigned int ipv4_helper(const struct nf_hook_ops *ops, | 95 | static unsigned int ipv4_helper(const struct nf_hook_ops *ops, |
96 | struct sk_buff *skb, | 96 | struct sk_buff *skb, |
97 | const struct net_device *in, | 97 | const struct nf_hook_state *state) |
98 | const struct net_device *out, | ||
99 | int (*okfn)(struct sk_buff *)) | ||
100 | { | 98 | { |
101 | struct nf_conn *ct; | 99 | struct nf_conn *ct; |
102 | enum ip_conntrack_info ctinfo; | 100 | enum ip_conntrack_info ctinfo; |
@@ -123,9 +121,7 @@ static unsigned int ipv4_helper(const struct nf_hook_ops *ops, | |||
123 | 121 | ||
124 | static unsigned int ipv4_confirm(const struct nf_hook_ops *ops, | 122 | static unsigned int ipv4_confirm(const struct nf_hook_ops *ops, |
125 | struct sk_buff *skb, | 123 | struct sk_buff *skb, |
126 | const struct net_device *in, | 124 | const struct nf_hook_state *state) |
127 | const struct net_device *out, | ||
128 | int (*okfn)(struct sk_buff *)) | ||
129 | { | 125 | { |
130 | struct nf_conn *ct; | 126 | struct nf_conn *ct; |
131 | enum ip_conntrack_info ctinfo; | 127 | enum ip_conntrack_info ctinfo; |
@@ -149,24 +145,20 @@ out: | |||
149 | 145 | ||
150 | static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops, | 146 | static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops, |
151 | struct sk_buff *skb, | 147 | struct sk_buff *skb, |
152 | const struct net_device *in, | 148 | const struct nf_hook_state *state) |
153 | const struct net_device *out, | ||
154 | int (*okfn)(struct sk_buff *)) | ||
155 | { | 149 | { |
156 | return nf_conntrack_in(dev_net(in), PF_INET, ops->hooknum, skb); | 150 | return nf_conntrack_in(dev_net(state->in), PF_INET, ops->hooknum, skb); |
157 | } | 151 | } |
158 | 152 | ||
159 | static unsigned int ipv4_conntrack_local(const struct nf_hook_ops *ops, | 153 | static unsigned int ipv4_conntrack_local(const struct nf_hook_ops *ops, |
160 | struct sk_buff *skb, | 154 | struct sk_buff *skb, |
161 | const struct net_device *in, | 155 | const struct nf_hook_state *state) |
162 | const struct net_device *out, | ||
163 | int (*okfn)(struct sk_buff *)) | ||
164 | { | 156 | { |
165 | /* root is playing with raw sockets. */ | 157 | /* root is playing with raw sockets. */ |
166 | if (skb->len < sizeof(struct iphdr) || | 158 | if (skb->len < sizeof(struct iphdr) || |
167 | ip_hdrlen(skb) < sizeof(struct iphdr)) | 159 | ip_hdrlen(skb) < sizeof(struct iphdr)) |
168 | return NF_ACCEPT; | 160 | return NF_ACCEPT; |
169 | return nf_conntrack_in(dev_net(out), PF_INET, ops->hooknum, skb); | 161 | return nf_conntrack_in(dev_net(state->out), PF_INET, ops->hooknum, skb); |
170 | } | 162 | } |
171 | 163 | ||
172 | /* Connection tracking may drop packets, but never alters them, so | 164 | /* Connection tracking may drop packets, but never alters them, so |
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c index 7e5ca6f2d0cd..c88b7d434718 100644 --- a/net/ipv4/netfilter/nf_defrag_ipv4.c +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c | |||
@@ -63,9 +63,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum, | |||
63 | 63 | ||
64 | static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops, | 64 | static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops, |
65 | struct sk_buff *skb, | 65 | struct sk_buff *skb, |
66 | const struct net_device *in, | 66 | const struct nf_hook_state *state) |
67 | const struct net_device *out, | ||
68 | int (*okfn)(struct sk_buff *)) | ||
69 | { | 67 | { |
70 | struct sock *sk = skb->sk; | 68 | struct sock *sk = skb->sk; |
71 | struct inet_sock *inet = inet_sk(skb->sk); | 69 | struct inet_sock *inet = inet_sk(skb->sk); |
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c index fc37711e11f3..e59cc05c09e9 100644 --- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c | |||
@@ -256,11 +256,10 @@ EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation); | |||
256 | 256 | ||
257 | unsigned int | 257 | unsigned int |
258 | nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, | 258 | nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, |
259 | const struct net_device *in, const struct net_device *out, | 259 | const struct nf_hook_state *state, |
260 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, | 260 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, |
261 | struct sk_buff *skb, | 261 | struct sk_buff *skb, |
262 | const struct net_device *in, | 262 | const struct nf_hook_state *state, |
263 | const struct net_device *out, | ||
264 | struct nf_conn *ct)) | 263 | struct nf_conn *ct)) |
265 | { | 264 | { |
266 | struct nf_conn *ct; | 265 | struct nf_conn *ct; |
@@ -309,7 +308,7 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
309 | if (!nf_nat_initialized(ct, maniptype)) { | 308 | if (!nf_nat_initialized(ct, maniptype)) { |
310 | unsigned int ret; | 309 | unsigned int ret; |
311 | 310 | ||
312 | ret = do_chain(ops, skb, in, out, ct); | 311 | ret = do_chain(ops, skb, state, ct); |
313 | if (ret != NF_ACCEPT) | 312 | if (ret != NF_ACCEPT) |
314 | return ret; | 313 | return ret; |
315 | 314 | ||
@@ -323,7 +322,8 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
323 | pr_debug("Already setup manip %s for ct %p\n", | 322 | pr_debug("Already setup manip %s for ct %p\n", |
324 | maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", | 323 | maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", |
325 | ct); | 324 | ct); |
326 | if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) | 325 | if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, |
326 | state->out)) | ||
327 | goto oif_changed; | 327 | goto oif_changed; |
328 | } | 328 | } |
329 | break; | 329 | break; |
@@ -332,7 +332,7 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
332 | /* ESTABLISHED */ | 332 | /* ESTABLISHED */ |
333 | NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || | 333 | NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || |
334 | ctinfo == IP_CT_ESTABLISHED_REPLY); | 334 | ctinfo == IP_CT_ESTABLISHED_REPLY); |
335 | if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) | 335 | if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out)) |
336 | goto oif_changed; | 336 | goto oif_changed; |
337 | } | 337 | } |
338 | 338 | ||
@@ -346,17 +346,16 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv4_fn); | |||
346 | 346 | ||
347 | unsigned int | 347 | unsigned int |
348 | nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb, | 348 | nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb, |
349 | const struct net_device *in, const struct net_device *out, | 349 | const struct nf_hook_state *state, |
350 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, | 350 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, |
351 | struct sk_buff *skb, | 351 | struct sk_buff *skb, |
352 | const struct net_device *in, | 352 | const struct nf_hook_state *state, |
353 | const struct net_device *out, | ||
354 | struct nf_conn *ct)) | 353 | struct nf_conn *ct)) |
355 | { | 354 | { |
356 | unsigned int ret; | 355 | unsigned int ret; |
357 | __be32 daddr = ip_hdr(skb)->daddr; | 356 | __be32 daddr = ip_hdr(skb)->daddr; |
358 | 357 | ||
359 | ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain); | 358 | ret = nf_nat_ipv4_fn(ops, skb, state, do_chain); |
360 | if (ret != NF_DROP && ret != NF_STOLEN && | 359 | if (ret != NF_DROP && ret != NF_STOLEN && |
361 | daddr != ip_hdr(skb)->daddr) | 360 | daddr != ip_hdr(skb)->daddr) |
362 | skb_dst_drop(skb); | 361 | skb_dst_drop(skb); |
@@ -367,11 +366,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv4_in); | |||
367 | 366 | ||
368 | unsigned int | 367 | unsigned int |
369 | nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb, | 368 | nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb, |
370 | const struct net_device *in, const struct net_device *out, | 369 | const struct nf_hook_state *state, |
371 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, | 370 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, |
372 | struct sk_buff *skb, | 371 | struct sk_buff *skb, |
373 | const struct net_device *in, | 372 | const struct nf_hook_state *state, |
374 | const struct net_device *out, | ||
375 | struct nf_conn *ct)) | 373 | struct nf_conn *ct)) |
376 | { | 374 | { |
377 | #ifdef CONFIG_XFRM | 375 | #ifdef CONFIG_XFRM |
@@ -386,7 +384,7 @@ nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
386 | ip_hdrlen(skb) < sizeof(struct iphdr)) | 384 | ip_hdrlen(skb) < sizeof(struct iphdr)) |
387 | return NF_ACCEPT; | 385 | return NF_ACCEPT; |
388 | 386 | ||
389 | ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain); | 387 | ret = nf_nat_ipv4_fn(ops, skb, state, do_chain); |
390 | #ifdef CONFIG_XFRM | 388 | #ifdef CONFIG_XFRM |
391 | if (ret != NF_DROP && ret != NF_STOLEN && | 389 | if (ret != NF_DROP && ret != NF_STOLEN && |
392 | !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && | 390 | !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && |
@@ -410,11 +408,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv4_out); | |||
410 | 408 | ||
411 | unsigned int | 409 | unsigned int |
412 | nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, | 410 | nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, |
413 | const struct net_device *in, const struct net_device *out, | 411 | const struct nf_hook_state *state, |
414 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, | 412 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, |
415 | struct sk_buff *skb, | 413 | struct sk_buff *skb, |
416 | const struct net_device *in, | 414 | const struct nf_hook_state *state, |
417 | const struct net_device *out, | ||
418 | struct nf_conn *ct)) | 415 | struct nf_conn *ct)) |
419 | { | 416 | { |
420 | const struct nf_conn *ct; | 417 | const struct nf_conn *ct; |
@@ -427,7 +424,7 @@ nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
427 | ip_hdrlen(skb) < sizeof(struct iphdr)) | 424 | ip_hdrlen(skb) < sizeof(struct iphdr)) |
428 | return NF_ACCEPT; | 425 | return NF_ACCEPT; |
429 | 426 | ||
430 | ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain); | 427 | ret = nf_nat_ipv4_fn(ops, skb, state, do_chain); |
431 | if (ret != NF_DROP && ret != NF_STOLEN && | 428 | if (ret != NF_DROP && ret != NF_STOLEN && |
432 | (ct = nf_ct_get(skb, &ctinfo)) != NULL) { | 429 | (ct = nf_ct_get(skb, &ctinfo)) != NULL) { |
433 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | 430 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); |
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c index 19412a4063fb..8412268bbad1 100644 --- a/net/ipv4/netfilter/nf_tables_arp.c +++ b/net/ipv4/netfilter/nf_tables_arp.c | |||
@@ -17,13 +17,11 @@ | |||
17 | static unsigned int | 17 | static unsigned int |
18 | nft_do_chain_arp(const struct nf_hook_ops *ops, | 18 | nft_do_chain_arp(const struct nf_hook_ops *ops, |
19 | struct sk_buff *skb, | 19 | struct sk_buff *skb, |
20 | const struct net_device *in, | 20 | const struct nf_hook_state *state) |
21 | const struct net_device *out, | ||
22 | int (*okfn)(struct sk_buff *)) | ||
23 | { | 21 | { |
24 | struct nft_pktinfo pkt; | 22 | struct nft_pktinfo pkt; |
25 | 23 | ||
26 | nft_set_pktinfo(&pkt, ops, skb, in, out); | 24 | nft_set_pktinfo(&pkt, ops, skb, state); |
27 | 25 | ||
28 | return nft_do_chain(&pkt, ops); | 26 | return nft_do_chain(&pkt, ops); |
29 | } | 27 | } |
diff --git a/net/ipv4/netfilter/nf_tables_ipv4.c b/net/ipv4/netfilter/nf_tables_ipv4.c index 6820c8c40842..aa180d3a69a5 100644 --- a/net/ipv4/netfilter/nf_tables_ipv4.c +++ b/net/ipv4/netfilter/nf_tables_ipv4.c | |||
@@ -20,22 +20,18 @@ | |||
20 | 20 | ||
21 | static unsigned int nft_do_chain_ipv4(const struct nf_hook_ops *ops, | 21 | static unsigned int nft_do_chain_ipv4(const struct nf_hook_ops *ops, |
22 | struct sk_buff *skb, | 22 | struct sk_buff *skb, |
23 | const struct net_device *in, | 23 | const struct nf_hook_state *state) |
24 | const struct net_device *out, | ||
25 | int (*okfn)(struct sk_buff *)) | ||
26 | { | 24 | { |
27 | struct nft_pktinfo pkt; | 25 | struct nft_pktinfo pkt; |
28 | 26 | ||
29 | nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out); | 27 | nft_set_pktinfo_ipv4(&pkt, ops, skb, state); |
30 | 28 | ||
31 | return nft_do_chain(&pkt, ops); | 29 | return nft_do_chain(&pkt, ops); |
32 | } | 30 | } |
33 | 31 | ||
34 | static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops, | 32 | static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops, |
35 | struct sk_buff *skb, | 33 | struct sk_buff *skb, |
36 | const struct net_device *in, | 34 | const struct nf_hook_state *state) |
37 | const struct net_device *out, | ||
38 | int (*okfn)(struct sk_buff *)) | ||
39 | { | 35 | { |
40 | if (unlikely(skb->len < sizeof(struct iphdr) || | 36 | if (unlikely(skb->len < sizeof(struct iphdr) || |
41 | ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) { | 37 | ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) { |
@@ -45,7 +41,7 @@ static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops, | |||
45 | return NF_ACCEPT; | 41 | return NF_ACCEPT; |
46 | } | 42 | } |
47 | 43 | ||
48 | return nft_do_chain_ipv4(ops, skb, in, out, okfn); | 44 | return nft_do_chain_ipv4(ops, skb, state); |
49 | } | 45 | } |
50 | 46 | ||
51 | struct nft_af_info nft_af_ipv4 __read_mostly = { | 47 | struct nft_af_info nft_af_ipv4 __read_mostly = { |
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c index df547bf50078..bf5c30ae14e4 100644 --- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c +++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c | |||
@@ -28,51 +28,42 @@ | |||
28 | 28 | ||
29 | static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops, | 29 | static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops, |
30 | struct sk_buff *skb, | 30 | struct sk_buff *skb, |
31 | const struct net_device *in, | 31 | const struct nf_hook_state *state, |
32 | const struct net_device *out, | ||
33 | struct nf_conn *ct) | 32 | struct nf_conn *ct) |
34 | { | 33 | { |
35 | struct nft_pktinfo pkt; | 34 | struct nft_pktinfo pkt; |
36 | 35 | ||
37 | nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out); | 36 | nft_set_pktinfo_ipv4(&pkt, ops, skb, state); |
38 | 37 | ||
39 | return nft_do_chain(&pkt, ops); | 38 | return nft_do_chain(&pkt, ops); |
40 | } | 39 | } |
41 | 40 | ||
42 | static unsigned int nft_nat_ipv4_fn(const struct nf_hook_ops *ops, | 41 | static unsigned int nft_nat_ipv4_fn(const struct nf_hook_ops *ops, |
43 | struct sk_buff *skb, | 42 | struct sk_buff *skb, |
44 | const struct net_device *in, | 43 | const struct nf_hook_state *state) |
45 | const struct net_device *out, | ||
46 | int (*okfn)(struct sk_buff *)) | ||
47 | { | 44 | { |
48 | return nf_nat_ipv4_fn(ops, skb, in, out, nft_nat_do_chain); | 45 | return nf_nat_ipv4_fn(ops, skb, state, nft_nat_do_chain); |
49 | } | 46 | } |
50 | 47 | ||
51 | static unsigned int nft_nat_ipv4_in(const struct nf_hook_ops *ops, | 48 | static unsigned int nft_nat_ipv4_in(const struct nf_hook_ops *ops, |
52 | struct sk_buff *skb, | 49 | struct sk_buff *skb, |
53 | const struct net_device *in, | 50 | const struct nf_hook_state *state) |
54 | const struct net_device *out, | ||
55 | int (*okfn)(struct sk_buff *)) | ||
56 | { | 51 | { |
57 | return nf_nat_ipv4_in(ops, skb, in, out, nft_nat_do_chain); | 52 | return nf_nat_ipv4_in(ops, skb, state, nft_nat_do_chain); |
58 | } | 53 | } |
59 | 54 | ||
60 | static unsigned int nft_nat_ipv4_out(const struct nf_hook_ops *ops, | 55 | static unsigned int nft_nat_ipv4_out(const struct nf_hook_ops *ops, |
61 | struct sk_buff *skb, | 56 | struct sk_buff *skb, |
62 | const struct net_device *in, | 57 | const struct nf_hook_state *state) |
63 | const struct net_device *out, | ||
64 | int (*okfn)(struct sk_buff *)) | ||
65 | { | 58 | { |
66 | return nf_nat_ipv4_out(ops, skb, in, out, nft_nat_do_chain); | 59 | return nf_nat_ipv4_out(ops, skb, state, nft_nat_do_chain); |
67 | } | 60 | } |
68 | 61 | ||
69 | static unsigned int nft_nat_ipv4_local_fn(const struct nf_hook_ops *ops, | 62 | static unsigned int nft_nat_ipv4_local_fn(const struct nf_hook_ops *ops, |
70 | struct sk_buff *skb, | 63 | struct sk_buff *skb, |
71 | const struct net_device *in, | 64 | const struct nf_hook_state *state) |
72 | const struct net_device *out, | ||
73 | int (*okfn)(struct sk_buff *)) | ||
74 | { | 65 | { |
75 | return nf_nat_ipv4_local_fn(ops, skb, in, out, nft_nat_do_chain); | 66 | return nf_nat_ipv4_local_fn(ops, skb, state, nft_nat_do_chain); |
76 | } | 67 | } |
77 | 68 | ||
78 | static const struct nf_chain_type nft_chain_nat_ipv4 = { | 69 | static const struct nf_chain_type nft_chain_nat_ipv4 = { |
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c index 125b66766c0a..e335b0afdaf3 100644 --- a/net/ipv4/netfilter/nft_chain_route_ipv4.c +++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c | |||
@@ -23,9 +23,7 @@ | |||
23 | 23 | ||
24 | static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops, | 24 | static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops, |
25 | struct sk_buff *skb, | 25 | struct sk_buff *skb, |
26 | const struct net_device *in, | 26 | const struct nf_hook_state *state) |
27 | const struct net_device *out, | ||
28 | int (*okfn)(struct sk_buff *)) | ||
29 | { | 27 | { |
30 | unsigned int ret; | 28 | unsigned int ret; |
31 | struct nft_pktinfo pkt; | 29 | struct nft_pktinfo pkt; |
@@ -39,7 +37,7 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops, | |||
39 | ip_hdrlen(skb) < sizeof(struct iphdr)) | 37 | ip_hdrlen(skb) < sizeof(struct iphdr)) |
40 | return NF_ACCEPT; | 38 | return NF_ACCEPT; |
41 | 39 | ||
42 | nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out); | 40 | nft_set_pktinfo_ipv4(&pkt, ops, skb, state); |
43 | 41 | ||
44 | mark = skb->mark; | 42 | mark = skb->mark; |
45 | iph = ip_hdr(skb); | 43 | iph = ip_hdr(skb); |
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 344e7cdfb8d4..a93f260cf24c 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
@@ -516,7 +516,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info) | |||
516 | ntohs(icmph->un.echo.sequence)); | 516 | ntohs(icmph->un.echo.sequence)); |
517 | 517 | ||
518 | sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); | 518 | sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); |
519 | if (sk == NULL) { | 519 | if (!sk) { |
520 | pr_debug("no socket, dropping\n"); | 520 | pr_debug("no socket, dropping\n"); |
521 | return; /* No socket for error */ | 521 | return; /* No socket for error */ |
522 | } | 522 | } |
@@ -971,7 +971,7 @@ bool ping_rcv(struct sk_buff *skb) | |||
971 | skb_push(skb, skb->data - (u8 *)icmph); | 971 | skb_push(skb, skb->data - (u8 *)icmph); |
972 | 972 | ||
973 | sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); | 973 | sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); |
974 | if (sk != NULL) { | 974 | if (sk) { |
975 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); | 975 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); |
976 | 976 | ||
977 | pr_debug("rcv on socket %p\n", sk); | 977 | pr_debug("rcv on socket %p\n", sk); |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 56946f47d446..c0bb648fb2f9 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -293,7 +293,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info) | |||
293 | 293 | ||
294 | read_lock(&raw_v4_hashinfo.lock); | 294 | read_lock(&raw_v4_hashinfo.lock); |
295 | raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]); | 295 | raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]); |
296 | if (raw_sk != NULL) { | 296 | if (raw_sk) { |
297 | iph = (const struct iphdr *)skb->data; | 297 | iph = (const struct iphdr *)skb->data; |
298 | net = dev_net(skb->dev); | 298 | net = dev_net(skb->dev); |
299 | 299 | ||
@@ -363,7 +363,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, | |||
363 | skb = sock_alloc_send_skb(sk, | 363 | skb = sock_alloc_send_skb(sk, |
364 | length + hlen + tlen + 15, | 364 | length + hlen + tlen + 15, |
365 | flags & MSG_DONTWAIT, &err); | 365 | flags & MSG_DONTWAIT, &err); |
366 | if (skb == NULL) | 366 | if (!skb) |
367 | goto error; | 367 | goto error; |
368 | skb_reserve(skb, hlen); | 368 | skb_reserve(skb, hlen); |
369 | 369 | ||
@@ -412,8 +412,8 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, | |||
412 | icmp_out_count(net, ((struct icmphdr *) | 412 | icmp_out_count(net, ((struct icmphdr *) |
413 | skb_transport_header(skb))->type); | 413 | skb_transport_header(skb))->type); |
414 | 414 | ||
415 | err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL, | 415 | err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb, |
416 | rt->dst.dev, dst_output); | 416 | NULL, rt->dst.dev, dst_output_sk); |
417 | if (err > 0) | 417 | if (err > 0) |
418 | err = net_xmit_errno(err); | 418 | err = net_xmit_errno(err); |
419 | if (err) | 419 | if (err) |
@@ -872,7 +872,7 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
872 | 872 | ||
873 | spin_lock_bh(&sk->sk_receive_queue.lock); | 873 | spin_lock_bh(&sk->sk_receive_queue.lock); |
874 | skb = skb_peek(&sk->sk_receive_queue); | 874 | skb = skb_peek(&sk->sk_receive_queue); |
875 | if (skb != NULL) | 875 | if (skb) |
876 | amount = skb->len; | 876 | amount = skb->len; |
877 | spin_unlock_bh(&sk->sk_receive_queue.lock); | 877 | spin_unlock_bh(&sk->sk_receive_queue.lock); |
878 | return put_user(amount, (int __user *)arg); | 878 | return put_user(amount, (int __user *)arg); |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 652b92ebd7ba..a78540f28276 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1056,7 +1056,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) | |||
1056 | __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); | 1056 | __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); |
1057 | 1057 | ||
1058 | rt = (struct rtable *)odst; | 1058 | rt = (struct rtable *)odst; |
1059 | if (odst->obsolete && odst->ops->check(odst, 0) == NULL) { | 1059 | if (odst->obsolete && !odst->ops->check(odst, 0)) { |
1060 | rt = ip_route_output_flow(sock_net(sk), &fl4, sk); | 1060 | rt = ip_route_output_flow(sock_net(sk), &fl4, sk); |
1061 | if (IS_ERR(rt)) | 1061 | if (IS_ERR(rt)) |
1062 | goto out; | 1062 | goto out; |
@@ -1450,7 +1450,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1450 | 1450 | ||
1451 | /* Primary sanity checks. */ | 1451 | /* Primary sanity checks. */ |
1452 | 1452 | ||
1453 | if (in_dev == NULL) | 1453 | if (!in_dev) |
1454 | return -EINVAL; | 1454 | return -EINVAL; |
1455 | 1455 | ||
1456 | if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || | 1456 | if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || |
@@ -1553,7 +1553,7 @@ static int __mkroute_input(struct sk_buff *skb, | |||
1553 | 1553 | ||
1554 | /* get a working reference to the output device */ | 1554 | /* get a working reference to the output device */ |
1555 | out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); | 1555 | out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); |
1556 | if (out_dev == NULL) { | 1556 | if (!out_dev) { |
1557 | net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n"); | 1557 | net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n"); |
1558 | return -EINVAL; | 1558 | return -EINVAL; |
1559 | } | 1559 | } |
@@ -1591,7 +1591,7 @@ static int __mkroute_input(struct sk_buff *skb, | |||
1591 | 1591 | ||
1592 | fnhe = find_exception(&FIB_RES_NH(*res), daddr); | 1592 | fnhe = find_exception(&FIB_RES_NH(*res), daddr); |
1593 | if (do_cache) { | 1593 | if (do_cache) { |
1594 | if (fnhe != NULL) | 1594 | if (fnhe) |
1595 | rth = rcu_dereference(fnhe->fnhe_rth_input); | 1595 | rth = rcu_dereference(fnhe->fnhe_rth_input); |
1596 | else | 1596 | else |
1597 | rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); | 1597 | rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); |
@@ -2054,7 +2054,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) | |||
2054 | ipv4_is_lbcast(fl4->daddr))) { | 2054 | ipv4_is_lbcast(fl4->daddr))) { |
2055 | /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ | 2055 | /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ |
2056 | dev_out = __ip_dev_find(net, fl4->saddr, false); | 2056 | dev_out = __ip_dev_find(net, fl4->saddr, false); |
2057 | if (dev_out == NULL) | 2057 | if (!dev_out) |
2058 | goto out; | 2058 | goto out; |
2059 | 2059 | ||
2060 | /* Special hack: user can direct multicasts | 2060 | /* Special hack: user can direct multicasts |
@@ -2087,7 +2087,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) | |||
2087 | if (fl4->flowi4_oif) { | 2087 | if (fl4->flowi4_oif) { |
2088 | dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif); | 2088 | dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif); |
2089 | rth = ERR_PTR(-ENODEV); | 2089 | rth = ERR_PTR(-ENODEV); |
2090 | if (dev_out == NULL) | 2090 | if (!dev_out) |
2091 | goto out; | 2091 | goto out; |
2092 | 2092 | ||
2093 | /* RACE: Check return value of inet_select_addr instead. */ | 2093 | /* RACE: Check return value of inet_select_addr instead. */ |
@@ -2299,7 +2299,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, | |||
2299 | u32 metrics[RTAX_MAX]; | 2299 | u32 metrics[RTAX_MAX]; |
2300 | 2300 | ||
2301 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags); | 2301 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags); |
2302 | if (nlh == NULL) | 2302 | if (!nlh) |
2303 | return -EMSGSIZE; | 2303 | return -EMSGSIZE; |
2304 | 2304 | ||
2305 | r = nlmsg_data(nlh); | 2305 | r = nlmsg_data(nlh); |
@@ -2421,7 +2421,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) | |||
2421 | rtm = nlmsg_data(nlh); | 2421 | rtm = nlmsg_data(nlh); |
2422 | 2422 | ||
2423 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); | 2423 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
2424 | if (skb == NULL) { | 2424 | if (!skb) { |
2425 | err = -ENOBUFS; | 2425 | err = -ENOBUFS; |
2426 | goto errout; | 2426 | goto errout; |
2427 | } | 2427 | } |
@@ -2452,7 +2452,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) | |||
2452 | struct net_device *dev; | 2452 | struct net_device *dev; |
2453 | 2453 | ||
2454 | dev = __dev_get_by_index(net, iif); | 2454 | dev = __dev_get_by_index(net, iif); |
2455 | if (dev == NULL) { | 2455 | if (!dev) { |
2456 | err = -ENODEV; | 2456 | err = -ENODEV; |
2457 | goto errout_free; | 2457 | goto errout_free; |
2458 | } | 2458 | } |
@@ -2651,7 +2651,7 @@ static __net_init int sysctl_route_net_init(struct net *net) | |||
2651 | tbl = ipv4_route_flush_table; | 2651 | tbl = ipv4_route_flush_table; |
2652 | if (!net_eq(net, &init_net)) { | 2652 | if (!net_eq(net, &init_net)) { |
2653 | tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); | 2653 | tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); |
2654 | if (tbl == NULL) | 2654 | if (!tbl) |
2655 | goto err_dup; | 2655 | goto err_dup; |
2656 | 2656 | ||
2657 | /* Don't export sysctls to unprivileged users */ | 2657 | /* Don't export sysctls to unprivileged users */ |
@@ -2661,7 +2661,7 @@ static __net_init int sysctl_route_net_init(struct net *net) | |||
2661 | tbl[0].extra1 = net; | 2661 | tbl[0].extra1 = net; |
2662 | 2662 | ||
2663 | net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl); | 2663 | net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl); |
2664 | if (net->ipv4.route_hdr == NULL) | 2664 | if (!net->ipv4.route_hdr) |
2665 | goto err_reg; | 2665 | goto err_reg; |
2666 | return 0; | 2666 | return 0; |
2667 | 2667 | ||
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index fdf899163d44..c3852a7ff3c7 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -909,7 +909,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) | |||
909 | int i; | 909 | int i; |
910 | 910 | ||
911 | table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL); | 911 | table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL); |
912 | if (table == NULL) | 912 | if (!table) |
913 | goto err_alloc; | 913 | goto err_alloc; |
914 | 914 | ||
915 | /* Update the variables to point into the current struct net */ | 915 | /* Update the variables to point into the current struct net */ |
@@ -918,7 +918,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) | |||
918 | } | 918 | } |
919 | 919 | ||
920 | net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table); | 920 | net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table); |
921 | if (net->ipv4.ipv4_hdr == NULL) | 921 | if (!net->ipv4.ipv4_hdr) |
922 | goto err_reg; | 922 | goto err_reg; |
923 | 923 | ||
924 | net->ipv4.sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL); | 924 | net->ipv4.sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL); |
@@ -956,7 +956,7 @@ static __init int sysctl_ipv4_init(void) | |||
956 | struct ctl_table_header *hdr; | 956 | struct ctl_table_header *hdr; |
957 | 957 | ||
958 | hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table); | 958 | hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table); |
959 | if (hdr == NULL) | 959 | if (!hdr) |
960 | return -ENOMEM; | 960 | return -ENOMEM; |
961 | 961 | ||
962 | if (register_pernet_subsys(&ipv4_sysctl_ops)) { | 962 | if (register_pernet_subsys(&ipv4_sysctl_ops)) { |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index dbd51cefaf02..094a6822c71d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -496,7 +496,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
496 | 496 | ||
497 | /* Connected or passive Fast Open socket? */ | 497 | /* Connected or passive Fast Open socket? */ |
498 | if (sk->sk_state != TCP_SYN_SENT && | 498 | if (sk->sk_state != TCP_SYN_SENT && |
499 | (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) { | 499 | (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) { |
500 | int target = sock_rcvlowat(sk, 0, INT_MAX); | 500 | int target = sock_rcvlowat(sk, 0, INT_MAX); |
501 | 501 | ||
502 | if (tp->urg_seq == tp->copied_seq && | 502 | if (tp->urg_seq == tp->copied_seq && |
@@ -1028,7 +1028,7 @@ static inline int select_size(const struct sock *sk, bool sg) | |||
1028 | 1028 | ||
1029 | void tcp_free_fastopen_req(struct tcp_sock *tp) | 1029 | void tcp_free_fastopen_req(struct tcp_sock *tp) |
1030 | { | 1030 | { |
1031 | if (tp->fastopen_req != NULL) { | 1031 | if (tp->fastopen_req) { |
1032 | kfree(tp->fastopen_req); | 1032 | kfree(tp->fastopen_req); |
1033 | tp->fastopen_req = NULL; | 1033 | tp->fastopen_req = NULL; |
1034 | } | 1034 | } |
@@ -1042,12 +1042,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, | |||
1042 | 1042 | ||
1043 | if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE)) | 1043 | if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE)) |
1044 | return -EOPNOTSUPP; | 1044 | return -EOPNOTSUPP; |
1045 | if (tp->fastopen_req != NULL) | 1045 | if (tp->fastopen_req) |
1046 | return -EALREADY; /* Another Fast Open is in progress */ | 1046 | return -EALREADY; /* Another Fast Open is in progress */ |
1047 | 1047 | ||
1048 | tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request), | 1048 | tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request), |
1049 | sk->sk_allocation); | 1049 | sk->sk_allocation); |
1050 | if (unlikely(tp->fastopen_req == NULL)) | 1050 | if (unlikely(!tp->fastopen_req)) |
1051 | return -ENOBUFS; | 1051 | return -ENOBUFS; |
1052 | tp->fastopen_req->data = msg; | 1052 | tp->fastopen_req->data = msg; |
1053 | tp->fastopen_req->size = size; | 1053 | tp->fastopen_req->size = size; |
@@ -2138,7 +2138,7 @@ adjudge_to_death: | |||
2138 | * aborted (e.g., closed with unread data) before 3WHS | 2138 | * aborted (e.g., closed with unread data) before 3WHS |
2139 | * finishes. | 2139 | * finishes. |
2140 | */ | 2140 | */ |
2141 | if (req != NULL) | 2141 | if (req) |
2142 | reqsk_fastopen_remove(sk, req, false); | 2142 | reqsk_fastopen_remove(sk, req, false); |
2143 | inet_csk_destroy_sock(sk); | 2143 | inet_csk_destroy_sock(sk); |
2144 | } | 2144 | } |
@@ -2776,7 +2776,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level, | |||
2776 | break; | 2776 | break; |
2777 | 2777 | ||
2778 | case TCP_FASTOPEN: | 2778 | case TCP_FASTOPEN: |
2779 | if (icsk->icsk_accept_queue.fastopenq != NULL) | 2779 | if (icsk->icsk_accept_queue.fastopenq) |
2780 | val = icsk->icsk_accept_queue.fastopenq->max_qlen; | 2780 | val = icsk->icsk_accept_queue.fastopenq->max_qlen; |
2781 | else | 2781 | else |
2782 | val = 0; | 2782 | val = 0; |
@@ -2960,7 +2960,7 @@ void tcp_done(struct sock *sk) | |||
2960 | 2960 | ||
2961 | tcp_set_state(sk, TCP_CLOSE); | 2961 | tcp_set_state(sk, TCP_CLOSE); |
2962 | tcp_clear_xmit_timers(sk); | 2962 | tcp_clear_xmit_timers(sk); |
2963 | if (req != NULL) | 2963 | if (req) |
2964 | reqsk_fastopen_remove(sk, req, false); | 2964 | reqsk_fastopen_remove(sk, req, false); |
2965 | 2965 | ||
2966 | sk->sk_shutdown = SHUTDOWN_MASK; | 2966 | sk->sk_shutdown = SHUTDOWN_MASK; |
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index 86dc119a3815..79b34a0f4a4a 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c | |||
@@ -29,7 +29,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, | |||
29 | r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); | 29 | r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); |
30 | r->idiag_wqueue = tp->write_seq - tp->snd_una; | 30 | r->idiag_wqueue = tp->write_seq - tp->snd_una; |
31 | } | 31 | } |
32 | if (info != NULL) | 32 | if (info) |
33 | tcp_get_info(sk, info); | 33 | tcp_get_info(sk, info); |
34 | } | 34 | } |
35 | 35 | ||
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 2eb887ec0ce3..e3d87aca6be8 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c | |||
@@ -141,7 +141,7 @@ static bool tcp_fastopen_create_child(struct sock *sk, | |||
141 | req->sk = NULL; | 141 | req->sk = NULL; |
142 | 142 | ||
143 | child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); | 143 | child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); |
144 | if (child == NULL) | 144 | if (!child) |
145 | return false; | 145 | return false; |
146 | 146 | ||
147 | spin_lock(&queue->fastopenq->lock); | 147 | spin_lock(&queue->fastopenq->lock); |
@@ -214,7 +214,7 @@ static bool tcp_fastopen_create_child(struct sock *sk, | |||
214 | sk->sk_data_ready(sk); | 214 | sk->sk_data_ready(sk); |
215 | bh_unlock_sock(child); | 215 | bh_unlock_sock(child); |
216 | sock_put(child); | 216 | sock_put(child); |
217 | WARN_ON(req->sk == NULL); | 217 | WARN_ON(!req->sk); |
218 | return true; | 218 | return true; |
219 | } | 219 | } |
220 | 220 | ||
@@ -233,7 +233,7 @@ static bool tcp_fastopen_queue_check(struct sock *sk) | |||
233 | * temporarily vs a server not supporting Fast Open at all. | 233 | * temporarily vs a server not supporting Fast Open at all. |
234 | */ | 234 | */ |
235 | fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq; | 235 | fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq; |
236 | if (fastopenq == NULL || fastopenq->max_qlen == 0) | 236 | if (!fastopenq || fastopenq->max_qlen == 0) |
237 | return false; | 237 | return false; |
238 | 238 | ||
239 | if (fastopenq->qlen >= fastopenq->max_qlen) { | 239 | if (fastopenq->qlen >= fastopenq->max_qlen) { |
@@ -303,6 +303,7 @@ fastopen: | |||
303 | } else if (foc->len > 0) /* Client presents an invalid cookie */ | 303 | } else if (foc->len > 0) /* Client presents an invalid cookie */ |
304 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL); | 304 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL); |
305 | 305 | ||
306 | valid_foc.exp = foc->exp; | ||
306 | *foc = valid_foc; | 307 | *foc = valid_foc; |
307 | return false; | 308 | return false; |
308 | } | 309 | } |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 18b80e8bc533..031cf72cd05c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -866,7 +866,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric, | |||
866 | /* This must be called before lost_out is incremented */ | 866 | /* This must be called before lost_out is incremented */ |
867 | static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) | 867 | static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) |
868 | { | 868 | { |
869 | if ((tp->retransmit_skb_hint == NULL) || | 869 | if (!tp->retransmit_skb_hint || |
870 | before(TCP_SKB_CB(skb)->seq, | 870 | before(TCP_SKB_CB(skb)->seq, |
871 | TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) | 871 | TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) |
872 | tp->retransmit_skb_hint = skb; | 872 | tp->retransmit_skb_hint = skb; |
@@ -1256,7 +1256,7 @@ static u8 tcp_sacktag_one(struct sock *sk, | |||
1256 | fack_count += pcount; | 1256 | fack_count += pcount; |
1257 | 1257 | ||
1258 | /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ | 1258 | /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ |
1259 | if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) && | 1259 | if (!tcp_is_fack(tp) && tp->lost_skb_hint && |
1260 | before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) | 1260 | before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) |
1261 | tp->lost_cnt_hint += pcount; | 1261 | tp->lost_cnt_hint += pcount; |
1262 | 1262 | ||
@@ -1535,7 +1535,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, | |||
1535 | if (!before(TCP_SKB_CB(skb)->seq, end_seq)) | 1535 | if (!before(TCP_SKB_CB(skb)->seq, end_seq)) |
1536 | break; | 1536 | break; |
1537 | 1537 | ||
1538 | if ((next_dup != NULL) && | 1538 | if (next_dup && |
1539 | before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) { | 1539 | before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) { |
1540 | in_sack = tcp_match_skb_to_sack(sk, skb, | 1540 | in_sack = tcp_match_skb_to_sack(sk, skb, |
1541 | next_dup->start_seq, | 1541 | next_dup->start_seq, |
@@ -1551,7 +1551,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, | |||
1551 | if (in_sack <= 0) { | 1551 | if (in_sack <= 0) { |
1552 | tmp = tcp_shift_skb_data(sk, skb, state, | 1552 | tmp = tcp_shift_skb_data(sk, skb, state, |
1553 | start_seq, end_seq, dup_sack); | 1553 | start_seq, end_seq, dup_sack); |
1554 | if (tmp != NULL) { | 1554 | if (tmp) { |
1555 | if (tmp != skb) { | 1555 | if (tmp != skb) { |
1556 | skb = tmp; | 1556 | skb = tmp; |
1557 | continue; | 1557 | continue; |
@@ -1614,7 +1614,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, | |||
1614 | struct tcp_sacktag_state *state, | 1614 | struct tcp_sacktag_state *state, |
1615 | u32 skip_to_seq) | 1615 | u32 skip_to_seq) |
1616 | { | 1616 | { |
1617 | if (next_dup == NULL) | 1617 | if (!next_dup) |
1618 | return skb; | 1618 | return skb; |
1619 | 1619 | ||
1620 | if (before(next_dup->start_seq, skip_to_seq)) { | 1620 | if (before(next_dup->start_seq, skip_to_seq)) { |
@@ -1783,7 +1783,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, | |||
1783 | if (tcp_highest_sack_seq(tp) == cache->end_seq) { | 1783 | if (tcp_highest_sack_seq(tp) == cache->end_seq) { |
1784 | /* ...but better entrypoint exists! */ | 1784 | /* ...but better entrypoint exists! */ |
1785 | skb = tcp_highest_sack(sk); | 1785 | skb = tcp_highest_sack(sk); |
1786 | if (skb == NULL) | 1786 | if (!skb) |
1787 | break; | 1787 | break; |
1788 | state.fack_count = tp->fackets_out; | 1788 | state.fack_count = tp->fackets_out; |
1789 | cache++; | 1789 | cache++; |
@@ -1798,7 +1798,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, | |||
1798 | 1798 | ||
1799 | if (!before(start_seq, tcp_highest_sack_seq(tp))) { | 1799 | if (!before(start_seq, tcp_highest_sack_seq(tp))) { |
1800 | skb = tcp_highest_sack(sk); | 1800 | skb = tcp_highest_sack(sk); |
1801 | if (skb == NULL) | 1801 | if (!skb) |
1802 | break; | 1802 | break; |
1803 | state.fack_count = tp->fackets_out; | 1803 | state.fack_count = tp->fackets_out; |
1804 | } | 1804 | } |
@@ -3105,10 +3105,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, | |||
3105 | if (!first_ackt.v64) | 3105 | if (!first_ackt.v64) |
3106 | first_ackt = last_ackt; | 3106 | first_ackt = last_ackt; |
3107 | 3107 | ||
3108 | if (!(sacked & TCPCB_SACKED_ACKED)) | 3108 | if (!(sacked & TCPCB_SACKED_ACKED)) { |
3109 | reord = min(pkts_acked, reord); | 3109 | reord = min(pkts_acked, reord); |
3110 | if (!after(scb->end_seq, tp->high_seq)) | 3110 | if (!after(scb->end_seq, tp->high_seq)) |
3111 | flag |= FLAG_ORIG_SACK_ACKED; | 3111 | flag |= FLAG_ORIG_SACK_ACKED; |
3112 | } | ||
3112 | } | 3113 | } |
3113 | 3114 | ||
3114 | if (sacked & TCPCB_SACKED_ACKED) | 3115 | if (sacked & TCPCB_SACKED_ACKED) |
@@ -3602,6 +3603,23 @@ old_ack: | |||
3602 | return 0; | 3603 | return 0; |
3603 | } | 3604 | } |
3604 | 3605 | ||
3606 | static void tcp_parse_fastopen_option(int len, const unsigned char *cookie, | ||
3607 | bool syn, struct tcp_fastopen_cookie *foc, | ||
3608 | bool exp_opt) | ||
3609 | { | ||
3610 | /* Valid only in SYN or SYN-ACK with an even length. */ | ||
3611 | if (!foc || !syn || len < 0 || (len & 1)) | ||
3612 | return; | ||
3613 | |||
3614 | if (len >= TCP_FASTOPEN_COOKIE_MIN && | ||
3615 | len <= TCP_FASTOPEN_COOKIE_MAX) | ||
3616 | memcpy(foc->val, cookie, len); | ||
3617 | else if (len != 0) | ||
3618 | len = -1; | ||
3619 | foc->len = len; | ||
3620 | foc->exp = exp_opt; | ||
3621 | } | ||
3622 | |||
3605 | /* Look for tcp options. Normally only called on SYN and SYNACK packets. | 3623 | /* Look for tcp options. Normally only called on SYN and SYNACK packets. |
3606 | * But, this can also be called on packets in the established flow when | 3624 | * But, this can also be called on packets in the established flow when |
3607 | * the fast version below fails. | 3625 | * the fast version below fails. |
@@ -3691,21 +3709,22 @@ void tcp_parse_options(const struct sk_buff *skb, | |||
3691 | */ | 3709 | */ |
3692 | break; | 3710 | break; |
3693 | #endif | 3711 | #endif |
3712 | case TCPOPT_FASTOPEN: | ||
3713 | tcp_parse_fastopen_option( | ||
3714 | opsize - TCPOLEN_FASTOPEN_BASE, | ||
3715 | ptr, th->syn, foc, false); | ||
3716 | break; | ||
3717 | |||
3694 | case TCPOPT_EXP: | 3718 | case TCPOPT_EXP: |
3695 | /* Fast Open option shares code 254 using a | 3719 | /* Fast Open option shares code 254 using a |
3696 | * 16 bits magic number. It's valid only in | 3720 | * 16 bits magic number. |
3697 | * SYN or SYN-ACK with an even size. | ||
3698 | */ | 3721 | */ |
3699 | if (opsize < TCPOLEN_EXP_FASTOPEN_BASE || | 3722 | if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE && |
3700 | get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC || | 3723 | get_unaligned_be16(ptr) == |
3701 | foc == NULL || !th->syn || (opsize & 1)) | 3724 | TCPOPT_FASTOPEN_MAGIC) |
3702 | break; | 3725 | tcp_parse_fastopen_option(opsize - |
3703 | foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE; | 3726 | TCPOLEN_EXP_FASTOPEN_BASE, |
3704 | if (foc->len >= TCP_FASTOPEN_COOKIE_MIN && | 3727 | ptr + 2, th->syn, foc, true); |
3705 | foc->len <= TCP_FASTOPEN_COOKIE_MAX) | ||
3706 | memcpy(foc->val, ptr + 2, foc->len); | ||
3707 | else if (foc->len != 0) | ||
3708 | foc->len = -1; | ||
3709 | break; | 3728 | break; |
3710 | 3729 | ||
3711 | } | 3730 | } |
@@ -4669,7 +4688,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk) | |||
4669 | struct sk_buff *head; | 4688 | struct sk_buff *head; |
4670 | u32 start, end; | 4689 | u32 start, end; |
4671 | 4690 | ||
4672 | if (skb == NULL) | 4691 | if (!skb) |
4673 | return; | 4692 | return; |
4674 | 4693 | ||
4675 | start = TCP_SKB_CB(skb)->seq; | 4694 | start = TCP_SKB_CB(skb)->seq; |
@@ -5124,7 +5143,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
5124 | { | 5143 | { |
5125 | struct tcp_sock *tp = tcp_sk(sk); | 5144 | struct tcp_sock *tp = tcp_sk(sk); |
5126 | 5145 | ||
5127 | if (unlikely(sk->sk_rx_dst == NULL)) | 5146 | if (unlikely(!sk->sk_rx_dst)) |
5128 | inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); | 5147 | inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); |
5129 | /* | 5148 | /* |
5130 | * Header prediction. | 5149 | * Header prediction. |
@@ -5321,7 +5340,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) | |||
5321 | 5340 | ||
5322 | tcp_set_state(sk, TCP_ESTABLISHED); | 5341 | tcp_set_state(sk, TCP_ESTABLISHED); |
5323 | 5342 | ||
5324 | if (skb != NULL) { | 5343 | if (skb) { |
5325 | icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); | 5344 | icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); |
5326 | security_inet_conn_established(sk, skb); | 5345 | security_inet_conn_established(sk, skb); |
5327 | } | 5346 | } |
@@ -5359,8 +5378,8 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, | |||
5359 | { | 5378 | { |
5360 | struct tcp_sock *tp = tcp_sk(sk); | 5379 | struct tcp_sock *tp = tcp_sk(sk); |
5361 | struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL; | 5380 | struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL; |
5362 | u16 mss = tp->rx_opt.mss_clamp; | 5381 | u16 mss = tp->rx_opt.mss_clamp, try_exp = 0; |
5363 | bool syn_drop; | 5382 | bool syn_drop = false; |
5364 | 5383 | ||
5365 | if (mss == tp->rx_opt.user_mss) { | 5384 | if (mss == tp->rx_opt.user_mss) { |
5366 | struct tcp_options_received opt; | 5385 | struct tcp_options_received opt; |
@@ -5372,16 +5391,25 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, | |||
5372 | mss = opt.mss_clamp; | 5391 | mss = opt.mss_clamp; |
5373 | } | 5392 | } |
5374 | 5393 | ||
5375 | if (!tp->syn_fastopen) /* Ignore an unsolicited cookie */ | 5394 | if (!tp->syn_fastopen) { |
5395 | /* Ignore an unsolicited cookie */ | ||
5376 | cookie->len = -1; | 5396 | cookie->len = -1; |
5397 | } else if (tp->total_retrans) { | ||
5398 | /* SYN timed out and the SYN-ACK neither has a cookie nor | ||
5399 | * acknowledges data. Presumably the remote received only | ||
5400 | * the retransmitted (regular) SYNs: either the original | ||
5401 | * SYN-data or the corresponding SYN-ACK was dropped. | ||
5402 | */ | ||
5403 | syn_drop = (cookie->len < 0 && data); | ||
5404 | } else if (cookie->len < 0 && !tp->syn_data) { | ||
5405 | /* We requested a cookie but didn't get it. If we did not use | ||
5406 | * the (old) exp opt format then try so next time (try_exp=1). | ||
5407 | * Otherwise we go back to use the RFC7413 opt (try_exp=2). | ||
5408 | */ | ||
5409 | try_exp = tp->syn_fastopen_exp ? 2 : 1; | ||
5410 | } | ||
5377 | 5411 | ||
5378 | /* The SYN-ACK neither has cookie nor acknowledges the data. Presumably | 5412 | tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp); |
5379 | * the remote receives only the retransmitted (regular) SYNs: either | ||
5380 | * the original SYN-data or the corresponding SYN-ACK is lost. | ||
5381 | */ | ||
5382 | syn_drop = (cookie->len <= 0 && data && tp->total_retrans); | ||
5383 | |||
5384 | tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); | ||
5385 | 5413 | ||
5386 | if (data) { /* Retransmit unacked data in SYN */ | 5414 | if (data) { /* Retransmit unacked data in SYN */ |
5387 | tcp_for_write_queue_from(data, sk) { | 5415 | tcp_for_write_queue_from(data, sk) { |
@@ -5690,11 +5718,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5690 | } | 5718 | } |
5691 | 5719 | ||
5692 | req = tp->fastopen_rsk; | 5720 | req = tp->fastopen_rsk; |
5693 | if (req != NULL) { | 5721 | if (req) { |
5694 | WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && | 5722 | WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && |
5695 | sk->sk_state != TCP_FIN_WAIT1); | 5723 | sk->sk_state != TCP_FIN_WAIT1); |
5696 | 5724 | ||
5697 | if (tcp_check_req(sk, skb, req, true) == NULL) | 5725 | if (!tcp_check_req(sk, skb, req, true)) |
5698 | goto discard; | 5726 | goto discard; |
5699 | } | 5727 | } |
5700 | 5728 | ||
@@ -5780,7 +5808,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5780 | * ACK we have received, this would have acknowledged | 5808 | * ACK we have received, this would have acknowledged |
5781 | * our SYNACK so stop the SYNACK timer. | 5809 | * our SYNACK so stop the SYNACK timer. |
5782 | */ | 5810 | */ |
5783 | if (req != NULL) { | 5811 | if (req) { |
5784 | /* Return RST if ack_seq is invalid. | 5812 | /* Return RST if ack_seq is invalid. |
5785 | * Note that RFC793 only says to generate a | 5813 | * Note that RFC793 only says to generate a |
5786 | * DUPACK for it but for TCP Fast Open it seems | 5814 | * DUPACK for it but for TCP Fast Open it seems |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5aababa20a21..560f9571f7c4 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -122,7 +122,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) | |||
122 | and use initial timestamp retrieved from peer table. | 122 | and use initial timestamp retrieved from peer table. |
123 | */ | 123 | */ |
124 | if (tcptw->tw_ts_recent_stamp && | 124 | if (tcptw->tw_ts_recent_stamp && |
125 | (twp == NULL || (sysctl_tcp_tw_reuse && | 125 | (!twp || (sysctl_tcp_tw_reuse && |
126 | get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { | 126 | get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { |
127 | tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; | 127 | tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; |
128 | if (tp->write_seq == 0) | 128 | if (tp->write_seq == 0) |
@@ -494,7 +494,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
494 | /* Only in fast or simultaneous open. If a fast open socket is | 494 | /* Only in fast or simultaneous open. If a fast open socket is |
495 | * is already accepted it is treated as a connected one below. | 495 | * is already accepted it is treated as a connected one below. |
496 | */ | 496 | */ |
497 | if (fastopen && fastopen->sk == NULL) | 497 | if (fastopen && !fastopen->sk) |
498 | break; | 498 | break; |
499 | 499 | ||
500 | if (!sock_owned_by_user(sk)) { | 500 | if (!sock_owned_by_user(sk)) { |
@@ -1305,7 +1305,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1305 | /* Copy over the MD5 key from the original socket */ | 1305 | /* Copy over the MD5 key from the original socket */ |
1306 | key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr, | 1306 | key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr, |
1307 | AF_INET); | 1307 | AF_INET); |
1308 | if (key != NULL) { | 1308 | if (key) { |
1309 | /* | 1309 | /* |
1310 | * We're using one, so create a matching key | 1310 | * We're using one, so create a matching key |
1311 | * on the newsk structure. If we fail to get | 1311 | * on the newsk structure. If we fail to get |
@@ -1390,7 +1390,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1390 | sk_mark_napi_id(sk, skb); | 1390 | sk_mark_napi_id(sk, skb); |
1391 | if (dst) { | 1391 | if (dst) { |
1392 | if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || | 1392 | if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || |
1393 | dst->ops->check(dst, 0) == NULL) { | 1393 | !dst->ops->check(dst, 0)) { |
1394 | dst_release(dst); | 1394 | dst_release(dst); |
1395 | sk->sk_rx_dst = NULL; | 1395 | sk->sk_rx_dst = NULL; |
1396 | } | 1396 | } |
@@ -1469,7 +1469,7 @@ void tcp_v4_early_demux(struct sk_buff *skb) | |||
1469 | skb->sk = sk; | 1469 | skb->sk = sk; |
1470 | skb->destructor = sock_edemux; | 1470 | skb->destructor = sock_edemux; |
1471 | if (sk_fullsock(sk)) { | 1471 | if (sk_fullsock(sk)) { |
1472 | struct dst_entry *dst = sk->sk_rx_dst; | 1472 | struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); |
1473 | 1473 | ||
1474 | if (dst) | 1474 | if (dst) |
1475 | dst = dst_check(dst, 0); | 1475 | dst = dst_check(dst, 0); |
@@ -1797,7 +1797,7 @@ void tcp_v4_destroy_sock(struct sock *sk) | |||
1797 | if (inet_csk(sk)->icsk_bind_hash) | 1797 | if (inet_csk(sk)->icsk_bind_hash) |
1798 | inet_put_port(sk); | 1798 | inet_put_port(sk); |
1799 | 1799 | ||
1800 | BUG_ON(tp->fastopen_rsk != NULL); | 1800 | BUG_ON(tp->fastopen_rsk); |
1801 | 1801 | ||
1802 | /* If socket is aborted during connect operation */ | 1802 | /* If socket is aborted during connect operation */ |
1803 | tcp_free_fastopen_req(tp); | 1803 | tcp_free_fastopen_req(tp); |
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 71ec14c87579..a51d63a43e33 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c | |||
@@ -28,7 +28,8 @@ static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *s | |||
28 | 28 | ||
29 | struct tcp_fastopen_metrics { | 29 | struct tcp_fastopen_metrics { |
30 | u16 mss; | 30 | u16 mss; |
31 | u16 syn_loss:10; /* Recurring Fast Open SYN losses */ | 31 | u16 syn_loss:10, /* Recurring Fast Open SYN losses */ |
32 | try_exp:2; /* Request w/ exp. option (once) */ | ||
32 | unsigned long last_syn_loss; /* Last Fast Open SYN loss */ | 33 | unsigned long last_syn_loss; /* Last Fast Open SYN loss */ |
33 | struct tcp_fastopen_cookie cookie; | 34 | struct tcp_fastopen_cookie cookie; |
34 | }; | 35 | }; |
@@ -131,6 +132,8 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, | |||
131 | if (fastopen_clear) { | 132 | if (fastopen_clear) { |
132 | tm->tcpm_fastopen.mss = 0; | 133 | tm->tcpm_fastopen.mss = 0; |
133 | tm->tcpm_fastopen.syn_loss = 0; | 134 | tm->tcpm_fastopen.syn_loss = 0; |
135 | tm->tcpm_fastopen.try_exp = 0; | ||
136 | tm->tcpm_fastopen.cookie.exp = false; | ||
134 | tm->tcpm_fastopen.cookie.len = 0; | 137 | tm->tcpm_fastopen.cookie.len = 0; |
135 | } | 138 | } |
136 | } | 139 | } |
@@ -505,7 +508,7 @@ void tcp_init_metrics(struct sock *sk) | |||
505 | struct tcp_metrics_block *tm; | 508 | struct tcp_metrics_block *tm; |
506 | u32 val, crtt = 0; /* cached RTT scaled by 8 */ | 509 | u32 val, crtt = 0; /* cached RTT scaled by 8 */ |
507 | 510 | ||
508 | if (dst == NULL) | 511 | if (!dst) |
509 | goto reset; | 512 | goto reset; |
510 | 513 | ||
511 | dst_confirm(dst); | 514 | dst_confirm(dst); |
@@ -713,6 +716,8 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, | |||
713 | if (tfom->mss) | 716 | if (tfom->mss) |
714 | *mss = tfom->mss; | 717 | *mss = tfom->mss; |
715 | *cookie = tfom->cookie; | 718 | *cookie = tfom->cookie; |
719 | if (cookie->len <= 0 && tfom->try_exp == 1) | ||
720 | cookie->exp = true; | ||
716 | *syn_loss = tfom->syn_loss; | 721 | *syn_loss = tfom->syn_loss; |
717 | *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0; | 722 | *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0; |
718 | } while (read_seqretry(&fastopen_seqlock, seq)); | 723 | } while (read_seqretry(&fastopen_seqlock, seq)); |
@@ -721,7 +726,8 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, | |||
721 | } | 726 | } |
722 | 727 | ||
723 | void tcp_fastopen_cache_set(struct sock *sk, u16 mss, | 728 | void tcp_fastopen_cache_set(struct sock *sk, u16 mss, |
724 | struct tcp_fastopen_cookie *cookie, bool syn_lost) | 729 | struct tcp_fastopen_cookie *cookie, bool syn_lost, |
730 | u16 try_exp) | ||
725 | { | 731 | { |
726 | struct dst_entry *dst = __sk_dst_get(sk); | 732 | struct dst_entry *dst = __sk_dst_get(sk); |
727 | struct tcp_metrics_block *tm; | 733 | struct tcp_metrics_block *tm; |
@@ -738,6 +744,9 @@ void tcp_fastopen_cache_set(struct sock *sk, u16 mss, | |||
738 | tfom->mss = mss; | 744 | tfom->mss = mss; |
739 | if (cookie && cookie->len > 0) | 745 | if (cookie && cookie->len > 0) |
740 | tfom->cookie = *cookie; | 746 | tfom->cookie = *cookie; |
747 | else if (try_exp > tfom->try_exp && | ||
748 | tfom->cookie.len <= 0 && !tfom->cookie.exp) | ||
749 | tfom->try_exp = try_exp; | ||
741 | if (syn_lost) { | 750 | if (syn_lost) { |
742 | ++tfom->syn_loss; | 751 | ++tfom->syn_loss; |
743 | tfom->last_syn_loss = jiffies; | 752 | tfom->last_syn_loss = jiffies; |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 274e96fb369b..d7003911c894 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -294,7 +294,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) | |||
294 | if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets) | 294 | if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets) |
295 | tw = inet_twsk_alloc(sk, state); | 295 | tw = inet_twsk_alloc(sk, state); |
296 | 296 | ||
297 | if (tw != NULL) { | 297 | if (tw) { |
298 | struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); | 298 | struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); |
299 | const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); | 299 | const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); |
300 | struct inet_sock *inet = inet_sk(sk); | 300 | struct inet_sock *inet = inet_sk(sk); |
@@ -332,7 +332,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) | |||
332 | struct tcp_md5sig_key *key; | 332 | struct tcp_md5sig_key *key; |
333 | tcptw->tw_md5_key = NULL; | 333 | tcptw->tw_md5_key = NULL; |
334 | key = tp->af_specific->md5_lookup(sk, sk); | 334 | key = tp->af_specific->md5_lookup(sk, sk); |
335 | if (key != NULL) { | 335 | if (key) { |
336 | tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC); | 336 | tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC); |
337 | if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool()) | 337 | if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool()) |
338 | BUG(); | 338 | BUG(); |
@@ -454,7 +454,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, | |||
454 | { | 454 | { |
455 | struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC); | 455 | struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC); |
456 | 456 | ||
457 | if (newsk != NULL) { | 457 | if (newsk) { |
458 | const struct inet_request_sock *ireq = inet_rsk(req); | 458 | const struct inet_request_sock *ireq = inet_rsk(req); |
459 | struct tcp_request_sock *treq = tcp_rsk(req); | 459 | struct tcp_request_sock *treq = tcp_rsk(req); |
460 | struct inet_connection_sock *newicsk = inet_csk(newsk); | 460 | struct inet_connection_sock *newicsk = inet_csk(newsk); |
@@ -763,7 +763,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
763 | * socket is created, wait for troubles. | 763 | * socket is created, wait for troubles. |
764 | */ | 764 | */ |
765 | child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); | 765 | child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); |
766 | if (child == NULL) | 766 | if (!child) |
767 | goto listen_overflow; | 767 | goto listen_overflow; |
768 | 768 | ||
769 | inet_csk_reqsk_queue_unlink(sk, req); | 769 | inet_csk_reqsk_queue_unlink(sk, req); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2e69b8d16e68..e662d85d1635 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -518,17 +518,26 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, | |||
518 | 518 | ||
519 | if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) { | 519 | if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) { |
520 | struct tcp_fastopen_cookie *foc = opts->fastopen_cookie; | 520 | struct tcp_fastopen_cookie *foc = opts->fastopen_cookie; |
521 | u8 *p = (u8 *)ptr; | ||
522 | u32 len; /* Fast Open option length */ | ||
523 | |||
524 | if (foc->exp) { | ||
525 | len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; | ||
526 | *ptr = htonl((TCPOPT_EXP << 24) | (len << 16) | | ||
527 | TCPOPT_FASTOPEN_MAGIC); | ||
528 | p += TCPOLEN_EXP_FASTOPEN_BASE; | ||
529 | } else { | ||
530 | len = TCPOLEN_FASTOPEN_BASE + foc->len; | ||
531 | *p++ = TCPOPT_FASTOPEN; | ||
532 | *p++ = len; | ||
533 | } | ||
521 | 534 | ||
522 | *ptr++ = htonl((TCPOPT_EXP << 24) | | 535 | memcpy(p, foc->val, foc->len); |
523 | ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) | | 536 | if ((len & 3) == 2) { |
524 | TCPOPT_FASTOPEN_MAGIC); | 537 | p[foc->len] = TCPOPT_NOP; |
525 | 538 | p[foc->len + 1] = TCPOPT_NOP; | |
526 | memcpy(ptr, foc->val, foc->len); | ||
527 | if ((foc->len & 3) == 2) { | ||
528 | u8 *align = ((u8 *)ptr) + foc->len; | ||
529 | align[0] = align[1] = TCPOPT_NOP; | ||
530 | } | 539 | } |
531 | ptr += (foc->len + 3) >> 2; | 540 | ptr += (len + 3) >> 2; |
532 | } | 541 | } |
533 | } | 542 | } |
534 | 543 | ||
@@ -565,7 +574,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, | |||
565 | opts->mss = tcp_advertise_mss(sk); | 574 | opts->mss = tcp_advertise_mss(sk); |
566 | remaining -= TCPOLEN_MSS_ALIGNED; | 575 | remaining -= TCPOLEN_MSS_ALIGNED; |
567 | 576 | ||
568 | if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { | 577 | if (likely(sysctl_tcp_timestamps && !*md5)) { |
569 | opts->options |= OPTION_TS; | 578 | opts->options |= OPTION_TS; |
570 | opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; | 579 | opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; |
571 | opts->tsecr = tp->rx_opt.ts_recent; | 580 | opts->tsecr = tp->rx_opt.ts_recent; |
@@ -583,13 +592,17 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, | |||
583 | } | 592 | } |
584 | 593 | ||
585 | if (fastopen && fastopen->cookie.len >= 0) { | 594 | if (fastopen && fastopen->cookie.len >= 0) { |
586 | u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len; | 595 | u32 need = fastopen->cookie.len; |
596 | |||
597 | need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE : | ||
598 | TCPOLEN_FASTOPEN_BASE; | ||
587 | need = (need + 3) & ~3U; /* Align to 32 bits */ | 599 | need = (need + 3) & ~3U; /* Align to 32 bits */ |
588 | if (remaining >= need) { | 600 | if (remaining >= need) { |
589 | opts->options |= OPTION_FAST_OPEN_COOKIE; | 601 | opts->options |= OPTION_FAST_OPEN_COOKIE; |
590 | opts->fastopen_cookie = &fastopen->cookie; | 602 | opts->fastopen_cookie = &fastopen->cookie; |
591 | remaining -= need; | 603 | remaining -= need; |
592 | tp->syn_fastopen = 1; | 604 | tp->syn_fastopen = 1; |
605 | tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0; | ||
593 | } | 606 | } |
594 | } | 607 | } |
595 | 608 | ||
@@ -642,7 +655,10 @@ static unsigned int tcp_synack_options(struct sock *sk, | |||
642 | remaining -= TCPOLEN_SACKPERM_ALIGNED; | 655 | remaining -= TCPOLEN_SACKPERM_ALIGNED; |
643 | } | 656 | } |
644 | if (foc != NULL && foc->len >= 0) { | 657 | if (foc != NULL && foc->len >= 0) { |
645 | u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; | 658 | u32 need = foc->len; |
659 | |||
660 | need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE : | ||
661 | TCPOLEN_FASTOPEN_BASE; | ||
646 | need = (need + 3) & ~3U; /* Align to 32 bits */ | 662 | need = (need + 3) & ~3U; /* Align to 32 bits */ |
647 | if (remaining >= need) { | 663 | if (remaining >= need) { |
648 | opts->options |= OPTION_FAST_OPEN_COOKIE; | 664 | opts->options |= OPTION_FAST_OPEN_COOKIE; |
@@ -1148,7 +1164,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, | |||
1148 | 1164 | ||
1149 | /* Get a new skb... force flag on. */ | 1165 | /* Get a new skb... force flag on. */ |
1150 | buff = sk_stream_alloc_skb(sk, nsize, gfp); | 1166 | buff = sk_stream_alloc_skb(sk, nsize, gfp); |
1151 | if (buff == NULL) | 1167 | if (!buff) |
1152 | return -ENOMEM; /* We'll just try again later. */ | 1168 | return -ENOMEM; /* We'll just try again later. */ |
1153 | 1169 | ||
1154 | sk->sk_wmem_queued += buff->truesize; | 1170 | sk->sk_wmem_queued += buff->truesize; |
@@ -1707,7 +1723,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, | |||
1707 | return tcp_fragment(sk, skb, len, mss_now, gfp); | 1723 | return tcp_fragment(sk, skb, len, mss_now, gfp); |
1708 | 1724 | ||
1709 | buff = sk_stream_alloc_skb(sk, 0, gfp); | 1725 | buff = sk_stream_alloc_skb(sk, 0, gfp); |
1710 | if (unlikely(buff == NULL)) | 1726 | if (unlikely(!buff)) |
1711 | return -ENOMEM; | 1727 | return -ENOMEM; |
1712 | 1728 | ||
1713 | sk->sk_wmem_queued += buff->truesize; | 1729 | sk->sk_wmem_queued += buff->truesize; |
@@ -1925,7 +1941,8 @@ static int tcp_mtu_probe(struct sock *sk) | |||
1925 | } | 1941 | } |
1926 | 1942 | ||
1927 | /* We're allowed to probe. Build it now. */ | 1943 | /* We're allowed to probe. Build it now. */ |
1928 | if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL) | 1944 | nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC); |
1945 | if (!nskb) | ||
1929 | return -1; | 1946 | return -1; |
1930 | sk->sk_wmem_queued += nskb->truesize; | 1947 | sk->sk_wmem_queued += nskb->truesize; |
1931 | sk_mem_charge(sk, nskb->truesize); | 1948 | sk_mem_charge(sk, nskb->truesize); |
@@ -2223,7 +2240,7 @@ void tcp_send_loss_probe(struct sock *sk) | |||
2223 | int mss = tcp_current_mss(sk); | 2240 | int mss = tcp_current_mss(sk); |
2224 | int err = -1; | 2241 | int err = -1; |
2225 | 2242 | ||
2226 | if (tcp_send_head(sk) != NULL) { | 2243 | if (tcp_send_head(sk)) { |
2227 | err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); | 2244 | err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); |
2228 | goto rearm_timer; | 2245 | goto rearm_timer; |
2229 | } | 2246 | } |
@@ -2733,7 +2750,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) | |||
2733 | if (skb == tcp_send_head(sk)) | 2750 | if (skb == tcp_send_head(sk)) |
2734 | break; | 2751 | break; |
2735 | /* we could do better than to assign each time */ | 2752 | /* we could do better than to assign each time */ |
2736 | if (hole == NULL) | 2753 | if (!hole) |
2737 | tp->retransmit_skb_hint = skb; | 2754 | tp->retransmit_skb_hint = skb; |
2738 | 2755 | ||
2739 | /* Assume this retransmit will generate | 2756 | /* Assume this retransmit will generate |
@@ -2757,7 +2774,7 @@ begin_fwd: | |||
2757 | if (!tcp_can_forward_retransmit(sk)) | 2774 | if (!tcp_can_forward_retransmit(sk)) |
2758 | break; | 2775 | break; |
2759 | /* Backtrack if necessary to non-L'ed skb */ | 2776 | /* Backtrack if necessary to non-L'ed skb */ |
2760 | if (hole != NULL) { | 2777 | if (hole) { |
2761 | skb = hole; | 2778 | skb = hole; |
2762 | hole = NULL; | 2779 | hole = NULL; |
2763 | } | 2780 | } |
@@ -2765,7 +2782,7 @@ begin_fwd: | |||
2765 | goto begin_fwd; | 2782 | goto begin_fwd; |
2766 | 2783 | ||
2767 | } else if (!(sacked & TCPCB_LOST)) { | 2784 | } else if (!(sacked & TCPCB_LOST)) { |
2768 | if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) | 2785 | if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) |
2769 | hole = skb; | 2786 | hole = skb; |
2770 | continue; | 2787 | continue; |
2771 | 2788 | ||
@@ -2810,7 +2827,7 @@ void tcp_send_fin(struct sock *sk) | |||
2810 | */ | 2827 | */ |
2811 | mss_now = tcp_current_mss(sk); | 2828 | mss_now = tcp_current_mss(sk); |
2812 | 2829 | ||
2813 | if (tcp_send_head(sk) != NULL) { | 2830 | if (tcp_send_head(sk)) { |
2814 | TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN; | 2831 | TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN; |
2815 | TCP_SKB_CB(skb)->end_seq++; | 2832 | TCP_SKB_CB(skb)->end_seq++; |
2816 | tp->write_seq++; | 2833 | tp->write_seq++; |
@@ -2868,14 +2885,14 @@ int tcp_send_synack(struct sock *sk) | |||
2868 | struct sk_buff *skb; | 2885 | struct sk_buff *skb; |
2869 | 2886 | ||
2870 | skb = tcp_write_queue_head(sk); | 2887 | skb = tcp_write_queue_head(sk); |
2871 | if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { | 2888 | if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { |
2872 | pr_debug("%s: wrong queue state\n", __func__); | 2889 | pr_debug("%s: wrong queue state\n", __func__); |
2873 | return -EFAULT; | 2890 | return -EFAULT; |
2874 | } | 2891 | } |
2875 | if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { | 2892 | if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { |
2876 | if (skb_cloned(skb)) { | 2893 | if (skb_cloned(skb)) { |
2877 | struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); | 2894 | struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); |
2878 | if (nskb == NULL) | 2895 | if (!nskb) |
2879 | return -ENOMEM; | 2896 | return -ENOMEM; |
2880 | tcp_unlink_write_queue(skb, sk); | 2897 | tcp_unlink_write_queue(skb, sk); |
2881 | __skb_header_release(nskb); | 2898 | __skb_header_release(nskb); |
@@ -3014,7 +3031,7 @@ static void tcp_connect_init(struct sock *sk) | |||
3014 | (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); | 3031 | (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); |
3015 | 3032 | ||
3016 | #ifdef CONFIG_TCP_MD5SIG | 3033 | #ifdef CONFIG_TCP_MD5SIG |
3017 | if (tp->af_specific->md5_lookup(sk, sk) != NULL) | 3034 | if (tp->af_specific->md5_lookup(sk, sk)) |
3018 | tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; | 3035 | tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; |
3019 | #endif | 3036 | #endif |
3020 | 3037 | ||
@@ -3300,7 +3317,7 @@ void tcp_send_ack(struct sock *sk) | |||
3300 | * sock. | 3317 | * sock. |
3301 | */ | 3318 | */ |
3302 | buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); | 3319 | buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); |
3303 | if (buff == NULL) { | 3320 | if (!buff) { |
3304 | inet_csk_schedule_ack(sk); | 3321 | inet_csk_schedule_ack(sk); |
3305 | inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; | 3322 | inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; |
3306 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, | 3323 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, |
@@ -3344,7 +3361,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent) | |||
3344 | 3361 | ||
3345 | /* We don't queue it, tcp_transmit_skb() sets ownership. */ | 3362 | /* We don't queue it, tcp_transmit_skb() sets ownership. */ |
3346 | skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); | 3363 | skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); |
3347 | if (skb == NULL) | 3364 | if (!skb) |
3348 | return -1; | 3365 | return -1; |
3349 | 3366 | ||
3350 | /* Reserve space for headers and set control bits. */ | 3367 | /* Reserve space for headers and set control bits. */ |
@@ -3375,8 +3392,8 @@ int tcp_write_wakeup(struct sock *sk) | |||
3375 | if (sk->sk_state == TCP_CLOSE) | 3392 | if (sk->sk_state == TCP_CLOSE) |
3376 | return -1; | 3393 | return -1; |
3377 | 3394 | ||
3378 | if ((skb = tcp_send_head(sk)) != NULL && | 3395 | skb = tcp_send_head(sk); |
3379 | before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { | 3396 | if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { |
3380 | int err; | 3397 | int err; |
3381 | unsigned int mss = tcp_current_mss(sk); | 3398 | unsigned int mss = tcp_current_mss(sk); |
3382 | unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; | 3399 | unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 2568fd282873..8c65dc147d8b 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -167,7 +167,7 @@ static int tcp_write_timeout(struct sock *sk) | |||
167 | if (icsk->icsk_retransmits) { | 167 | if (icsk->icsk_retransmits) { |
168 | dst_negative_advice(sk); | 168 | dst_negative_advice(sk); |
169 | if (tp->syn_fastopen || tp->syn_data) | 169 | if (tp->syn_fastopen || tp->syn_data) |
170 | tcp_fastopen_cache_set(sk, 0, NULL, true); | 170 | tcp_fastopen_cache_set(sk, 0, NULL, true, 0); |
171 | if (tp->syn_data) | 171 | if (tp->syn_data) |
172 | NET_INC_STATS_BH(sock_net(sk), | 172 | NET_INC_STATS_BH(sock_net(sk), |
173 | LINUX_MIB_TCPFASTOPENACTIVEFAIL); | 173 | LINUX_MIB_TCPFASTOPENACTIVEFAIL); |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 294af16633af..2162fc6ce1c1 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -633,7 +633,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) | |||
633 | 633 | ||
634 | sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, | 634 | sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, |
635 | iph->saddr, uh->source, skb->dev->ifindex, udptable); | 635 | iph->saddr, uh->source, skb->dev->ifindex, udptable); |
636 | if (sk == NULL) { | 636 | if (!sk) { |
637 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); | 637 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); |
638 | return; /* No socket for error */ | 638 | return; /* No socket for error */ |
639 | } | 639 | } |
@@ -1011,7 +1011,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
1011 | if (connected) | 1011 | if (connected) |
1012 | rt = (struct rtable *)sk_dst_check(sk, 0); | 1012 | rt = (struct rtable *)sk_dst_check(sk, 0); |
1013 | 1013 | ||
1014 | if (rt == NULL) { | 1014 | if (!rt) { |
1015 | struct net *net = sock_net(sk); | 1015 | struct net *net = sock_net(sk); |
1016 | 1016 | ||
1017 | fl4 = &fl4_stack; | 1017 | fl4 = &fl4_stack; |
@@ -1522,7 +1522,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
1522 | 1522 | ||
1523 | /* if we're overly short, let UDP handle it */ | 1523 | /* if we're overly short, let UDP handle it */ |
1524 | encap_rcv = ACCESS_ONCE(up->encap_rcv); | 1524 | encap_rcv = ACCESS_ONCE(up->encap_rcv); |
1525 | if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) { | 1525 | if (skb->len > sizeof(struct udphdr) && encap_rcv) { |
1526 | int ret; | 1526 | int ret; |
1527 | 1527 | ||
1528 | /* Verify checksum before giving to encap */ | 1528 | /* Verify checksum before giving to encap */ |
@@ -1619,7 +1619,7 @@ static void flush_stack(struct sock **stack, unsigned int count, | |||
1619 | 1619 | ||
1620 | for (i = 0; i < count; i++) { | 1620 | for (i = 0; i < count; i++) { |
1621 | sk = stack[i]; | 1621 | sk = stack[i]; |
1622 | if (likely(skb1 == NULL)) | 1622 | if (likely(!skb1)) |
1623 | skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); | 1623 | skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); |
1624 | 1624 | ||
1625 | if (!skb1) { | 1625 | if (!skb1) { |
@@ -1802,7 +1802,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
1802 | saddr, daddr, udptable, proto); | 1802 | saddr, daddr, udptable, proto); |
1803 | 1803 | ||
1804 | sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); | 1804 | sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); |
1805 | if (sk != NULL) { | 1805 | if (sk) { |
1806 | int ret; | 1806 | int ret; |
1807 | 1807 | ||
1808 | if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) | 1808 | if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) |
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index 2dbfc1f1f7b3..b763c39ae1d7 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c | |||
@@ -58,7 +58,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, | |||
58 | goto out_nosk; | 58 | goto out_nosk; |
59 | 59 | ||
60 | err = -ENOENT; | 60 | err = -ENOENT; |
61 | if (sk == NULL) | 61 | if (!sk) |
62 | goto out_nosk; | 62 | goto out_nosk; |
63 | 63 | ||
64 | err = sock_diag_check_cookie(sk, req->id.idiag_cookie); | 64 | err = sock_diag_check_cookie(sk, req->id.idiag_cookie); |
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 4915d8284a86..f9386160cbee 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c | |||
@@ -285,7 +285,7 @@ void udp_del_offload(struct udp_offload *uo) | |||
285 | pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port)); | 285 | pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port)); |
286 | unlock: | 286 | unlock: |
287 | spin_unlock(&udp_offload_lock); | 287 | spin_unlock(&udp_offload_lock); |
288 | if (uo_priv != NULL) | 288 | if (uo_priv) |
289 | call_rcu(&uo_priv->rcu, udp_offload_free_routine); | 289 | call_rcu(&uo_priv->rcu, udp_offload_free_routine); |
290 | } | 290 | } |
291 | EXPORT_SYMBOL(udp_del_offload); | 291 | EXPORT_SYMBOL(udp_del_offload); |
@@ -394,7 +394,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff) | |||
394 | break; | 394 | break; |
395 | } | 395 | } |
396 | 396 | ||
397 | if (uo_priv != NULL) { | 397 | if (uo_priv) { |
398 | NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; | 398 | NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; |
399 | err = uo_priv->offload->callbacks.gro_complete(skb, | 399 | err = uo_priv->offload->callbacks.gro_complete(skb, |
400 | nhoff + sizeof(struct udphdr), | 400 | nhoff + sizeof(struct udphdr), |
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c index c83b35485056..6bb98cc193c9 100644 --- a/net/ipv4/udp_tunnel.c +++ b/net/ipv4/udp_tunnel.c | |||
@@ -75,7 +75,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock, | |||
75 | } | 75 | } |
76 | EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock); | 76 | EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock); |
77 | 77 | ||
78 | int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb, | 78 | int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, |
79 | __be32 src, __be32 dst, __u8 tos, __u8 ttl, | 79 | __be32 src, __be32 dst, __u8 tos, __u8 ttl, |
80 | __be16 df, __be16 src_port, __be16 dst_port, | 80 | __be16 df, __be16 src_port, __be16 dst_port, |
81 | bool xnet, bool nocheck) | 81 | bool xnet, bool nocheck) |
@@ -92,7 +92,7 @@ int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb, | |||
92 | 92 | ||
93 | udp_set_csum(nocheck, skb, src, dst, skb->len); | 93 | udp_set_csum(nocheck, skb, src, dst, skb->len); |
94 | 94 | ||
95 | return iptunnel_xmit(skb->sk, rt, skb, src, dst, IPPROTO_UDP, | 95 | return iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, |
96 | tos, ttl, df, xnet); | 96 | tos, ttl, df, xnet); |
97 | } | 97 | } |
98 | EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb); | 98 | EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb); |
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index aac6197b7a71..60b032f58ccc 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c | |||
@@ -22,9 +22,9 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb) | |||
22 | return xfrm4_extract_header(skb); | 22 | return xfrm4_extract_header(skb); |
23 | } | 23 | } |
24 | 24 | ||
25 | static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb) | 25 | static inline int xfrm4_rcv_encap_finish(struct sock *sk, struct sk_buff *skb) |
26 | { | 26 | { |
27 | if (skb_dst(skb) == NULL) { | 27 | if (!skb_dst(skb)) { |
28 | const struct iphdr *iph = ip_hdr(skb); | 28 | const struct iphdr *iph = ip_hdr(skb); |
29 | 29 | ||
30 | if (ip_route_input_noref(skb, iph->daddr, iph->saddr, | 30 | if (ip_route_input_noref(skb, iph->daddr, iph->saddr, |
@@ -52,7 +52,8 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async) | |||
52 | iph->tot_len = htons(skb->len); | 52 | iph->tot_len = htons(skb->len); |
53 | ip_send_check(iph); | 53 | ip_send_check(iph); |
54 | 54 | ||
55 | NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, | 55 | NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, NULL, skb, |
56 | skb->dev, NULL, | ||
56 | xfrm4_rcv_encap_finish); | 57 | xfrm4_rcv_encap_finish); |
57 | return 0; | 58 | return 0; |
58 | } | 59 | } |
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index dab73813cb92..2878dbfffeb7 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -69,7 +69,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb) | |||
69 | } | 69 | } |
70 | EXPORT_SYMBOL(xfrm4_prepare_output); | 70 | EXPORT_SYMBOL(xfrm4_prepare_output); |
71 | 71 | ||
72 | int xfrm4_output_finish(struct sk_buff *skb) | 72 | int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb) |
73 | { | 73 | { |
74 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); | 74 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); |
75 | 75 | ||
@@ -77,26 +77,26 @@ int xfrm4_output_finish(struct sk_buff *skb) | |||
77 | IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; | 77 | IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; |
78 | #endif | 78 | #endif |
79 | 79 | ||
80 | return xfrm_output(skb); | 80 | return xfrm_output(sk, skb); |
81 | } | 81 | } |
82 | 82 | ||
83 | static int __xfrm4_output(struct sk_buff *skb) | 83 | static int __xfrm4_output(struct sock *sk, struct sk_buff *skb) |
84 | { | 84 | { |
85 | struct xfrm_state *x = skb_dst(skb)->xfrm; | 85 | struct xfrm_state *x = skb_dst(skb)->xfrm; |
86 | 86 | ||
87 | #ifdef CONFIG_NETFILTER | 87 | #ifdef CONFIG_NETFILTER |
88 | if (!x) { | 88 | if (!x) { |
89 | IPCB(skb)->flags |= IPSKB_REROUTED; | 89 | IPCB(skb)->flags |= IPSKB_REROUTED; |
90 | return dst_output(skb); | 90 | return dst_output_sk(sk, skb); |
91 | } | 91 | } |
92 | #endif | 92 | #endif |
93 | 93 | ||
94 | return x->outer_mode->afinfo->output_finish(skb); | 94 | return x->outer_mode->afinfo->output_finish(sk, skb); |
95 | } | 95 | } |
96 | 96 | ||
97 | int xfrm4_output(struct sock *sk, struct sk_buff *skb) | 97 | int xfrm4_output(struct sock *sk, struct sk_buff *skb) |
98 | { | 98 | { |
99 | return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, | 99 | return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, |
100 | NULL, skb_dst(skb)->dev, __xfrm4_output, | 100 | NULL, skb_dst(skb)->dev, __xfrm4_output, |
101 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 101 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
102 | } | 102 | } |
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index c224c856247b..bff69746e05f 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -298,7 +298,7 @@ static void __net_exit xfrm4_net_exit(struct net *net) | |||
298 | { | 298 | { |
299 | struct ctl_table *table; | 299 | struct ctl_table *table; |
300 | 300 | ||
301 | if (net->ipv4.xfrm4_hdr == NULL) | 301 | if (!net->ipv4.xfrm4_hdr) |
302 | return; | 302 | return; |
303 | 303 | ||
304 | table = net->ipv4.xfrm4_hdr->ctl_table_arg; | 304 | table = net->ipv4.xfrm4_hdr->ctl_table_arg; |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 5c9e94cb1b2c..37b70e82bff8 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -4858,8 +4858,8 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, | |||
4858 | (dev->addr_len && | 4858 | (dev->addr_len && |
4859 | nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || | 4859 | nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || |
4860 | nla_put_u32(skb, IFLA_MTU, dev->mtu) || | 4860 | nla_put_u32(skb, IFLA_MTU, dev->mtu) || |
4861 | (dev->ifindex != dev->iflink && | 4861 | (dev->ifindex != dev_get_iflink(dev) && |
4862 | nla_put_u32(skb, IFLA_LINK, dev->iflink))) | 4862 | nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) |
4863 | goto nla_put_failure; | 4863 | goto nla_put_failure; |
4864 | protoinfo = nla_nest_start(skb, IFLA_PROTINFO); | 4864 | protoinfo = nla_nest_start(skb, IFLA_PROTINFO); |
4865 | if (!protoinfo) | 4865 | if (!protoinfo) |
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 61fb184b818d..2367a16eae58 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c | |||
@@ -315,7 +315,9 @@ out_fib6_rules_ops: | |||
315 | 315 | ||
316 | static void __net_exit fib6_rules_net_exit(struct net *net) | 316 | static void __net_exit fib6_rules_net_exit(struct net *net) |
317 | { | 317 | { |
318 | rtnl_lock(); | ||
318 | fib_rules_unregister(net->ipv6.fib6_rules_ops); | 319 | fib_rules_unregister(net->ipv6.fib6_rules_ops); |
320 | rtnl_unlock(); | ||
319 | } | 321 | } |
320 | 322 | ||
321 | static struct pernet_operations fib6_rules_net_ops = { | 323 | static struct pernet_operations fib6_rules_net_ops = { |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 0f4e73da14e4..b5e6cc1d4a73 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -760,7 +760,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, | |||
760 | 760 | ||
761 | skb_set_inner_protocol(skb, protocol); | 761 | skb_set_inner_protocol(skb, protocol); |
762 | 762 | ||
763 | ip6tunnel_xmit(skb, dev); | 763 | ip6tunnel_xmit(NULL, skb, dev); |
764 | if (ndst) | 764 | if (ndst) |
765 | ip6_tnl_dst_store(tunnel, ndst); | 765 | ip6_tnl_dst_store(tunnel, ndst); |
766 | return 0; | 766 | return 0; |
@@ -1216,6 +1216,7 @@ static const struct net_device_ops ip6gre_netdev_ops = { | |||
1216 | .ndo_do_ioctl = ip6gre_tunnel_ioctl, | 1216 | .ndo_do_ioctl = ip6gre_tunnel_ioctl, |
1217 | .ndo_change_mtu = ip6gre_tunnel_change_mtu, | 1217 | .ndo_change_mtu = ip6gre_tunnel_change_mtu, |
1218 | .ndo_get_stats64 = ip_tunnel_get_stats64, | 1218 | .ndo_get_stats64 = ip_tunnel_get_stats64, |
1219 | .ndo_get_iflink = ip6_tnl_get_iflink, | ||
1219 | }; | 1220 | }; |
1220 | 1221 | ||
1221 | static void ip6gre_dev_free(struct net_device *dev) | 1222 | static void ip6gre_dev_free(struct net_device *dev) |
@@ -1238,7 +1239,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev) | |||
1238 | if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) | 1239 | if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) |
1239 | dev->mtu -= 8; | 1240 | dev->mtu -= 8; |
1240 | dev->flags |= IFF_NOARP; | 1241 | dev->flags |= IFF_NOARP; |
1241 | dev->iflink = 0; | ||
1242 | dev->addr_len = sizeof(struct in6_addr); | 1242 | dev->addr_len = sizeof(struct in6_addr); |
1243 | netif_keep_dst(dev); | 1243 | netif_keep_dst(dev); |
1244 | } | 1244 | } |
@@ -1270,8 +1270,6 @@ static int ip6gre_tunnel_init(struct net_device *dev) | |||
1270 | u64_stats_init(&ip6gre_tunnel_stats->syncp); | 1270 | u64_stats_init(&ip6gre_tunnel_stats->syncp); |
1271 | } | 1271 | } |
1272 | 1272 | ||
1273 | dev->iflink = tunnel->parms.link; | ||
1274 | |||
1275 | return 0; | 1273 | return 0; |
1276 | } | 1274 | } |
1277 | 1275 | ||
@@ -1480,8 +1478,6 @@ static int ip6gre_tap_init(struct net_device *dev) | |||
1480 | if (!dev->tstats) | 1478 | if (!dev->tstats) |
1481 | return -ENOMEM; | 1479 | return -ENOMEM; |
1482 | 1480 | ||
1483 | dev->iflink = tunnel->parms.link; | ||
1484 | |||
1485 | return 0; | 1481 | return 0; |
1486 | } | 1482 | } |
1487 | 1483 | ||
@@ -1493,6 +1489,7 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = { | |||
1493 | .ndo_validate_addr = eth_validate_addr, | 1489 | .ndo_validate_addr = eth_validate_addr, |
1494 | .ndo_change_mtu = ip6gre_tunnel_change_mtu, | 1490 | .ndo_change_mtu = ip6gre_tunnel_change_mtu, |
1495 | .ndo_get_stats64 = ip_tunnel_get_stats64, | 1491 | .ndo_get_stats64 = ip_tunnel_get_stats64, |
1492 | .ndo_get_iflink = ip6_tnl_get_iflink, | ||
1496 | }; | 1493 | }; |
1497 | 1494 | ||
1498 | static void ip6gre_tap_setup(struct net_device *dev) | 1495 | static void ip6gre_tap_setup(struct net_device *dev) |
@@ -1503,7 +1500,6 @@ static void ip6gre_tap_setup(struct net_device *dev) | |||
1503 | dev->netdev_ops = &ip6gre_tap_netdev_ops; | 1500 | dev->netdev_ops = &ip6gre_tap_netdev_ops; |
1504 | dev->destructor = ip6gre_dev_free; | 1501 | dev->destructor = ip6gre_dev_free; |
1505 | 1502 | ||
1506 | dev->iflink = 0; | ||
1507 | dev->features |= NETIF_F_NETNS_LOCAL; | 1503 | dev->features |= NETIF_F_NETNS_LOCAL; |
1508 | } | 1504 | } |
1509 | 1505 | ||
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index fb97f7f8d4ed..f2e464eba5ef 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
@@ -46,8 +46,7 @@ | |||
46 | #include <net/xfrm.h> | 46 | #include <net/xfrm.h> |
47 | #include <net/inet_ecn.h> | 47 | #include <net/inet_ecn.h> |
48 | 48 | ||
49 | 49 | int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb) | |
50 | int ip6_rcv_finish(struct sk_buff *skb) | ||
51 | { | 50 | { |
52 | if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) { | 51 | if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) { |
53 | const struct inet6_protocol *ipprot; | 52 | const struct inet6_protocol *ipprot; |
@@ -183,7 +182,8 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
183 | /* Must drop socket now because of tproxy. */ | 182 | /* Must drop socket now because of tproxy. */ |
184 | skb_orphan(skb); | 183 | skb_orphan(skb); |
185 | 184 | ||
186 | return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, dev, NULL, | 185 | return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, NULL, skb, |
186 | dev, NULL, | ||
187 | ip6_rcv_finish); | 187 | ip6_rcv_finish); |
188 | err: | 188 | err: |
189 | IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); | 189 | IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); |
@@ -198,7 +198,7 @@ drop: | |||
198 | */ | 198 | */ |
199 | 199 | ||
200 | 200 | ||
201 | static int ip6_input_finish(struct sk_buff *skb) | 201 | static int ip6_input_finish(struct sock *sk, struct sk_buff *skb) |
202 | { | 202 | { |
203 | struct net *net = dev_net(skb_dst(skb)->dev); | 203 | struct net *net = dev_net(skb_dst(skb)->dev); |
204 | const struct inet6_protocol *ipprot; | 204 | const struct inet6_protocol *ipprot; |
@@ -277,7 +277,8 @@ discard: | |||
277 | 277 | ||
278 | int ip6_input(struct sk_buff *skb) | 278 | int ip6_input(struct sk_buff *skb) |
279 | { | 279 | { |
280 | return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, skb, skb->dev, NULL, | 280 | return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, NULL, skb, |
281 | skb->dev, NULL, | ||
281 | ip6_input_finish); | 282 | ip6_input_finish); |
282 | } | 283 | } |
283 | 284 | ||
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 84c58da10f5c..7fde1f265c90 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -56,7 +56,7 @@ | |||
56 | #include <net/checksum.h> | 56 | #include <net/checksum.h> |
57 | #include <linux/mroute6.h> | 57 | #include <linux/mroute6.h> |
58 | 58 | ||
59 | static int ip6_finish_output2(struct sk_buff *skb) | 59 | static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb) |
60 | { | 60 | { |
61 | struct dst_entry *dst = skb_dst(skb); | 61 | struct dst_entry *dst = skb_dst(skb); |
62 | struct net_device *dev = dst->dev; | 62 | struct net_device *dev = dst->dev; |
@@ -70,7 +70,7 @@ static int ip6_finish_output2(struct sk_buff *skb) | |||
70 | if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { | 70 | if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { |
71 | struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); | 71 | struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); |
72 | 72 | ||
73 | if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && | 73 | if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) && |
74 | ((mroute6_socket(dev_net(dev), skb) && | 74 | ((mroute6_socket(dev_net(dev), skb) && |
75 | !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || | 75 | !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || |
76 | ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, | 76 | ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, |
@@ -82,7 +82,7 @@ static int ip6_finish_output2(struct sk_buff *skb) | |||
82 | */ | 82 | */ |
83 | if (newskb) | 83 | if (newskb) |
84 | NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, | 84 | NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, |
85 | newskb, NULL, newskb->dev, | 85 | sk, newskb, NULL, newskb->dev, |
86 | dev_loopback_xmit); | 86 | dev_loopback_xmit); |
87 | 87 | ||
88 | if (ipv6_hdr(skb)->hop_limit == 0) { | 88 | if (ipv6_hdr(skb)->hop_limit == 0) { |
@@ -122,14 +122,14 @@ static int ip6_finish_output2(struct sk_buff *skb) | |||
122 | return -EINVAL; | 122 | return -EINVAL; |
123 | } | 123 | } |
124 | 124 | ||
125 | static int ip6_finish_output(struct sk_buff *skb) | 125 | static int ip6_finish_output(struct sock *sk, struct sk_buff *skb) |
126 | { | 126 | { |
127 | if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || | 127 | if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || |
128 | dst_allfrag(skb_dst(skb)) || | 128 | dst_allfrag(skb_dst(skb)) || |
129 | (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size)) | 129 | (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size)) |
130 | return ip6_fragment(skb, ip6_finish_output2); | 130 | return ip6_fragment(sk, skb, ip6_finish_output2); |
131 | else | 131 | else |
132 | return ip6_finish_output2(skb); | 132 | return ip6_finish_output2(sk, skb); |
133 | } | 133 | } |
134 | 134 | ||
135 | int ip6_output(struct sock *sk, struct sk_buff *skb) | 135 | int ip6_output(struct sock *sk, struct sk_buff *skb) |
@@ -143,7 +143,8 @@ int ip6_output(struct sock *sk, struct sk_buff *skb) | |||
143 | return 0; | 143 | return 0; |
144 | } | 144 | } |
145 | 145 | ||
146 | return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev, | 146 | return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb, |
147 | NULL, dev, | ||
147 | ip6_finish_output, | 148 | ip6_finish_output, |
148 | !(IP6CB(skb)->flags & IP6SKB_REROUTED)); | 149 | !(IP6CB(skb)->flags & IP6SKB_REROUTED)); |
149 | } | 150 | } |
@@ -223,8 +224,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, | |||
223 | if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) { | 224 | if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) { |
224 | IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), | 225 | IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), |
225 | IPSTATS_MIB_OUT, skb->len); | 226 | IPSTATS_MIB_OUT, skb->len); |
226 | return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, | 227 | return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb, |
227 | dst->dev, dst_output); | 228 | NULL, dst->dev, dst_output_sk); |
228 | } | 229 | } |
229 | 230 | ||
230 | skb->dev = dst->dev; | 231 | skb->dev = dst->dev; |
@@ -316,10 +317,10 @@ static int ip6_forward_proxy_check(struct sk_buff *skb) | |||
316 | return 0; | 317 | return 0; |
317 | } | 318 | } |
318 | 319 | ||
319 | static inline int ip6_forward_finish(struct sk_buff *skb) | 320 | static inline int ip6_forward_finish(struct sock *sk, struct sk_buff *skb) |
320 | { | 321 | { |
321 | skb_sender_cpu_clear(skb); | 322 | skb_sender_cpu_clear(skb); |
322 | return dst_output(skb); | 323 | return dst_output_sk(sk, skb); |
323 | } | 324 | } |
324 | 325 | ||
325 | static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst) | 326 | static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst) |
@@ -511,7 +512,8 @@ int ip6_forward(struct sk_buff *skb) | |||
511 | 512 | ||
512 | IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); | 513 | IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); |
513 | IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); | 514 | IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); |
514 | return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, | 515 | return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, NULL, skb, |
516 | skb->dev, dst->dev, | ||
515 | ip6_forward_finish); | 517 | ip6_forward_finish); |
516 | 518 | ||
517 | error: | 519 | error: |
@@ -538,11 +540,13 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) | |||
538 | skb_copy_secmark(to, from); | 540 | skb_copy_secmark(to, from); |
539 | } | 541 | } |
540 | 542 | ||
541 | int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | 543 | int ip6_fragment(struct sock *sk, struct sk_buff *skb, |
544 | int (*output)(struct sock *, struct sk_buff *)) | ||
542 | { | 545 | { |
543 | struct sk_buff *frag; | 546 | struct sk_buff *frag; |
544 | struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); | 547 | struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); |
545 | struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; | 548 | struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? |
549 | inet6_sk(skb->sk) : NULL; | ||
546 | struct ipv6hdr *tmp_hdr; | 550 | struct ipv6hdr *tmp_hdr; |
547 | struct frag_hdr *fh; | 551 | struct frag_hdr *fh; |
548 | unsigned int mtu, hlen, left, len; | 552 | unsigned int mtu, hlen, left, len; |
@@ -666,7 +670,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
666 | ip6_copy_metadata(frag, skb); | 670 | ip6_copy_metadata(frag, skb); |
667 | } | 671 | } |
668 | 672 | ||
669 | err = output(skb); | 673 | err = output(sk, skb); |
670 | if (!err) | 674 | if (!err) |
671 | IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), | 675 | IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), |
672 | IPSTATS_MIB_FRAGCREATES); | 676 | IPSTATS_MIB_FRAGCREATES); |
@@ -799,7 +803,7 @@ slow_path: | |||
799 | /* | 803 | /* |
800 | * Put this fragment into the sending queue. | 804 | * Put this fragment into the sending queue. |
801 | */ | 805 | */ |
802 | err = output(frag); | 806 | err = output(sk, frag); |
803 | if (err) | 807 | if (err) |
804 | goto fail; | 808 | goto fail; |
805 | 809 | ||
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 9bd85f0dff69..5cafd92c2312 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1100,7 +1100,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
1100 | ipv6h->nexthdr = proto; | 1100 | ipv6h->nexthdr = proto; |
1101 | ipv6h->saddr = fl6->saddr; | 1101 | ipv6h->saddr = fl6->saddr; |
1102 | ipv6h->daddr = fl6->daddr; | 1102 | ipv6h->daddr = fl6->daddr; |
1103 | ip6tunnel_xmit(skb, dev); | 1103 | ip6tunnel_xmit(NULL, skb, dev); |
1104 | if (ndst) | 1104 | if (ndst) |
1105 | ip6_tnl_dst_store(t, ndst); | 1105 | ip6_tnl_dst_store(t, ndst); |
1106 | return 0; | 1106 | return 0; |
@@ -1264,8 +1264,6 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) | |||
1264 | else | 1264 | else |
1265 | dev->flags &= ~IFF_POINTOPOINT; | 1265 | dev->flags &= ~IFF_POINTOPOINT; |
1266 | 1266 | ||
1267 | dev->iflink = p->link; | ||
1268 | |||
1269 | if (p->flags & IP6_TNL_F_CAP_XMIT) { | 1267 | if (p->flags & IP6_TNL_F_CAP_XMIT) { |
1270 | int strict = (ipv6_addr_type(&p->raddr) & | 1268 | int strict = (ipv6_addr_type(&p->raddr) & |
1271 | (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); | 1269 | (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); |
@@ -1517,6 +1515,13 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) | |||
1517 | return 0; | 1515 | return 0; |
1518 | } | 1516 | } |
1519 | 1517 | ||
1518 | int ip6_tnl_get_iflink(const struct net_device *dev) | ||
1519 | { | ||
1520 | struct ip6_tnl *t = netdev_priv(dev); | ||
1521 | |||
1522 | return t->parms.link; | ||
1523 | } | ||
1524 | EXPORT_SYMBOL(ip6_tnl_get_iflink); | ||
1520 | 1525 | ||
1521 | static const struct net_device_ops ip6_tnl_netdev_ops = { | 1526 | static const struct net_device_ops ip6_tnl_netdev_ops = { |
1522 | .ndo_init = ip6_tnl_dev_init, | 1527 | .ndo_init = ip6_tnl_dev_init, |
@@ -1525,6 +1530,7 @@ static const struct net_device_ops ip6_tnl_netdev_ops = { | |||
1525 | .ndo_do_ioctl = ip6_tnl_ioctl, | 1530 | .ndo_do_ioctl = ip6_tnl_ioctl, |
1526 | .ndo_change_mtu = ip6_tnl_change_mtu, | 1531 | .ndo_change_mtu = ip6_tnl_change_mtu, |
1527 | .ndo_get_stats = ip6_get_stats, | 1532 | .ndo_get_stats = ip6_get_stats, |
1533 | .ndo_get_iflink = ip6_tnl_get_iflink, | ||
1528 | }; | 1534 | }; |
1529 | 1535 | ||
1530 | 1536 | ||
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c index 32d9b268e7d8..bba8903e871f 100644 --- a/net/ipv6/ip6_udp_tunnel.c +++ b/net/ipv6/ip6_udp_tunnel.c | |||
@@ -62,7 +62,8 @@ error: | |||
62 | } | 62 | } |
63 | EXPORT_SYMBOL_GPL(udp_sock_create6); | 63 | EXPORT_SYMBOL_GPL(udp_sock_create6); |
64 | 64 | ||
65 | int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb, | 65 | int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, |
66 | struct sk_buff *skb, | ||
66 | struct net_device *dev, struct in6_addr *saddr, | 67 | struct net_device *dev, struct in6_addr *saddr, |
67 | struct in6_addr *daddr, | 68 | struct in6_addr *daddr, |
68 | __u8 prio, __u8 ttl, __be16 src_port, | 69 | __u8 prio, __u8 ttl, __be16 src_port, |
@@ -97,7 +98,7 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb, | |||
97 | ip6h->daddr = *daddr; | 98 | ip6h->daddr = *daddr; |
98 | ip6h->saddr = *saddr; | 99 | ip6h->saddr = *saddr; |
99 | 100 | ||
100 | ip6tunnel_xmit(skb, dev); | 101 | ip6tunnel_xmit(sk, skb, dev); |
101 | return 0; | 102 | return 0; |
102 | } | 103 | } |
103 | EXPORT_SYMBOL_GPL(udp_tunnel6_xmit_skb); | 104 | EXPORT_SYMBOL_GPL(udp_tunnel6_xmit_skb); |
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 53d90ed68905..b53148444e15 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
@@ -601,8 +601,6 @@ static void vti6_link_config(struct ip6_tnl *t) | |||
601 | dev->flags |= IFF_POINTOPOINT; | 601 | dev->flags |= IFF_POINTOPOINT; |
602 | else | 602 | else |
603 | dev->flags &= ~IFF_POINTOPOINT; | 603 | dev->flags &= ~IFF_POINTOPOINT; |
604 | |||
605 | dev->iflink = p->link; | ||
606 | } | 604 | } |
607 | 605 | ||
608 | /** | 606 | /** |
@@ -808,6 +806,7 @@ static const struct net_device_ops vti6_netdev_ops = { | |||
808 | .ndo_do_ioctl = vti6_ioctl, | 806 | .ndo_do_ioctl = vti6_ioctl, |
809 | .ndo_change_mtu = vti6_change_mtu, | 807 | .ndo_change_mtu = vti6_change_mtu, |
810 | .ndo_get_stats64 = ip_tunnel_get_stats64, | 808 | .ndo_get_stats64 = ip_tunnel_get_stats64, |
809 | .ndo_get_iflink = ip6_tnl_get_iflink, | ||
811 | }; | 810 | }; |
812 | 811 | ||
813 | /** | 812 | /** |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index caf6b99374e6..74ceb73c1c9a 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -250,7 +250,7 @@ static int __net_init ip6mr_rules_init(struct net *net) | |||
250 | return 0; | 250 | return 0; |
251 | 251 | ||
252 | err2: | 252 | err2: |
253 | kfree(mrt); | 253 | ip6mr_free_table(mrt); |
254 | err1: | 254 | err1: |
255 | fib_rules_unregister(ops); | 255 | fib_rules_unregister(ops); |
256 | return err; | 256 | return err; |
@@ -265,8 +265,8 @@ static void __net_exit ip6mr_rules_exit(struct net *net) | |||
265 | list_del(&mrt->list); | 265 | list_del(&mrt->list); |
266 | ip6mr_free_table(mrt); | 266 | ip6mr_free_table(mrt); |
267 | } | 267 | } |
268 | rtnl_unlock(); | ||
269 | fib_rules_unregister(net->ipv6.mr6_rules_ops); | 268 | fib_rules_unregister(net->ipv6.mr6_rules_ops); |
269 | rtnl_unlock(); | ||
270 | } | 270 | } |
271 | #else | 271 | #else |
272 | #define ip6mr_for_each_table(mrt, net) \ | 272 | #define ip6mr_for_each_table(mrt, net) \ |
@@ -334,7 +334,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id) | |||
334 | 334 | ||
335 | static void ip6mr_free_table(struct mr6_table *mrt) | 335 | static void ip6mr_free_table(struct mr6_table *mrt) |
336 | { | 336 | { |
337 | del_timer(&mrt->ipmr_expire_timer); | 337 | del_timer_sync(&mrt->ipmr_expire_timer); |
338 | mroute_clean_tables(mrt); | 338 | mroute_clean_tables(mrt); |
339 | kfree(mrt); | 339 | kfree(mrt); |
340 | } | 340 | } |
@@ -718,8 +718,14 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, | |||
718 | return NETDEV_TX_OK; | 718 | return NETDEV_TX_OK; |
719 | } | 719 | } |
720 | 720 | ||
721 | static int reg_vif_get_iflink(const struct net_device *dev) | ||
722 | { | ||
723 | return 0; | ||
724 | } | ||
725 | |||
721 | static const struct net_device_ops reg_vif_netdev_ops = { | 726 | static const struct net_device_ops reg_vif_netdev_ops = { |
722 | .ndo_start_xmit = reg_vif_xmit, | 727 | .ndo_start_xmit = reg_vif_xmit, |
728 | .ndo_get_iflink = reg_vif_get_iflink, | ||
723 | }; | 729 | }; |
724 | 730 | ||
725 | static void reg_vif_setup(struct net_device *dev) | 731 | static void reg_vif_setup(struct net_device *dev) |
@@ -752,7 +758,6 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt) | |||
752 | free_netdev(dev); | 758 | free_netdev(dev); |
753 | return NULL; | 759 | return NULL; |
754 | } | 760 | } |
755 | dev->iflink = 0; | ||
756 | 761 | ||
757 | if (dev_open(dev)) | 762 | if (dev_open(dev)) |
758 | goto failure; | 763 | goto failure; |
@@ -992,7 +997,7 @@ static int mif6_add(struct net *net, struct mr6_table *mrt, | |||
992 | v->pkt_out = 0; | 997 | v->pkt_out = 0; |
993 | v->link = dev->ifindex; | 998 | v->link = dev->ifindex; |
994 | if (v->flags & MIFF_REGISTER) | 999 | if (v->flags & MIFF_REGISTER) |
995 | v->link = dev->iflink; | 1000 | v->link = dev_get_iflink(dev); |
996 | 1001 | ||
997 | /* And finish update writing critical data */ | 1002 | /* And finish update writing critical data */ |
998 | write_lock_bh(&mrt_lock); | 1003 | write_lock_bh(&mrt_lock); |
@@ -1981,13 +1986,13 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) | |||
1981 | } | 1986 | } |
1982 | #endif | 1987 | #endif |
1983 | 1988 | ||
1984 | static inline int ip6mr_forward2_finish(struct sk_buff *skb) | 1989 | static inline int ip6mr_forward2_finish(struct sock *sk, struct sk_buff *skb) |
1985 | { | 1990 | { |
1986 | IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), | 1991 | IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), |
1987 | IPSTATS_MIB_OUTFORWDATAGRAMS); | 1992 | IPSTATS_MIB_OUTFORWDATAGRAMS); |
1988 | IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), | 1993 | IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), |
1989 | IPSTATS_MIB_OUTOCTETS, skb->len); | 1994 | IPSTATS_MIB_OUTOCTETS, skb->len); |
1990 | return dst_output(skb); | 1995 | return dst_output_sk(sk, skb); |
1991 | } | 1996 | } |
1992 | 1997 | ||
1993 | /* | 1998 | /* |
@@ -2059,7 +2064,8 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt, | |||
2059 | 2064 | ||
2060 | IP6CB(skb)->flags |= IP6SKB_FORWARDED; | 2065 | IP6CB(skb)->flags |= IP6SKB_FORWARDED; |
2061 | 2066 | ||
2062 | return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev, | 2067 | return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, NULL, skb, |
2068 | skb->dev, dev, | ||
2063 | ip6mr_forward2_finish); | 2069 | ip6mr_forward2_finish); |
2064 | 2070 | ||
2065 | out_free: | 2071 | out_free: |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index fac1f27e428e..083b2927fc67 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -1644,8 +1644,9 @@ static void mld_sendpack(struct sk_buff *skb) | |||
1644 | 1644 | ||
1645 | payload_len = skb->len; | 1645 | payload_len = skb->len; |
1646 | 1646 | ||
1647 | err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, | 1647 | err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, |
1648 | dst_output); | 1648 | net->ipv6.igmp_sk, skb, NULL, skb->dev, |
1649 | dst_output_sk); | ||
1649 | out: | 1650 | out: |
1650 | if (!err) { | 1651 | if (!err) { |
1651 | ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT); | 1652 | ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT); |
@@ -2007,8 +2008,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
2007 | } | 2008 | } |
2008 | 2009 | ||
2009 | skb_dst_set(skb, dst); | 2010 | skb_dst_set(skb, dst); |
2010 | err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, | 2011 | err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb, |
2011 | dst_output); | 2012 | NULL, skb->dev, dst_output_sk); |
2012 | out: | 2013 | out: |
2013 | if (!err) { | 2014 | if (!err) { |
2014 | ICMP6MSGOUT_INC_STATS(net, idev, type); | 2015 | ICMP6MSGOUT_INC_STATS(net, idev, type); |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index c283827d60e2..96f153c0846b 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -463,8 +463,9 @@ static void ndisc_send_skb(struct sk_buff *skb, | |||
463 | idev = __in6_dev_get(dst->dev); | 463 | idev = __in6_dev_get(dst->dev); |
464 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); | 464 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); |
465 | 465 | ||
466 | err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, | 466 | err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb, |
467 | dst_output); | 467 | NULL, dst->dev, |
468 | dst_output_sk); | ||
468 | if (!err) { | 469 | if (!err) { |
469 | ICMP6MSGOUT_INC_STATS(net, idev, type); | 470 | ICMP6MSGOUT_INC_STATS(net, idev, type); |
470 | ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); | 471 | ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); |
@@ -1225,7 +1226,14 @@ static void ndisc_router_discovery(struct sk_buff *skb) | |||
1225 | if (rt) | 1226 | if (rt) |
1226 | rt6_set_expires(rt, jiffies + (HZ * lifetime)); | 1227 | rt6_set_expires(rt, jiffies + (HZ * lifetime)); |
1227 | if (ra_msg->icmph.icmp6_hop_limit) { | 1228 | if (ra_msg->icmph.icmp6_hop_limit) { |
1228 | in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; | 1229 | /* Only set hop_limit on the interface if it is higher than |
1230 | * the current hop_limit. | ||
1231 | */ | ||
1232 | if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) { | ||
1233 | in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; | ||
1234 | } else { | ||
1235 | ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n"); | ||
1236 | } | ||
1229 | if (rt) | 1237 | if (rt) |
1230 | dst_metric_set(&rt->dst, RTAX_HOPLIMIT, | 1238 | dst_metric_set(&rt->dst, RTAX_HOPLIMIT, |
1231 | ra_msg->icmph.icmp6_hop_limit); | 1239 | ra_msg->icmph.icmp6_hop_limit); |
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 398377a9d018..d958718b5031 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c | |||
@@ -84,7 +84,7 @@ static void nf_ip6_saveroute(const struct sk_buff *skb, | |||
84 | { | 84 | { |
85 | struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry); | 85 | struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry); |
86 | 86 | ||
87 | if (entry->hook == NF_INET_LOCAL_OUT) { | 87 | if (entry->state.hook == NF_INET_LOCAL_OUT) { |
88 | const struct ipv6hdr *iph = ipv6_hdr(skb); | 88 | const struct ipv6hdr *iph = ipv6_hdr(skb); |
89 | 89 | ||
90 | rt_info->daddr = iph->daddr; | 90 | rt_info->daddr = iph->daddr; |
@@ -98,7 +98,7 @@ static int nf_ip6_reroute(struct sk_buff *skb, | |||
98 | { | 98 | { |
99 | struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry); | 99 | struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry); |
100 | 100 | ||
101 | if (entry->hook == NF_INET_LOCAL_OUT) { | 101 | if (entry->state.hook == NF_INET_LOCAL_OUT) { |
102 | const struct ipv6hdr *iph = ipv6_hdr(skb); | 102 | const struct ipv6hdr *iph = ipv6_hdr(skb); |
103 | if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) || | 103 | if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) || |
104 | !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) || | 104 | !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) || |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 83f59dc3cccc..1a732a1d3c8e 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -317,8 +317,7 @@ ip6t_next_entry(const struct ip6t_entry *entry) | |||
317 | unsigned int | 317 | unsigned int |
318 | ip6t_do_table(struct sk_buff *skb, | 318 | ip6t_do_table(struct sk_buff *skb, |
319 | unsigned int hook, | 319 | unsigned int hook, |
320 | const struct net_device *in, | 320 | const struct nf_hook_state *state, |
321 | const struct net_device *out, | ||
322 | struct xt_table *table) | 321 | struct xt_table *table) |
323 | { | 322 | { |
324 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); | 323 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); |
@@ -333,8 +332,8 @@ ip6t_do_table(struct sk_buff *skb, | |||
333 | unsigned int addend; | 332 | unsigned int addend; |
334 | 333 | ||
335 | /* Initialization */ | 334 | /* Initialization */ |
336 | indev = in ? in->name : nulldevname; | 335 | indev = state->in ? state->in->name : nulldevname; |
337 | outdev = out ? out->name : nulldevname; | 336 | outdev = state->out ? state->out->name : nulldevname; |
338 | /* We handle fragments by dealing with the first fragment as | 337 | /* We handle fragments by dealing with the first fragment as |
339 | * if it was a normal packet. All other fragments are treated | 338 | * if it was a normal packet. All other fragments are treated |
340 | * normally, except that they will NEVER match rules that ask | 339 | * normally, except that they will NEVER match rules that ask |
@@ -342,8 +341,8 @@ ip6t_do_table(struct sk_buff *skb, | |||
342 | * rule is also a fragment-specific rule, non-fragments won't | 341 | * rule is also a fragment-specific rule, non-fragments won't |
343 | * match it. */ | 342 | * match it. */ |
344 | acpar.hotdrop = false; | 343 | acpar.hotdrop = false; |
345 | acpar.in = in; | 344 | acpar.in = state->in; |
346 | acpar.out = out; | 345 | acpar.out = state->out; |
347 | acpar.family = NFPROTO_IPV6; | 346 | acpar.family = NFPROTO_IPV6; |
348 | acpar.hooknum = hook; | 347 | acpar.hooknum = hook; |
349 | 348 | ||
@@ -393,7 +392,7 @@ ip6t_do_table(struct sk_buff *skb, | |||
393 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) | 392 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) |
394 | /* The packet is traced: log it */ | 393 | /* The packet is traced: log it */ |
395 | if (unlikely(skb->nf_trace)) | 394 | if (unlikely(skb->nf_trace)) |
396 | trace_packet(skb, hook, in, out, | 395 | trace_packet(skb, hook, state->in, state->out, |
397 | table->name, private, e); | 396 | table->name, private, e); |
398 | #endif | 397 | #endif |
399 | /* Standard target? */ | 398 | /* Standard target? */ |
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index a0d17270117c..6edb7b106de7 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c | |||
@@ -315,11 +315,9 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) | |||
315 | 315 | ||
316 | static unsigned int ipv6_synproxy_hook(const struct nf_hook_ops *ops, | 316 | static unsigned int ipv6_synproxy_hook(const struct nf_hook_ops *ops, |
317 | struct sk_buff *skb, | 317 | struct sk_buff *skb, |
318 | const struct net_device *in, | 318 | const struct nf_hook_state *nhs) |
319 | const struct net_device *out, | ||
320 | int (*okfn)(struct sk_buff *)) | ||
321 | { | 319 | { |
322 | struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out)); | 320 | struct synproxy_net *snet = synproxy_pernet(dev_net(nhs->in ? : nhs->out)); |
323 | enum ip_conntrack_info ctinfo; | 321 | enum ip_conntrack_info ctinfo; |
324 | struct nf_conn *ct; | 322 | struct nf_conn *ct; |
325 | struct nf_conn_synproxy *synproxy; | 323 | struct nf_conn_synproxy *synproxy; |
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c index ca7f6c128086..5c33d8abc077 100644 --- a/net/ipv6/netfilter/ip6table_filter.c +++ b/net/ipv6/netfilter/ip6table_filter.c | |||
@@ -33,13 +33,11 @@ static const struct xt_table packet_filter = { | |||
33 | /* The work comes in here from netfilter.c. */ | 33 | /* The work comes in here from netfilter.c. */ |
34 | static unsigned int | 34 | static unsigned int |
35 | ip6table_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, | 35 | ip6table_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, |
36 | const struct net_device *in, const struct net_device *out, | 36 | const struct nf_hook_state *state) |
37 | int (*okfn)(struct sk_buff *)) | ||
38 | { | 37 | { |
39 | const struct net *net = dev_net((in != NULL) ? in : out); | 38 | const struct net *net = dev_net(state->in ? state->in : state->out); |
40 | 39 | ||
41 | return ip6t_do_table(skb, ops->hooknum, in, out, | 40 | return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_filter); |
42 | net->ipv6.ip6table_filter); | ||
43 | } | 41 | } |
44 | 42 | ||
45 | static struct nf_hook_ops *filter_ops __read_mostly; | 43 | static struct nf_hook_ops *filter_ops __read_mostly; |
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index 307bbb782d14..b551f5b79fe2 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c | |||
@@ -32,7 +32,7 @@ static const struct xt_table packet_mangler = { | |||
32 | }; | 32 | }; |
33 | 33 | ||
34 | static unsigned int | 34 | static unsigned int |
35 | ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out) | 35 | ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state) |
36 | { | 36 | { |
37 | unsigned int ret; | 37 | unsigned int ret; |
38 | struct in6_addr saddr, daddr; | 38 | struct in6_addr saddr, daddr; |
@@ -57,8 +57,8 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out) | |||
57 | /* flowlabel and prio (includes version, which shouldn't change either */ | 57 | /* flowlabel and prio (includes version, which shouldn't change either */ |
58 | flowlabel = *((u_int32_t *)ipv6_hdr(skb)); | 58 | flowlabel = *((u_int32_t *)ipv6_hdr(skb)); |
59 | 59 | ||
60 | ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, NULL, out, | 60 | ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, state, |
61 | dev_net(out)->ipv6.ip6table_mangle); | 61 | dev_net(state->out)->ipv6.ip6table_mangle); |
62 | 62 | ||
63 | if (ret != NF_DROP && ret != NF_STOLEN && | 63 | if (ret != NF_DROP && ret != NF_STOLEN && |
64 | (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) || | 64 | (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) || |
@@ -77,17 +77,16 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out) | |||
77 | /* The work comes in here from netfilter.c. */ | 77 | /* The work comes in here from netfilter.c. */ |
78 | static unsigned int | 78 | static unsigned int |
79 | ip6table_mangle_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, | 79 | ip6table_mangle_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, |
80 | const struct net_device *in, const struct net_device *out, | 80 | const struct nf_hook_state *state) |
81 | int (*okfn)(struct sk_buff *)) | ||
82 | { | 81 | { |
83 | if (ops->hooknum == NF_INET_LOCAL_OUT) | 82 | if (ops->hooknum == NF_INET_LOCAL_OUT) |
84 | return ip6t_mangle_out(skb, out); | 83 | return ip6t_mangle_out(skb, state); |
85 | if (ops->hooknum == NF_INET_POST_ROUTING) | 84 | if (ops->hooknum == NF_INET_POST_ROUTING) |
86 | return ip6t_do_table(skb, ops->hooknum, in, out, | 85 | return ip6t_do_table(skb, ops->hooknum, state, |
87 | dev_net(out)->ipv6.ip6table_mangle); | 86 | dev_net(state->out)->ipv6.ip6table_mangle); |
88 | /* INPUT/FORWARD */ | 87 | /* INPUT/FORWARD */ |
89 | return ip6t_do_table(skb, ops->hooknum, in, out, | 88 | return ip6t_do_table(skb, ops->hooknum, state, |
90 | dev_net(in)->ipv6.ip6table_mangle); | 89 | dev_net(state->in)->ipv6.ip6table_mangle); |
91 | } | 90 | } |
92 | 91 | ||
93 | static struct nf_hook_ops *mangle_ops __read_mostly; | 92 | static struct nf_hook_ops *mangle_ops __read_mostly; |
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c index b0634ac996b7..c3a7f7af0ed4 100644 --- a/net/ipv6/netfilter/ip6table_nat.c +++ b/net/ipv6/netfilter/ip6table_nat.c | |||
@@ -32,49 +32,40 @@ static const struct xt_table nf_nat_ipv6_table = { | |||
32 | 32 | ||
33 | static unsigned int ip6table_nat_do_chain(const struct nf_hook_ops *ops, | 33 | static unsigned int ip6table_nat_do_chain(const struct nf_hook_ops *ops, |
34 | struct sk_buff *skb, | 34 | struct sk_buff *skb, |
35 | const struct net_device *in, | 35 | const struct nf_hook_state *state, |
36 | const struct net_device *out, | ||
37 | struct nf_conn *ct) | 36 | struct nf_conn *ct) |
38 | { | 37 | { |
39 | struct net *net = nf_ct_net(ct); | 38 | struct net *net = nf_ct_net(ct); |
40 | 39 | ||
41 | return ip6t_do_table(skb, ops->hooknum, in, out, net->ipv6.ip6table_nat); | 40 | return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_nat); |
42 | } | 41 | } |
43 | 42 | ||
44 | static unsigned int ip6table_nat_fn(const struct nf_hook_ops *ops, | 43 | static unsigned int ip6table_nat_fn(const struct nf_hook_ops *ops, |
45 | struct sk_buff *skb, | 44 | struct sk_buff *skb, |
46 | const struct net_device *in, | 45 | const struct nf_hook_state *state) |
47 | const struct net_device *out, | ||
48 | int (*okfn)(struct sk_buff *)) | ||
49 | { | 46 | { |
50 | return nf_nat_ipv6_fn(ops, skb, in, out, ip6table_nat_do_chain); | 47 | return nf_nat_ipv6_fn(ops, skb, state, ip6table_nat_do_chain); |
51 | } | 48 | } |
52 | 49 | ||
53 | static unsigned int ip6table_nat_in(const struct nf_hook_ops *ops, | 50 | static unsigned int ip6table_nat_in(const struct nf_hook_ops *ops, |
54 | struct sk_buff *skb, | 51 | struct sk_buff *skb, |
55 | const struct net_device *in, | 52 | const struct nf_hook_state *state) |
56 | const struct net_device *out, | ||
57 | int (*okfn)(struct sk_buff *)) | ||
58 | { | 53 | { |
59 | return nf_nat_ipv6_in(ops, skb, in, out, ip6table_nat_do_chain); | 54 | return nf_nat_ipv6_in(ops, skb, state, ip6table_nat_do_chain); |
60 | } | 55 | } |
61 | 56 | ||
62 | static unsigned int ip6table_nat_out(const struct nf_hook_ops *ops, | 57 | static unsigned int ip6table_nat_out(const struct nf_hook_ops *ops, |
63 | struct sk_buff *skb, | 58 | struct sk_buff *skb, |
64 | const struct net_device *in, | 59 | const struct nf_hook_state *state) |
65 | const struct net_device *out, | ||
66 | int (*okfn)(struct sk_buff *)) | ||
67 | { | 60 | { |
68 | return nf_nat_ipv6_out(ops, skb, in, out, ip6table_nat_do_chain); | 61 | return nf_nat_ipv6_out(ops, skb, state, ip6table_nat_do_chain); |
69 | } | 62 | } |
70 | 63 | ||
71 | static unsigned int ip6table_nat_local_fn(const struct nf_hook_ops *ops, | 64 | static unsigned int ip6table_nat_local_fn(const struct nf_hook_ops *ops, |
72 | struct sk_buff *skb, | 65 | struct sk_buff *skb, |
73 | const struct net_device *in, | 66 | const struct nf_hook_state *state) |
74 | const struct net_device *out, | ||
75 | int (*okfn)(struct sk_buff *)) | ||
76 | { | 67 | { |
77 | return nf_nat_ipv6_local_fn(ops, skb, in, out, ip6table_nat_do_chain); | 68 | return nf_nat_ipv6_local_fn(ops, skb, state, ip6table_nat_do_chain); |
78 | } | 69 | } |
79 | 70 | ||
80 | static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = { | 71 | static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = { |
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c index 5274740acecc..0b33caad2b69 100644 --- a/net/ipv6/netfilter/ip6table_raw.c +++ b/net/ipv6/netfilter/ip6table_raw.c | |||
@@ -20,13 +20,11 @@ static const struct xt_table packet_raw = { | |||
20 | /* The work comes in here from netfilter.c. */ | 20 | /* The work comes in here from netfilter.c. */ |
21 | static unsigned int | 21 | static unsigned int |
22 | ip6table_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, | 22 | ip6table_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, |
23 | const struct net_device *in, const struct net_device *out, | 23 | const struct nf_hook_state *state) |
24 | int (*okfn)(struct sk_buff *)) | ||
25 | { | 24 | { |
26 | const struct net *net = dev_net((in != NULL) ? in : out); | 25 | const struct net *net = dev_net(state->in ? state->in : state->out); |
27 | 26 | ||
28 | return ip6t_do_table(skb, ops->hooknum, in, out, | 27 | return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_raw); |
29 | net->ipv6.ip6table_raw); | ||
30 | } | 28 | } |
31 | 29 | ||
32 | static struct nf_hook_ops *rawtable_ops __read_mostly; | 30 | static struct nf_hook_ops *rawtable_ops __read_mostly; |
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c index ab3b0219ecfa..fcef83c25f7b 100644 --- a/net/ipv6/netfilter/ip6table_security.c +++ b/net/ipv6/netfilter/ip6table_security.c | |||
@@ -37,13 +37,11 @@ static const struct xt_table security_table = { | |||
37 | 37 | ||
38 | static unsigned int | 38 | static unsigned int |
39 | ip6table_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, | 39 | ip6table_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, |
40 | const struct net_device *in, | 40 | const struct nf_hook_state *state) |
41 | const struct net_device *out, | ||
42 | int (*okfn)(struct sk_buff *)) | ||
43 | { | 41 | { |
44 | const struct net *net = dev_net((in != NULL) ? in : out); | 42 | const struct net *net = dev_net(state->in ? state->in : state->out); |
45 | 43 | ||
46 | return ip6t_do_table(skb, ops->hooknum, in, out, | 44 | return ip6t_do_table(skb, ops->hooknum, state, |
47 | net->ipv6.ip6table_security); | 45 | net->ipv6.ip6table_security); |
48 | } | 46 | } |
49 | 47 | ||
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index fba91c6fc7ca..4ba0c34c627b 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | |||
@@ -97,9 +97,7 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, | |||
97 | 97 | ||
98 | static unsigned int ipv6_helper(const struct nf_hook_ops *ops, | 98 | static unsigned int ipv6_helper(const struct nf_hook_ops *ops, |
99 | struct sk_buff *skb, | 99 | struct sk_buff *skb, |
100 | const struct net_device *in, | 100 | const struct nf_hook_state *state) |
101 | const struct net_device *out, | ||
102 | int (*okfn)(struct sk_buff *)) | ||
103 | { | 101 | { |
104 | struct nf_conn *ct; | 102 | struct nf_conn *ct; |
105 | const struct nf_conn_help *help; | 103 | const struct nf_conn_help *help; |
@@ -135,9 +133,7 @@ static unsigned int ipv6_helper(const struct nf_hook_ops *ops, | |||
135 | 133 | ||
136 | static unsigned int ipv6_confirm(const struct nf_hook_ops *ops, | 134 | static unsigned int ipv6_confirm(const struct nf_hook_ops *ops, |
137 | struct sk_buff *skb, | 135 | struct sk_buff *skb, |
138 | const struct net_device *in, | 136 | const struct nf_hook_state *state) |
139 | const struct net_device *out, | ||
140 | int (*okfn)(struct sk_buff *)) | ||
141 | { | 137 | { |
142 | struct nf_conn *ct; | 138 | struct nf_conn *ct; |
143 | enum ip_conntrack_info ctinfo; | 139 | enum ip_conntrack_info ctinfo; |
@@ -171,25 +167,21 @@ out: | |||
171 | 167 | ||
172 | static unsigned int ipv6_conntrack_in(const struct nf_hook_ops *ops, | 168 | static unsigned int ipv6_conntrack_in(const struct nf_hook_ops *ops, |
173 | struct sk_buff *skb, | 169 | struct sk_buff *skb, |
174 | const struct net_device *in, | 170 | const struct nf_hook_state *state) |
175 | const struct net_device *out, | ||
176 | int (*okfn)(struct sk_buff *)) | ||
177 | { | 171 | { |
178 | return nf_conntrack_in(dev_net(in), PF_INET6, ops->hooknum, skb); | 172 | return nf_conntrack_in(dev_net(state->in), PF_INET6, ops->hooknum, skb); |
179 | } | 173 | } |
180 | 174 | ||
181 | static unsigned int ipv6_conntrack_local(const struct nf_hook_ops *ops, | 175 | static unsigned int ipv6_conntrack_local(const struct nf_hook_ops *ops, |
182 | struct sk_buff *skb, | 176 | struct sk_buff *skb, |
183 | const struct net_device *in, | 177 | const struct nf_hook_state *state) |
184 | const struct net_device *out, | ||
185 | int (*okfn)(struct sk_buff *)) | ||
186 | { | 178 | { |
187 | /* root is playing with raw sockets. */ | 179 | /* root is playing with raw sockets. */ |
188 | if (skb->len < sizeof(struct ipv6hdr)) { | 180 | if (skb->len < sizeof(struct ipv6hdr)) { |
189 | net_notice_ratelimited("ipv6_conntrack_local: packet too short\n"); | 181 | net_notice_ratelimited("ipv6_conntrack_local: packet too short\n"); |
190 | return NF_ACCEPT; | 182 | return NF_ACCEPT; |
191 | } | 183 | } |
192 | return nf_conntrack_in(dev_net(out), PF_INET6, ops->hooknum, skb); | 184 | return nf_conntrack_in(dev_net(state->out), PF_INET6, ops->hooknum, skb); |
193 | } | 185 | } |
194 | 186 | ||
195 | static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = { | 187 | static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = { |
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c index e70382e4dfb5..a45db0b4785c 100644 --- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c +++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c | |||
@@ -54,9 +54,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, | |||
54 | 54 | ||
55 | static unsigned int ipv6_defrag(const struct nf_hook_ops *ops, | 55 | static unsigned int ipv6_defrag(const struct nf_hook_ops *ops, |
56 | struct sk_buff *skb, | 56 | struct sk_buff *skb, |
57 | const struct net_device *in, | 57 | const struct nf_hook_state *state) |
58 | const struct net_device *out, | ||
59 | int (*okfn)(struct sk_buff *)) | ||
60 | { | 58 | { |
61 | struct sk_buff *reasm; | 59 | struct sk_buff *reasm; |
62 | 60 | ||
@@ -77,9 +75,9 @@ static unsigned int ipv6_defrag(const struct nf_hook_ops *ops, | |||
77 | 75 | ||
78 | nf_ct_frag6_consume_orig(reasm); | 76 | nf_ct_frag6_consume_orig(reasm); |
79 | 77 | ||
80 | NF_HOOK_THRESH(NFPROTO_IPV6, ops->hooknum, reasm, | 78 | NF_HOOK_THRESH(NFPROTO_IPV6, ops->hooknum, state->sk, reasm, |
81 | (struct net_device *) in, (struct net_device *) out, | 79 | state->in, state->out, |
82 | okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1); | 80 | state->okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1); |
83 | 81 | ||
84 | return NF_STOLEN; | 82 | return NF_STOLEN; |
85 | } | 83 | } |
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c index c5812e1c1ffb..e76900e0aa92 100644 --- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c | |||
@@ -263,11 +263,10 @@ EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation); | |||
263 | 263 | ||
264 | unsigned int | 264 | unsigned int |
265 | nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, | 265 | nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, |
266 | const struct net_device *in, const struct net_device *out, | 266 | const struct nf_hook_state *state, |
267 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, | 267 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, |
268 | struct sk_buff *skb, | 268 | struct sk_buff *skb, |
269 | const struct net_device *in, | 269 | const struct nf_hook_state *state, |
270 | const struct net_device *out, | ||
271 | struct nf_conn *ct)) | 270 | struct nf_conn *ct)) |
272 | { | 271 | { |
273 | struct nf_conn *ct; | 272 | struct nf_conn *ct; |
@@ -318,7 +317,7 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
318 | if (!nf_nat_initialized(ct, maniptype)) { | 317 | if (!nf_nat_initialized(ct, maniptype)) { |
319 | unsigned int ret; | 318 | unsigned int ret; |
320 | 319 | ||
321 | ret = do_chain(ops, skb, in, out, ct); | 320 | ret = do_chain(ops, skb, state, ct); |
322 | if (ret != NF_ACCEPT) | 321 | if (ret != NF_ACCEPT) |
323 | return ret; | 322 | return ret; |
324 | 323 | ||
@@ -332,7 +331,7 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
332 | pr_debug("Already setup manip %s for ct %p\n", | 331 | pr_debug("Already setup manip %s for ct %p\n", |
333 | maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", | 332 | maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", |
334 | ct); | 333 | ct); |
335 | if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) | 334 | if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out)) |
336 | goto oif_changed; | 335 | goto oif_changed; |
337 | } | 336 | } |
338 | break; | 337 | break; |
@@ -341,7 +340,7 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
341 | /* ESTABLISHED */ | 340 | /* ESTABLISHED */ |
342 | NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || | 341 | NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || |
343 | ctinfo == IP_CT_ESTABLISHED_REPLY); | 342 | ctinfo == IP_CT_ESTABLISHED_REPLY); |
344 | if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) | 343 | if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out)) |
345 | goto oif_changed; | 344 | goto oif_changed; |
346 | } | 345 | } |
347 | 346 | ||
@@ -355,17 +354,16 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv6_fn); | |||
355 | 354 | ||
356 | unsigned int | 355 | unsigned int |
357 | nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb, | 356 | nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb, |
358 | const struct net_device *in, const struct net_device *out, | 357 | const struct nf_hook_state *state, |
359 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, | 358 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, |
360 | struct sk_buff *skb, | 359 | struct sk_buff *skb, |
361 | const struct net_device *in, | 360 | const struct nf_hook_state *state, |
362 | const struct net_device *out, | ||
363 | struct nf_conn *ct)) | 361 | struct nf_conn *ct)) |
364 | { | 362 | { |
365 | unsigned int ret; | 363 | unsigned int ret; |
366 | struct in6_addr daddr = ipv6_hdr(skb)->daddr; | 364 | struct in6_addr daddr = ipv6_hdr(skb)->daddr; |
367 | 365 | ||
368 | ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain); | 366 | ret = nf_nat_ipv6_fn(ops, skb, state, do_chain); |
369 | if (ret != NF_DROP && ret != NF_STOLEN && | 367 | if (ret != NF_DROP && ret != NF_STOLEN && |
370 | ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr)) | 368 | ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr)) |
371 | skb_dst_drop(skb); | 369 | skb_dst_drop(skb); |
@@ -376,11 +374,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv6_in); | |||
376 | 374 | ||
377 | unsigned int | 375 | unsigned int |
378 | nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb, | 376 | nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb, |
379 | const struct net_device *in, const struct net_device *out, | 377 | const struct nf_hook_state *state, |
380 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, | 378 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, |
381 | struct sk_buff *skb, | 379 | struct sk_buff *skb, |
382 | const struct net_device *in, | 380 | const struct nf_hook_state *state, |
383 | const struct net_device *out, | ||
384 | struct nf_conn *ct)) | 381 | struct nf_conn *ct)) |
385 | { | 382 | { |
386 | #ifdef CONFIG_XFRM | 383 | #ifdef CONFIG_XFRM |
@@ -394,7 +391,7 @@ nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
394 | if (skb->len < sizeof(struct ipv6hdr)) | 391 | if (skb->len < sizeof(struct ipv6hdr)) |
395 | return NF_ACCEPT; | 392 | return NF_ACCEPT; |
396 | 393 | ||
397 | ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain); | 394 | ret = nf_nat_ipv6_fn(ops, skb, state, do_chain); |
398 | #ifdef CONFIG_XFRM | 395 | #ifdef CONFIG_XFRM |
399 | if (ret != NF_DROP && ret != NF_STOLEN && | 396 | if (ret != NF_DROP && ret != NF_STOLEN && |
400 | !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && | 397 | !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && |
@@ -418,11 +415,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv6_out); | |||
418 | 415 | ||
419 | unsigned int | 416 | unsigned int |
420 | nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, | 417 | nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, |
421 | const struct net_device *in, const struct net_device *out, | 418 | const struct nf_hook_state *state, |
422 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, | 419 | unsigned int (*do_chain)(const struct nf_hook_ops *ops, |
423 | struct sk_buff *skb, | 420 | struct sk_buff *skb, |
424 | const struct net_device *in, | 421 | const struct nf_hook_state *state, |
425 | const struct net_device *out, | ||
426 | struct nf_conn *ct)) | 422 | struct nf_conn *ct)) |
427 | { | 423 | { |
428 | const struct nf_conn *ct; | 424 | const struct nf_conn *ct; |
@@ -434,7 +430,7 @@ nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
434 | if (skb->len < sizeof(struct ipv6hdr)) | 430 | if (skb->len < sizeof(struct ipv6hdr)) |
435 | return NF_ACCEPT; | 431 | return NF_ACCEPT; |
436 | 432 | ||
437 | ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain); | 433 | ret = nf_nat_ipv6_fn(ops, skb, state, do_chain); |
438 | if (ret != NF_DROP && ret != NF_STOLEN && | 434 | if (ret != NF_DROP && ret != NF_STOLEN && |
439 | (ct = nf_ct_get(skb, &ctinfo)) != NULL) { | 435 | (ct = nf_ct_get(skb, &ctinfo)) != NULL) { |
440 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | 436 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); |
diff --git a/net/ipv6/netfilter/nf_tables_ipv6.c b/net/ipv6/netfilter/nf_tables_ipv6.c index 0d812b31277d..c8148ba76d1a 100644 --- a/net/ipv6/netfilter/nf_tables_ipv6.c +++ b/net/ipv6/netfilter/nf_tables_ipv6.c | |||
@@ -18,14 +18,12 @@ | |||
18 | 18 | ||
19 | static unsigned int nft_do_chain_ipv6(const struct nf_hook_ops *ops, | 19 | static unsigned int nft_do_chain_ipv6(const struct nf_hook_ops *ops, |
20 | struct sk_buff *skb, | 20 | struct sk_buff *skb, |
21 | const struct net_device *in, | 21 | const struct nf_hook_state *state) |
22 | const struct net_device *out, | ||
23 | int (*okfn)(struct sk_buff *)) | ||
24 | { | 22 | { |
25 | struct nft_pktinfo pkt; | 23 | struct nft_pktinfo pkt; |
26 | 24 | ||
27 | /* malformed packet, drop it */ | 25 | /* malformed packet, drop it */ |
28 | if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0) | 26 | if (nft_set_pktinfo_ipv6(&pkt, ops, skb, state) < 0) |
29 | return NF_DROP; | 27 | return NF_DROP; |
30 | 28 | ||
31 | return nft_do_chain(&pkt, ops); | 29 | return nft_do_chain(&pkt, ops); |
@@ -33,9 +31,7 @@ static unsigned int nft_do_chain_ipv6(const struct nf_hook_ops *ops, | |||
33 | 31 | ||
34 | static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops, | 32 | static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops, |
35 | struct sk_buff *skb, | 33 | struct sk_buff *skb, |
36 | const struct net_device *in, | 34 | const struct nf_hook_state *state) |
37 | const struct net_device *out, | ||
38 | int (*okfn)(struct sk_buff *)) | ||
39 | { | 35 | { |
40 | if (unlikely(skb->len < sizeof(struct ipv6hdr))) { | 36 | if (unlikely(skb->len < sizeof(struct ipv6hdr))) { |
41 | if (net_ratelimit()) | 37 | if (net_ratelimit()) |
@@ -44,7 +40,7 @@ static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops, | |||
44 | return NF_ACCEPT; | 40 | return NF_ACCEPT; |
45 | } | 41 | } |
46 | 42 | ||
47 | return nft_do_chain_ipv6(ops, skb, in, out, okfn); | 43 | return nft_do_chain_ipv6(ops, skb, state); |
48 | } | 44 | } |
49 | 45 | ||
50 | struct nft_af_info nft_af_ipv6 __read_mostly = { | 46 | struct nft_af_info nft_af_ipv6 __read_mostly = { |
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c index 1c4b75dd425b..951bb458b7bd 100644 --- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c +++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c | |||
@@ -26,51 +26,42 @@ | |||
26 | 26 | ||
27 | static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops, | 27 | static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops, |
28 | struct sk_buff *skb, | 28 | struct sk_buff *skb, |
29 | const struct net_device *in, | 29 | const struct nf_hook_state *state, |
30 | const struct net_device *out, | ||
31 | struct nf_conn *ct) | 30 | struct nf_conn *ct) |
32 | { | 31 | { |
33 | struct nft_pktinfo pkt; | 32 | struct nft_pktinfo pkt; |
34 | 33 | ||
35 | nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out); | 34 | nft_set_pktinfo_ipv6(&pkt, ops, skb, state); |
36 | 35 | ||
37 | return nft_do_chain(&pkt, ops); | 36 | return nft_do_chain(&pkt, ops); |
38 | } | 37 | } |
39 | 38 | ||
40 | static unsigned int nft_nat_ipv6_fn(const struct nf_hook_ops *ops, | 39 | static unsigned int nft_nat_ipv6_fn(const struct nf_hook_ops *ops, |
41 | struct sk_buff *skb, | 40 | struct sk_buff *skb, |
42 | const struct net_device *in, | 41 | const struct nf_hook_state *state) |
43 | const struct net_device *out, | ||
44 | int (*okfn)(struct sk_buff *)) | ||
45 | { | 42 | { |
46 | return nf_nat_ipv6_fn(ops, skb, in, out, nft_nat_do_chain); | 43 | return nf_nat_ipv6_fn(ops, skb, state, nft_nat_do_chain); |
47 | } | 44 | } |
48 | 45 | ||
49 | static unsigned int nft_nat_ipv6_in(const struct nf_hook_ops *ops, | 46 | static unsigned int nft_nat_ipv6_in(const struct nf_hook_ops *ops, |
50 | struct sk_buff *skb, | 47 | struct sk_buff *skb, |
51 | const struct net_device *in, | 48 | const struct nf_hook_state *state) |
52 | const struct net_device *out, | ||
53 | int (*okfn)(struct sk_buff *)) | ||
54 | { | 49 | { |
55 | return nf_nat_ipv6_in(ops, skb, in, out, nft_nat_do_chain); | 50 | return nf_nat_ipv6_in(ops, skb, state, nft_nat_do_chain); |
56 | } | 51 | } |
57 | 52 | ||
58 | static unsigned int nft_nat_ipv6_out(const struct nf_hook_ops *ops, | 53 | static unsigned int nft_nat_ipv6_out(const struct nf_hook_ops *ops, |
59 | struct sk_buff *skb, | 54 | struct sk_buff *skb, |
60 | const struct net_device *in, | 55 | const struct nf_hook_state *state) |
61 | const struct net_device *out, | ||
62 | int (*okfn)(struct sk_buff *)) | ||
63 | { | 56 | { |
64 | return nf_nat_ipv6_out(ops, skb, in, out, nft_nat_do_chain); | 57 | return nf_nat_ipv6_out(ops, skb, state, nft_nat_do_chain); |
65 | } | 58 | } |
66 | 59 | ||
67 | static unsigned int nft_nat_ipv6_local_fn(const struct nf_hook_ops *ops, | 60 | static unsigned int nft_nat_ipv6_local_fn(const struct nf_hook_ops *ops, |
68 | struct sk_buff *skb, | 61 | struct sk_buff *skb, |
69 | const struct net_device *in, | 62 | const struct nf_hook_state *state) |
70 | const struct net_device *out, | ||
71 | int (*okfn)(struct sk_buff *)) | ||
72 | { | 63 | { |
73 | return nf_nat_ipv6_local_fn(ops, skb, in, out, nft_nat_do_chain); | 64 | return nf_nat_ipv6_local_fn(ops, skb, state, nft_nat_do_chain); |
74 | } | 65 | } |
75 | 66 | ||
76 | static const struct nf_chain_type nft_chain_nat_ipv6 = { | 67 | static const struct nf_chain_type nft_chain_nat_ipv6 = { |
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c index 42031299585e..0dafdaac5e17 100644 --- a/net/ipv6/netfilter/nft_chain_route_ipv6.c +++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c | |||
@@ -24,9 +24,7 @@ | |||
24 | 24 | ||
25 | static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops, | 25 | static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops, |
26 | struct sk_buff *skb, | 26 | struct sk_buff *skb, |
27 | const struct net_device *in, | 27 | const struct nf_hook_state *state) |
28 | const struct net_device *out, | ||
29 | int (*okfn)(struct sk_buff *)) | ||
30 | { | 28 | { |
31 | unsigned int ret; | 29 | unsigned int ret; |
32 | struct nft_pktinfo pkt; | 30 | struct nft_pktinfo pkt; |
@@ -35,7 +33,7 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops, | |||
35 | u32 mark, flowlabel; | 33 | u32 mark, flowlabel; |
36 | 34 | ||
37 | /* malformed packet, drop it */ | 35 | /* malformed packet, drop it */ |
38 | if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0) | 36 | if (nft_set_pktinfo_ipv6(&pkt, ops, skb, state) < 0) |
39 | return NF_DROP; | 37 | return NF_DROP; |
40 | 38 | ||
41 | /* save source/dest address, mark, hoplimit, flowlabel, priority */ | 39 | /* save source/dest address, mark, hoplimit, flowlabel, priority */ |
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index 4016a6ef9d61..85892af57364 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c | |||
@@ -136,7 +136,7 @@ int ip6_dst_hoplimit(struct dst_entry *dst) | |||
136 | EXPORT_SYMBOL(ip6_dst_hoplimit); | 136 | EXPORT_SYMBOL(ip6_dst_hoplimit); |
137 | #endif | 137 | #endif |
138 | 138 | ||
139 | int __ip6_local_out(struct sk_buff *skb) | 139 | static int __ip6_local_out_sk(struct sock *sk, struct sk_buff *skb) |
140 | { | 140 | { |
141 | int len; | 141 | int len; |
142 | 142 | ||
@@ -146,19 +146,30 @@ int __ip6_local_out(struct sk_buff *skb) | |||
146 | ipv6_hdr(skb)->payload_len = htons(len); | 146 | ipv6_hdr(skb)->payload_len = htons(len); |
147 | IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); | 147 | IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); |
148 | 148 | ||
149 | return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, | 149 | return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb, |
150 | skb_dst(skb)->dev, dst_output); | 150 | NULL, skb_dst(skb)->dev, dst_output_sk); |
151 | } | ||
152 | |||
153 | int __ip6_local_out(struct sk_buff *skb) | ||
154 | { | ||
155 | return __ip6_local_out_sk(skb->sk, skb); | ||
151 | } | 156 | } |
152 | EXPORT_SYMBOL_GPL(__ip6_local_out); | 157 | EXPORT_SYMBOL_GPL(__ip6_local_out); |
153 | 158 | ||
154 | int ip6_local_out(struct sk_buff *skb) | 159 | int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb) |
155 | { | 160 | { |
156 | int err; | 161 | int err; |
157 | 162 | ||
158 | err = __ip6_local_out(skb); | 163 | err = __ip6_local_out_sk(sk, skb); |
159 | if (likely(err == 1)) | 164 | if (likely(err == 1)) |
160 | err = dst_output(skb); | 165 | err = dst_output_sk(sk, skb); |
161 | 166 | ||
162 | return err; | 167 | return err; |
163 | } | 168 | } |
169 | EXPORT_SYMBOL_GPL(ip6_local_out_sk); | ||
170 | |||
171 | int ip6_local_out(struct sk_buff *skb) | ||
172 | { | ||
173 | return ip6_local_out_sk(skb->sk, skb); | ||
174 | } | ||
164 | EXPORT_SYMBOL_GPL(ip6_local_out); | 175 | EXPORT_SYMBOL_GPL(ip6_local_out); |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 79ccdb4c1b33..8072bd4139b7 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -652,8 +652,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length, | |||
652 | goto error_fault; | 652 | goto error_fault; |
653 | 653 | ||
654 | IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); | 654 | IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); |
655 | err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, | 655 | err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb, |
656 | rt->dst.dev, dst_output); | 656 | NULL, rt->dst.dev, dst_output_sk); |
657 | if (err > 0) | 657 | if (err > 0) |
658 | err = net_xmit_errno(err); | 658 | err = net_xmit_errno(err); |
659 | if (err) | 659 | if (err) |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index e6b9f51b15e8..6cf2026a9cea 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -1076,7 +1076,6 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev) | |||
1076 | if (dev->mtu < IPV6_MIN_MTU) | 1076 | if (dev->mtu < IPV6_MIN_MTU) |
1077 | dev->mtu = IPV6_MIN_MTU; | 1077 | dev->mtu = IPV6_MIN_MTU; |
1078 | } | 1078 | } |
1079 | dev->iflink = tunnel->parms.link; | ||
1080 | } | 1079 | } |
1081 | 1080 | ||
1082 | static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p) | 1081 | static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p) |
@@ -1336,6 +1335,7 @@ static const struct net_device_ops ipip6_netdev_ops = { | |||
1336 | .ndo_do_ioctl = ipip6_tunnel_ioctl, | 1335 | .ndo_do_ioctl = ipip6_tunnel_ioctl, |
1337 | .ndo_change_mtu = ipip6_tunnel_change_mtu, | 1336 | .ndo_change_mtu = ipip6_tunnel_change_mtu, |
1338 | .ndo_get_stats64 = ip_tunnel_get_stats64, | 1337 | .ndo_get_stats64 = ip_tunnel_get_stats64, |
1338 | .ndo_get_iflink = ip_tunnel_get_iflink, | ||
1339 | }; | 1339 | }; |
1340 | 1340 | ||
1341 | static void ipip6_dev_free(struct net_device *dev) | 1341 | static void ipip6_dev_free(struct net_device *dev) |
@@ -1366,7 +1366,6 @@ static void ipip6_tunnel_setup(struct net_device *dev) | |||
1366 | dev->mtu = ETH_DATA_LEN - t_hlen; | 1366 | dev->mtu = ETH_DATA_LEN - t_hlen; |
1367 | dev->flags = IFF_NOARP; | 1367 | dev->flags = IFF_NOARP; |
1368 | netif_keep_dst(dev); | 1368 | netif_keep_dst(dev); |
1369 | dev->iflink = 0; | ||
1370 | dev->addr_len = 4; | 1369 | dev->addr_len = 4; |
1371 | dev->features |= NETIF_F_LLTX; | 1370 | dev->features |= NETIF_F_LLTX; |
1372 | dev->features |= SIT_FEATURES; | 1371 | dev->features |= SIT_FEATURES; |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 7cdad8401434..f73a97f6e68e 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -1348,6 +1348,15 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, | |||
1348 | TCP_SKB_CB(skb)->sacked = 0; | 1348 | TCP_SKB_CB(skb)->sacked = 0; |
1349 | } | 1349 | } |
1350 | 1350 | ||
1351 | static void tcp_v6_restore_cb(struct sk_buff *skb) | ||
1352 | { | ||
1353 | /* We need to move header back to the beginning if xfrm6_policy_check() | ||
1354 | * and tcp_v6_fill_cb() are going to be called again. | ||
1355 | */ | ||
1356 | memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6, | ||
1357 | sizeof(struct inet6_skb_parm)); | ||
1358 | } | ||
1359 | |||
1351 | static int tcp_v6_rcv(struct sk_buff *skb) | 1360 | static int tcp_v6_rcv(struct sk_buff *skb) |
1352 | { | 1361 | { |
1353 | const struct tcphdr *th; | 1362 | const struct tcphdr *th; |
@@ -1480,6 +1489,7 @@ do_time_wait: | |||
1480 | inet_twsk_deschedule(tw, &tcp_death_row); | 1489 | inet_twsk_deschedule(tw, &tcp_death_row); |
1481 | inet_twsk_put(tw); | 1490 | inet_twsk_put(tw); |
1482 | sk = sk2; | 1491 | sk = sk2; |
1492 | tcp_v6_restore_cb(skb); | ||
1483 | goto process; | 1493 | goto process; |
1484 | } | 1494 | } |
1485 | /* Fall through to ACK */ | 1495 | /* Fall through to ACK */ |
@@ -1488,6 +1498,7 @@ do_time_wait: | |||
1488 | tcp_v6_timewait_ack(sk, skb); | 1498 | tcp_v6_timewait_ack(sk, skb); |
1489 | break; | 1499 | break; |
1490 | case TCP_TW_RST: | 1500 | case TCP_TW_RST: |
1501 | tcp_v6_restore_cb(skb); | ||
1491 | goto no_tcp_socket; | 1502 | goto no_tcp_socket; |
1492 | case TCP_TW_SUCCESS: | 1503 | case TCP_TW_SUCCESS: |
1493 | ; | 1504 | ; |
@@ -1522,7 +1533,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb) | |||
1522 | skb->sk = sk; | 1533 | skb->sk = sk; |
1523 | skb->destructor = sock_edemux; | 1534 | skb->destructor = sock_edemux; |
1524 | if (sk_fullsock(sk)) { | 1535 | if (sk_fullsock(sk)) { |
1525 | struct dst_entry *dst = sk->sk_rx_dst; | 1536 | struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); |
1526 | 1537 | ||
1527 | if (dst) | 1538 | if (dst) |
1528 | dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); | 1539 | dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); |
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index f48fbe4d16f5..74bd17882a2f 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c | |||
@@ -42,7 +42,8 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async) | |||
42 | ipv6_hdr(skb)->payload_len = htons(skb->len); | 42 | ipv6_hdr(skb)->payload_len = htons(skb->len); |
43 | __skb_push(skb, skb->data - skb_network_header(skb)); | 43 | __skb_push(skb, skb->data - skb_network_header(skb)); |
44 | 44 | ||
45 | NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, | 45 | NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, NULL, skb, |
46 | skb->dev, NULL, | ||
46 | ip6_rcv_finish); | 47 | ip6_rcv_finish); |
47 | return -1; | 48 | return -1; |
48 | } | 49 | } |
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 010f8bd2d577..09c76a7b474d 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c | |||
@@ -120,7 +120,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) | |||
120 | } | 120 | } |
121 | EXPORT_SYMBOL(xfrm6_prepare_output); | 121 | EXPORT_SYMBOL(xfrm6_prepare_output); |
122 | 122 | ||
123 | int xfrm6_output_finish(struct sk_buff *skb) | 123 | int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb) |
124 | { | 124 | { |
125 | memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); | 125 | memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); |
126 | 126 | ||
@@ -128,10 +128,10 @@ int xfrm6_output_finish(struct sk_buff *skb) | |||
128 | IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; | 128 | IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; |
129 | #endif | 129 | #endif |
130 | 130 | ||
131 | return xfrm_output(skb); | 131 | return xfrm_output(sk, skb); |
132 | } | 132 | } |
133 | 133 | ||
134 | static int __xfrm6_output(struct sk_buff *skb) | 134 | static int __xfrm6_output(struct sock *sk, struct sk_buff *skb) |
135 | { | 135 | { |
136 | struct dst_entry *dst = skb_dst(skb); | 136 | struct dst_entry *dst = skb_dst(skb); |
137 | struct xfrm_state *x = dst->xfrm; | 137 | struct xfrm_state *x = dst->xfrm; |
@@ -140,7 +140,7 @@ static int __xfrm6_output(struct sk_buff *skb) | |||
140 | #ifdef CONFIG_NETFILTER | 140 | #ifdef CONFIG_NETFILTER |
141 | if (!x) { | 141 | if (!x) { |
142 | IP6CB(skb)->flags |= IP6SKB_REROUTED; | 142 | IP6CB(skb)->flags |= IP6SKB_REROUTED; |
143 | return dst_output(skb); | 143 | return dst_output_sk(sk, skb); |
144 | } | 144 | } |
145 | #endif | 145 | #endif |
146 | 146 | ||
@@ -160,14 +160,15 @@ static int __xfrm6_output(struct sk_buff *skb) | |||
160 | if (x->props.mode == XFRM_MODE_TUNNEL && | 160 | if (x->props.mode == XFRM_MODE_TUNNEL && |
161 | ((skb->len > mtu && !skb_is_gso(skb)) || | 161 | ((skb->len > mtu && !skb_is_gso(skb)) || |
162 | dst_allfrag(skb_dst(skb)))) { | 162 | dst_allfrag(skb_dst(skb)))) { |
163 | return ip6_fragment(skb, x->outer_mode->afinfo->output_finish); | 163 | return ip6_fragment(sk, skb, |
164 | x->outer_mode->afinfo->output_finish); | ||
164 | } | 165 | } |
165 | return x->outer_mode->afinfo->output_finish(skb); | 166 | return x->outer_mode->afinfo->output_finish(sk, skb); |
166 | } | 167 | } |
167 | 168 | ||
168 | int xfrm6_output(struct sock *sk, struct sk_buff *skb) | 169 | int xfrm6_output(struct sock *sk, struct sk_buff *skb) |
169 | { | 170 | { |
170 | return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, | 171 | return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb, |
171 | NULL, skb_dst(skb)->dev, __xfrm6_output, | 172 | NULL, skb_dst(skb)->dev, __xfrm6_output, |
172 | !(IP6CB(skb)->flags & IP6SKB_REROUTED)); | 173 | !(IP6CB(skb)->flags & IP6SKB_REROUTED)); |
173 | } | 174 | } |
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 94b4c898a116..6daa52a18d40 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -1114,10 +1114,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg, | |||
1114 | noblock, &err); | 1114 | noblock, &err); |
1115 | else | 1115 | else |
1116 | skb = sock_alloc_send_skb(sk, len, noblock, &err); | 1116 | skb = sock_alloc_send_skb(sk, len, noblock, &err); |
1117 | if (!skb) { | 1117 | if (!skb) |
1118 | err = -ENOMEM; | ||
1119 | goto out; | 1118 | goto out; |
1120 | } | ||
1121 | if (iucv->transport == AF_IUCV_TRANS_HIPER) | 1119 | if (iucv->transport == AF_IUCV_TRANS_HIPER) |
1122 | skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); | 1120 | skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); |
1123 | if (memcpy_from_msg(skb_put(skb, len), msg, len)) { | 1121 | if (memcpy_from_msg(skb_put(skb, len), msg, len)) { |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 895348e44c7d..a29a504492af 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -1871,6 +1871,7 @@ static int __init l2tp_init(void) | |||
1871 | l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0); | 1871 | l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0); |
1872 | if (!l2tp_wq) { | 1872 | if (!l2tp_wq) { |
1873 | pr_err("alloc_workqueue failed\n"); | 1873 | pr_err("alloc_workqueue failed\n"); |
1874 | unregister_pernet_device(&l2tp_net_ops); | ||
1874 | rc = -ENOMEM; | 1875 | rc = -ENOMEM; |
1875 | goto out; | 1876 | goto out; |
1876 | } | 1877 | } |
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 2c090c507391..5c564a68fb50 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c | |||
@@ -49,8 +49,6 @@ static void ieee80211_free_tid_rx(struct rcu_head *h) | |||
49 | container_of(h, struct tid_ampdu_rx, rcu_head); | 49 | container_of(h, struct tid_ampdu_rx, rcu_head); |
50 | int i; | 50 | int i; |
51 | 51 | ||
52 | del_timer_sync(&tid_rx->reorder_timer); | ||
53 | |||
54 | for (i = 0; i < tid_rx->buf_size; i++) | 52 | for (i = 0; i < tid_rx->buf_size; i++) |
55 | __skb_queue_purge(&tid_rx->reorder_buf[i]); | 53 | __skb_queue_purge(&tid_rx->reorder_buf[i]); |
56 | kfree(tid_rx->reorder_buf); | 54 | kfree(tid_rx->reorder_buf); |
@@ -93,6 +91,12 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | |||
93 | 91 | ||
94 | del_timer_sync(&tid_rx->session_timer); | 92 | del_timer_sync(&tid_rx->session_timer); |
95 | 93 | ||
94 | /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */ | ||
95 | spin_lock_bh(&tid_rx->reorder_lock); | ||
96 | tid_rx->removed = true; | ||
97 | spin_unlock_bh(&tid_rx->reorder_lock); | ||
98 | del_timer_sync(&tid_rx->reorder_timer); | ||
99 | |||
96 | call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); | 100 | call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); |
97 | } | 101 | } |
98 | 102 | ||
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 4f7b922cfda4..2cd02278d4d4 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -873,9 +873,10 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, | |||
873 | 873 | ||
874 | set_release_timer: | 874 | set_release_timer: |
875 | 875 | ||
876 | mod_timer(&tid_agg_rx->reorder_timer, | 876 | if (!tid_agg_rx->removed) |
877 | tid_agg_rx->reorder_time[j] + 1 + | 877 | mod_timer(&tid_agg_rx->reorder_timer, |
878 | HT_RX_REORDER_BUF_TIMEOUT); | 878 | tid_agg_rx->reorder_time[j] + 1 + |
879 | HT_RX_REORDER_BUF_TIMEOUT); | ||
879 | } else { | 880 | } else { |
880 | del_timer(&tid_agg_rx->reorder_timer); | 881 | del_timer(&tid_agg_rx->reorder_timer); |
881 | } | 882 | } |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 248f56e59ebc..7e2fa4018d41 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -175,6 +175,7 @@ struct tid_ampdu_tx { | |||
175 | * @reorder_lock: serializes access to reorder buffer, see below. | 175 | * @reorder_lock: serializes access to reorder buffer, see below. |
176 | * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and | 176 | * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and |
177 | * and ssn. | 177 | * and ssn. |
178 | * @removed: this session is removed (but might have been found due to RCU) | ||
178 | * | 179 | * |
179 | * This structure's lifetime is managed by RCU, assignments to | 180 | * This structure's lifetime is managed by RCU, assignments to |
180 | * the array holding it must hold the aggregation mutex. | 181 | * the array holding it must hold the aggregation mutex. |
@@ -199,6 +200,7 @@ struct tid_ampdu_rx { | |||
199 | u16 timeout; | 200 | u16 timeout; |
200 | u8 dialog_token; | 201 | u8 dialog_token; |
201 | bool auto_seq; | 202 | bool auto_seq; |
203 | bool removed; | ||
202 | }; | 204 | }; |
203 | 205 | ||
204 | /** | 206 | /** |
diff --git a/net/netfilter/core.c b/net/netfilter/core.c index fea9ef566427..e6163017c42d 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c | |||
@@ -120,12 +120,8 @@ EXPORT_SYMBOL(nf_unregister_hooks); | |||
120 | 120 | ||
121 | unsigned int nf_iterate(struct list_head *head, | 121 | unsigned int nf_iterate(struct list_head *head, |
122 | struct sk_buff *skb, | 122 | struct sk_buff *skb, |
123 | unsigned int hook, | 123 | struct nf_hook_state *state, |
124 | const struct net_device *indev, | 124 | struct nf_hook_ops **elemp) |
125 | const struct net_device *outdev, | ||
126 | struct nf_hook_ops **elemp, | ||
127 | int (*okfn)(struct sk_buff *), | ||
128 | int hook_thresh) | ||
129 | { | 125 | { |
130 | unsigned int verdict; | 126 | unsigned int verdict; |
131 | 127 | ||
@@ -134,19 +130,19 @@ unsigned int nf_iterate(struct list_head *head, | |||
134 | * function because of risk of continuing from deleted element. | 130 | * function because of risk of continuing from deleted element. |
135 | */ | 131 | */ |
136 | list_for_each_entry_continue_rcu((*elemp), head, list) { | 132 | list_for_each_entry_continue_rcu((*elemp), head, list) { |
137 | if (hook_thresh > (*elemp)->priority) | 133 | if (state->thresh > (*elemp)->priority) |
138 | continue; | 134 | continue; |
139 | 135 | ||
140 | /* Optimization: we don't need to hold module | 136 | /* Optimization: we don't need to hold module |
141 | reference here, since function can't sleep. --RR */ | 137 | reference here, since function can't sleep. --RR */ |
142 | repeat: | 138 | repeat: |
143 | verdict = (*elemp)->hook(*elemp, skb, indev, outdev, okfn); | 139 | verdict = (*elemp)->hook(*elemp, skb, state); |
144 | if (verdict != NF_ACCEPT) { | 140 | if (verdict != NF_ACCEPT) { |
145 | #ifdef CONFIG_NETFILTER_DEBUG | 141 | #ifdef CONFIG_NETFILTER_DEBUG |
146 | if (unlikely((verdict & NF_VERDICT_MASK) | 142 | if (unlikely((verdict & NF_VERDICT_MASK) |
147 | > NF_MAX_VERDICT)) { | 143 | > NF_MAX_VERDICT)) { |
148 | NFDEBUG("Evil return from %p(%u).\n", | 144 | NFDEBUG("Evil return from %p(%u).\n", |
149 | (*elemp)->hook, hook); | 145 | (*elemp)->hook, state->hook); |
150 | continue; | 146 | continue; |
151 | } | 147 | } |
152 | #endif | 148 | #endif |
@@ -161,11 +157,7 @@ repeat: | |||
161 | 157 | ||
162 | /* Returns 1 if okfn() needs to be executed by the caller, | 158 | /* Returns 1 if okfn() needs to be executed by the caller, |
163 | * -EPERM for NF_DROP, 0 otherwise. */ | 159 | * -EPERM for NF_DROP, 0 otherwise. */ |
164 | int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, | 160 | int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state) |
165 | struct net_device *indev, | ||
166 | struct net_device *outdev, | ||
167 | int (*okfn)(struct sk_buff *), | ||
168 | int hook_thresh) | ||
169 | { | 161 | { |
170 | struct nf_hook_ops *elem; | 162 | struct nf_hook_ops *elem; |
171 | unsigned int verdict; | 163 | unsigned int verdict; |
@@ -174,10 +166,11 @@ int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, | |||
174 | /* We may already have this, but read-locks nest anyway */ | 166 | /* We may already have this, but read-locks nest anyway */ |
175 | rcu_read_lock(); | 167 | rcu_read_lock(); |
176 | 168 | ||
177 | elem = list_entry_rcu(&nf_hooks[pf][hook], struct nf_hook_ops, list); | 169 | elem = list_entry_rcu(&nf_hooks[state->pf][state->hook], |
170 | struct nf_hook_ops, list); | ||
178 | next_hook: | 171 | next_hook: |
179 | verdict = nf_iterate(&nf_hooks[pf][hook], skb, hook, indev, | 172 | verdict = nf_iterate(&nf_hooks[state->pf][state->hook], skb, state, |
180 | outdev, &elem, okfn, hook_thresh); | 173 | &elem); |
181 | if (verdict == NF_ACCEPT || verdict == NF_STOP) { | 174 | if (verdict == NF_ACCEPT || verdict == NF_STOP) { |
182 | ret = 1; | 175 | ret = 1; |
183 | } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) { | 176 | } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) { |
@@ -186,8 +179,8 @@ next_hook: | |||
186 | if (ret == 0) | 179 | if (ret == 0) |
187 | ret = -EPERM; | 180 | ret = -EPERM; |
188 | } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { | 181 | } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { |
189 | int err = nf_queue(skb, elem, pf, hook, indev, outdev, okfn, | 182 | int err = nf_queue(skb, elem, state, |
190 | verdict >> NF_VERDICT_QBITS); | 183 | verdict >> NF_VERDICT_QBITS); |
191 | if (err < 0) { | 184 | if (err < 0) { |
192 | if (err == -ECANCELED) | 185 | if (err == -ECANCELED) |
193 | goto next_hook; | 186 | goto next_hook; |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 04dbd9c7213f..5d2b806a862e 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -1272,8 +1272,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1272 | */ | 1272 | */ |
1273 | static unsigned int | 1273 | static unsigned int |
1274 | ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb, | 1274 | ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb, |
1275 | const struct net_device *in, const struct net_device *out, | 1275 | const struct nf_hook_state *state) |
1276 | int (*okfn)(struct sk_buff *)) | ||
1277 | { | 1276 | { |
1278 | return ip_vs_out(ops->hooknum, skb, AF_INET); | 1277 | return ip_vs_out(ops->hooknum, skb, AF_INET); |
1279 | } | 1278 | } |
@@ -1284,8 +1283,7 @@ ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
1284 | */ | 1283 | */ |
1285 | static unsigned int | 1284 | static unsigned int |
1286 | ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb, | 1285 | ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb, |
1287 | const struct net_device *in, const struct net_device *out, | 1286 | const struct nf_hook_state *state) |
1288 | int (*okfn)(struct sk_buff *)) | ||
1289 | { | 1287 | { |
1290 | return ip_vs_out(ops->hooknum, skb, AF_INET); | 1288 | return ip_vs_out(ops->hooknum, skb, AF_INET); |
1291 | } | 1289 | } |
@@ -1299,8 +1297,7 @@ ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
1299 | */ | 1297 | */ |
1300 | static unsigned int | 1298 | static unsigned int |
1301 | ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb, | 1299 | ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb, |
1302 | const struct net_device *in, const struct net_device *out, | 1300 | const struct nf_hook_state *state) |
1303 | int (*okfn)(struct sk_buff *)) | ||
1304 | { | 1301 | { |
1305 | return ip_vs_out(ops->hooknum, skb, AF_INET6); | 1302 | return ip_vs_out(ops->hooknum, skb, AF_INET6); |
1306 | } | 1303 | } |
@@ -1311,8 +1308,7 @@ ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
1311 | */ | 1308 | */ |
1312 | static unsigned int | 1309 | static unsigned int |
1313 | ip_vs_local_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb, | 1310 | ip_vs_local_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb, |
1314 | const struct net_device *in, const struct net_device *out, | 1311 | const struct nf_hook_state *state) |
1315 | int (*okfn)(struct sk_buff *)) | ||
1316 | { | 1312 | { |
1317 | return ip_vs_out(ops->hooknum, skb, AF_INET6); | 1313 | return ip_vs_out(ops->hooknum, skb, AF_INET6); |
1318 | } | 1314 | } |
@@ -1769,9 +1765,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1769 | */ | 1765 | */ |
1770 | static unsigned int | 1766 | static unsigned int |
1771 | ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb, | 1767 | ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb, |
1772 | const struct net_device *in, | 1768 | const struct nf_hook_state *state) |
1773 | const struct net_device *out, | ||
1774 | int (*okfn)(struct sk_buff *)) | ||
1775 | { | 1769 | { |
1776 | return ip_vs_in(ops->hooknum, skb, AF_INET); | 1770 | return ip_vs_in(ops->hooknum, skb, AF_INET); |
1777 | } | 1771 | } |
@@ -1782,8 +1776,7 @@ ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
1782 | */ | 1776 | */ |
1783 | static unsigned int | 1777 | static unsigned int |
1784 | ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb, | 1778 | ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb, |
1785 | const struct net_device *in, const struct net_device *out, | 1779 | const struct nf_hook_state *state) |
1786 | int (*okfn)(struct sk_buff *)) | ||
1787 | { | 1780 | { |
1788 | return ip_vs_in(ops->hooknum, skb, AF_INET); | 1781 | return ip_vs_in(ops->hooknum, skb, AF_INET); |
1789 | } | 1782 | } |
@@ -1796,9 +1789,7 @@ ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
1796 | */ | 1789 | */ |
1797 | static unsigned int | 1790 | static unsigned int |
1798 | ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb, | 1791 | ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb, |
1799 | const struct net_device *in, | 1792 | const struct nf_hook_state *state) |
1800 | const struct net_device *out, | ||
1801 | int (*okfn)(struct sk_buff *)) | ||
1802 | { | 1793 | { |
1803 | return ip_vs_in(ops->hooknum, skb, AF_INET6); | 1794 | return ip_vs_in(ops->hooknum, skb, AF_INET6); |
1804 | } | 1795 | } |
@@ -1809,8 +1800,7 @@ ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
1809 | */ | 1800 | */ |
1810 | static unsigned int | 1801 | static unsigned int |
1811 | ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb, | 1802 | ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb, |
1812 | const struct net_device *in, const struct net_device *out, | 1803 | const struct nf_hook_state *state) |
1813 | int (*okfn)(struct sk_buff *)) | ||
1814 | { | 1804 | { |
1815 | return ip_vs_in(ops->hooknum, skb, AF_INET6); | 1805 | return ip_vs_in(ops->hooknum, skb, AF_INET6); |
1816 | } | 1806 | } |
@@ -1829,8 +1819,7 @@ ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
1829 | */ | 1819 | */ |
1830 | static unsigned int | 1820 | static unsigned int |
1831 | ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb, | 1821 | ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb, |
1832 | const struct net_device *in, const struct net_device *out, | 1822 | const struct nf_hook_state *state) |
1833 | int (*okfn)(struct sk_buff *)) | ||
1834 | { | 1823 | { |
1835 | int r; | 1824 | int r; |
1836 | struct net *net; | 1825 | struct net *net; |
@@ -1851,8 +1840,7 @@ ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb, | |||
1851 | #ifdef CONFIG_IP_VS_IPV6 | 1840 | #ifdef CONFIG_IP_VS_IPV6 |
1852 | static unsigned int | 1841 | static unsigned int |
1853 | ip_vs_forward_icmp_v6(const struct nf_hook_ops *ops, struct sk_buff *skb, | 1842 | ip_vs_forward_icmp_v6(const struct nf_hook_ops *ops, struct sk_buff *skb, |
1854 | const struct net_device *in, const struct net_device *out, | 1843 | const struct nf_hook_state *state) |
1855 | int (*okfn)(struct sk_buff *)) | ||
1856 | { | 1844 | { |
1857 | int r; | 1845 | int r; |
1858 | struct net *net; | 1846 | struct net *net; |
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index bf02932b7188..19986ec5f21a 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
@@ -536,8 +536,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, | |||
536 | ip_vs_update_conntrack(skb, cp, 1); | 536 | ip_vs_update_conntrack(skb, cp, 1); |
537 | if (!local) { | 537 | if (!local) { |
538 | skb_forward_csum(skb); | 538 | skb_forward_csum(skb); |
539 | NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev, | 539 | NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, |
540 | dst_output); | 540 | NULL, skb_dst(skb)->dev, dst_output_sk); |
541 | } else | 541 | } else |
542 | ret = NF_ACCEPT; | 542 | ret = NF_ACCEPT; |
543 | return ret; | 543 | return ret; |
@@ -554,8 +554,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb, | |||
554 | ip_vs_notrack(skb); | 554 | ip_vs_notrack(skb); |
555 | if (!local) { | 555 | if (!local) { |
556 | skb_forward_csum(skb); | 556 | skb_forward_csum(skb); |
557 | NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev, | 557 | NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, |
558 | dst_output); | 558 | NULL, skb_dst(skb)->dev, dst_output_sk); |
559 | } else | 559 | } else |
560 | ret = NF_ACCEPT; | 560 | ret = NF_ACCEPT; |
561 | return ret; | 561 | return ret; |
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h index 61a3c927e63c..ea7f36784b3d 100644 --- a/net/netfilter/nf_internals.h +++ b/net/netfilter/nf_internals.h | |||
@@ -14,16 +14,11 @@ | |||
14 | 14 | ||
15 | /* core.c */ | 15 | /* core.c */ |
16 | unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb, | 16 | unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb, |
17 | unsigned int hook, const struct net_device *indev, | 17 | struct nf_hook_state *state, struct nf_hook_ops **elemp); |
18 | const struct net_device *outdev, | ||
19 | struct nf_hook_ops **elemp, | ||
20 | int (*okfn)(struct sk_buff *), int hook_thresh); | ||
21 | 18 | ||
22 | /* nf_queue.c */ | 19 | /* nf_queue.c */ |
23 | int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, u_int8_t pf, | 20 | int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, |
24 | unsigned int hook, struct net_device *indev, | 21 | struct nf_hook_state *state, unsigned int queuenum); |
25 | struct net_device *outdev, int (*okfn)(struct sk_buff *), | ||
26 | unsigned int queuenum); | ||
27 | int __init netfilter_queue_init(void); | 22 | int __init netfilter_queue_init(void); |
28 | 23 | ||
29 | /* nf_log.c */ | 24 | /* nf_log.c */ |
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index fb045b4c2966..2e88032cd5ad 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c | |||
@@ -48,11 +48,15 @@ EXPORT_SYMBOL(nf_unregister_queue_handler); | |||
48 | 48 | ||
49 | void nf_queue_entry_release_refs(struct nf_queue_entry *entry) | 49 | void nf_queue_entry_release_refs(struct nf_queue_entry *entry) |
50 | { | 50 | { |
51 | struct nf_hook_state *state = &entry->state; | ||
52 | |||
51 | /* Release those devices we held, or Alexey will kill me. */ | 53 | /* Release those devices we held, or Alexey will kill me. */ |
52 | if (entry->indev) | 54 | if (state->in) |
53 | dev_put(entry->indev); | 55 | dev_put(state->in); |
54 | if (entry->outdev) | 56 | if (state->out) |
55 | dev_put(entry->outdev); | 57 | dev_put(state->out); |
58 | if (state->sk) | ||
59 | sock_put(state->sk); | ||
56 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) | 60 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
57 | if (entry->skb->nf_bridge) { | 61 | if (entry->skb->nf_bridge) { |
58 | struct net_device *physdev; | 62 | struct net_device *physdev; |
@@ -73,13 +77,17 @@ EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs); | |||
73 | /* Bump dev refs so they don't vanish while packet is out */ | 77 | /* Bump dev refs so they don't vanish while packet is out */ |
74 | bool nf_queue_entry_get_refs(struct nf_queue_entry *entry) | 78 | bool nf_queue_entry_get_refs(struct nf_queue_entry *entry) |
75 | { | 79 | { |
80 | struct nf_hook_state *state = &entry->state; | ||
81 | |||
76 | if (!try_module_get(entry->elem->owner)) | 82 | if (!try_module_get(entry->elem->owner)) |
77 | return false; | 83 | return false; |
78 | 84 | ||
79 | if (entry->indev) | 85 | if (state->in) |
80 | dev_hold(entry->indev); | 86 | dev_hold(state->in); |
81 | if (entry->outdev) | 87 | if (state->out) |
82 | dev_hold(entry->outdev); | 88 | dev_hold(state->out); |
89 | if (state->sk) | ||
90 | sock_hold(state->sk); | ||
83 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) | 91 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
84 | if (entry->skb->nf_bridge) { | 92 | if (entry->skb->nf_bridge) { |
85 | struct net_device *physdev; | 93 | struct net_device *physdev; |
@@ -102,12 +110,9 @@ EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs); | |||
102 | * through nf_reinject(). | 110 | * through nf_reinject(). |
103 | */ | 111 | */ |
104 | int nf_queue(struct sk_buff *skb, | 112 | int nf_queue(struct sk_buff *skb, |
105 | struct nf_hook_ops *elem, | 113 | struct nf_hook_ops *elem, |
106 | u_int8_t pf, unsigned int hook, | 114 | struct nf_hook_state *state, |
107 | struct net_device *indev, | 115 | unsigned int queuenum) |
108 | struct net_device *outdev, | ||
109 | int (*okfn)(struct sk_buff *), | ||
110 | unsigned int queuenum) | ||
111 | { | 116 | { |
112 | int status = -ENOENT; | 117 | int status = -ENOENT; |
113 | struct nf_queue_entry *entry = NULL; | 118 | struct nf_queue_entry *entry = NULL; |
@@ -123,7 +128,7 @@ int nf_queue(struct sk_buff *skb, | |||
123 | goto err_unlock; | 128 | goto err_unlock; |
124 | } | 129 | } |
125 | 130 | ||
126 | afinfo = nf_get_afinfo(pf); | 131 | afinfo = nf_get_afinfo(state->pf); |
127 | if (!afinfo) | 132 | if (!afinfo) |
128 | goto err_unlock; | 133 | goto err_unlock; |
129 | 134 | ||
@@ -136,11 +141,7 @@ int nf_queue(struct sk_buff *skb, | |||
136 | *entry = (struct nf_queue_entry) { | 141 | *entry = (struct nf_queue_entry) { |
137 | .skb = skb, | 142 | .skb = skb, |
138 | .elem = elem, | 143 | .elem = elem, |
139 | .pf = pf, | 144 | .state = *state, |
140 | .hook = hook, | ||
141 | .indev = indev, | ||
142 | .outdev = outdev, | ||
143 | .okfn = okfn, | ||
144 | .size = sizeof(*entry) + afinfo->route_key_size, | 145 | .size = sizeof(*entry) + afinfo->route_key_size, |
145 | }; | 146 | }; |
146 | 147 | ||
@@ -186,30 +187,29 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) | |||
186 | } | 187 | } |
187 | 188 | ||
188 | if (verdict == NF_ACCEPT) { | 189 | if (verdict == NF_ACCEPT) { |
189 | afinfo = nf_get_afinfo(entry->pf); | 190 | afinfo = nf_get_afinfo(entry->state.pf); |
190 | if (!afinfo || afinfo->reroute(skb, entry) < 0) | 191 | if (!afinfo || afinfo->reroute(skb, entry) < 0) |
191 | verdict = NF_DROP; | 192 | verdict = NF_DROP; |
192 | } | 193 | } |
193 | 194 | ||
195 | entry->state.thresh = INT_MIN; | ||
196 | |||
194 | if (verdict == NF_ACCEPT) { | 197 | if (verdict == NF_ACCEPT) { |
195 | next_hook: | 198 | next_hook: |
196 | verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook], | 199 | verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook], |
197 | skb, entry->hook, | 200 | skb, &entry->state, &elem); |
198 | entry->indev, entry->outdev, &elem, | ||
199 | entry->okfn, INT_MIN); | ||
200 | } | 201 | } |
201 | 202 | ||
202 | switch (verdict & NF_VERDICT_MASK) { | 203 | switch (verdict & NF_VERDICT_MASK) { |
203 | case NF_ACCEPT: | 204 | case NF_ACCEPT: |
204 | case NF_STOP: | 205 | case NF_STOP: |
205 | local_bh_disable(); | 206 | local_bh_disable(); |
206 | entry->okfn(skb); | 207 | entry->state.okfn(entry->state.sk, skb); |
207 | local_bh_enable(); | 208 | local_bh_enable(); |
208 | break; | 209 | break; |
209 | case NF_QUEUE: | 210 | case NF_QUEUE: |
210 | err = nf_queue(skb, elem, entry->pf, entry->hook, | 211 | err = nf_queue(skb, elem, &entry->state, |
211 | entry->indev, entry->outdev, entry->okfn, | 212 | verdict >> NF_VERDICT_QBITS); |
212 | verdict >> NF_VERDICT_QBITS); | ||
213 | if (err < 0) { | 213 | if (err < 0) { |
214 | if (err == -ECANCELED) | 214 | if (err == -ECANCELED) |
215 | goto next_hook; | 215 | goto next_hook; |
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 94e1aaf86070..628afc350c02 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c | |||
@@ -315,13 +315,13 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
315 | if (entskb->tstamp.tv64) | 315 | if (entskb->tstamp.tv64) |
316 | size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); | 316 | size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); |
317 | 317 | ||
318 | if (entry->hook <= NF_INET_FORWARD || | 318 | if (entry->state.hook <= NF_INET_FORWARD || |
319 | (entry->hook == NF_INET_POST_ROUTING && entskb->sk == NULL)) | 319 | (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL)) |
320 | csum_verify = !skb_csum_unnecessary(entskb); | 320 | csum_verify = !skb_csum_unnecessary(entskb); |
321 | else | 321 | else |
322 | csum_verify = false; | 322 | csum_verify = false; |
323 | 323 | ||
324 | outdev = entry->outdev; | 324 | outdev = entry->state.out; |
325 | 325 | ||
326 | switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) { | 326 | switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) { |
327 | case NFQNL_COPY_META: | 327 | case NFQNL_COPY_META: |
@@ -369,23 +369,23 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
369 | return NULL; | 369 | return NULL; |
370 | } | 370 | } |
371 | nfmsg = nlmsg_data(nlh); | 371 | nfmsg = nlmsg_data(nlh); |
372 | nfmsg->nfgen_family = entry->pf; | 372 | nfmsg->nfgen_family = entry->state.pf; |
373 | nfmsg->version = NFNETLINK_V0; | 373 | nfmsg->version = NFNETLINK_V0; |
374 | nfmsg->res_id = htons(queue->queue_num); | 374 | nfmsg->res_id = htons(queue->queue_num); |
375 | 375 | ||
376 | nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg)); | 376 | nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg)); |
377 | pmsg = nla_data(nla); | 377 | pmsg = nla_data(nla); |
378 | pmsg->hw_protocol = entskb->protocol; | 378 | pmsg->hw_protocol = entskb->protocol; |
379 | pmsg->hook = entry->hook; | 379 | pmsg->hook = entry->state.hook; |
380 | *packet_id_ptr = &pmsg->packet_id; | 380 | *packet_id_ptr = &pmsg->packet_id; |
381 | 381 | ||
382 | indev = entry->indev; | 382 | indev = entry->state.in; |
383 | if (indev) { | 383 | if (indev) { |
384 | #if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER) | 384 | #if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
385 | if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex))) | 385 | if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex))) |
386 | goto nla_put_failure; | 386 | goto nla_put_failure; |
387 | #else | 387 | #else |
388 | if (entry->pf == PF_BRIDGE) { | 388 | if (entry->state.pf == PF_BRIDGE) { |
389 | /* Case 1: indev is physical input device, we need to | 389 | /* Case 1: indev is physical input device, we need to |
390 | * look for bridge group (when called from | 390 | * look for bridge group (when called from |
391 | * netfilter_bridge) */ | 391 | * netfilter_bridge) */ |
@@ -419,7 +419,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
419 | if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex))) | 419 | if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex))) |
420 | goto nla_put_failure; | 420 | goto nla_put_failure; |
421 | #else | 421 | #else |
422 | if (entry->pf == PF_BRIDGE) { | 422 | if (entry->state.pf == PF_BRIDGE) { |
423 | /* Case 1: outdev is physical output device, we need to | 423 | /* Case 1: outdev is physical output device, we need to |
424 | * look for bridge group (when called from | 424 | * look for bridge group (when called from |
425 | * netfilter_bridge) */ | 425 | * netfilter_bridge) */ |
@@ -642,8 +642,8 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) | |||
642 | struct nfqnl_instance *queue; | 642 | struct nfqnl_instance *queue; |
643 | struct sk_buff *skb, *segs; | 643 | struct sk_buff *skb, *segs; |
644 | int err = -ENOBUFS; | 644 | int err = -ENOBUFS; |
645 | struct net *net = dev_net(entry->indev ? | 645 | struct net *net = dev_net(entry->state.in ? |
646 | entry->indev : entry->outdev); | 646 | entry->state.in : entry->state.out); |
647 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); | 647 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); |
648 | 648 | ||
649 | /* rcu_read_lock()ed by nf_hook_slow() */ | 649 | /* rcu_read_lock()ed by nf_hook_slow() */ |
@@ -656,7 +656,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) | |||
656 | 656 | ||
657 | skb = entry->skb; | 657 | skb = entry->skb; |
658 | 658 | ||
659 | switch (entry->pf) { | 659 | switch (entry->state.pf) { |
660 | case NFPROTO_IPV4: | 660 | case NFPROTO_IPV4: |
661 | skb->protocol = htons(ETH_P_IP); | 661 | skb->protocol = htons(ETH_P_IP); |
662 | break; | 662 | break; |
@@ -766,11 +766,11 @@ nfqnl_set_mode(struct nfqnl_instance *queue, | |||
766 | static int | 766 | static int |
767 | dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) | 767 | dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) |
768 | { | 768 | { |
769 | if (entry->indev) | 769 | if (entry->state.in) |
770 | if (entry->indev->ifindex == ifindex) | 770 | if (entry->state.in->ifindex == ifindex) |
771 | return 1; | 771 | return 1; |
772 | if (entry->outdev) | 772 | if (entry->state.out) |
773 | if (entry->outdev->ifindex == ifindex) | 773 | if (entry->state.out->ifindex == ifindex) |
774 | return 1; | 774 | return 1; |
775 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) | 775 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
776 | if (entry->skb->nf_bridge) { | 776 | if (entry->skb->nf_bridge) { |
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index 3277a7520e31..6d39766e7828 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c | |||
@@ -222,7 +222,8 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb) | |||
222 | { | 222 | { |
223 | struct net *net = ovs_dp_get_net(vport->dp); | 223 | struct net *net = ovs_dp_get_net(vport->dp); |
224 | struct vxlan_port *vxlan_port = vxlan_vport(vport); | 224 | struct vxlan_port *vxlan_port = vxlan_vport(vport); |
225 | __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport; | 225 | struct sock *sk = vxlan_port->vs->sock->sk; |
226 | __be16 dst_port = inet_sk(sk)->inet_sport; | ||
226 | const struct ovs_key_ipv4_tunnel *tun_key; | 227 | const struct ovs_key_ipv4_tunnel *tun_key; |
227 | struct vxlan_metadata md = {0}; | 228 | struct vxlan_metadata md = {0}; |
228 | struct rtable *rt; | 229 | struct rtable *rt; |
@@ -255,7 +256,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb) | |||
255 | vxflags = vxlan_port->exts | | 256 | vxflags = vxlan_port->exts | |
256 | (tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0); | 257 | (tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0); |
257 | 258 | ||
258 | err = vxlan_xmit_skb(rt, skb, fl.saddr, tun_key->ipv4_dst, | 259 | err = vxlan_xmit_skb(rt, sk, skb, fl.saddr, tun_key->ipv4_dst, |
259 | tun_key->ipv4_tos, tun_key->ipv4_ttl, df, | 260 | tun_key->ipv4_tos, tun_key->ipv4_ttl, df, |
260 | src_port, dst_port, | 261 | src_port, dst_port, |
261 | &md, false, vxflags); | 262 | &md, false, vxflags); |
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index ec2954ffc690..067a3fff1d2c 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c | |||
@@ -274,10 +274,8 @@ void ovs_vport_del(struct vport *vport) | |||
274 | ASSERT_OVSL(); | 274 | ASSERT_OVSL(); |
275 | 275 | ||
276 | hlist_del_rcu(&vport->hash_node); | 276 | hlist_del_rcu(&vport->hash_node); |
277 | |||
278 | vport->ops->destroy(vport); | ||
279 | |||
280 | module_put(vport->ops->owner); | 277 | module_put(vport->ops->owner); |
278 | vport->ops->destroy(vport); | ||
281 | } | 279 | } |
282 | 280 | ||
283 | /** | 281 | /** |
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index dfcea20e3171..f377702d4b91 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | * | 10 | * |
11 | * Meant to be mostly used for localy generated traffic : | 11 | * Meant to be mostly used for locally generated traffic : |
12 | * Fast classification depends on skb->sk being set before reaching us. | 12 | * Fast classification depends on skb->sk being set before reaching us. |
13 | * If not, (router workload), we use rxhash as fallback, with 32 bits wide hash. | 13 | * If not, (router workload), we use rxhash as fallback, with 32 bits wide hash. |
14 | * All packets belonging to a socket are considered as a 'flow'. | 14 | * All packets belonging to a socket are considered as a 'flow'. |
@@ -63,7 +63,7 @@ struct fq_flow { | |||
63 | struct sk_buff *tail; /* last skb in the list */ | 63 | struct sk_buff *tail; /* last skb in the list */ |
64 | unsigned long age; /* jiffies when flow was emptied, for gc */ | 64 | unsigned long age; /* jiffies when flow was emptied, for gc */ |
65 | }; | 65 | }; |
66 | struct rb_node fq_node; /* anchor in fq_root[] trees */ | 66 | struct rb_node fq_node; /* anchor in fq_root[] trees */ |
67 | struct sock *sk; | 67 | struct sock *sk; |
68 | int qlen; /* number of packets in flow queue */ | 68 | int qlen; /* number of packets in flow queue */ |
69 | int credit; | 69 | int credit; |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 612aa73bbc60..e6ce1517367f 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -303,9 +303,7 @@ static int rpc_client_register(struct rpc_clnt *clnt, | |||
303 | struct super_block *pipefs_sb; | 303 | struct super_block *pipefs_sb; |
304 | int err; | 304 | int err; |
305 | 305 | ||
306 | err = rpc_clnt_debugfs_register(clnt); | 306 | rpc_clnt_debugfs_register(clnt); |
307 | if (err) | ||
308 | return err; | ||
309 | 307 | ||
310 | pipefs_sb = rpc_get_sb_net(net); | 308 | pipefs_sb = rpc_get_sb_net(net); |
311 | if (pipefs_sb) { | 309 | if (pipefs_sb) { |
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c index e811f390f9f6..82962f7e6e88 100644 --- a/net/sunrpc/debugfs.c +++ b/net/sunrpc/debugfs.c | |||
@@ -129,48 +129,52 @@ static const struct file_operations tasks_fops = { | |||
129 | .release = tasks_release, | 129 | .release = tasks_release, |
130 | }; | 130 | }; |
131 | 131 | ||
132 | int | 132 | void |
133 | rpc_clnt_debugfs_register(struct rpc_clnt *clnt) | 133 | rpc_clnt_debugfs_register(struct rpc_clnt *clnt) |
134 | { | 134 | { |
135 | int len, err; | 135 | int len; |
136 | char name[24]; /* enough for "../../rpc_xprt/ + 8 hex digits + NULL */ | 136 | char name[24]; /* enough for "../../rpc_xprt/ + 8 hex digits + NULL */ |
137 | struct rpc_xprt *xprt; | ||
137 | 138 | ||
138 | /* Already registered? */ | 139 | /* Already registered? */ |
139 | if (clnt->cl_debugfs) | 140 | if (clnt->cl_debugfs || !rpc_clnt_dir) |
140 | return 0; | 141 | return; |
141 | 142 | ||
142 | len = snprintf(name, sizeof(name), "%x", clnt->cl_clid); | 143 | len = snprintf(name, sizeof(name), "%x", clnt->cl_clid); |
143 | if (len >= sizeof(name)) | 144 | if (len >= sizeof(name)) |
144 | return -EINVAL; | 145 | return; |
145 | 146 | ||
146 | /* make the per-client dir */ | 147 | /* make the per-client dir */ |
147 | clnt->cl_debugfs = debugfs_create_dir(name, rpc_clnt_dir); | 148 | clnt->cl_debugfs = debugfs_create_dir(name, rpc_clnt_dir); |
148 | if (!clnt->cl_debugfs) | 149 | if (!clnt->cl_debugfs) |
149 | return -ENOMEM; | 150 | return; |
150 | 151 | ||
151 | /* make tasks file */ | 152 | /* make tasks file */ |
152 | err = -ENOMEM; | ||
153 | if (!debugfs_create_file("tasks", S_IFREG | S_IRUSR, clnt->cl_debugfs, | 153 | if (!debugfs_create_file("tasks", S_IFREG | S_IRUSR, clnt->cl_debugfs, |
154 | clnt, &tasks_fops)) | 154 | clnt, &tasks_fops)) |
155 | goto out_err; | 155 | goto out_err; |
156 | 156 | ||
157 | err = -EINVAL; | ||
158 | rcu_read_lock(); | 157 | rcu_read_lock(); |
158 | xprt = rcu_dereference(clnt->cl_xprt); | ||
159 | /* no "debugfs" dentry? Don't bother with the symlink. */ | ||
160 | if (!xprt->debugfs) { | ||
161 | rcu_read_unlock(); | ||
162 | return; | ||
163 | } | ||
159 | len = snprintf(name, sizeof(name), "../../rpc_xprt/%s", | 164 | len = snprintf(name, sizeof(name), "../../rpc_xprt/%s", |
160 | rcu_dereference(clnt->cl_xprt)->debugfs->d_name.name); | 165 | xprt->debugfs->d_name.name); |
161 | rcu_read_unlock(); | 166 | rcu_read_unlock(); |
167 | |||
162 | if (len >= sizeof(name)) | 168 | if (len >= sizeof(name)) |
163 | goto out_err; | 169 | goto out_err; |
164 | 170 | ||
165 | err = -ENOMEM; | ||
166 | if (!debugfs_create_symlink("xprt", clnt->cl_debugfs, name)) | 171 | if (!debugfs_create_symlink("xprt", clnt->cl_debugfs, name)) |
167 | goto out_err; | 172 | goto out_err; |
168 | 173 | ||
169 | return 0; | 174 | return; |
170 | out_err: | 175 | out_err: |
171 | debugfs_remove_recursive(clnt->cl_debugfs); | 176 | debugfs_remove_recursive(clnt->cl_debugfs); |
172 | clnt->cl_debugfs = NULL; | 177 | clnt->cl_debugfs = NULL; |
173 | return err; | ||
174 | } | 178 | } |
175 | 179 | ||
176 | void | 180 | void |
@@ -226,33 +230,33 @@ static const struct file_operations xprt_info_fops = { | |||
226 | .release = xprt_info_release, | 230 | .release = xprt_info_release, |
227 | }; | 231 | }; |
228 | 232 | ||
229 | int | 233 | void |
230 | rpc_xprt_debugfs_register(struct rpc_xprt *xprt) | 234 | rpc_xprt_debugfs_register(struct rpc_xprt *xprt) |
231 | { | 235 | { |
232 | int len, id; | 236 | int len, id; |
233 | static atomic_t cur_id; | 237 | static atomic_t cur_id; |
234 | char name[9]; /* 8 hex digits + NULL term */ | 238 | char name[9]; /* 8 hex digits + NULL term */ |
235 | 239 | ||
240 | if (!rpc_xprt_dir) | ||
241 | return; | ||
242 | |||
236 | id = (unsigned int)atomic_inc_return(&cur_id); | 243 | id = (unsigned int)atomic_inc_return(&cur_id); |
237 | 244 | ||
238 | len = snprintf(name, sizeof(name), "%x", id); | 245 | len = snprintf(name, sizeof(name), "%x", id); |
239 | if (len >= sizeof(name)) | 246 | if (len >= sizeof(name)) |
240 | return -EINVAL; | 247 | return; |
241 | 248 | ||
242 | /* make the per-client dir */ | 249 | /* make the per-client dir */ |
243 | xprt->debugfs = debugfs_create_dir(name, rpc_xprt_dir); | 250 | xprt->debugfs = debugfs_create_dir(name, rpc_xprt_dir); |
244 | if (!xprt->debugfs) | 251 | if (!xprt->debugfs) |
245 | return -ENOMEM; | 252 | return; |
246 | 253 | ||
247 | /* make tasks file */ | 254 | /* make tasks file */ |
248 | if (!debugfs_create_file("info", S_IFREG | S_IRUSR, xprt->debugfs, | 255 | if (!debugfs_create_file("info", S_IFREG | S_IRUSR, xprt->debugfs, |
249 | xprt, &xprt_info_fops)) { | 256 | xprt, &xprt_info_fops)) { |
250 | debugfs_remove_recursive(xprt->debugfs); | 257 | debugfs_remove_recursive(xprt->debugfs); |
251 | xprt->debugfs = NULL; | 258 | xprt->debugfs = NULL; |
252 | return -ENOMEM; | ||
253 | } | 259 | } |
254 | |||
255 | return 0; | ||
256 | } | 260 | } |
257 | 261 | ||
258 | void | 262 | void |
@@ -266,14 +270,17 @@ void __exit | |||
266 | sunrpc_debugfs_exit(void) | 270 | sunrpc_debugfs_exit(void) |
267 | { | 271 | { |
268 | debugfs_remove_recursive(topdir); | 272 | debugfs_remove_recursive(topdir); |
273 | topdir = NULL; | ||
274 | rpc_clnt_dir = NULL; | ||
275 | rpc_xprt_dir = NULL; | ||
269 | } | 276 | } |
270 | 277 | ||
271 | int __init | 278 | void __init |
272 | sunrpc_debugfs_init(void) | 279 | sunrpc_debugfs_init(void) |
273 | { | 280 | { |
274 | topdir = debugfs_create_dir("sunrpc", NULL); | 281 | topdir = debugfs_create_dir("sunrpc", NULL); |
275 | if (!topdir) | 282 | if (!topdir) |
276 | goto out; | 283 | return; |
277 | 284 | ||
278 | rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir); | 285 | rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir); |
279 | if (!rpc_clnt_dir) | 286 | if (!rpc_clnt_dir) |
@@ -283,10 +290,9 @@ sunrpc_debugfs_init(void) | |||
283 | if (!rpc_xprt_dir) | 290 | if (!rpc_xprt_dir) |
284 | goto out_remove; | 291 | goto out_remove; |
285 | 292 | ||
286 | return 0; | 293 | return; |
287 | out_remove: | 294 | out_remove: |
288 | debugfs_remove_recursive(topdir); | 295 | debugfs_remove_recursive(topdir); |
289 | topdir = NULL; | 296 | topdir = NULL; |
290 | out: | 297 | rpc_clnt_dir = NULL; |
291 | return -ENOMEM; | ||
292 | } | 298 | } |
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index e37fbed87956..ee5d3d253102 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
@@ -98,10 +98,7 @@ init_sunrpc(void) | |||
98 | if (err) | 98 | if (err) |
99 | goto out4; | 99 | goto out4; |
100 | 100 | ||
101 | err = sunrpc_debugfs_init(); | 101 | sunrpc_debugfs_init(); |
102 | if (err) | ||
103 | goto out5; | ||
104 | |||
105 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | 102 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
106 | rpc_register_sysctl(); | 103 | rpc_register_sysctl(); |
107 | #endif | 104 | #endif |
@@ -109,8 +106,6 @@ init_sunrpc(void) | |||
109 | init_socket_xprt(); /* clnt sock transport */ | 106 | init_socket_xprt(); /* clnt sock transport */ |
110 | return 0; | 107 | return 0; |
111 | 108 | ||
112 | out5: | ||
113 | unregister_rpc_pipefs(); | ||
114 | out4: | 109 | out4: |
115 | unregister_pernet_subsys(&sunrpc_net_ops); | 110 | unregister_pernet_subsys(&sunrpc_net_ops); |
116 | out3: | 111 | out3: |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index e3015aede0d9..9949722d99ce 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -1331,7 +1331,6 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net) | |||
1331 | */ | 1331 | */ |
1332 | struct rpc_xprt *xprt_create_transport(struct xprt_create *args) | 1332 | struct rpc_xprt *xprt_create_transport(struct xprt_create *args) |
1333 | { | 1333 | { |
1334 | int err; | ||
1335 | struct rpc_xprt *xprt; | 1334 | struct rpc_xprt *xprt; |
1336 | struct xprt_class *t; | 1335 | struct xprt_class *t; |
1337 | 1336 | ||
@@ -1372,11 +1371,7 @@ found: | |||
1372 | return ERR_PTR(-ENOMEM); | 1371 | return ERR_PTR(-ENOMEM); |
1373 | } | 1372 | } |
1374 | 1373 | ||
1375 | err = rpc_xprt_debugfs_register(xprt); | 1374 | rpc_xprt_debugfs_register(xprt); |
1376 | if (err) { | ||
1377 | xprt_destroy(xprt); | ||
1378 | return ERR_PTR(err); | ||
1379 | } | ||
1380 | 1375 | ||
1381 | dprintk("RPC: created transport %p with %u slots\n", xprt, | 1376 | dprintk("RPC: created transport %p with %u slots\n", xprt, |
1382 | xprt->max_reqs); | 1377 | xprt->max_reqs); |
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index ae558dd7f8ee..c5cbdcb1f0b5 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -413,7 +413,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) | |||
413 | */ | 413 | */ |
414 | if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) { | 414 | if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) { |
415 | tipc_link_proto_xmit(node->active_links[node->addr & 1], | 415 | tipc_link_proto_xmit(node->active_links[node->addr & 1], |
416 | STATE_MSG, 0, 0, 0, 0, 0); | 416 | STATE_MSG, 0, 0, 0, 0); |
417 | tn->bcl->stats.sent_acks++; | 417 | tn->bcl->stats.sent_acks++; |
418 | } | 418 | } |
419 | } | 419 | } |
@@ -899,7 +899,7 @@ int tipc_bclink_init(struct net *net) | |||
899 | skb_queue_head_init(&bclink->inputq); | 899 | skb_queue_head_init(&bclink->inputq); |
900 | bcl->owner = &bclink->node; | 900 | bcl->owner = &bclink->node; |
901 | bcl->owner->net = net; | 901 | bcl->owner->net = net; |
902 | bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; | 902 | bcl->mtu = MAX_PKT_DEFAULT_MCAST; |
903 | tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); | 903 | tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); |
904 | bcl->bearer_id = MAX_BEARERS; | 904 | bcl->bearer_id = MAX_BEARERS; |
905 | rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer); | 905 | rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer); |
diff --git a/net/tipc/core.c b/net/tipc/core.c index 935205e6bcfe..be1c9fa60b09 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c | |||
@@ -152,11 +152,11 @@ out_netlink: | |||
152 | static void __exit tipc_exit(void) | 152 | static void __exit tipc_exit(void) |
153 | { | 153 | { |
154 | tipc_bearer_cleanup(); | 154 | tipc_bearer_cleanup(); |
155 | unregister_pernet_subsys(&tipc_net_ops); | ||
155 | tipc_netlink_stop(); | 156 | tipc_netlink_stop(); |
156 | tipc_netlink_compat_stop(); | 157 | tipc_netlink_compat_stop(); |
157 | tipc_socket_stop(); | 158 | tipc_socket_stop(); |
158 | tipc_unregister_sysctl(); | 159 | tipc_unregister_sysctl(); |
159 | unregister_pernet_subsys(&tipc_net_ops); | ||
160 | 160 | ||
161 | pr_info("Deactivated\n"); | 161 | pr_info("Deactivated\n"); |
162 | } | 162 | } |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 514466efc25c..a6b30df6ec02 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -89,24 +89,14 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = { | |||
89 | #define TIMEOUT_EVT 560817u /* link timer expired */ | 89 | #define TIMEOUT_EVT 560817u /* link timer expired */ |
90 | 90 | ||
91 | /* | 91 | /* |
92 | * The following two 'message types' is really just implementation | 92 | * State value stored in 'failover_pkts' |
93 | * data conveniently stored in the message header. | ||
94 | * They must not be considered part of the protocol | ||
95 | */ | 93 | */ |
96 | #define OPEN_MSG 0 | 94 | #define FIRST_FAILOVER 0xffffu |
97 | #define CLOSED_MSG 1 | ||
98 | |||
99 | /* | ||
100 | * State value stored in 'exp_msg_count' | ||
101 | */ | ||
102 | #define START_CHANGEOVER 100000u | ||
103 | 95 | ||
104 | static void link_handle_out_of_seq_msg(struct tipc_link *link, | 96 | static void link_handle_out_of_seq_msg(struct tipc_link *link, |
105 | struct sk_buff *skb); | 97 | struct sk_buff *skb); |
106 | static void tipc_link_proto_rcv(struct tipc_link *link, | 98 | static void tipc_link_proto_rcv(struct tipc_link *link, |
107 | struct sk_buff *skb); | 99 | struct sk_buff *skb); |
108 | static int tipc_link_tunnel_rcv(struct tipc_node *node, | ||
109 | struct sk_buff **skb); | ||
110 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol); | 100 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol); |
111 | static void link_state_event(struct tipc_link *l_ptr, u32 event); | 101 | static void link_state_event(struct tipc_link *l_ptr, u32 event); |
112 | static void link_reset_statistics(struct tipc_link *l_ptr); | 102 | static void link_reset_statistics(struct tipc_link *l_ptr); |
@@ -115,7 +105,7 @@ static void tipc_link_sync_xmit(struct tipc_link *l); | |||
115 | static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf); | 105 | static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf); |
116 | static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb); | 106 | static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb); |
117 | static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb); | 107 | static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb); |
118 | 108 | static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb); | |
119 | /* | 109 | /* |
120 | * Simple link routines | 110 | * Simple link routines |
121 | */ | 111 | */ |
@@ -146,34 +136,6 @@ static struct tipc_link *tipc_parallel_link(struct tipc_link *l) | |||
146 | return l->owner->active_links[1]; | 136 | return l->owner->active_links[1]; |
147 | } | 137 | } |
148 | 138 | ||
149 | static void link_init_max_pkt(struct tipc_link *l_ptr) | ||
150 | { | ||
151 | struct tipc_node *node = l_ptr->owner; | ||
152 | struct tipc_net *tn = net_generic(node->net, tipc_net_id); | ||
153 | struct tipc_bearer *b_ptr; | ||
154 | u32 max_pkt; | ||
155 | |||
156 | rcu_read_lock(); | ||
157 | b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]); | ||
158 | if (!b_ptr) { | ||
159 | rcu_read_unlock(); | ||
160 | return; | ||
161 | } | ||
162 | max_pkt = (b_ptr->mtu & ~3); | ||
163 | rcu_read_unlock(); | ||
164 | |||
165 | if (max_pkt > MAX_MSG_SIZE) | ||
166 | max_pkt = MAX_MSG_SIZE; | ||
167 | |||
168 | l_ptr->max_pkt_target = max_pkt; | ||
169 | if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT) | ||
170 | l_ptr->max_pkt = l_ptr->max_pkt_target; | ||
171 | else | ||
172 | l_ptr->max_pkt = MAX_PKT_DEFAULT; | ||
173 | |||
174 | l_ptr->max_pkt_probes = 0; | ||
175 | } | ||
176 | |||
177 | /* | 139 | /* |
178 | * Simple non-static link routines (i.e. referenced outside this file) | 140 | * Simple non-static link routines (i.e. referenced outside this file) |
179 | */ | 141 | */ |
@@ -314,7 +276,8 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, | |||
314 | msg_set_bearer_id(msg, b_ptr->identity); | 276 | msg_set_bearer_id(msg, b_ptr->identity); |
315 | strcpy((char *)msg_data(msg), if_name); | 277 | strcpy((char *)msg_data(msg), if_name); |
316 | l_ptr->net_plane = b_ptr->net_plane; | 278 | l_ptr->net_plane = b_ptr->net_plane; |
317 | link_init_max_pkt(l_ptr); | 279 | l_ptr->advertised_mtu = b_ptr->mtu; |
280 | l_ptr->mtu = l_ptr->advertised_mtu; | ||
318 | l_ptr->priority = b_ptr->priority; | 281 | l_ptr->priority = b_ptr->priority; |
319 | tipc_link_set_queue_limits(l_ptr, b_ptr->window); | 282 | tipc_link_set_queue_limits(l_ptr, b_ptr->window); |
320 | l_ptr->next_out_no = 1; | 283 | l_ptr->next_out_no = 1; |
@@ -333,15 +296,19 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, | |||
333 | } | 296 | } |
334 | 297 | ||
335 | /** | 298 | /** |
336 | * link_delete - Conditional deletion of link. | 299 | * tipc_link_delete - Delete a link |
337 | * If timer still running, real delete is done when it expires | 300 | * @l: link to be deleted |
338 | * @link: link to be deleted | ||
339 | */ | 301 | */ |
340 | void tipc_link_delete(struct tipc_link *link) | 302 | void tipc_link_delete(struct tipc_link *l) |
341 | { | 303 | { |
342 | tipc_link_reset_fragments(link); | 304 | tipc_link_reset(l); |
343 | tipc_node_detach_link(link->owner, link); | 305 | if (del_timer(&l->timer)) |
344 | tipc_link_put(link); | 306 | tipc_link_put(l); |
307 | l->flags |= LINK_STOPPED; | ||
308 | /* Delete link now, or when timer is finished: */ | ||
309 | tipc_link_reset_fragments(l); | ||
310 | tipc_node_detach_link(l->owner, l); | ||
311 | tipc_link_put(l); | ||
345 | } | 312 | } |
346 | 313 | ||
347 | void tipc_link_delete_list(struct net *net, unsigned int bearer_id, | 314 | void tipc_link_delete_list(struct net *net, unsigned int bearer_id, |
@@ -350,23 +317,12 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id, | |||
350 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 317 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
351 | struct tipc_link *link; | 318 | struct tipc_link *link; |
352 | struct tipc_node *node; | 319 | struct tipc_node *node; |
353 | bool del_link; | ||
354 | 320 | ||
355 | rcu_read_lock(); | 321 | rcu_read_lock(); |
356 | list_for_each_entry_rcu(node, &tn->node_list, list) { | 322 | list_for_each_entry_rcu(node, &tn->node_list, list) { |
357 | tipc_node_lock(node); | 323 | tipc_node_lock(node); |
358 | link = node->links[bearer_id]; | 324 | link = node->links[bearer_id]; |
359 | if (!link) { | 325 | if (link) |
360 | tipc_node_unlock(node); | ||
361 | continue; | ||
362 | } | ||
363 | del_link = !tipc_link_is_up(link) && !link->exp_msg_count; | ||
364 | tipc_link_reset(link); | ||
365 | if (del_timer(&link->timer)) | ||
366 | tipc_link_put(link); | ||
367 | link->flags |= LINK_STOPPED; | ||
368 | /* Delete link now, or when failover is finished: */ | ||
369 | if (shutting_down || !tipc_node_is_up(node) || del_link) | ||
370 | tipc_link_delete(link); | 326 | tipc_link_delete(link); |
371 | tipc_node_unlock(node); | 327 | tipc_node_unlock(node); |
372 | } | 328 | } |
@@ -473,17 +429,17 @@ void tipc_link_purge_queues(struct tipc_link *l_ptr) | |||
473 | void tipc_link_reset(struct tipc_link *l_ptr) | 429 | void tipc_link_reset(struct tipc_link *l_ptr) |
474 | { | 430 | { |
475 | u32 prev_state = l_ptr->state; | 431 | u32 prev_state = l_ptr->state; |
476 | u32 checkpoint = l_ptr->next_in_no; | ||
477 | int was_active_link = tipc_link_is_active(l_ptr); | 432 | int was_active_link = tipc_link_is_active(l_ptr); |
478 | struct tipc_node *owner = l_ptr->owner; | 433 | struct tipc_node *owner = l_ptr->owner; |
434 | struct tipc_link *pl = tipc_parallel_link(l_ptr); | ||
479 | 435 | ||
480 | msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff)); | 436 | msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff)); |
481 | 437 | ||
482 | /* Link is down, accept any session */ | 438 | /* Link is down, accept any session */ |
483 | l_ptr->peer_session = INVALID_SESSION; | 439 | l_ptr->peer_session = INVALID_SESSION; |
484 | 440 | ||
485 | /* Prepare for max packet size negotiation */ | 441 | /* Prepare for renewed mtu size negotiation */ |
486 | link_init_max_pkt(l_ptr); | 442 | l_ptr->mtu = l_ptr->advertised_mtu; |
487 | 443 | ||
488 | l_ptr->state = RESET_UNKNOWN; | 444 | l_ptr->state = RESET_UNKNOWN; |
489 | 445 | ||
@@ -493,11 +449,15 @@ void tipc_link_reset(struct tipc_link *l_ptr) | |||
493 | tipc_node_link_down(l_ptr->owner, l_ptr); | 449 | tipc_node_link_down(l_ptr->owner, l_ptr); |
494 | tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr); | 450 | tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr); |
495 | 451 | ||
496 | if (was_active_link && tipc_node_active_links(l_ptr->owner)) { | 452 | if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) { |
497 | l_ptr->reset_checkpoint = checkpoint; | 453 | l_ptr->flags |= LINK_FAILINGOVER; |
498 | l_ptr->exp_msg_count = START_CHANGEOVER; | 454 | l_ptr->failover_checkpt = l_ptr->next_in_no; |
455 | pl->failover_pkts = FIRST_FAILOVER; | ||
456 | pl->failover_checkpt = l_ptr->next_in_no; | ||
457 | pl->failover_skb = l_ptr->reasm_buf; | ||
458 | } else { | ||
459 | kfree_skb(l_ptr->reasm_buf); | ||
499 | } | 460 | } |
500 | |||
501 | /* Clean up all queues, except inputq: */ | 461 | /* Clean up all queues, except inputq: */ |
502 | __skb_queue_purge(&l_ptr->transmq); | 462 | __skb_queue_purge(&l_ptr->transmq); |
503 | __skb_queue_purge(&l_ptr->deferdq); | 463 | __skb_queue_purge(&l_ptr->deferdq); |
@@ -507,6 +467,7 @@ void tipc_link_reset(struct tipc_link *l_ptr) | |||
507 | if (!skb_queue_empty(owner->inputq)) | 467 | if (!skb_queue_empty(owner->inputq)) |
508 | owner->action_flags |= TIPC_MSG_EVT; | 468 | owner->action_flags |= TIPC_MSG_EVT; |
509 | tipc_link_purge_backlog(l_ptr); | 469 | tipc_link_purge_backlog(l_ptr); |
470 | l_ptr->reasm_buf = NULL; | ||
510 | l_ptr->rcv_unacked = 0; | 471 | l_ptr->rcv_unacked = 0; |
511 | l_ptr->checkpoint = 1; | 472 | l_ptr->checkpoint = 1; |
512 | l_ptr->next_out_no = 1; | 473 | l_ptr->next_out_no = 1; |
@@ -558,8 +519,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
558 | if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT)) | 519 | if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT)) |
559 | return; /* Not yet. */ | 520 | return; /* Not yet. */ |
560 | 521 | ||
561 | /* Check whether changeover is going on */ | 522 | if (l_ptr->flags & LINK_FAILINGOVER) { |
562 | if (l_ptr->exp_msg_count) { | ||
563 | if (event == TIMEOUT_EVT) | 523 | if (event == TIMEOUT_EVT) |
564 | link_set_timer(l_ptr, cont_intv); | 524 | link_set_timer(l_ptr, cont_intv); |
565 | return; | 525 | return; |
@@ -576,11 +536,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
576 | l_ptr->checkpoint = l_ptr->next_in_no; | 536 | l_ptr->checkpoint = l_ptr->next_in_no; |
577 | if (tipc_bclink_acks_missing(l_ptr->owner)) { | 537 | if (tipc_bclink_acks_missing(l_ptr->owner)) { |
578 | tipc_link_proto_xmit(l_ptr, STATE_MSG, | 538 | tipc_link_proto_xmit(l_ptr, STATE_MSG, |
579 | 0, 0, 0, 0, 0); | 539 | 0, 0, 0, 0); |
580 | l_ptr->fsm_msg_cnt++; | ||
581 | } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) { | ||
582 | tipc_link_proto_xmit(l_ptr, STATE_MSG, | ||
583 | 1, 0, 0, 0, 0); | ||
584 | l_ptr->fsm_msg_cnt++; | 540 | l_ptr->fsm_msg_cnt++; |
585 | } | 541 | } |
586 | link_set_timer(l_ptr, cont_intv); | 542 | link_set_timer(l_ptr, cont_intv); |
@@ -588,7 +544,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
588 | } | 544 | } |
589 | l_ptr->state = WORKING_UNKNOWN; | 545 | l_ptr->state = WORKING_UNKNOWN; |
590 | l_ptr->fsm_msg_cnt = 0; | 546 | l_ptr->fsm_msg_cnt = 0; |
591 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); | 547 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0); |
592 | l_ptr->fsm_msg_cnt++; | 548 | l_ptr->fsm_msg_cnt++; |
593 | link_set_timer(l_ptr, cont_intv / 4); | 549 | link_set_timer(l_ptr, cont_intv / 4); |
594 | break; | 550 | break; |
@@ -599,7 +555,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
599 | l_ptr->state = RESET_RESET; | 555 | l_ptr->state = RESET_RESET; |
600 | l_ptr->fsm_msg_cnt = 0; | 556 | l_ptr->fsm_msg_cnt = 0; |
601 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 557 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, |
602 | 0, 0, 0, 0, 0); | 558 | 0, 0, 0, 0); |
603 | l_ptr->fsm_msg_cnt++; | 559 | l_ptr->fsm_msg_cnt++; |
604 | link_set_timer(l_ptr, cont_intv); | 560 | link_set_timer(l_ptr, cont_intv); |
605 | break; | 561 | break; |
@@ -622,7 +578,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
622 | l_ptr->state = RESET_RESET; | 578 | l_ptr->state = RESET_RESET; |
623 | l_ptr->fsm_msg_cnt = 0; | 579 | l_ptr->fsm_msg_cnt = 0; |
624 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 580 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, |
625 | 0, 0, 0, 0, 0); | 581 | 0, 0, 0, 0); |
626 | l_ptr->fsm_msg_cnt++; | 582 | l_ptr->fsm_msg_cnt++; |
627 | link_set_timer(l_ptr, cont_intv); | 583 | link_set_timer(l_ptr, cont_intv); |
628 | break; | 584 | break; |
@@ -633,13 +589,13 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
633 | l_ptr->checkpoint = l_ptr->next_in_no; | 589 | l_ptr->checkpoint = l_ptr->next_in_no; |
634 | if (tipc_bclink_acks_missing(l_ptr->owner)) { | 590 | if (tipc_bclink_acks_missing(l_ptr->owner)) { |
635 | tipc_link_proto_xmit(l_ptr, STATE_MSG, | 591 | tipc_link_proto_xmit(l_ptr, STATE_MSG, |
636 | 0, 0, 0, 0, 0); | 592 | 0, 0, 0, 0); |
637 | l_ptr->fsm_msg_cnt++; | 593 | l_ptr->fsm_msg_cnt++; |
638 | } | 594 | } |
639 | link_set_timer(l_ptr, cont_intv); | 595 | link_set_timer(l_ptr, cont_intv); |
640 | } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) { | 596 | } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) { |
641 | tipc_link_proto_xmit(l_ptr, STATE_MSG, | 597 | tipc_link_proto_xmit(l_ptr, STATE_MSG, |
642 | 1, 0, 0, 0, 0); | 598 | 1, 0, 0, 0); |
643 | l_ptr->fsm_msg_cnt++; | 599 | l_ptr->fsm_msg_cnt++; |
644 | link_set_timer(l_ptr, cont_intv / 4); | 600 | link_set_timer(l_ptr, cont_intv / 4); |
645 | } else { /* Link has failed */ | 601 | } else { /* Link has failed */ |
@@ -649,7 +605,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
649 | l_ptr->state = RESET_UNKNOWN; | 605 | l_ptr->state = RESET_UNKNOWN; |
650 | l_ptr->fsm_msg_cnt = 0; | 606 | l_ptr->fsm_msg_cnt = 0; |
651 | tipc_link_proto_xmit(l_ptr, RESET_MSG, | 607 | tipc_link_proto_xmit(l_ptr, RESET_MSG, |
652 | 0, 0, 0, 0, 0); | 608 | 0, 0, 0, 0); |
653 | l_ptr->fsm_msg_cnt++; | 609 | l_ptr->fsm_msg_cnt++; |
654 | link_set_timer(l_ptr, cont_intv); | 610 | link_set_timer(l_ptr, cont_intv); |
655 | } | 611 | } |
@@ -669,7 +625,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
669 | l_ptr->state = WORKING_WORKING; | 625 | l_ptr->state = WORKING_WORKING; |
670 | l_ptr->fsm_msg_cnt = 0; | 626 | l_ptr->fsm_msg_cnt = 0; |
671 | link_activate(l_ptr); | 627 | link_activate(l_ptr); |
672 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); | 628 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0); |
673 | l_ptr->fsm_msg_cnt++; | 629 | l_ptr->fsm_msg_cnt++; |
674 | if (l_ptr->owner->working_links == 1) | 630 | if (l_ptr->owner->working_links == 1) |
675 | tipc_link_sync_xmit(l_ptr); | 631 | tipc_link_sync_xmit(l_ptr); |
@@ -679,7 +635,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
679 | l_ptr->state = RESET_RESET; | 635 | l_ptr->state = RESET_RESET; |
680 | l_ptr->fsm_msg_cnt = 0; | 636 | l_ptr->fsm_msg_cnt = 0; |
681 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 637 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, |
682 | 1, 0, 0, 0, 0); | 638 | 1, 0, 0, 0); |
683 | l_ptr->fsm_msg_cnt++; | 639 | l_ptr->fsm_msg_cnt++; |
684 | link_set_timer(l_ptr, cont_intv); | 640 | link_set_timer(l_ptr, cont_intv); |
685 | break; | 641 | break; |
@@ -689,7 +645,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
689 | link_set_timer(l_ptr, cont_intv); | 645 | link_set_timer(l_ptr, cont_intv); |
690 | break; | 646 | break; |
691 | case TIMEOUT_EVT: | 647 | case TIMEOUT_EVT: |
692 | tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); | 648 | tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0); |
693 | l_ptr->fsm_msg_cnt++; | 649 | l_ptr->fsm_msg_cnt++; |
694 | link_set_timer(l_ptr, cont_intv); | 650 | link_set_timer(l_ptr, cont_intv); |
695 | break; | 651 | break; |
@@ -707,7 +663,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
707 | l_ptr->state = WORKING_WORKING; | 663 | l_ptr->state = WORKING_WORKING; |
708 | l_ptr->fsm_msg_cnt = 0; | 664 | l_ptr->fsm_msg_cnt = 0; |
709 | link_activate(l_ptr); | 665 | link_activate(l_ptr); |
710 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); | 666 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0); |
711 | l_ptr->fsm_msg_cnt++; | 667 | l_ptr->fsm_msg_cnt++; |
712 | if (l_ptr->owner->working_links == 1) | 668 | if (l_ptr->owner->working_links == 1) |
713 | tipc_link_sync_xmit(l_ptr); | 669 | tipc_link_sync_xmit(l_ptr); |
@@ -717,7 +673,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
717 | break; | 673 | break; |
718 | case TIMEOUT_EVT: | 674 | case TIMEOUT_EVT: |
719 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 675 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, |
720 | 0, 0, 0, 0, 0); | 676 | 0, 0, 0, 0); |
721 | l_ptr->fsm_msg_cnt++; | 677 | l_ptr->fsm_msg_cnt++; |
722 | link_set_timer(l_ptr, cont_intv); | 678 | link_set_timer(l_ptr, cont_intv); |
723 | break; | 679 | break; |
@@ -746,7 +702,7 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link, | |||
746 | struct tipc_msg *msg = buf_msg(skb_peek(list)); | 702 | struct tipc_msg *msg = buf_msg(skb_peek(list)); |
747 | unsigned int maxwin = link->window; | 703 | unsigned int maxwin = link->window; |
748 | unsigned int imp = msg_importance(msg); | 704 | unsigned int imp = msg_importance(msg); |
749 | uint mtu = link->max_pkt; | 705 | uint mtu = link->mtu; |
750 | uint ack = mod(link->next_in_no - 1); | 706 | uint ack = mod(link->next_in_no - 1); |
751 | uint seqno = link->next_out_no; | 707 | uint seqno = link->next_out_no; |
752 | uint bc_last_in = link->owner->bclink.last_in; | 708 | uint bc_last_in = link->owner->bclink.last_in; |
@@ -1200,7 +1156,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr) | |||
1200 | link_retrieve_defq(l_ptr, &head); | 1156 | link_retrieve_defq(l_ptr, &head); |
1201 | if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) { | 1157 | if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) { |
1202 | l_ptr->stats.sent_acks++; | 1158 | l_ptr->stats.sent_acks++; |
1203 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); | 1159 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0); |
1204 | } | 1160 | } |
1205 | tipc_link_input(l_ptr, skb); | 1161 | tipc_link_input(l_ptr, skb); |
1206 | skb = NULL; | 1162 | skb = NULL; |
@@ -1243,7 +1199,7 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb) | |||
1243 | node->action_flags |= TIPC_NAMED_MSG_EVT; | 1199 | node->action_flags |= TIPC_NAMED_MSG_EVT; |
1244 | return true; | 1200 | return true; |
1245 | case MSG_BUNDLER: | 1201 | case MSG_BUNDLER: |
1246 | case CHANGEOVER_PROTOCOL: | 1202 | case TUNNEL_PROTOCOL: |
1247 | case MSG_FRAGMENTER: | 1203 | case MSG_FRAGMENTER: |
1248 | case BCAST_PROTOCOL: | 1204 | case BCAST_PROTOCOL: |
1249 | return false; | 1205 | return false; |
@@ -1270,12 +1226,14 @@ static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb) | |||
1270 | return; | 1226 | return; |
1271 | 1227 | ||
1272 | switch (msg_user(msg)) { | 1228 | switch (msg_user(msg)) { |
1273 | case CHANGEOVER_PROTOCOL: | 1229 | case TUNNEL_PROTOCOL: |
1274 | if (msg_dup(msg)) { | 1230 | if (msg_dup(msg)) { |
1275 | link->flags |= LINK_SYNCHING; | 1231 | link->flags |= LINK_SYNCHING; |
1276 | link->synch_point = msg_seqno(msg_get_wrapped(msg)); | 1232 | link->synch_point = msg_seqno(msg_get_wrapped(msg)); |
1233 | kfree_skb(skb); | ||
1234 | break; | ||
1277 | } | 1235 | } |
1278 | if (!tipc_link_tunnel_rcv(node, &skb)) | 1236 | if (!tipc_link_failover_rcv(link, &skb)) |
1279 | break; | 1237 | break; |
1280 | if (msg_user(buf_msg(skb)) != MSG_BUNDLER) { | 1238 | if (msg_user(buf_msg(skb)) != MSG_BUNDLER) { |
1281 | tipc_data_input(link, skb); | 1239 | tipc_data_input(link, skb); |
@@ -1373,7 +1331,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, | |||
1373 | if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) { | 1331 | if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) { |
1374 | l_ptr->stats.deferred_recv++; | 1332 | l_ptr->stats.deferred_recv++; |
1375 | if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1) | 1333 | if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1) |
1376 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); | 1334 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0); |
1377 | } else { | 1335 | } else { |
1378 | l_ptr->stats.duplicates++; | 1336 | l_ptr->stats.duplicates++; |
1379 | } | 1337 | } |
@@ -1383,15 +1341,15 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, | |||
1383 | * Send protocol message to the other endpoint. | 1341 | * Send protocol message to the other endpoint. |
1384 | */ | 1342 | */ |
1385 | void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | 1343 | void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, |
1386 | u32 gap, u32 tolerance, u32 priority, u32 ack_mtu) | 1344 | u32 gap, u32 tolerance, u32 priority) |
1387 | { | 1345 | { |
1388 | struct sk_buff *buf = NULL; | 1346 | struct sk_buff *buf = NULL; |
1389 | struct tipc_msg *msg = l_ptr->pmsg; | 1347 | struct tipc_msg *msg = l_ptr->pmsg; |
1390 | u32 msg_size = sizeof(l_ptr->proto_msg); | 1348 | u32 msg_size = sizeof(l_ptr->proto_msg); |
1391 | int r_flag; | 1349 | int r_flag; |
1392 | 1350 | ||
1393 | /* Don't send protocol message during link changeover */ | 1351 | /* Don't send protocol message during link failover */ |
1394 | if (l_ptr->exp_msg_count) | 1352 | if (l_ptr->flags & LINK_FAILINGOVER) |
1395 | return; | 1353 | return; |
1396 | 1354 | ||
1397 | /* Abort non-RESET send if communication with node is prohibited */ | 1355 | /* Abort non-RESET send if communication with node is prohibited */ |
@@ -1421,35 +1379,20 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | |||
1421 | l_ptr->stats.sent_nacks++; | 1379 | l_ptr->stats.sent_nacks++; |
1422 | msg_set_link_tolerance(msg, tolerance); | 1380 | msg_set_link_tolerance(msg, tolerance); |
1423 | msg_set_linkprio(msg, priority); | 1381 | msg_set_linkprio(msg, priority); |
1424 | msg_set_max_pkt(msg, ack_mtu); | 1382 | msg_set_max_pkt(msg, l_ptr->mtu); |
1425 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | 1383 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); |
1426 | msg_set_probe(msg, probe_msg != 0); | 1384 | msg_set_probe(msg, probe_msg != 0); |
1427 | if (probe_msg) { | 1385 | if (probe_msg) |
1428 | u32 mtu = l_ptr->max_pkt; | ||
1429 | |||
1430 | if ((mtu < l_ptr->max_pkt_target) && | ||
1431 | link_working_working(l_ptr) && | ||
1432 | l_ptr->fsm_msg_cnt) { | ||
1433 | msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3; | ||
1434 | if (l_ptr->max_pkt_probes == 10) { | ||
1435 | l_ptr->max_pkt_target = (msg_size - 4); | ||
1436 | l_ptr->max_pkt_probes = 0; | ||
1437 | msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3; | ||
1438 | } | ||
1439 | l_ptr->max_pkt_probes++; | ||
1440 | } | ||
1441 | |||
1442 | l_ptr->stats.sent_probes++; | 1386 | l_ptr->stats.sent_probes++; |
1443 | } | ||
1444 | l_ptr->stats.sent_states++; | 1387 | l_ptr->stats.sent_states++; |
1445 | } else { /* RESET_MSG or ACTIVATE_MSG */ | 1388 | } else { /* RESET_MSG or ACTIVATE_MSG */ |
1446 | msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1)); | 1389 | msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1)); |
1447 | msg_set_seq_gap(msg, 0); | 1390 | msg_set_seq_gap(msg, 0); |
1448 | msg_set_next_sent(msg, 1); | 1391 | msg_set_next_sent(msg, 1); |
1449 | msg_set_probe(msg, 0); | 1392 | msg_set_probe(msg, 0); |
1450 | msg_set_link_tolerance(msg, l_ptr->tolerance); | 1393 | msg_set_link_tolerance(msg, l_ptr->tolerance); |
1451 | msg_set_linkprio(msg, l_ptr->priority); | 1394 | msg_set_linkprio(msg, l_ptr->priority); |
1452 | msg_set_max_pkt(msg, l_ptr->max_pkt_target); | 1395 | msg_set_max_pkt(msg, l_ptr->advertised_mtu); |
1453 | } | 1396 | } |
1454 | 1397 | ||
1455 | r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr)); | 1398 | r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr)); |
@@ -1480,13 +1423,10 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, | |||
1480 | struct sk_buff *buf) | 1423 | struct sk_buff *buf) |
1481 | { | 1424 | { |
1482 | u32 rec_gap = 0; | 1425 | u32 rec_gap = 0; |
1483 | u32 max_pkt_info; | ||
1484 | u32 max_pkt_ack; | ||
1485 | u32 msg_tol; | 1426 | u32 msg_tol; |
1486 | struct tipc_msg *msg = buf_msg(buf); | 1427 | struct tipc_msg *msg = buf_msg(buf); |
1487 | 1428 | ||
1488 | /* Discard protocol message during link changeover */ | 1429 | if (l_ptr->flags & LINK_FAILINGOVER) |
1489 | if (l_ptr->exp_msg_count) | ||
1490 | goto exit; | 1430 | goto exit; |
1491 | 1431 | ||
1492 | if (l_ptr->net_plane != msg_net_plane(msg)) | 1432 | if (l_ptr->net_plane != msg_net_plane(msg)) |
@@ -1525,15 +1465,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, | |||
1525 | if (msg_linkprio(msg) > l_ptr->priority) | 1465 | if (msg_linkprio(msg) > l_ptr->priority) |
1526 | l_ptr->priority = msg_linkprio(msg); | 1466 | l_ptr->priority = msg_linkprio(msg); |
1527 | 1467 | ||
1528 | max_pkt_info = msg_max_pkt(msg); | 1468 | if (l_ptr->mtu > msg_max_pkt(msg)) |
1529 | if (max_pkt_info) { | 1469 | l_ptr->mtu = msg_max_pkt(msg); |
1530 | if (max_pkt_info < l_ptr->max_pkt_target) | ||
1531 | l_ptr->max_pkt_target = max_pkt_info; | ||
1532 | if (l_ptr->max_pkt > l_ptr->max_pkt_target) | ||
1533 | l_ptr->max_pkt = l_ptr->max_pkt_target; | ||
1534 | } else { | ||
1535 | l_ptr->max_pkt = l_ptr->max_pkt_target; | ||
1536 | } | ||
1537 | 1470 | ||
1538 | /* Synchronize broadcast link info, if not done previously */ | 1471 | /* Synchronize broadcast link info, if not done previously */ |
1539 | if (!tipc_node_is_up(l_ptr->owner)) { | 1472 | if (!tipc_node_is_up(l_ptr->owner)) { |
@@ -1578,18 +1511,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, | |||
1578 | mod(l_ptr->next_in_no)); | 1511 | mod(l_ptr->next_in_no)); |
1579 | } | 1512 | } |
1580 | 1513 | ||
1581 | max_pkt_ack = msg_max_pkt(msg); | 1514 | if (msg_probe(msg)) |
1582 | if (max_pkt_ack > l_ptr->max_pkt) { | ||
1583 | l_ptr->max_pkt = max_pkt_ack; | ||
1584 | l_ptr->max_pkt_probes = 0; | ||
1585 | } | ||
1586 | |||
1587 | max_pkt_ack = 0; | ||
1588 | if (msg_probe(msg)) { | ||
1589 | l_ptr->stats.recv_probes++; | 1515 | l_ptr->stats.recv_probes++; |
1590 | if (msg_size(msg) > sizeof(l_ptr->proto_msg)) | ||
1591 | max_pkt_ack = msg_size(msg); | ||
1592 | } | ||
1593 | 1516 | ||
1594 | /* Protocol message before retransmits, reduce loss risk */ | 1517 | /* Protocol message before retransmits, reduce loss risk */ |
1595 | if (l_ptr->owner->bclink.recv_permitted) | 1518 | if (l_ptr->owner->bclink.recv_permitted) |
@@ -1597,8 +1520,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, | |||
1597 | msg_last_bcast(msg)); | 1520 | msg_last_bcast(msg)); |
1598 | 1521 | ||
1599 | if (rec_gap || (msg_probe(msg))) { | 1522 | if (rec_gap || (msg_probe(msg))) { |
1600 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0, | 1523 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, |
1601 | 0, max_pkt_ack); | 1524 | rec_gap, 0, 0); |
1602 | } | 1525 | } |
1603 | if (msg_seq_gap(msg)) { | 1526 | if (msg_seq_gap(msg)) { |
1604 | l_ptr->stats.recv_nacks++; | 1527 | l_ptr->stats.recv_nacks++; |
@@ -1658,8 +1581,8 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | |||
1658 | if (!tunnel) | 1581 | if (!tunnel) |
1659 | return; | 1582 | return; |
1660 | 1583 | ||
1661 | tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL, | 1584 | tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL, |
1662 | ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); | 1585 | FAILOVER_MSG, INT_H_SIZE, l_ptr->addr); |
1663 | skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq); | 1586 | skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq); |
1664 | tipc_link_purge_backlog(l_ptr); | 1587 | tipc_link_purge_backlog(l_ptr); |
1665 | msgcount = skb_queue_len(&l_ptr->transmq); | 1588 | msgcount = skb_queue_len(&l_ptr->transmq); |
@@ -1721,8 +1644,8 @@ void tipc_link_dup_queue_xmit(struct tipc_link *link, | |||
1721 | struct sk_buff_head *queue = &link->transmq; | 1644 | struct sk_buff_head *queue = &link->transmq; |
1722 | int mcnt; | 1645 | int mcnt; |
1723 | 1646 | ||
1724 | tipc_msg_init(link_own_addr(link), &tnl_hdr, CHANGEOVER_PROTOCOL, | 1647 | tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL, |
1725 | DUPLICATE_MSG, INT_H_SIZE, link->addr); | 1648 | SYNCH_MSG, INT_H_SIZE, link->addr); |
1726 | mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq); | 1649 | mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq); |
1727 | msg_set_msgcnt(&tnl_hdr, mcnt); | 1650 | msg_set_msgcnt(&tnl_hdr, mcnt); |
1728 | msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id); | 1651 | msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id); |
@@ -1755,101 +1678,63 @@ tunnel_queue: | |||
1755 | goto tunnel_queue; | 1678 | goto tunnel_queue; |
1756 | } | 1679 | } |
1757 | 1680 | ||
1758 | /* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet. | 1681 | /* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet |
1759 | * Owner node is locked. | ||
1760 | */ | ||
1761 | static void tipc_link_dup_rcv(struct tipc_link *link, | ||
1762 | struct sk_buff *skb) | ||
1763 | { | ||
1764 | struct sk_buff *iskb; | ||
1765 | int pos = 0; | ||
1766 | |||
1767 | if (!tipc_link_is_up(link)) | ||
1768 | return; | ||
1769 | |||
1770 | if (!tipc_msg_extract(skb, &iskb, &pos)) { | ||
1771 | pr_warn("%sfailed to extract inner dup pkt\n", link_co_err); | ||
1772 | return; | ||
1773 | } | ||
1774 | /* Append buffer to deferred queue, if applicable: */ | ||
1775 | link_handle_out_of_seq_msg(link, iskb); | ||
1776 | } | ||
1777 | |||
1778 | /* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet | ||
1779 | * Owner node is locked. | 1682 | * Owner node is locked. |
1780 | */ | 1683 | */ |
1781 | static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr, | 1684 | static bool tipc_link_failover_rcv(struct tipc_link *link, |
1782 | struct sk_buff *t_buf) | 1685 | struct sk_buff **skb) |
1783 | { | 1686 | { |
1784 | struct tipc_msg *t_msg = buf_msg(t_buf); | 1687 | struct tipc_msg *msg = buf_msg(*skb); |
1785 | struct sk_buff *buf = NULL; | 1688 | struct sk_buff *iskb = NULL; |
1786 | struct tipc_msg *msg; | 1689 | struct tipc_link *pl = NULL; |
1690 | int bearer_id = msg_bearer_id(msg); | ||
1787 | int pos = 0; | 1691 | int pos = 0; |
1788 | 1692 | ||
1789 | if (tipc_link_is_up(l_ptr)) | 1693 | if (msg_type(msg) != FAILOVER_MSG) { |
1790 | tipc_link_reset(l_ptr); | 1694 | pr_warn("%sunknown tunnel pkt received\n", link_co_err); |
1791 | 1695 | goto exit; | |
1792 | /* First failover packet? */ | ||
1793 | if (l_ptr->exp_msg_count == START_CHANGEOVER) | ||
1794 | l_ptr->exp_msg_count = msg_msgcnt(t_msg); | ||
1795 | |||
1796 | /* Should there be an inner packet? */ | ||
1797 | if (l_ptr->exp_msg_count) { | ||
1798 | l_ptr->exp_msg_count--; | ||
1799 | if (!tipc_msg_extract(t_buf, &buf, &pos)) { | ||
1800 | pr_warn("%sno inner failover pkt\n", link_co_err); | ||
1801 | goto exit; | ||
1802 | } | ||
1803 | msg = buf_msg(buf); | ||
1804 | |||
1805 | if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) { | ||
1806 | kfree_skb(buf); | ||
1807 | buf = NULL; | ||
1808 | goto exit; | ||
1809 | } | ||
1810 | if (msg_user(msg) == MSG_FRAGMENTER) { | ||
1811 | l_ptr->stats.recv_fragments++; | ||
1812 | tipc_buf_append(&l_ptr->reasm_buf, &buf); | ||
1813 | } | ||
1814 | } | 1696 | } |
1815 | exit: | 1697 | if (bearer_id >= MAX_BEARERS) |
1816 | if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED)) | 1698 | goto exit; |
1817 | tipc_link_delete(l_ptr); | ||
1818 | return buf; | ||
1819 | } | ||
1820 | 1699 | ||
1821 | /* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent | 1700 | if (bearer_id == link->bearer_id) |
1822 | * via other link as result of a failover (ORIGINAL_MSG) or | 1701 | goto exit; |
1823 | * a new active link (DUPLICATE_MSG). Failover packets are | ||
1824 | * returned to the active link for delivery upwards. | ||
1825 | * Owner node is locked. | ||
1826 | */ | ||
1827 | static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr, | ||
1828 | struct sk_buff **buf) | ||
1829 | { | ||
1830 | struct sk_buff *t_buf = *buf; | ||
1831 | struct tipc_link *l_ptr; | ||
1832 | struct tipc_msg *t_msg = buf_msg(t_buf); | ||
1833 | u32 bearer_id = msg_bearer_id(t_msg); | ||
1834 | 1702 | ||
1835 | *buf = NULL; | 1703 | pl = link->owner->links[bearer_id]; |
1704 | if (pl && tipc_link_is_up(pl)) | ||
1705 | tipc_link_reset(pl); | ||
1836 | 1706 | ||
1837 | if (bearer_id >= MAX_BEARERS) | 1707 | if (link->failover_pkts == FIRST_FAILOVER) |
1708 | link->failover_pkts = msg_msgcnt(msg); | ||
1709 | |||
1710 | /* Should we expect an inner packet? */ | ||
1711 | if (!link->failover_pkts) | ||
1838 | goto exit; | 1712 | goto exit; |
1839 | 1713 | ||
1840 | l_ptr = n_ptr->links[bearer_id]; | 1714 | if (!tipc_msg_extract(*skb, &iskb, &pos)) { |
1841 | if (!l_ptr) | 1715 | pr_warn("%sno inner failover pkt\n", link_co_err); |
1716 | *skb = NULL; | ||
1842 | goto exit; | 1717 | goto exit; |
1718 | } | ||
1719 | link->failover_pkts--; | ||
1720 | *skb = NULL; | ||
1843 | 1721 | ||
1844 | if (msg_type(t_msg) == DUPLICATE_MSG) | 1722 | /* Was this packet already delivered? */ |
1845 | tipc_link_dup_rcv(l_ptr, t_buf); | 1723 | if (less(buf_seqno(iskb), link->failover_checkpt)) { |
1846 | else if (msg_type(t_msg) == ORIGINAL_MSG) | 1724 | kfree_skb(iskb); |
1847 | *buf = tipc_link_failover_rcv(l_ptr, t_buf); | 1725 | iskb = NULL; |
1848 | else | 1726 | goto exit; |
1849 | pr_warn("%sunknown tunnel pkt received\n", link_co_err); | 1727 | } |
1728 | if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) { | ||
1729 | link->stats.recv_fragments++; | ||
1730 | tipc_buf_append(&link->failover_skb, &iskb); | ||
1731 | } | ||
1850 | exit: | 1732 | exit: |
1851 | kfree_skb(t_buf); | 1733 | if (!link->failover_pkts && pl) |
1852 | return *buf != NULL; | 1734 | pl->flags &= ~LINK_FAILINGOVER; |
1735 | kfree_skb(*skb); | ||
1736 | *skb = iskb; | ||
1737 | return *skb; | ||
1853 | } | 1738 | } |
1854 | 1739 | ||
1855 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol) | 1740 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol) |
@@ -1866,7 +1751,7 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol) | |||
1866 | 1751 | ||
1867 | void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) | 1752 | void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) |
1868 | { | 1753 | { |
1869 | int max_bulk = TIPC_MAX_PUBLICATIONS / (l->max_pkt / ITEM_SIZE); | 1754 | int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE); |
1870 | 1755 | ||
1871 | l->window = win; | 1756 | l->window = win; |
1872 | l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2; | 1757 | l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2; |
@@ -2038,14 +1923,14 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) | |||
2038 | 1923 | ||
2039 | tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); | 1924 | tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); |
2040 | link_set_supervision_props(link, tol); | 1925 | link_set_supervision_props(link, tol); |
2041 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0); | 1926 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0); |
2042 | } | 1927 | } |
2043 | if (props[TIPC_NLA_PROP_PRIO]) { | 1928 | if (props[TIPC_NLA_PROP_PRIO]) { |
2044 | u32 prio; | 1929 | u32 prio; |
2045 | 1930 | ||
2046 | prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); | 1931 | prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); |
2047 | link->priority = prio; | 1932 | link->priority = prio; |
2048 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0); | 1933 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio); |
2049 | } | 1934 | } |
2050 | if (props[TIPC_NLA_PROP_WIN]) { | 1935 | if (props[TIPC_NLA_PROP_WIN]) { |
2051 | u32 win; | 1936 | u32 win; |
@@ -2150,7 +2035,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, | |||
2150 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, | 2035 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, |
2151 | tipc_cluster_mask(tn->own_addr))) | 2036 | tipc_cluster_mask(tn->own_addr))) |
2152 | goto attr_msg_full; | 2037 | goto attr_msg_full; |
2153 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt)) | 2038 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu)) |
2154 | goto attr_msg_full; | 2039 | goto attr_msg_full; |
2155 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no)) | 2040 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no)) |
2156 | goto attr_msg_full; | 2041 | goto attr_msg_full; |
diff --git a/net/tipc/link.h b/net/tipc/link.h index d2b5663643da..b5b4e3554d4e 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h | |||
@@ -58,9 +58,10 @@ | |||
58 | 58 | ||
59 | /* Link endpoint execution states | 59 | /* Link endpoint execution states |
60 | */ | 60 | */ |
61 | #define LINK_STARTED 0x0001 | 61 | #define LINK_STARTED 0x0001 |
62 | #define LINK_STOPPED 0x0002 | 62 | #define LINK_STOPPED 0x0002 |
63 | #define LINK_SYNCHING 0x0004 | 63 | #define LINK_SYNCHING 0x0004 |
64 | #define LINK_FAILINGOVER 0x0008 | ||
64 | 65 | ||
65 | /* Starting value for maximum packet size negotiation on unicast links | 66 | /* Starting value for maximum packet size negotiation on unicast links |
66 | * (unless bearer MTU is less) | 67 | * (unless bearer MTU is less) |
@@ -122,9 +123,8 @@ struct tipc_stats { | |||
122 | * @backlog_limit: backlog queue congestion thresholds (indexed by importance) | 123 | * @backlog_limit: backlog queue congestion thresholds (indexed by importance) |
123 | * @exp_msg_count: # of tunnelled messages expected during link changeover | 124 | * @exp_msg_count: # of tunnelled messages expected during link changeover |
124 | * @reset_checkpoint: seq # of last acknowledged message at time of link reset | 125 | * @reset_checkpoint: seq # of last acknowledged message at time of link reset |
125 | * @max_pkt: current maximum packet size for this link | 126 | * @mtu: current maximum packet size for this link |
126 | * @max_pkt_target: desired maximum packet size for this link | 127 | * @advertised_mtu: advertised own mtu when link is being established |
127 | * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target) | ||
128 | * @transmitq: queue for sent, non-acked messages | 128 | * @transmitq: queue for sent, non-acked messages |
129 | * @backlogq: queue for messages waiting to be sent | 129 | * @backlogq: queue for messages waiting to be sent |
130 | * @next_out_no: next sequence number to use for outbound messages | 130 | * @next_out_no: next sequence number to use for outbound messages |
@@ -167,16 +167,16 @@ struct tipc_link { | |||
167 | struct tipc_msg *pmsg; | 167 | struct tipc_msg *pmsg; |
168 | u32 priority; | 168 | u32 priority; |
169 | char net_plane; | 169 | char net_plane; |
170 | u16 synch_point; | ||
170 | 171 | ||
171 | /* Changeover */ | 172 | /* Failover */ |
172 | u32 exp_msg_count; | 173 | u16 failover_pkts; |
173 | u32 reset_checkpoint; | 174 | u16 failover_checkpt; |
174 | u32 synch_point; | 175 | struct sk_buff *failover_skb; |
175 | 176 | ||
176 | /* Max packet negotiation */ | 177 | /* Max packet negotiation */ |
177 | u32 max_pkt; | 178 | u16 mtu; |
178 | u32 max_pkt_target; | 179 | u16 advertised_mtu; |
179 | u32 max_pkt_probes; | ||
180 | 180 | ||
181 | /* Sending */ | 181 | /* Sending */ |
182 | struct sk_buff_head transmq; | 182 | struct sk_buff_head transmq; |
@@ -201,7 +201,6 @@ struct tipc_link { | |||
201 | struct sk_buff_head wakeupq; | 201 | struct sk_buff_head wakeupq; |
202 | 202 | ||
203 | /* Fragmentation/reassembly */ | 203 | /* Fragmentation/reassembly */ |
204 | u32 long_msg_seq_no; | ||
205 | struct sk_buff *reasm_buf; | 204 | struct sk_buff *reasm_buf; |
206 | 205 | ||
207 | /* Statistics */ | 206 | /* Statistics */ |
@@ -232,7 +231,7 @@ int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest, | |||
232 | int __tipc_link_xmit(struct net *net, struct tipc_link *link, | 231 | int __tipc_link_xmit(struct net *net, struct tipc_link *link, |
233 | struct sk_buff_head *list); | 232 | struct sk_buff_head *list); |
234 | void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, | 233 | void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, |
235 | u32 gap, u32 tolerance, u32 priority, u32 acked_mtu); | 234 | u32 gap, u32 tolerance, u32 priority); |
236 | void tipc_link_push_packets(struct tipc_link *l_ptr); | 235 | void tipc_link_push_packets(struct tipc_link *l_ptr); |
237 | u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf); | 236 | u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf); |
238 | void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window); | 237 | void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window); |
diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 3bb499c61918..c3e96e815418 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c | |||
@@ -355,7 +355,7 @@ bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu) | |||
355 | start = align(bsz); | 355 | start = align(bsz); |
356 | pad = start - bsz; | 356 | pad = start - bsz; |
357 | 357 | ||
358 | if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL)) | 358 | if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL)) |
359 | return false; | 359 | return false; |
360 | if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) | 360 | if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) |
361 | return false; | 361 | return false; |
@@ -433,7 +433,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode) | |||
433 | 433 | ||
434 | if (msg_user(msg) == MSG_FRAGMENTER) | 434 | if (msg_user(msg) == MSG_FRAGMENTER) |
435 | return false; | 435 | return false; |
436 | if (msg_user(msg) == CHANGEOVER_PROTOCOL) | 436 | if (msg_user(msg) == TUNNEL_PROTOCOL) |
437 | return false; | 437 | return false; |
438 | if (msg_user(msg) == BCAST_PROTOCOL) | 438 | if (msg_user(msg) == BCAST_PROTOCOL) |
439 | return false; | 439 | return false; |
diff --git a/net/tipc/msg.h b/net/tipc/msg.h index d273207ede28..e1d3595e2ee9 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h | |||
@@ -72,7 +72,7 @@ struct plist; | |||
72 | #define MSG_BUNDLER 6 | 72 | #define MSG_BUNDLER 6 |
73 | #define LINK_PROTOCOL 7 | 73 | #define LINK_PROTOCOL 7 |
74 | #define CONN_MANAGER 8 | 74 | #define CONN_MANAGER 8 |
75 | #define CHANGEOVER_PROTOCOL 10 | 75 | #define TUNNEL_PROTOCOL 10 |
76 | #define NAME_DISTRIBUTOR 11 | 76 | #define NAME_DISTRIBUTOR 11 |
77 | #define MSG_FRAGMENTER 12 | 77 | #define MSG_FRAGMENTER 12 |
78 | #define LINK_CONFIG 13 | 78 | #define LINK_CONFIG 13 |
@@ -512,8 +512,8 @@ static inline void msg_set_nameupper(struct tipc_msg *m, u32 n) | |||
512 | /* | 512 | /* |
513 | * Changeover tunnel message types | 513 | * Changeover tunnel message types |
514 | */ | 514 | */ |
515 | #define DUPLICATE_MSG 0 | 515 | #define SYNCH_MSG 0 |
516 | #define ORIGINAL_MSG 1 | 516 | #define FAILOVER_MSG 1 |
517 | 517 | ||
518 | /* | 518 | /* |
519 | * Config protocol message types | 519 | * Config protocol message types |
@@ -556,9 +556,9 @@ static inline void msg_set_node_capabilities(struct tipc_msg *m, u32 n) | |||
556 | 556 | ||
557 | static inline bool msg_dup(struct tipc_msg *m) | 557 | static inline bool msg_dup(struct tipc_msg *m) |
558 | { | 558 | { |
559 | if (likely(msg_user(m) != CHANGEOVER_PROTOCOL)) | 559 | if (likely(msg_user(m) != TUNNEL_PROTOCOL)) |
560 | return false; | 560 | return false; |
561 | if (msg_type(m) != DUPLICATE_MSG) | 561 | if (msg_type(m) != SYNCH_MSG) |
562 | return false; | 562 | return false; |
563 | return true; | 563 | return true; |
564 | } | 564 | } |
diff --git a/net/tipc/node.c b/net/tipc/node.c index 3e4f04897c03..22c059ad2999 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -254,8 +254,8 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | |||
254 | active[0] = active[1] = l_ptr; | 254 | active[0] = active[1] = l_ptr; |
255 | exit: | 255 | exit: |
256 | /* Leave room for changeover header when returning 'mtu' to users: */ | 256 | /* Leave room for changeover header when returning 'mtu' to users: */ |
257 | n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE; | 257 | n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE; |
258 | n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE; | 258 | n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE; |
259 | } | 259 | } |
260 | 260 | ||
261 | /** | 261 | /** |
@@ -319,11 +319,10 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | |||
319 | 319 | ||
320 | /* Leave room for changeover header when returning 'mtu' to users: */ | 320 | /* Leave room for changeover header when returning 'mtu' to users: */ |
321 | if (active[0]) { | 321 | if (active[0]) { |
322 | n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE; | 322 | n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE; |
323 | n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE; | 323 | n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE; |
324 | return; | 324 | return; |
325 | } | 325 | } |
326 | |||
327 | /* Loopback link went down? No fragmentation needed from now on. */ | 326 | /* Loopback link went down? No fragmentation needed from now on. */ |
328 | if (n_ptr->addr == tn->own_addr) { | 327 | if (n_ptr->addr == tn->own_addr) { |
329 | n_ptr->act_mtus[0] = MAX_MSG_SIZE; | 328 | n_ptr->act_mtus[0] = MAX_MSG_SIZE; |
@@ -394,18 +393,17 @@ static void node_lost_contact(struct tipc_node *n_ptr) | |||
394 | n_ptr->bclink.recv_permitted = false; | 393 | n_ptr->bclink.recv_permitted = false; |
395 | } | 394 | } |
396 | 395 | ||
397 | /* Abort link changeover */ | 396 | /* Abort any ongoing link failover */ |
398 | for (i = 0; i < MAX_BEARERS; i++) { | 397 | for (i = 0; i < MAX_BEARERS; i++) { |
399 | struct tipc_link *l_ptr = n_ptr->links[i]; | 398 | struct tipc_link *l_ptr = n_ptr->links[i]; |
400 | if (!l_ptr) | 399 | if (!l_ptr) |
401 | continue; | 400 | continue; |
402 | l_ptr->reset_checkpoint = l_ptr->next_in_no; | 401 | l_ptr->flags &= ~LINK_FAILINGOVER; |
403 | l_ptr->exp_msg_count = 0; | 402 | l_ptr->failover_checkpt = 0; |
403 | l_ptr->failover_pkts = 0; | ||
404 | kfree_skb(l_ptr->failover_skb); | ||
405 | l_ptr->failover_skb = NULL; | ||
404 | tipc_link_reset_fragments(l_ptr); | 406 | tipc_link_reset_fragments(l_ptr); |
405 | |||
406 | /* Link marked for deletion after failover? => do it now */ | ||
407 | if (l_ptr->flags & LINK_STOPPED) | ||
408 | tipc_link_delete(l_ptr); | ||
409 | } | 407 | } |
410 | 408 | ||
411 | n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN; | 409 | n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN; |
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index ef3d7aa2854a..66deebc66aa1 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c | |||
@@ -176,7 +176,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb, | |||
176 | goto tx_error; | 176 | goto tx_error; |
177 | } | 177 | } |
178 | ttl = ip4_dst_hoplimit(&rt->dst); | 178 | ttl = ip4_dst_hoplimit(&rt->dst); |
179 | err = udp_tunnel_xmit_skb(rt, clone, src->ipv4.s_addr, | 179 | err = udp_tunnel_xmit_skb(rt, ub->ubsock->sk, clone, |
180 | src->ipv4.s_addr, | ||
180 | dst->ipv4.s_addr, 0, ttl, 0, | 181 | dst->ipv4.s_addr, 0, ttl, 0, |
181 | src->udp_port, dst->udp_port, | 182 | src->udp_port, dst->udp_port, |
182 | false, true); | 183 | false, true); |
@@ -197,7 +198,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb, | |||
197 | if (err) | 198 | if (err) |
198 | goto tx_error; | 199 | goto tx_error; |
199 | ttl = ip6_dst_hoplimit(ndst); | 200 | ttl = ip6_dst_hoplimit(ndst); |
200 | err = udp_tunnel6_xmit_skb(ndst, clone, ndst->dev, &src->ipv6, | 201 | err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, clone, |
202 | ndst->dev, &src->ipv6, | ||
201 | &dst->ipv6, 0, ttl, src->udp_port, | 203 | &dst->ipv6, 0, ttl, src->udp_port, |
202 | dst->udp_port, false); | 204 | dst->udp_port, false); |
203 | #endif | 205 | #endif |
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 7c532856b398..fbcedbe33190 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <net/dst.h> | 19 | #include <net/dst.h> |
20 | #include <net/xfrm.h> | 20 | #include <net/xfrm.h> |
21 | 21 | ||
22 | static int xfrm_output2(struct sk_buff *skb); | 22 | static int xfrm_output2(struct sock *sk, struct sk_buff *skb); |
23 | 23 | ||
24 | static int xfrm_skb_check_space(struct sk_buff *skb) | 24 | static int xfrm_skb_check_space(struct sk_buff *skb) |
25 | { | 25 | { |
@@ -130,7 +130,7 @@ int xfrm_output_resume(struct sk_buff *skb, int err) | |||
130 | return dst_output(skb); | 130 | return dst_output(skb); |
131 | 131 | ||
132 | err = nf_hook(skb_dst(skb)->ops->family, | 132 | err = nf_hook(skb_dst(skb)->ops->family, |
133 | NF_INET_POST_ROUTING, skb, | 133 | NF_INET_POST_ROUTING, skb->sk, skb, |
134 | NULL, skb_dst(skb)->dev, xfrm_output2); | 134 | NULL, skb_dst(skb)->dev, xfrm_output2); |
135 | if (unlikely(err != 1)) | 135 | if (unlikely(err != 1)) |
136 | goto out; | 136 | goto out; |
@@ -144,12 +144,12 @@ out: | |||
144 | } | 144 | } |
145 | EXPORT_SYMBOL_GPL(xfrm_output_resume); | 145 | EXPORT_SYMBOL_GPL(xfrm_output_resume); |
146 | 146 | ||
147 | static int xfrm_output2(struct sk_buff *skb) | 147 | static int xfrm_output2(struct sock *sk, struct sk_buff *skb) |
148 | { | 148 | { |
149 | return xfrm_output_resume(skb, 1); | 149 | return xfrm_output_resume(skb, 1); |
150 | } | 150 | } |
151 | 151 | ||
152 | static int xfrm_output_gso(struct sk_buff *skb) | 152 | static int xfrm_output_gso(struct sock *sk, struct sk_buff *skb) |
153 | { | 153 | { |
154 | struct sk_buff *segs; | 154 | struct sk_buff *segs; |
155 | 155 | ||
@@ -165,7 +165,7 @@ static int xfrm_output_gso(struct sk_buff *skb) | |||
165 | int err; | 165 | int err; |
166 | 166 | ||
167 | segs->next = NULL; | 167 | segs->next = NULL; |
168 | err = xfrm_output2(segs); | 168 | err = xfrm_output2(sk, segs); |
169 | 169 | ||
170 | if (unlikely(err)) { | 170 | if (unlikely(err)) { |
171 | kfree_skb_list(nskb); | 171 | kfree_skb_list(nskb); |
@@ -178,13 +178,13 @@ static int xfrm_output_gso(struct sk_buff *skb) | |||
178 | return 0; | 178 | return 0; |
179 | } | 179 | } |
180 | 180 | ||
181 | int xfrm_output(struct sk_buff *skb) | 181 | int xfrm_output(struct sock *sk, struct sk_buff *skb) |
182 | { | 182 | { |
183 | struct net *net = dev_net(skb_dst(skb)->dev); | 183 | struct net *net = dev_net(skb_dst(skb)->dev); |
184 | int err; | 184 | int err; |
185 | 185 | ||
186 | if (skb_is_gso(skb)) | 186 | if (skb_is_gso(skb)) |
187 | return xfrm_output_gso(skb); | 187 | return xfrm_output_gso(sk, skb); |
188 | 188 | ||
189 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 189 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
190 | err = skb_checksum_help(skb); | 190 | err = skb_checksum_help(skb); |
@@ -195,7 +195,7 @@ int xfrm_output(struct sk_buff *skb) | |||
195 | } | 195 | } |
196 | } | 196 | } |
197 | 197 | ||
198 | return xfrm_output2(skb); | 198 | return xfrm_output2(sk, skb); |
199 | } | 199 | } |
200 | EXPORT_SYMBOL_GPL(xfrm_output); | 200 | EXPORT_SYMBOL_GPL(xfrm_output); |
201 | 201 | ||