Diffstat (limited to 'net')
-rw-r--r--  net/9p/trans_virtio.c           |   6
-rw-r--r--  net/Kconfig                     |  14
-rw-r--r--  net/bridge/br_netfilter.c       |   7
-rw-r--r--  net/ceph/ceph_common.c          |  16
-rw-r--r--  net/ceph/ceph_strings.c         |  14
-rw-r--r--  net/ceph/debugfs.c              |   2
-rw-r--r--  net/ceph/messenger.c            |  14
-rw-r--r--  net/ceph/mon_client.c           | 139
-rw-r--r--  net/ceph/osd_client.c           |  31
-rw-r--r--  net/core/dev.c                  |   3
-rw-r--r--  net/core/filter.c               |   2
-rw-r--r--  net/core/pktgen.c               |   2
-rw-r--r--  net/core/rtnetlink.c            |   9
-rw-r--r--  net/dsa/slave.c                 |   9
-rw-r--r--  net/ipv4/devinet.c              |   2
-rw-r--r--  net/ipv4/fou.c                  |  42
-rw-r--r--  net/ipv4/tcp_fastopen.c         |  32
-rw-r--r--  net/ipv4/udp_offload.c          |  13
-rw-r--r--  net/ipv6/ip6_flowlabel.c        |   4
-rw-r--r--  net/ipv6/ip6_output.c           |   2
-rw-r--r--  net/ipv6/route.c                |   2
-rw-r--r--  net/ipv6/udp_offload.c          |   6
-rw-r--r--  net/netfilter/nft_compat.c      |  63
-rw-r--r--  net/netfilter/nft_lookup.c      |   1
-rw-r--r--  net/openvswitch/flow.c          |   2
-rw-r--r--  net/openvswitch/flow_netlink.c  |   4
-rw-r--r--  net/rds/cong.c                  |  16
-rw-r--r--  net/sched/Kconfig               |   2
-rw-r--r--  net/sunrpc/backchannel_rqst.c   |   5
-rw-r--r--  net/switchdev/Kconfig           |   2

30 files changed, 246 insertions, 220 deletions
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index daa749c8b3fb..d8e376a5f0f1 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
| @@ -524,6 +524,12 @@ static int p9_virtio_probe(struct virtio_device *vdev) | |||
| 524 | int err; | 524 | int err; |
| 525 | struct virtio_chan *chan; | 525 | struct virtio_chan *chan; |
| 526 | 526 | ||
| 527 | if (!vdev->config->get) { | ||
| 528 | dev_err(&vdev->dev, "%s failure: config access disabled\n", | ||
| 529 | __func__); | ||
| 530 | return -EINVAL; | ||
| 531 | } | ||
| 532 | |||
| 527 | chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL); | 533 | chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL); |
| 528 | if (!chan) { | 534 | if (!chan) { |
| 529 | pr_err("Failed to allocate virtio 9P channel\n"); | 535 | pr_err("Failed to allocate virtio 9P channel\n"); |
diff --git a/net/Kconfig b/net/Kconfig
index ff9ffc17fa0e..44dd5786ee91 100644
--- a/net/Kconfig
+++ b/net/Kconfig
| @@ -231,18 +231,18 @@ source "net/hsr/Kconfig" | |||
| 231 | source "net/switchdev/Kconfig" | 231 | source "net/switchdev/Kconfig" |
| 232 | 232 | ||
| 233 | config RPS | 233 | config RPS |
| 234 | boolean | 234 | bool |
| 235 | depends on SMP && SYSFS | 235 | depends on SMP && SYSFS |
| 236 | default y | 236 | default y |
| 237 | 237 | ||
| 238 | config RFS_ACCEL | 238 | config RFS_ACCEL |
| 239 | boolean | 239 | bool |
| 240 | depends on RPS | 240 | depends on RPS |
| 241 | select CPU_RMAP | 241 | select CPU_RMAP |
| 242 | default y | 242 | default y |
| 243 | 243 | ||
| 244 | config XPS | 244 | config XPS |
| 245 | boolean | 245 | bool |
| 246 | depends on SMP | 246 | depends on SMP |
| 247 | default y | 247 | default y |
| 248 | 248 | ||
| @@ -254,18 +254,18 @@ config CGROUP_NET_PRIO | |||
| 254 | a per-interface basis. | 254 | a per-interface basis. |
| 255 | 255 | ||
| 256 | config CGROUP_NET_CLASSID | 256 | config CGROUP_NET_CLASSID |
| 257 | boolean "Network classid cgroup" | 257 | bool "Network classid cgroup" |
| 258 | depends on CGROUPS | 258 | depends on CGROUPS |
| 259 | ---help--- | 259 | ---help--- |
| 260 | Cgroup subsystem for use as general purpose socket classid marker that is | 260 | Cgroup subsystem for use as general purpose socket classid marker that is |
| 261 | being used in cls_cgroup and for netfilter matching. | 261 | being used in cls_cgroup and for netfilter matching. |
| 262 | 262 | ||
| 263 | config NET_RX_BUSY_POLL | 263 | config NET_RX_BUSY_POLL |
| 264 | boolean | 264 | bool |
| 265 | default y | 265 | default y |
| 266 | 266 | ||
| 267 | config BQL | 267 | config BQL |
| 268 | boolean | 268 | bool |
| 269 | depends on SYSFS | 269 | depends on SYSFS |
| 270 | select DQL | 270 | select DQL |
| 271 | default y | 271 | default y |
| @@ -282,7 +282,7 @@ config BPF_JIT | |||
| 282 | this feature changing /proc/sys/net/core/bpf_jit_enable | 282 | this feature changing /proc/sys/net/core/bpf_jit_enable |
| 283 | 283 | ||
| 284 | config NET_FLOW_LIMIT | 284 | config NET_FLOW_LIMIT |
| 285 | boolean | 285 | bool |
| 286 | depends on RPS | 286 | depends on RPS |
| 287 | default y | 287 | default y |
| 288 | ---help--- | 288 | ---help--- |
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 65728e0dc4ff..0ee453fad3de 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
| @@ -987,15 +987,12 @@ static int __init br_netfilter_init(void) | |||
| 987 | if (brnf_sysctl_header == NULL) { | 987 | if (brnf_sysctl_header == NULL) { |
| 988 | printk(KERN_WARNING | 988 | printk(KERN_WARNING |
| 989 | "br_netfilter: can't register to sysctl.\n"); | 989 | "br_netfilter: can't register to sysctl.\n"); |
| 990 | ret = -ENOMEM; | 990 | nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); |
| 991 | goto err1; | 991 | return -ENOMEM; |
| 992 | } | 992 | } |
| 993 | #endif | 993 | #endif |
| 994 | printk(KERN_NOTICE "Bridge firewalling registered\n"); | 994 | printk(KERN_NOTICE "Bridge firewalling registered\n"); |
| 995 | return 0; | 995 | return 0; |
| 996 | err1: | ||
| 997 | nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); | ||
| 998 | return ret; | ||
| 999 | } | 996 | } |
| 1000 | 997 | ||
| 1001 | static void __exit br_netfilter_fini(void) | 998 | static void __exit br_netfilter_fini(void) |
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 5d5ab67f516d..ec565508e904 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
| @@ -239,6 +239,8 @@ enum { | |||
| 239 | Opt_nocrc, | 239 | Opt_nocrc, |
| 240 | Opt_cephx_require_signatures, | 240 | Opt_cephx_require_signatures, |
| 241 | Opt_nocephx_require_signatures, | 241 | Opt_nocephx_require_signatures, |
| 242 | Opt_tcp_nodelay, | ||
| 243 | Opt_notcp_nodelay, | ||
| 242 | }; | 244 | }; |
| 243 | 245 | ||
| 244 | static match_table_t opt_tokens = { | 246 | static match_table_t opt_tokens = { |
| @@ -259,6 +261,8 @@ static match_table_t opt_tokens = { | |||
| 259 | {Opt_nocrc, "nocrc"}, | 261 | {Opt_nocrc, "nocrc"}, |
| 260 | {Opt_cephx_require_signatures, "cephx_require_signatures"}, | 262 | {Opt_cephx_require_signatures, "cephx_require_signatures"}, |
| 261 | {Opt_nocephx_require_signatures, "nocephx_require_signatures"}, | 263 | {Opt_nocephx_require_signatures, "nocephx_require_signatures"}, |
| 264 | {Opt_tcp_nodelay, "tcp_nodelay"}, | ||
| 265 | {Opt_notcp_nodelay, "notcp_nodelay"}, | ||
| 262 | {-1, NULL} | 266 | {-1, NULL} |
| 263 | }; | 267 | }; |
| 264 | 268 | ||
| @@ -457,6 +461,7 @@ ceph_parse_options(char *options, const char *dev_name, | |||
| 457 | case Opt_nocrc: | 461 | case Opt_nocrc: |
| 458 | opt->flags |= CEPH_OPT_NOCRC; | 462 | opt->flags |= CEPH_OPT_NOCRC; |
| 459 | break; | 463 | break; |
| 464 | |||
| 460 | case Opt_cephx_require_signatures: | 465 | case Opt_cephx_require_signatures: |
| 461 | opt->flags &= ~CEPH_OPT_NOMSGAUTH; | 466 | opt->flags &= ~CEPH_OPT_NOMSGAUTH; |
| 462 | break; | 467 | break; |
| @@ -464,6 +469,13 @@ ceph_parse_options(char *options, const char *dev_name, | |||
| 464 | opt->flags |= CEPH_OPT_NOMSGAUTH; | 469 | opt->flags |= CEPH_OPT_NOMSGAUTH; |
| 465 | break; | 470 | break; |
| 466 | 471 | ||
| 472 | case Opt_tcp_nodelay: | ||
| 473 | opt->flags |= CEPH_OPT_TCP_NODELAY; | ||
| 474 | break; | ||
| 475 | case Opt_notcp_nodelay: | ||
| 476 | opt->flags &= ~CEPH_OPT_TCP_NODELAY; | ||
| 477 | break; | ||
| 478 | |||
| 467 | default: | 479 | default: |
| 468 | BUG_ON(token); | 480 | BUG_ON(token); |
| 469 | } | 481 | } |
| @@ -518,10 +530,12 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private, | |||
| 518 | /* msgr */ | 530 | /* msgr */ |
| 519 | if (ceph_test_opt(client, MYIP)) | 531 | if (ceph_test_opt(client, MYIP)) |
| 520 | myaddr = &client->options->my_addr; | 532 | myaddr = &client->options->my_addr; |
| 533 | |||
| 521 | ceph_messenger_init(&client->msgr, myaddr, | 534 | ceph_messenger_init(&client->msgr, myaddr, |
| 522 | client->supported_features, | 535 | client->supported_features, |
| 523 | client->required_features, | 536 | client->required_features, |
| 524 | ceph_test_opt(client, NOCRC)); | 537 | ceph_test_opt(client, NOCRC), |
| 538 | ceph_test_opt(client, TCP_NODELAY)); | ||
| 525 | 539 | ||
| 526 | /* subsystems */ | 540 | /* subsystems */ |
| 527 | err = ceph_monc_init(&client->monc, client); | 541 | err = ceph_monc_init(&client->monc, client); |
diff --git a/net/ceph/ceph_strings.c b/net/ceph/ceph_strings.c
index 30560202f57b..139a9cb19b0c 100644
--- a/net/ceph/ceph_strings.c
+++ b/net/ceph/ceph_strings.c
| @@ -42,17 +42,3 @@ const char *ceph_osd_state_name(int s) | |||
| 42 | return "???"; | 42 | return "???"; |
| 43 | } | 43 | } |
| 44 | } | 44 | } |
| 45 | |||
| 46 | const char *ceph_pool_op_name(int op) | ||
| 47 | { | ||
| 48 | switch (op) { | ||
| 49 | case POOL_OP_CREATE: return "create"; | ||
| 50 | case POOL_OP_DELETE: return "delete"; | ||
| 51 | case POOL_OP_AUID_CHANGE: return "auid change"; | ||
| 52 | case POOL_OP_CREATE_SNAP: return "create snap"; | ||
| 53 | case POOL_OP_DELETE_SNAP: return "delete snap"; | ||
| 54 | case POOL_OP_CREATE_UNMANAGED_SNAP: return "create unmanaged snap"; | ||
| 55 | case POOL_OP_DELETE_UNMANAGED_SNAP: return "delete unmanaged snap"; | ||
| 56 | } | ||
| 57 | return "???"; | ||
| 58 | } | ||
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index d2d525529f87..14d9995097cc 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
| @@ -127,8 +127,6 @@ static int monc_show(struct seq_file *s, void *p) | |||
| 127 | op = le16_to_cpu(req->request->hdr.type); | 127 | op = le16_to_cpu(req->request->hdr.type); |
| 128 | if (op == CEPH_MSG_STATFS) | 128 | if (op == CEPH_MSG_STATFS) |
| 129 | seq_printf(s, "%llu statfs\n", req->tid); | 129 | seq_printf(s, "%llu statfs\n", req->tid); |
| 130 | else if (op == CEPH_MSG_POOLOP) | ||
| 131 | seq_printf(s, "%llu poolop\n", req->tid); | ||
| 132 | else if (op == CEPH_MSG_MON_GET_VERSION) | 130 | else if (op == CEPH_MSG_MON_GET_VERSION) |
| 133 | seq_printf(s, "%llu mon_get_version", req->tid); | 131 | seq_printf(s, "%llu mon_get_version", req->tid); |
| 134 | else | 132 | else |
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 33a2f201e460..6b3f54ed65ba 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
| @@ -510,6 +510,16 @@ static int ceph_tcp_connect(struct ceph_connection *con) | |||
| 510 | return ret; | 510 | return ret; |
| 511 | } | 511 | } |
| 512 | 512 | ||
| 513 | if (con->msgr->tcp_nodelay) { | ||
| 514 | int optval = 1; | ||
| 515 | |||
| 516 | ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, | ||
| 517 | (char *)&optval, sizeof(optval)); | ||
| 518 | if (ret) | ||
| 519 | pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d", | ||
| 520 | ret); | ||
| 521 | } | ||
| 522 | |||
| 513 | sk_set_memalloc(sock->sk); | 523 | sk_set_memalloc(sock->sk); |
| 514 | 524 | ||
| 515 | con->sock = sock; | 525 | con->sock = sock; |
| @@ -2922,7 +2932,8 @@ void ceph_messenger_init(struct ceph_messenger *msgr, | |||
| 2922 | struct ceph_entity_addr *myaddr, | 2932 | struct ceph_entity_addr *myaddr, |
| 2923 | u64 supported_features, | 2933 | u64 supported_features, |
| 2924 | u64 required_features, | 2934 | u64 required_features, |
| 2925 | bool nocrc) | 2935 | bool nocrc, |
| 2936 | bool tcp_nodelay) | ||
| 2926 | { | 2937 | { |
| 2927 | msgr->supported_features = supported_features; | 2938 | msgr->supported_features = supported_features; |
| 2928 | msgr->required_features = required_features; | 2939 | msgr->required_features = required_features; |
| @@ -2937,6 +2948,7 @@ void ceph_messenger_init(struct ceph_messenger *msgr, | |||
| 2937 | get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); | 2948 | get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); |
| 2938 | encode_my_addr(msgr); | 2949 | encode_my_addr(msgr); |
| 2939 | msgr->nocrc = nocrc; | 2950 | msgr->nocrc = nocrc; |
| 2951 | msgr->tcp_nodelay = tcp_nodelay; | ||
| 2940 | 2952 | ||
| 2941 | atomic_set(&msgr->stopping, 0); | 2953 | atomic_set(&msgr->stopping, 0); |
| 2942 | 2954 | ||
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index f2148e22b148..2b3cf05e87b0 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
| @@ -410,7 +410,7 @@ out_unlocked: | |||
| 410 | } | 410 | } |
| 411 | 411 | ||
| 412 | /* | 412 | /* |
| 413 | * generic requests (e.g., statfs, poolop) | 413 | * generic requests (currently statfs, mon_get_version) |
| 414 | */ | 414 | */ |
| 415 | static struct ceph_mon_generic_request *__lookup_generic_req( | 415 | static struct ceph_mon_generic_request *__lookup_generic_req( |
| 416 | struct ceph_mon_client *monc, u64 tid) | 416 | struct ceph_mon_client *monc, u64 tid) |
| @@ -569,7 +569,7 @@ static void handle_statfs_reply(struct ceph_mon_client *monc, | |||
| 569 | return; | 569 | return; |
| 570 | 570 | ||
| 571 | bad: | 571 | bad: |
| 572 | pr_err("corrupt generic reply, tid %llu\n", tid); | 572 | pr_err("corrupt statfs reply, tid %llu\n", tid); |
| 573 | ceph_msg_dump(msg); | 573 | ceph_msg_dump(msg); |
| 574 | } | 574 | } |
| 575 | 575 | ||
| @@ -588,7 +588,6 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf) | |||
| 588 | 588 | ||
| 589 | kref_init(&req->kref); | 589 | kref_init(&req->kref); |
| 590 | req->buf = buf; | 590 | req->buf = buf; |
| 591 | req->buf_len = sizeof(*buf); | ||
| 592 | init_completion(&req->completion); | 591 | init_completion(&req->completion); |
| 593 | 592 | ||
| 594 | err = -ENOMEM; | 593 | err = -ENOMEM; |
| @@ -611,7 +610,7 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf) | |||
| 611 | err = do_generic_request(monc, req); | 610 | err = do_generic_request(monc, req); |
| 612 | 611 | ||
| 613 | out: | 612 | out: |
| 614 | kref_put(&req->kref, release_generic_request); | 613 | put_generic_request(req); |
| 615 | return err; | 614 | return err; |
| 616 | } | 615 | } |
| 617 | EXPORT_SYMBOL(ceph_monc_do_statfs); | 616 | EXPORT_SYMBOL(ceph_monc_do_statfs); |
| @@ -647,7 +646,7 @@ static void handle_get_version_reply(struct ceph_mon_client *monc, | |||
| 647 | 646 | ||
| 648 | return; | 647 | return; |
| 649 | bad: | 648 | bad: |
| 650 | pr_err("corrupt mon_get_version reply\n"); | 649 | pr_err("corrupt mon_get_version reply, tid %llu\n", tid); |
| 651 | ceph_msg_dump(msg); | 650 | ceph_msg_dump(msg); |
| 652 | } | 651 | } |
| 653 | 652 | ||
| @@ -670,7 +669,6 @@ int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what, | |||
| 670 | 669 | ||
| 671 | kref_init(&req->kref); | 670 | kref_init(&req->kref); |
| 672 | req->buf = newest; | 671 | req->buf = newest; |
| 673 | req->buf_len = sizeof(*newest); | ||
| 674 | init_completion(&req->completion); | 672 | init_completion(&req->completion); |
| 675 | 673 | ||
| 676 | req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION, | 674 | req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION, |
| @@ -701,134 +699,12 @@ int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what, | |||
| 701 | 699 | ||
| 702 | mutex_unlock(&monc->mutex); | 700 | mutex_unlock(&monc->mutex); |
| 703 | out: | 701 | out: |
| 704 | kref_put(&req->kref, release_generic_request); | 702 | put_generic_request(req); |
| 705 | return err; | 703 | return err; |
| 706 | } | 704 | } |
| 707 | EXPORT_SYMBOL(ceph_monc_do_get_version); | 705 | EXPORT_SYMBOL(ceph_monc_do_get_version); |
| 708 | 706 | ||
| 709 | /* | 707 | /* |
| 710 | * pool ops | ||
| 711 | */ | ||
| 712 | static int get_poolop_reply_buf(const char *src, size_t src_len, | ||
| 713 | char *dst, size_t dst_len) | ||
| 714 | { | ||
| 715 | u32 buf_len; | ||
| 716 | |||
| 717 | if (src_len != sizeof(u32) + dst_len) | ||
| 718 | return -EINVAL; | ||
| 719 | |||
| 720 | buf_len = le32_to_cpu(*(__le32 *)src); | ||
| 721 | if (buf_len != dst_len) | ||
| 722 | return -EINVAL; | ||
| 723 | |||
| 724 | memcpy(dst, src + sizeof(u32), dst_len); | ||
| 725 | return 0; | ||
| 726 | } | ||
| 727 | |||
| 728 | static void handle_poolop_reply(struct ceph_mon_client *monc, | ||
| 729 | struct ceph_msg *msg) | ||
| 730 | { | ||
| 731 | struct ceph_mon_generic_request *req; | ||
| 732 | struct ceph_mon_poolop_reply *reply = msg->front.iov_base; | ||
| 733 | u64 tid = le64_to_cpu(msg->hdr.tid); | ||
| 734 | |||
| 735 | if (msg->front.iov_len < sizeof(*reply)) | ||
| 736 | goto bad; | ||
| 737 | dout("handle_poolop_reply %p tid %llu\n", msg, tid); | ||
| 738 | |||
| 739 | mutex_lock(&monc->mutex); | ||
| 740 | req = __lookup_generic_req(monc, tid); | ||
| 741 | if (req) { | ||
| 742 | if (req->buf_len && | ||
| 743 | get_poolop_reply_buf(msg->front.iov_base + sizeof(*reply), | ||
| 744 | msg->front.iov_len - sizeof(*reply), | ||
| 745 | req->buf, req->buf_len) < 0) { | ||
| 746 | mutex_unlock(&monc->mutex); | ||
| 747 | goto bad; | ||
| 748 | } | ||
| 749 | req->result = le32_to_cpu(reply->reply_code); | ||
| 750 | get_generic_request(req); | ||
| 751 | } | ||
| 752 | mutex_unlock(&monc->mutex); | ||
| 753 | if (req) { | ||
| 754 | complete(&req->completion); | ||
| 755 | put_generic_request(req); | ||
| 756 | } | ||
| 757 | return; | ||
| 758 | |||
| 759 | bad: | ||
| 760 | pr_err("corrupt generic reply, tid %llu\n", tid); | ||
| 761 | ceph_msg_dump(msg); | ||
| 762 | } | ||
| 763 | |||
| 764 | /* | ||
| 765 | * Do a synchronous pool op. | ||
| 766 | */ | ||
| 767 | static int do_poolop(struct ceph_mon_client *monc, u32 op, | ||
| 768 | u32 pool, u64 snapid, | ||
| 769 | char *buf, int len) | ||
| 770 | { | ||
| 771 | struct ceph_mon_generic_request *req; | ||
| 772 | struct ceph_mon_poolop *h; | ||
| 773 | int err; | ||
| 774 | |||
| 775 | req = kzalloc(sizeof(*req), GFP_NOFS); | ||
| 776 | if (!req) | ||
| 777 | return -ENOMEM; | ||
| 778 | |||
| 779 | kref_init(&req->kref); | ||
| 780 | req->buf = buf; | ||
| 781 | req->buf_len = len; | ||
| 782 | init_completion(&req->completion); | ||
| 783 | |||
| 784 | err = -ENOMEM; | ||
| 785 | req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS, | ||
| 786 | true); | ||
| 787 | if (!req->request) | ||
| 788 | goto out; | ||
| 789 | req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS, | ||
| 790 | true); | ||
| 791 | if (!req->reply) | ||
| 792 | goto out; | ||
| 793 | |||
| 794 | /* fill out request */ | ||
| 795 | req->request->hdr.version = cpu_to_le16(2); | ||
| 796 | h = req->request->front.iov_base; | ||
| 797 | h->monhdr.have_version = 0; | ||
| 798 | h->monhdr.session_mon = cpu_to_le16(-1); | ||
| 799 | h->monhdr.session_mon_tid = 0; | ||
| 800 | h->fsid = monc->monmap->fsid; | ||
| 801 | h->pool = cpu_to_le32(pool); | ||
| 802 | h->op = cpu_to_le32(op); | ||
| 803 | h->auid = 0; | ||
| 804 | h->snapid = cpu_to_le64(snapid); | ||
| 805 | h->name_len = 0; | ||
| 806 | |||
| 807 | err = do_generic_request(monc, req); | ||
| 808 | |||
| 809 | out: | ||
| 810 | kref_put(&req->kref, release_generic_request); | ||
| 811 | return err; | ||
| 812 | } | ||
| 813 | |||
| 814 | int ceph_monc_create_snapid(struct ceph_mon_client *monc, | ||
| 815 | u32 pool, u64 *snapid) | ||
| 816 | { | ||
| 817 | return do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP, | ||
| 818 | pool, 0, (char *)snapid, sizeof(*snapid)); | ||
| 819 | |||
| 820 | } | ||
| 821 | EXPORT_SYMBOL(ceph_monc_create_snapid); | ||
| 822 | |||
| 823 | int ceph_monc_delete_snapid(struct ceph_mon_client *monc, | ||
| 824 | u32 pool, u64 snapid) | ||
| 825 | { | ||
| 826 | return do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP, | ||
| 827 | pool, snapid, NULL, 0); | ||
| 828 | |||
| 829 | } | ||
| 830 | |||
| 831 | /* | ||
| 832 | * Resend pending generic requests. | 708 | * Resend pending generic requests. |
| 833 | */ | 709 | */ |
| 834 | static void __resend_generic_request(struct ceph_mon_client *monc) | 710 | static void __resend_generic_request(struct ceph_mon_client *monc) |
| @@ -1112,10 +988,6 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) | |||
| 1112 | handle_get_version_reply(monc, msg); | 988 | handle_get_version_reply(monc, msg); |
| 1113 | break; | 989 | break; |
| 1114 | 990 | ||
| 1115 | case CEPH_MSG_POOLOP_REPLY: | ||
| 1116 | handle_poolop_reply(monc, msg); | ||
| 1117 | break; | ||
| 1118 | |||
| 1119 | case CEPH_MSG_MON_MAP: | 991 | case CEPH_MSG_MON_MAP: |
| 1120 | ceph_monc_handle_map(monc, msg); | 992 | ceph_monc_handle_map(monc, msg); |
| 1121 | break; | 993 | break; |
| @@ -1154,7 +1026,6 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, | |||
| 1154 | case CEPH_MSG_MON_SUBSCRIBE_ACK: | 1026 | case CEPH_MSG_MON_SUBSCRIBE_ACK: |
| 1155 | m = ceph_msg_get(monc->m_subscribe_ack); | 1027 | m = ceph_msg_get(monc->m_subscribe_ack); |
| 1156 | break; | 1028 | break; |
| 1157 | case CEPH_MSG_POOLOP_REPLY: | ||
| 1158 | case CEPH_MSG_STATFS_REPLY: | 1029 | case CEPH_MSG_STATFS_REPLY: |
| 1159 | return get_generic_reply(con, hdr, skip); | 1030 | return get_generic_reply(con, hdr, skip); |
| 1160 | case CEPH_MSG_AUTH_REPLY: | 1031 | case CEPH_MSG_AUTH_REPLY: |
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 53299c7b0ca4..41a4abc7e98e 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
| @@ -1035,10 +1035,11 @@ static void put_osd(struct ceph_osd *osd) | |||
| 1035 | { | 1035 | { |
| 1036 | dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), | 1036 | dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), |
| 1037 | atomic_read(&osd->o_ref) - 1); | 1037 | atomic_read(&osd->o_ref) - 1); |
| 1038 | if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) { | 1038 | if (atomic_dec_and_test(&osd->o_ref)) { |
| 1039 | struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; | 1039 | struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; |
| 1040 | 1040 | ||
| 1041 | ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer); | 1041 | if (osd->o_auth.authorizer) |
| 1042 | ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer); | ||
| 1042 | kfree(osd); | 1043 | kfree(osd); |
| 1043 | } | 1044 | } |
| 1044 | } | 1045 | } |
| @@ -1048,14 +1049,24 @@ static void put_osd(struct ceph_osd *osd) | |||
| 1048 | */ | 1049 | */ |
| 1049 | static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) | 1050 | static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) |
| 1050 | { | 1051 | { |
| 1051 | dout("__remove_osd %p\n", osd); | 1052 | dout("%s %p osd%d\n", __func__, osd, osd->o_osd); |
| 1052 | WARN_ON(!list_empty(&osd->o_requests)); | 1053 | WARN_ON(!list_empty(&osd->o_requests)); |
| 1053 | WARN_ON(!list_empty(&osd->o_linger_requests)); | 1054 | WARN_ON(!list_empty(&osd->o_linger_requests)); |
| 1054 | 1055 | ||
| 1055 | rb_erase(&osd->o_node, &osdc->osds); | ||
| 1056 | list_del_init(&osd->o_osd_lru); | 1056 | list_del_init(&osd->o_osd_lru); |
| 1057 | ceph_con_close(&osd->o_con); | 1057 | rb_erase(&osd->o_node, &osdc->osds); |
| 1058 | put_osd(osd); | 1058 | RB_CLEAR_NODE(&osd->o_node); |
| 1059 | } | ||
| 1060 | |||
| 1061 | static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) | ||
| 1062 | { | ||
| 1063 | dout("%s %p osd%d\n", __func__, osd, osd->o_osd); | ||
| 1064 | |||
| 1065 | if (!RB_EMPTY_NODE(&osd->o_node)) { | ||
| 1066 | ceph_con_close(&osd->o_con); | ||
| 1067 | __remove_osd(osdc, osd); | ||
| 1068 | put_osd(osd); | ||
| 1069 | } | ||
| 1059 | } | 1070 | } |
| 1060 | 1071 | ||
| 1061 | static void remove_all_osds(struct ceph_osd_client *osdc) | 1072 | static void remove_all_osds(struct ceph_osd_client *osdc) |
| @@ -1065,7 +1076,7 @@ static void remove_all_osds(struct ceph_osd_client *osdc) | |||
| 1065 | while (!RB_EMPTY_ROOT(&osdc->osds)) { | 1076 | while (!RB_EMPTY_ROOT(&osdc->osds)) { |
| 1066 | struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), | 1077 | struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), |
| 1067 | struct ceph_osd, o_node); | 1078 | struct ceph_osd, o_node); |
| 1068 | __remove_osd(osdc, osd); | 1079 | remove_osd(osdc, osd); |
| 1069 | } | 1080 | } |
| 1070 | mutex_unlock(&osdc->request_mutex); | 1081 | mutex_unlock(&osdc->request_mutex); |
| 1071 | } | 1082 | } |
| @@ -1106,7 +1117,7 @@ static void remove_old_osds(struct ceph_osd_client *osdc) | |||
| 1106 | list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { | 1117 | list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { |
| 1107 | if (time_before(jiffies, osd->lru_ttl)) | 1118 | if (time_before(jiffies, osd->lru_ttl)) |
| 1108 | break; | 1119 | break; |
| 1109 | __remove_osd(osdc, osd); | 1120 | remove_osd(osdc, osd); |
| 1110 | } | 1121 | } |
| 1111 | mutex_unlock(&osdc->request_mutex); | 1122 | mutex_unlock(&osdc->request_mutex); |
| 1112 | } | 1123 | } |
| @@ -1121,8 +1132,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) | |||
| 1121 | dout("__reset_osd %p osd%d\n", osd, osd->o_osd); | 1132 | dout("__reset_osd %p osd%d\n", osd, osd->o_osd); |
| 1122 | if (list_empty(&osd->o_requests) && | 1133 | if (list_empty(&osd->o_requests) && |
| 1123 | list_empty(&osd->o_linger_requests)) { | 1134 | list_empty(&osd->o_linger_requests)) { |
| 1124 | __remove_osd(osdc, osd); | 1135 | remove_osd(osdc, osd); |
| 1125 | |||
| 1126 | return -ENODEV; | 1136 | return -ENODEV; |
| 1127 | } | 1137 | } |
| 1128 | 1138 | ||
| @@ -1926,6 +1936,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc) | |||
| 1926 | { | 1936 | { |
| 1927 | struct rb_node *p, *n; | 1937 | struct rb_node *p, *n; |
| 1928 | 1938 | ||
| 1939 | dout("%s %p\n", __func__, osdc); | ||
| 1929 | for (p = rb_first(&osdc->osds); p; p = n) { | 1940 | for (p = rb_first(&osdc->osds); p; p = n) { |
| 1930 | struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node); | 1941 | struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node); |
| 1931 | 1942 | ||
diff --git a/net/core/dev.c b/net/core/dev.c
index d030575532a2..8f9710c62e20 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
| @@ -4024,6 +4024,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff | |||
| 4024 | NAPI_GRO_CB(skb)->flush = 0; | 4024 | NAPI_GRO_CB(skb)->flush = 0; |
| 4025 | NAPI_GRO_CB(skb)->free = 0; | 4025 | NAPI_GRO_CB(skb)->free = 0; |
| 4026 | NAPI_GRO_CB(skb)->udp_mark = 0; | 4026 | NAPI_GRO_CB(skb)->udp_mark = 0; |
| 4027 | NAPI_GRO_CB(skb)->gro_remcsum_start = 0; | ||
| 4027 | 4028 | ||
| 4028 | /* Setup for GRO checksum validation */ | 4029 | /* Setup for GRO checksum validation */ |
| 4029 | switch (skb->ip_summed) { | 4030 | switch (skb->ip_summed) { |
| @@ -5335,7 +5336,7 @@ EXPORT_SYMBOL(netdev_upper_dev_unlink); | |||
| 5335 | /** | 5336 | /** |
| 5336 | * netdev_bonding_info_change - Dispatch event about slave change | 5337 | * netdev_bonding_info_change - Dispatch event about slave change |
| 5337 | * @dev: device | 5338 | * @dev: device |
| 5338 | * @netdev_bonding_info: info to dispatch | 5339 | * @bonding_info: info to dispatch |
| 5339 | * | 5340 | * |
| 5340 | * Send NETDEV_BONDING_INFO to netdev notifiers with info. | 5341 | * Send NETDEV_BONDING_INFO to netdev notifiers with info. |
| 5341 | * The caller must hold the RTNL lock. | 5342 | * The caller must hold the RTNL lock. |
diff --git a/net/core/filter.c b/net/core/filter.c
index ec9baea10c16..f6bdc2b1ba01 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
| @@ -531,7 +531,7 @@ do_pass: | |||
| 531 | *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k); | 531 | *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k); |
| 532 | break; | 532 | break; |
| 533 | 533 | ||
| 534 | /* Unkown instruction. */ | 534 | /* Unknown instruction. */ |
| 535 | default: | 535 | default: |
| 536 | goto err; | 536 | goto err; |
| 537 | } | 537 | } |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 9fa25b0ea145..b4899f5b7388 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
| @@ -97,7 +97,7 @@ | |||
| 97 | * New xmit() return, do_div and misc clean up by Stephen Hemminger | 97 | * New xmit() return, do_div and misc clean up by Stephen Hemminger |
| 98 | * <shemminger@osdl.org> 040923 | 98 | * <shemminger@osdl.org> 040923 |
| 99 | * | 99 | * |
| 100 | * Randy Dunlap fixed u64 printk compiler waring | 100 | * Randy Dunlap fixed u64 printk compiler warning |
| 101 | * | 101 | * |
| 102 | * Remove FCS from BW calculation. Lennert Buytenhek <buytenh@wantstofly.org> | 102 | * Remove FCS from BW calculation. Lennert Buytenhek <buytenh@wantstofly.org> |
| 103 | * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213 | 103 | * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213 |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5be499b6a2d2..ab293a3066b3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
| @@ -2162,7 +2162,14 @@ replay: | |||
| 2162 | } | 2162 | } |
| 2163 | err = rtnl_configure_link(dev, ifm); | 2163 | err = rtnl_configure_link(dev, ifm); |
| 2164 | if (err < 0) { | 2164 | if (err < 0) { |
| 2165 | unregister_netdevice(dev); | 2165 | if (ops->newlink) { |
| 2166 | LIST_HEAD(list_kill); | ||
| 2167 | |||
| 2168 | ops->dellink(dev, &list_kill); | ||
| 2169 | unregister_netdevice_many(&list_kill); | ||
| 2170 | } else { | ||
| 2171 | unregister_netdevice(dev); | ||
| 2172 | } | ||
| 2166 | goto out; | 2173 | goto out; |
| 2167 | } | 2174 | } |
| 2168 | 2175 | ||
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index d104ae15836f..f23deadf42a0 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
| @@ -521,10 +521,13 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, | |||
| 521 | struct device_node *phy_dn, *port_dn; | 521 | struct device_node *phy_dn, *port_dn; |
| 522 | bool phy_is_fixed = false; | 522 | bool phy_is_fixed = false; |
| 523 | u32 phy_flags = 0; | 523 | u32 phy_flags = 0; |
| 524 | int ret; | 524 | int mode, ret; |
| 525 | 525 | ||
| 526 | port_dn = cd->port_dn[p->port]; | 526 | port_dn = cd->port_dn[p->port]; |
| 527 | p->phy_interface = of_get_phy_mode(port_dn); | 527 | mode = of_get_phy_mode(port_dn); |
| 528 | if (mode < 0) | ||
| 529 | mode = PHY_INTERFACE_MODE_NA; | ||
| 530 | p->phy_interface = mode; | ||
| 528 | 531 | ||
| 529 | phy_dn = of_parse_phandle(port_dn, "phy-handle", 0); | 532 | phy_dn = of_parse_phandle(port_dn, "phy-handle", 0); |
| 530 | if (of_phy_is_fixed_link(port_dn)) { | 533 | if (of_phy_is_fixed_link(port_dn)) { |
| @@ -559,6 +562,8 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, | |||
| 559 | if (!p->phy) | 562 | if (!p->phy) |
| 560 | return -ENODEV; | 563 | return -ENODEV; |
| 561 | 564 | ||
| 565 | /* Use already configured phy mode */ | ||
| 566 | p->phy_interface = p->phy->interface; | ||
| 562 | phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, | 567 | phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, |
| 563 | p->phy_interface); | 568 | p->phy_interface); |
| 564 | } else { | 569 | } else { |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index f0b4a31d7bd6..3a8985c94581 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
| @@ -1186,7 +1186,7 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope) | |||
| 1186 | no_in_dev: | 1186 | no_in_dev: |
| 1187 | 1187 | ||
| 1188 | /* Not loopback addresses on loopback should be preferred | 1188 | /* Not loopback addresses on loopback should be preferred |
| 1189 | in this case. It is importnat that lo is the first interface | 1189 | in this case. It is important that lo is the first interface |
| 1190 | in dev_base list. | 1190 | in dev_base list. |
| 1191 | */ | 1191 | */ |
| 1192 | for_each_netdev_rcu(net, dev) { | 1192 | for_each_netdev_rcu(net, dev) { |
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 92ddea1e6457..ff069f6597ac 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
| @@ -22,14 +22,18 @@ static LIST_HEAD(fou_list); | |||
| 22 | struct fou { | 22 | struct fou { |
| 23 | struct socket *sock; | 23 | struct socket *sock; |
| 24 | u8 protocol; | 24 | u8 protocol; |
| 25 | u8 flags; | ||
| 25 | u16 port; | 26 | u16 port; |
| 26 | struct udp_offload udp_offloads; | 27 | struct udp_offload udp_offloads; |
| 27 | struct list_head list; | 28 | struct list_head list; |
| 28 | }; | 29 | }; |
| 29 | 30 | ||
| 31 | #define FOU_F_REMCSUM_NOPARTIAL BIT(0) | ||
| 32 | |||
| 30 | struct fou_cfg { | 33 | struct fou_cfg { |
| 31 | u16 type; | 34 | u16 type; |
| 32 | u8 protocol; | 35 | u8 protocol; |
| 36 | u8 flags; | ||
| 33 | struct udp_port_cfg udp_config; | 37 | struct udp_port_cfg udp_config; |
| 34 | }; | 38 | }; |
| 35 | 39 | ||
| @@ -64,24 +68,20 @@ static int fou_udp_recv(struct sock *sk, struct sk_buff *skb) | |||
| 64 | } | 68 | } |
| 65 | 69 | ||
| 66 | static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr, | 70 | static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr, |
| 67 | void *data, size_t hdrlen, u8 ipproto) | 71 | void *data, size_t hdrlen, u8 ipproto, |
| 72 | bool nopartial) | ||
| 68 | { | 73 | { |
| 69 | __be16 *pd = data; | 74 | __be16 *pd = data; |
| 70 | size_t start = ntohs(pd[0]); | 75 | size_t start = ntohs(pd[0]); |
| 71 | size_t offset = ntohs(pd[1]); | 76 | size_t offset = ntohs(pd[1]); |
| 72 | size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); | 77 | size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); |
| 73 | 78 | ||
| 74 | if (skb->remcsum_offload) { | ||
| 75 | /* Already processed in GRO path */ | ||
| 76 | skb->remcsum_offload = 0; | ||
| 77 | return guehdr; | ||
| 78 | } | ||
| 79 | |||
| 80 | if (!pskb_may_pull(skb, plen)) | 79 | if (!pskb_may_pull(skb, plen)) |
| 81 | return NULL; | 80 | return NULL; |
| 82 | guehdr = (struct guehdr *)&udp_hdr(skb)[1]; | 81 | guehdr = (struct guehdr *)&udp_hdr(skb)[1]; |
| 83 | 82 | ||
| 84 | skb_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset); | 83 | skb_remcsum_process(skb, (void *)guehdr + hdrlen, |
| 84 | start, offset, nopartial); | ||
| 85 | 85 | ||
| 86 | return guehdr; | 86 | return guehdr; |
| 87 | } | 87 | } |
| @@ -142,7 +142,9 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb) | |||
| 142 | 142 | ||
| 143 | if (flags & GUE_PFLAG_REMCSUM) { | 143 | if (flags & GUE_PFLAG_REMCSUM) { |
| 144 | guehdr = gue_remcsum(skb, guehdr, data + doffset, | 144 | guehdr = gue_remcsum(skb, guehdr, data + doffset, |
| 145 | hdrlen, guehdr->proto_ctype); | 145 | hdrlen, guehdr->proto_ctype, |
| 146 | !!(fou->flags & | ||
| 147 | FOU_F_REMCSUM_NOPARTIAL)); | ||
| 146 | if (!guehdr) | 148 | if (!guehdr) |
| 147 | goto drop; | 149 | goto drop; |
| 148 | 150 | ||
| @@ -214,7 +216,8 @@ out_unlock: | |||
| 214 | 216 | ||
| 215 | static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off, | 217 | static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off, |
| 216 | struct guehdr *guehdr, void *data, | 218 | struct guehdr *guehdr, void *data, |
| 217 | size_t hdrlen, u8 ipproto) | 219 | size_t hdrlen, u8 ipproto, |
| 220 | struct gro_remcsum *grc, bool nopartial) | ||
| 218 | { | 221 | { |
| 219 | __be16 *pd = data; | 222 | __be16 *pd = data; |
| 220 | size_t start = ntohs(pd[0]); | 223 | size_t start = ntohs(pd[0]); |
| @@ -222,7 +225,7 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off, | |||
| 222 | size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); | 225 | size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); |
| 223 | 226 | ||
| 224 | if (skb->remcsum_offload) | 227 | if (skb->remcsum_offload) |
| 225 | return guehdr; | 228 | return NULL; |
| 226 | 229 | ||
| 227 | if (!NAPI_GRO_CB(skb)->csum_valid) | 230 | if (!NAPI_GRO_CB(skb)->csum_valid) |
| 228 | return NULL; | 231 | return NULL; |
| @@ -234,7 +237,8 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off, | |||
| 234 | return NULL; | 237 | return NULL; |
| 235 | } | 238 | } |
| 236 | 239 | ||
| 237 | skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset); | 240 | skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen, |
| 241 | start, offset, grc, nopartial); | ||
| 238 | 242 | ||
| 239 | skb->remcsum_offload = 1; | 243 | skb->remcsum_offload = 1; |
| 240 | 244 | ||
| @@ -254,6 +258,10 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, | |||
| 254 | void *data; | 258 | void *data; |
| 255 | u16 doffset = 0; | 259 | u16 doffset = 0; |
| 256 | int flush = 1; | 260 | int flush = 1; |
| 261 | struct fou *fou = container_of(uoff, struct fou, udp_offloads); | ||
| 262 | struct gro_remcsum grc; | ||
| 263 | |||
| 264 | skb_gro_remcsum_init(&grc); | ||
| 257 | 265 | ||
| 258 | off = skb_gro_offset(skb); | 266 | off = skb_gro_offset(skb); |
| 259 | len = off + sizeof(*guehdr); | 267 | len = off + sizeof(*guehdr); |
| @@ -295,7 +303,9 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, | |||
| 295 | if (flags & GUE_PFLAG_REMCSUM) { | 303 | if (flags & GUE_PFLAG_REMCSUM) { |
| 296 | guehdr = gue_gro_remcsum(skb, off, guehdr, | 304 | guehdr = gue_gro_remcsum(skb, off, guehdr, |
| 297 | data + doffset, hdrlen, | 305 | data + doffset, hdrlen, |
| 298 | guehdr->proto_ctype); | 306 | guehdr->proto_ctype, &grc, |
| 307 | !!(fou->flags & | ||
| 308 | FOU_F_REMCSUM_NOPARTIAL)); | ||
| 299 | if (!guehdr) | 309 | if (!guehdr) |
| 300 | goto out; | 310 | goto out; |
| 301 | 311 | ||
| @@ -345,6 +355,7 @@ out_unlock: | |||
| 345 | rcu_read_unlock(); | 355 | rcu_read_unlock(); |
| 346 | out: | 356 | out: |
| 347 | NAPI_GRO_CB(skb)->flush |= flush; | 357 | NAPI_GRO_CB(skb)->flush |= flush; |
| 358 | skb_gro_remcsum_cleanup(skb, &grc); | ||
| 348 | 359 | ||
| 349 | return pp; | 360 | return pp; |
| 350 | } | 361 | } |
| @@ -455,6 +466,7 @@ static int fou_create(struct net *net, struct fou_cfg *cfg, | |||
| 455 | 466 | ||
| 456 | sk = sock->sk; | 467 | sk = sock->sk; |
| 457 | 468 | ||
| 469 | fou->flags = cfg->flags; | ||
| 458 | fou->port = cfg->udp_config.local_udp_port; | 470 | fou->port = cfg->udp_config.local_udp_port; |
| 459 | 471 | ||
| 460 | /* Initial for fou type */ | 472 | /* Initial for fou type */ |
| @@ -541,6 +553,7 @@ static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = { | |||
| 541 | [FOU_ATTR_AF] = { .type = NLA_U8, }, | 553 | [FOU_ATTR_AF] = { .type = NLA_U8, }, |
| 542 | [FOU_ATTR_IPPROTO] = { .type = NLA_U8, }, | 554 | [FOU_ATTR_IPPROTO] = { .type = NLA_U8, }, |
| 543 | [FOU_ATTR_TYPE] = { .type = NLA_U8, }, | 555 | [FOU_ATTR_TYPE] = { .type = NLA_U8, }, |
| 556 | [FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, }, | ||
| 544 | }; | 557 | }; |
| 545 | 558 | ||
| 546 | static int parse_nl_config(struct genl_info *info, | 559 | static int parse_nl_config(struct genl_info *info, |
| @@ -571,6 +584,9 @@ static int parse_nl_config(struct genl_info *info, | |||
| 571 | if (info->attrs[FOU_ATTR_TYPE]) | 584 | if (info->attrs[FOU_ATTR_TYPE]) |
| 572 | cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]); | 585 | cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]); |
| 573 | 586 | ||
| 587 | if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL]) | ||
| 588 | cfg->flags |= FOU_F_REMCSUM_NOPARTIAL; | ||
| 589 | |||
| 574 | return 0; | 590 | return 0; |
| 575 | } | 591 | } |
| 576 | 592 | ||
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 53db2c309572..ea82fd492c1b 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
| @@ -134,6 +134,7 @@ static bool tcp_fastopen_create_child(struct sock *sk, | |||
| 134 | struct tcp_sock *tp; | 134 | struct tcp_sock *tp; |
| 135 | struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; | 135 | struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; |
| 136 | struct sock *child; | 136 | struct sock *child; |
| 137 | u32 end_seq; | ||
| 137 | 138 | ||
| 138 | req->num_retrans = 0; | 139 | req->num_retrans = 0; |
| 139 | req->num_timeout = 0; | 140 | req->num_timeout = 0; |
| @@ -185,20 +186,35 @@ static bool tcp_fastopen_create_child(struct sock *sk, | |||
| 185 | 186 | ||
| 186 | /* Queue the data carried in the SYN packet. We need to first | 187 | /* Queue the data carried in the SYN packet. We need to first |
| 187 | * bump skb's refcnt because the caller will attempt to free it. | 188 | * bump skb's refcnt because the caller will attempt to free it. |
| 189 | * Note that IPv6 might also have used skb_get() trick | ||
| 190 | * in tcp_v6_conn_request() to keep this SYN around (treq->pktopts) | ||
| 191 | * So we need to eventually get a clone of the packet, | ||
| 192 | * before inserting it in sk_receive_queue. | ||
| 188 | * | 193 | * |
| 189 | * XXX (TFO) - we honor a zero-payload TFO request for now, | 194 | * XXX (TFO) - we honor a zero-payload TFO request for now, |
| 190 | * (any reason not to?) but no need to queue the skb since | 195 | * (any reason not to?) but no need to queue the skb since |
| 191 | * there is no data. How about SYN+FIN? | 196 | * there is no data. How about SYN+FIN? |
| 192 | */ | 197 | */ |
| 193 | if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) { | 198 | end_seq = TCP_SKB_CB(skb)->end_seq; |
| 194 | skb = skb_get(skb); | 199 | if (end_seq != TCP_SKB_CB(skb)->seq + 1) { |
| 195 | skb_dst_drop(skb); | 200 | struct sk_buff *skb2; |
| 196 | __skb_pull(skb, tcp_hdr(skb)->doff * 4); | 201 | |
| 197 | skb_set_owner_r(skb, child); | 202 | if (unlikely(skb_shared(skb))) |
| 198 | __skb_queue_tail(&child->sk_receive_queue, skb); | 203 | skb2 = skb_clone(skb, GFP_ATOMIC); |
| 199 | tp->syn_data_acked = 1; | 204 | else |
| 205 | skb2 = skb_get(skb); | ||
| 206 | |||
| 207 | if (likely(skb2)) { | ||
| 208 | skb_dst_drop(skb2); | ||
| 209 | __skb_pull(skb2, tcp_hdrlen(skb)); | ||
| 210 | skb_set_owner_r(skb2, child); | ||
| 211 | __skb_queue_tail(&child->sk_receive_queue, skb2); | ||
| 212 | tp->syn_data_acked = 1; | ||
| 213 | } else { | ||
| 214 | end_seq = TCP_SKB_CB(skb)->seq + 1; | ||
| 215 | } | ||
| 200 | } | 216 | } |
| 201 | tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; | 217 | tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq; |
| 202 | sk->sk_data_ready(sk); | 218 | sk->sk_data_ready(sk); |
| 203 | bh_unlock_sock(child); | 219 | bh_unlock_sock(child); |
| 204 | sock_put(child); | 220 | sock_put(child); |
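Illustrative note, not part of the patch: the tcp_fastopen hunk above replaces an unconditional skb_get() with a clone when the SYN skb is already shared (e.g. IPv6 keeping it in treq->pktopts), so that __skb_pull() does not modify data another holder still expects. A hedged kernel-context sketch of that decision (the helper name is hypothetical):

```c
#include <linux/skbuff.h>

/* Obtain an skb we may safely queue and adjust. If the skb is shared,
 * take a private clone whose header pointers we can move; otherwise a
 * plain reference is enough. The caller must handle a NULL return,
 * since skb_clone() can fail under memory pressure. */
static struct sk_buff *grab_for_queueing(struct sk_buff *skb)
{
	struct sk_buff *copy;

	if (unlikely(skb_shared(skb)))
		copy = skb_clone(skb, GFP_ATOMIC);
	else
		copy = skb_get(skb);	/* just bumps the user count */

	return copy;
}
```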
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index d10f6f4ead27..4915d8284a86 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
| @@ -402,6 +402,13 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff) | |||
| 402 | } | 402 | } |
| 403 | 403 | ||
| 404 | rcu_read_unlock(); | 404 | rcu_read_unlock(); |
| 405 | |||
| 406 | if (skb->remcsum_offload) | ||
| 407 | skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM; | ||
| 408 | |||
| 409 | skb->encapsulation = 1; | ||
| 410 | skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr)); | ||
| 411 | |||
| 405 | return err; | 412 | return err; |
| 406 | } | 413 | } |
| 407 | 414 | ||
| @@ -410,9 +417,13 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff) | |||
| 410 | const struct iphdr *iph = ip_hdr(skb); | 417 | const struct iphdr *iph = ip_hdr(skb); |
| 411 | struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); | 418 | struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); |
| 412 | 419 | ||
| 413 | if (uh->check) | 420 | if (uh->check) { |
| 421 | skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; | ||
| 414 | uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr, | 422 | uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr, |
| 415 | iph->daddr, 0); | 423 | iph->daddr, 0); |
| 424 | } else { | ||
| 425 | skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; | ||
| 426 | } | ||
| 416 | 427 | ||
| 417 | return udp_gro_complete(skb, nhoff); | 428 | return udp_gro_complete(skb, nhoff); |
| 418 | } | 429 | } |
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 2f780cba6e12..f45d6db50a45 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
| @@ -172,7 +172,7 @@ static void __net_exit ip6_fl_purge(struct net *net) | |||
| 172 | { | 172 | { |
| 173 | int i; | 173 | int i; |
| 174 | 174 | ||
| 175 | spin_lock(&ip6_fl_lock); | 175 | spin_lock_bh(&ip6_fl_lock); |
| 176 | for (i = 0; i <= FL_HASH_MASK; i++) { | 176 | for (i = 0; i <= FL_HASH_MASK; i++) { |
| 177 | struct ip6_flowlabel *fl; | 177 | struct ip6_flowlabel *fl; |
| 178 | struct ip6_flowlabel __rcu **flp; | 178 | struct ip6_flowlabel __rcu **flp; |
| @@ -190,7 +190,7 @@ static void __net_exit ip6_fl_purge(struct net *net) | |||
| 190 | flp = &fl->next; | 190 | flp = &fl->next; |
| 191 | } | 191 | } |
| 192 | } | 192 | } |
| 193 | spin_unlock(&ip6_fl_lock); | 193 | spin_unlock_bh(&ip6_fl_lock); |
| 194 | } | 194 | } |
| 195 | 195 | ||
| 196 | static struct ip6_flowlabel *fl_intern(struct net *net, | 196 | static struct ip6_flowlabel *fl_intern(struct net *net, |
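Illustrative note, not part of the patch: ip6_fl_lock is also taken from timer/BH context, so the purge path above must use the _bh lock variants or risk a deadlock when a softirq fires on the same CPU while the lock is held. A hedged kernel-context sketch of the general pattern (names are hypothetical):

```c
#include <linux/spinlock.h>

/* A lock shared between process context and softirq context. */
static DEFINE_SPINLOCK(example_lock);

static void process_context_path(void)
{
	spin_lock_bh(&example_lock);	/* disables softirqs locally */
	/* ... touch data shared with the softirq path ... */
	spin_unlock_bh(&example_lock);
}

static void softirq_context_path(void)
{
	spin_lock(&example_lock);	/* already running in BH context */
	/* ... */
	spin_unlock(&example_lock);
}
```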
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d33df4cbd872..7deebf102cba 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
| @@ -1273,7 +1273,7 @@ emsgsize: | |||
| 1273 | /* If this is the first and only packet and device | 1273 | /* If this is the first and only packet and device |
| 1274 | * supports checksum offloading, let's use it. | 1274 | * supports checksum offloading, let's use it. |
| 1275 | */ | 1275 | */ |
| 1276 | if (!skb && | 1276 | if (!skb && sk->sk_protocol == IPPROTO_UDP && |
| 1277 | length + fragheaderlen < mtu && | 1277 | length + fragheaderlen < mtu && |
| 1278 | rt->dst.dev->features & NETIF_F_V6_CSUM && | 1278 | rt->dst.dev->features & NETIF_F_V6_CSUM && |
| 1279 | !exthdrlen) | 1279 | !exthdrlen) |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 98565ce0ebcd..4688bd4d7f59 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
| @@ -141,7 +141,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old) | |||
| 141 | u32 *p = NULL; | 141 | u32 *p = NULL; |
| 142 | 142 | ||
| 143 | if (!(rt->dst.flags & DST_HOST)) | 143 | if (!(rt->dst.flags & DST_HOST)) |
| 144 | return NULL; | 144 | return dst_cow_metrics_generic(dst, old); |
| 145 | 145 | ||
| 146 | peer = rt6_get_peer_create(rt); | 146 | peer = rt6_get_peer_create(rt); |
| 147 | if (peer) { | 147 | if (peer) { |
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index a56276996b72..ab889bb16b3c 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
| @@ -161,9 +161,13 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff) | |||
| 161 | const struct ipv6hdr *ipv6h = ipv6_hdr(skb); | 161 | const struct ipv6hdr *ipv6h = ipv6_hdr(skb); |
| 162 | struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); | 162 | struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); |
| 163 | 163 | ||
| 164 | if (uh->check) | 164 | if (uh->check) { |
| 165 | skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; | ||
| 165 | uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr, | 166 | uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr, |
| 166 | &ipv6h->daddr, 0); | 167 | &ipv6h->daddr, 0); |
| 168 | } else { | ||
| 169 | skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; | ||
| 170 | } | ||
| 167 | 171 | ||
| 168 | return udp_gro_complete(skb, nhoff); | 172 | return udp_gro_complete(skb, nhoff); |
| 169 | } | 173 | } |
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 265e190f2218..c598f74063a1 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/netfilter/x_tables.h> | 19 | #include <linux/netfilter/x_tables.h> |
| 20 | #include <linux/netfilter_ipv4/ip_tables.h> | 20 | #include <linux/netfilter_ipv4/ip_tables.h> |
| 21 | #include <linux/netfilter_ipv6/ip6_tables.h> | 21 | #include <linux/netfilter_ipv6/ip6_tables.h> |
| 22 | #include <linux/netfilter_bridge/ebtables.h> | ||
| 22 | #include <net/netfilter/nf_tables.h> | 23 | #include <net/netfilter/nf_tables.h> |
| 23 | 24 | ||
| 24 | static int nft_compat_chain_validate_dependency(const char *tablename, | 25 | static int nft_compat_chain_validate_dependency(const char *tablename, |
| @@ -40,6 +41,7 @@ static int nft_compat_chain_validate_dependency(const char *tablename, | |||
| 40 | union nft_entry { | 41 | union nft_entry { |
| 41 | struct ipt_entry e4; | 42 | struct ipt_entry e4; |
| 42 | struct ip6t_entry e6; | 43 | struct ip6t_entry e6; |
| 44 | struct ebt_entry ebt; | ||
| 43 | }; | 45 | }; |
| 44 | 46 | ||
| 45 | static inline void | 47 | static inline void |
| @@ -50,9 +52,9 @@ nft_compat_set_par(struct xt_action_param *par, void *xt, const void *xt_info) | |||
| 50 | par->hotdrop = false; | 52 | par->hotdrop = false; |
| 51 | } | 53 | } |
| 52 | 54 | ||
| 53 | static void nft_target_eval(const struct nft_expr *expr, | 55 | static void nft_target_eval_xt(const struct nft_expr *expr, |
| 54 | struct nft_data data[NFT_REG_MAX + 1], | 56 | struct nft_data data[NFT_REG_MAX + 1], |
| 55 | const struct nft_pktinfo *pkt) | 57 | const struct nft_pktinfo *pkt) |
| 56 | { | 58 | { |
| 57 | void *info = nft_expr_priv(expr); | 59 | void *info = nft_expr_priv(expr); |
| 58 | struct xt_target *target = expr->ops->data; | 60 | struct xt_target *target = expr->ops->data; |
| @@ -66,7 +68,7 @@ static void nft_target_eval(const struct nft_expr *expr, | |||
| 66 | if (pkt->xt.hotdrop) | 68 | if (pkt->xt.hotdrop) |
| 67 | ret = NF_DROP; | 69 | ret = NF_DROP; |
| 68 | 70 | ||
| 69 | switch(ret) { | 71 | switch (ret) { |
| 70 | case XT_CONTINUE: | 72 | case XT_CONTINUE: |
| 71 | data[NFT_REG_VERDICT].verdict = NFT_CONTINUE; | 73 | data[NFT_REG_VERDICT].verdict = NFT_CONTINUE; |
| 72 | break; | 74 | break; |
| @@ -74,7 +76,41 @@ static void nft_target_eval(const struct nft_expr *expr, | |||
| 74 | data[NFT_REG_VERDICT].verdict = ret; | 76 | data[NFT_REG_VERDICT].verdict = ret; |
| 75 | break; | 77 | break; |
| 76 | } | 78 | } |
| 77 | return; | 79 | } |
| 80 | |||
| 81 | static void nft_target_eval_bridge(const struct nft_expr *expr, | ||
| 82 | struct nft_data data[NFT_REG_MAX + 1], | ||
| 83 | const struct nft_pktinfo *pkt) | ||
| 84 | { | ||
| 85 | void *info = nft_expr_priv(expr); | ||
| 86 | struct xt_target *target = expr->ops->data; | ||
| 87 | struct sk_buff *skb = pkt->skb; | ||
| 88 | int ret; | ||
| 89 | |||
| 90 | nft_compat_set_par((struct xt_action_param *)&pkt->xt, target, info); | ||
| 91 | |||
| 92 | ret = target->target(skb, &pkt->xt); | ||
| 93 | |||
| 94 | if (pkt->xt.hotdrop) | ||
| 95 | ret = NF_DROP; | ||
| 96 | |||
| 97 | switch (ret) { | ||
| 98 | case EBT_ACCEPT: | ||
| 99 | data[NFT_REG_VERDICT].verdict = NF_ACCEPT; | ||
| 100 | break; | ||
| 101 | case EBT_DROP: | ||
| 102 | data[NFT_REG_VERDICT].verdict = NF_DROP; | ||
| 103 | break; | ||
| 104 | case EBT_CONTINUE: | ||
| 105 | data[NFT_REG_VERDICT].verdict = NFT_CONTINUE; | ||
| 106 | break; | ||
| 107 | case EBT_RETURN: | ||
| 108 | data[NFT_REG_VERDICT].verdict = NFT_RETURN; | ||
| 109 | break; | ||
| 110 | default: | ||
| 111 | data[NFT_REG_VERDICT].verdict = ret; | ||
| 112 | break; | ||
| 113 | } | ||
| 78 | } | 114 | } |
| 79 | 115 | ||
| 80 | static const struct nla_policy nft_target_policy[NFTA_TARGET_MAX + 1] = { | 116 | static const struct nla_policy nft_target_policy[NFTA_TARGET_MAX + 1] = { |
| @@ -100,6 +136,10 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, | |||
| 100 | entry->e6.ipv6.proto = proto; | 136 | entry->e6.ipv6.proto = proto; |
| 101 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; | 137 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
| 102 | break; | 138 | break; |
| 139 | case NFPROTO_BRIDGE: | ||
| 140 | entry->ebt.ethproto = proto; | ||
| 141 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; | ||
| 142 | break; | ||
| 103 | } | 143 | } |
| 104 | par->entryinfo = entry; | 144 | par->entryinfo = entry; |
| 105 | par->target = target; | 145 | par->target = target; |
| @@ -307,6 +347,10 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, | |||
| 307 | entry->e6.ipv6.proto = proto; | 347 | entry->e6.ipv6.proto = proto; |
| 308 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; | 348 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
| 309 | break; | 349 | break; |
| 350 | case NFPROTO_BRIDGE: | ||
| 351 | entry->ebt.ethproto = proto; | ||
| 352 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; | ||
| 353 | break; | ||
| 310 | } | 354 | } |
| 311 | par->entryinfo = entry; | 355 | par->entryinfo = entry; |
| 312 | par->match = match; | 356 | par->match = match; |
| @@ -490,6 +534,9 @@ nfnl_compat_get(struct sock *nfnl, struct sk_buff *skb, | |||
| 490 | case AF_INET6: | 534 | case AF_INET6: |
| 491 | fmt = "ip6t_%s"; | 535 | fmt = "ip6t_%s"; |
| 492 | break; | 536 | break; |
| 537 | case NFPROTO_BRIDGE: | ||
| 538 | fmt = "ebt_%s"; | ||
| 539 | break; | ||
| 493 | default: | 540 | default: |
| 494 | pr_err("nft_compat: unsupported protocol %d\n", | 541 | pr_err("nft_compat: unsupported protocol %d\n", |
| 495 | nfmsg->nfgen_family); | 542 | nfmsg->nfgen_family); |
| @@ -663,13 +710,17 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
| 663 | 710 | ||
| 664 | nft_target->ops.type = &nft_target_type; | 711 | nft_target->ops.type = &nft_target_type; |
| 665 | nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); | 712 | nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); |
| 666 | nft_target->ops.eval = nft_target_eval; | ||
| 667 | nft_target->ops.init = nft_target_init; | 713 | nft_target->ops.init = nft_target_init; |
| 668 | nft_target->ops.destroy = nft_target_destroy; | 714 | nft_target->ops.destroy = nft_target_destroy; |
| 669 | nft_target->ops.dump = nft_target_dump; | 715 | nft_target->ops.dump = nft_target_dump; |
| 670 | nft_target->ops.validate = nft_target_validate; | 716 | nft_target->ops.validate = nft_target_validate; |
| 671 | nft_target->ops.data = target; | 717 | nft_target->ops.data = target; |
| 672 | 718 | ||
| 719 | if (family == NFPROTO_BRIDGE) | ||
| 720 | nft_target->ops.eval = nft_target_eval_bridge; | ||
| 721 | else | ||
| 722 | nft_target->ops.eval = nft_target_eval_xt; | ||
| 723 | |||
| 673 | list_add(&nft_target->head, &nft_target_list); | 724 | list_add(&nft_target->head, &nft_target_list); |
| 674 | 725 | ||
| 675 | return &nft_target->ops; | 726 | return &nft_target->ops; |
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 6404a726d17b..9615b8b9fb37 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
| @@ -39,6 +39,7 @@ static void nft_lookup_eval(const struct nft_expr *expr, | |||
| 39 | 39 | ||
| 40 | static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = { | 40 | static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = { |
| 41 | [NFTA_LOOKUP_SET] = { .type = NLA_STRING }, | 41 | [NFTA_LOOKUP_SET] = { .type = NLA_STRING }, |
| 42 | [NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 }, | ||
| 42 | [NFTA_LOOKUP_SREG] = { .type = NLA_U32 }, | 43 | [NFTA_LOOKUP_SREG] = { .type = NLA_U32 }, |
| 43 | [NFTA_LOOKUP_DREG] = { .type = NLA_U32 }, | 44 | [NFTA_LOOKUP_DREG] = { .type = NLA_U32 }, |
| 44 | }; | 45 | }; |
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index e2c348b8baca..50ec42f170a0 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
| @@ -717,6 +717,8 @@ int ovs_flow_key_extract_userspace(const struct nlattr *attr, | |||
| 717 | { | 717 | { |
| 718 | int err; | 718 | int err; |
| 719 | 719 | ||
| 720 | memset(key, 0, OVS_SW_FLOW_KEY_METADATA_SIZE); | ||
| 721 | |||
| 720 | /* Extract metadata from netlink attributes. */ | 722 | /* Extract metadata from netlink attributes. */ |
| 721 | err = ovs_nla_get_flow_metadata(attr, key, log); | 723 | err = ovs_nla_get_flow_metadata(attr, key, log); |
| 722 | if (err) | 724 | if (err) |
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 993281e6278d..216f20b90aa5 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
| @@ -1516,7 +1516,7 @@ int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb) | |||
| 1516 | /* Called with ovs_mutex or RCU read lock. */ | 1516 | /* Called with ovs_mutex or RCU read lock. */ |
| 1517 | int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb) | 1517 | int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb) |
| 1518 | { | 1518 | { |
| 1519 | return ovs_nla_put_key(&flow->mask->key, &flow->key, | 1519 | return ovs_nla_put_key(&flow->key, &flow->key, |
| 1520 | OVS_FLOW_ATTR_KEY, false, skb); | 1520 | OVS_FLOW_ATTR_KEY, false, skb); |
| 1521 | } | 1521 | } |
| 1522 | 1522 | ||
| @@ -1746,7 +1746,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, | |||
| 1746 | struct sw_flow_key key; | 1746 | struct sw_flow_key key; |
| 1747 | struct ovs_tunnel_info *tun_info; | 1747 | struct ovs_tunnel_info *tun_info; |
| 1748 | struct nlattr *a; | 1748 | struct nlattr *a; |
| 1749 | int err, start, opts_type; | 1749 | int err = 0, start, opts_type; |
| 1750 | 1750 | ||
| 1751 | ovs_match_init(&match, &key, NULL); | 1751 | ovs_match_init(&match, &key, NULL); |
| 1752 | opts_type = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log); | 1752 | opts_type = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log); |
diff --git a/net/rds/cong.c b/net/rds/cong.c
index e5b65acd650b..e6144b8246fd 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
| @@ -221,7 +221,21 @@ void rds_cong_queue_updates(struct rds_cong_map *map) | |||
| 221 | list_for_each_entry(conn, &map->m_conn_list, c_map_item) { | 221 | list_for_each_entry(conn, &map->m_conn_list, c_map_item) { |
| 222 | if (!test_and_set_bit(0, &conn->c_map_queued)) { | 222 | if (!test_and_set_bit(0, &conn->c_map_queued)) { |
| 223 | rds_stats_inc(s_cong_update_queued); | 223 | rds_stats_inc(s_cong_update_queued); |
| 224 | rds_send_xmit(conn); | 224 | /* We cannot inline the call to rds_send_xmit() here |
| 225 | * for two reasons (both pertaining to a TCP transport): | ||
| 226 | * 1. When we get here from the receive path, we | ||
| 227 | * are already holding the sock_lock (held by | ||
| 228 | * tcp_v4_rcv()). So inlining calls to | ||
| 229 | * tcp_setsockopt and/or tcp_sendmsg will deadlock | ||
| 230 | * when it tries to get the sock_lock()) | ||
| 231 | * 2. Interrupts are masked so that we can mark the | ||
| 232 | * the port congested from both send and recv paths. | ||
| 233 | * (See comment around declaration of rdc_cong_lock). | ||
| 234 | * An attempt to get the sock_lock() here will | ||
| 235 | * therefore trigger warnings. | ||
| 236 | * Defer the xmit to rds_send_worker() instead. | ||
| 237 | */ | ||
| 238 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | ||
| 225 | } | 239 | } |
| 226 | } | 240 | } |
| 227 | 241 | ||
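Illustrative note, not part of the patch: the rds_cong_queue_updates() hunk above defers the transmit to rds_send_worker() via queue_delayed_work() instead of calling rds_send_xmit() inline from a context that may already hold the socket lock with BHs disabled. A hedged kernel-context sketch of that defer-to-workqueue pattern (names are hypothetical):

```c
#include <linux/workqueue.h>

/* Runs later in process context, free to take sock_lock etc. */
static void deferred_xmit(struct work_struct *work)
{
	/* ... do the actual transmit work here ... */
}

static DECLARE_DELAYED_WORK(xmit_work, deferred_xmit);

/* Called from a context where the work cannot run inline
 * (locks held, BHs disabled, ...). */
static void kick_xmit(void)
{
	/* delay of 0 jiffies: run as soon as a worker is available */
	queue_delayed_work(system_wq, &xmit_work, 0);
}
```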
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 899d0319f2b2..2274e723a3df 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
| @@ -348,7 +348,7 @@ config NET_SCH_PLUG | |||
| 348 | comment "Classification" | 348 | comment "Classification" |
| 349 | 349 | ||
| 350 | config NET_CLS | 350 | config NET_CLS |
| 351 | boolean | 351 | bool |
| 352 | 352 | ||
| 353 | config NET_CLS_BASIC | 353 | config NET_CLS_BASIC |
| 354 | tristate "Elementary classification (BASIC)" | 354 | tristate "Elementary classification (BASIC)" |
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 651f49ab601f..9dd0ea8db463 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
| @@ -309,12 +309,15 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied) | |||
| 309 | struct rpc_xprt *xprt = req->rq_xprt; | 309 | struct rpc_xprt *xprt = req->rq_xprt; |
| 310 | struct svc_serv *bc_serv = xprt->bc_serv; | 310 | struct svc_serv *bc_serv = xprt->bc_serv; |
| 311 | 311 | ||
| 312 | spin_lock(&xprt->bc_pa_lock); | ||
| 313 | list_del(&req->rq_bc_pa_list); | ||
| 314 | spin_unlock(&xprt->bc_pa_lock); | ||
| 315 | |||
| 312 | req->rq_private_buf.len = copied; | 316 | req->rq_private_buf.len = copied; |
| 313 | set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); | 317 | set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); |
| 314 | 318 | ||
| 315 | dprintk("RPC: add callback request to list\n"); | 319 | dprintk("RPC: add callback request to list\n"); |
| 316 | spin_lock(&bc_serv->sv_cb_lock); | 320 | spin_lock(&bc_serv->sv_cb_lock); |
| 317 | list_del(&req->rq_bc_pa_list); | ||
| 318 | list_add(&req->rq_bc_list, &bc_serv->sv_cb_list); | 321 | list_add(&req->rq_bc_list, &bc_serv->sv_cb_list); |
| 319 | wake_up(&bc_serv->sv_cb_waitq); | 322 | wake_up(&bc_serv->sv_cb_waitq); |
| 320 | spin_unlock(&bc_serv->sv_cb_lock); | 323 | spin_unlock(&bc_serv->sv_cb_lock); |
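Illustrative note, not part of the patch: the hunk above moves the list_del() of rq_bc_pa_list under xprt->bc_pa_lock, the lock that actually protects that list, rather than doing it while holding bc_serv->sv_cb_lock. A hedged generic sketch of moving an item between two lists guarded by different locks (names are hypothetical):

```c
#include <linux/list.h>
#include <linux/spinlock.h>

/* Move an item from a source list to a destination list when each list
 * is protected by its own spinlock: touch each list only while holding
 * that list's lock. */
static void move_item(struct list_head *item,
		      spinlock_t *src_lock,
		      spinlock_t *dst_lock, struct list_head *dst)
{
	spin_lock(src_lock);
	list_del(item);		/* remove under the source list's lock */
	spin_unlock(src_lock);

	spin_lock(dst_lock);
	list_add(item, dst);	/* insert under the destination's lock */
	spin_unlock(dst_lock);
}
```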
diff --git a/net/switchdev/Kconfig b/net/switchdev/Kconfig
index 155754588fd6..86a47e17cfaf 100644
--- a/net/switchdev/Kconfig
+++ b/net/switchdev/Kconfig
| @@ -3,7 +3,7 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | config NET_SWITCHDEV | 5 | config NET_SWITCHDEV |
| 6 | boolean "Switch (and switch-ish) device support (EXPERIMENTAL)" | 6 | bool "Switch (and switch-ish) device support (EXPERIMENTAL)" |
| 7 | depends on INET | 7 | depends on INET |
| 8 | ---help--- | 8 | ---help--- |
| 9 | This module provides glue between core networking code and device | 9 | This module provides glue between core networking code and device |
