commit    c362495586e8a3a6487a318fcd82eaf15ffe2142
tree      86f7b195d36ba198f24f86be327f21a8d24ec248 /net
parent    b70936d9ffbf0f45f4fa13a03122f015f13ecdb0
parent    ddffeb8c4d0331609ef2581d84de4d763607bd37
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-10-15 01:41:27 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-10-15 01:41:27 -0400
Merge 3.7-rc1 into tty-linus
This syncs up the tty-linus branch to the latest in Linus's tree to get all of
the UAPI stuff needed for the next set of patches to merge.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'net')
66 files changed, 745 insertions, 754 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index b258da88f675..fbbf1fa00940 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -5,7 +5,7 @@
 #include <linux/export.h>
 #include "vlan.h"
 
-bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
+bool vlan_do_receive(struct sk_buff **skbp)
 {
 struct sk_buff *skb = *skbp;
 u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
@@ -13,14 +13,8 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
 struct vlan_pcpu_stats *rx_stats;
 
 vlan_dev = vlan_find_dev(skb->dev, vlan_id);
-if (!vlan_dev) {
-/* Only the last call to vlan_do_receive() should change
-* pkt_type to PACKET_OTHERHOST
-*/
-if (vlan_id && last_handler)
-skb->pkt_type = PACKET_OTHERHOST;
+if (!vlan_dev)
 return false;
-}
 
 skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
 if (unlikely(!skb))
@@ -105,7 +99,6 @@ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
 return NULL;
 memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
 skb->mac_header += VLAN_HLEN;
-skb_reset_mac_len(skb);
 return skb;
 }
 
@@ -139,6 +132,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
 
 skb_reset_network_header(skb);
 skb_reset_transport_header(skb);
+skb_reset_mac_len(skb);
+
 return skb;
 
 err_free:
diff --git a/net/9p/client.c b/net/9p/client.c
index 8260f132b32e..34d417670935 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -76,6 +76,20 @@ inline int p9_is_proto_dotu(struct p9_client *clnt)
 }
 EXPORT_SYMBOL(p9_is_proto_dotu);
 
+/*
+* Some error codes are taken directly from the server replies,
+* make sure they are valid.
+*/
+static int safe_errno(int err)
+{
+if ((err > 0) || (err < -MAX_ERRNO)) {
+p9_debug(P9_DEBUG_ERROR, "Invalid error code %d\n", err);
+return -EPROTO;
+}
+return err;
+}
+
+
 /* Interpret mount option for protocol version */
 static int get_protocol_version(char *s)
 {
@@ -782,7 +796,7 @@ again:
 return req;
 reterr:
 p9_free_req(c, req);
-return ERR_PTR(err);
+return ERR_PTR(safe_errno(err));
 }
 
 /**
@@ -865,7 +879,7 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
 return req;
 reterr:
 p9_free_req(c, req);
-return ERR_PTR(err);
+return ERR_PTR(safe_errno(err));
 }
 
 static struct p9_fid *p9_fid_create(struct p9_client *clnt)
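The safe_errno() helper added above guards the 9P client against malformed status values coming back from the server: only values in the range [-MAX_ERRNO, 0] can be packed into an ERR_PTR() and recognized by IS_ERR(), so anything else is mapped to -EPROTO. A minimal userspace sketch of the same clamping idea (standalone illustration, not the kernel code; MAX_ERRNO is redefined locally):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095	/* same bound the kernel uses when encoding errors in pointers */

/* Clamp a peer-supplied status to a valid negative errno. */
static int safe_errno(int err)
{
	if (err > 0 || err < -MAX_ERRNO) {
		fprintf(stderr, "invalid error code %d, using -EPROTO\n", err);
		return -EPROTO;
	}
	return err;
}

int main(void)
{
	printf("%d %d %d\n", safe_errno(-ENOENT), safe_errno(7), safe_errno(-100000));
	return 0;
}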
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 15656b8573f3..02efb25c2957 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -316,8 +316,7 @@ static void p9_read_work(struct work_struct *work)
 m->rsize - m->rpos);
 p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
 if (err == -EAGAIN) {
-clear_bit(Rworksched, &m->wsched);
-return;
+goto end_clear;
 }
 
 if (err <= 0)
@@ -379,19 +378,20 @@ static void p9_read_work(struct work_struct *work)
 m->req = NULL;
 }
 
+end_clear:
+clear_bit(Rworksched, &m->wsched);
+
 if (!list_empty(&m->req_list)) {
 if (test_and_clear_bit(Rpending, &m->wsched))
 n = POLLIN;
 else
 n = p9_fd_poll(m->client, NULL);
 
-if (n & POLLIN) {
+if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
 p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
 schedule_work(&m->rq);
-} else
-clear_bit(Rworksched, &m->wsched);
-} else
-clear_bit(Rworksched, &m->wsched);
+}
+}
 
 return;
 error:
@@ -453,12 +453,13 @@ static void p9_write_work(struct work_struct *work)
 }
 
 if (!m->wsize) {
+spin_lock(&m->client->lock);
 if (list_empty(&m->unsent_req_list)) {
 clear_bit(Wworksched, &m->wsched);
+spin_unlock(&m->client->lock);
 return;
 }
 
-spin_lock(&m->client->lock);
 req = list_entry(m->unsent_req_list.next, struct p9_req_t,
 req_list);
 req->status = REQ_STATUS_SENT;
@@ -476,10 +477,9 @@ static void p9_write_work(struct work_struct *work)
 clear_bit(Wpending, &m->wsched);
 err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
 p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
-if (err == -EAGAIN) {
-clear_bit(Wworksched, &m->wsched);
-return;
-}
+if (err == -EAGAIN)
+goto end_clear;
+
 
 if (err < 0)
 goto error;
@@ -492,19 +492,21 @@ static void p9_write_work(struct work_struct *work)
 if (m->wpos == m->wsize)
 m->wpos = m->wsize = 0;
 
-if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
+end_clear:
+clear_bit(Wworksched, &m->wsched);
+
+if (m->wsize || !list_empty(&m->unsent_req_list)) {
 if (test_and_clear_bit(Wpending, &m->wsched))
 n = POLLOUT;
 else
 n = p9_fd_poll(m->client, NULL);
 
-if (n & POLLOUT) {
+if ((n & POLLOUT) &&
+!test_and_set_bit(Wworksched, &m->wsched)) {
 p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
 schedule_work(&m->wq);
-} else
-clear_bit(Wworksched, &m->wsched);
-} else
-clear_bit(Wworksched, &m->wsched);
+}
+}
 
 return;
 
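Both work functions above now funnel their exits through a single end_clear label that drops the Rworksched/Wworksched bit, and they only reschedule themselves when test_and_set_bit() shows the work was not already queued. A compact userspace sketch of that reschedule-once idiom, using C11 atomic_flag as a stand-in for the kernel's bit operations (names are illustrative, not from the patch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag work_scheduled = ATOMIC_FLAG_INIT;

static void fake_schedule(void) { puts("work queued"); }

/* Queue the worker only if it is not already queued, mirroring
 * test_and_set_bit(Wworksched, &m->wsched) in the patch. */
static bool maybe_schedule(void)
{
	if (atomic_flag_test_and_set(&work_scheduled))
		return false;	/* already scheduled */
	fake_schedule();
	return true;
}

/* One worker pass: clear the flag at a single end_clear point,
 * then requeue only if more work is pending. */
static void worker_pass(bool more_pending)
{
	/* ... read or write as much as possible ... */
	atomic_flag_clear(&work_scheduled);	/* end_clear: */
	if (more_pending)
		maybe_schedule();
}

int main(void)
{
	maybe_schedule();	/* first request queues the worker */
	maybe_schedule();	/* duplicate request is a no-op */
	worker_pass(true);	/* worker finishes and requeues itself */
	return 0;
}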
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 9d49ee6d7219..ba033f09196e 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -591,7 +591,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
 atomic_read(&sk->sk_refcnt),
 sk_rmem_alloc_get(sk),
 sk_wmem_alloc_get(sk),
-sock_i_uid(sk),
+from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
 sock_i_ino(sk),
 &src_baswapped,
 &dst_baswapped,
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 68e8f364bbf8..fe43bc7b063f 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -265,6 +265,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
 struct net_device *dev = skb->dev;
 u32 len;
 
+if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+goto inhdr_error;
+
 iph = ip_hdr(skb);
 opt = &(IPCB(skb)->opt);
 
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 821022a7214f..ddac1ee2ed20 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -63,7 +63,7 @@
 
 #include "af_can.h"
 
-static __initdata const char banner[] = KERN_INFO
+static __initconst const char banner[] = KERN_INFO
 "can: controller area network core (" CAN_VERSION_STRING ")\n";
 
 MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 151b7730c12c..6f747582718e 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -77,7 +77,7 @@
 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
 
 #define CAN_BCM_VERSION CAN_VERSION
-static __initdata const char banner[] = KERN_INFO
+static __initconst const char banner[] = KERN_INFO
 "can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";
 
 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
diff --git a/net/can/gw.c b/net/can/gw.c
index 127879c55fb6..1f5c9785a262 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -58,7 +58,7 @@
 #include <net/sock.h>
 
 #define CAN_GW_VERSION "20101209"
-static __initdata const char banner[] =
+static __initconst const char banner[] =
 KERN_INFO "can: netlink gateway (rev " CAN_GW_VERSION ")\n";
 
 MODULE_DESCRIPTION("PF_CAN netlink gateway");
diff --git a/net/can/raw.c b/net/can/raw.c
index 3e9c89356a93..5b0e3e330d97 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -55,7 +55,7 @@
 #include <net/net_namespace.h>
 
 #define CAN_RAW_VERSION CAN_VERSION
-static __initdata const char banner[] =
+static __initconst const char banner[] =
 KERN_INFO "can: raw protocol (rev " CAN_RAW_VERSION ")\n";
 
 MODULE_DESCRIPTION("PF_CAN raw protocol");
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 9da7fdd3cd8a..af14cb425164 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -423,14 +423,15 @@ int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
 }
 }
 
-int ceph_key_instantiate(struct key *key, const void *data, size_t datalen)
+int ceph_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
 {
 struct ceph_crypto_key *ckey;
+size_t datalen = prep->datalen;
 int ret;
 void *p;
 
 ret = -EINVAL;
-if (datalen <= 0 || datalen > 32767 || !data)
+if (datalen <= 0 || datalen > 32767 || !prep->data)
 goto err;
 
 ret = key_payload_reserve(key, datalen);
@@ -443,8 +444,8 @@ int ceph_key_instantiate(struct key *key, const void *data, size_t datalen)
 goto err;
 
 /* TODO ceph_crypto_key_decode should really take const input */
-p = (void *)data;
-ret = ceph_crypto_key_decode(ckey, &p, (char*)data+datalen);
+p = (void *)prep->data;
+ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
 if (ret < 0)
 goto err_ckey;
 
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 900ea0f043fc..812eb3b46c1f 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -637,7 +637,7 @@ bad:
 /*
 * Do a synchronous pool op.
 */
-int ceph_monc_do_poolop(struct ceph_mon_client *monc, u32 op,
+static int do_poolop(struct ceph_mon_client *monc, u32 op,
 u32 pool, u64 snapid,
 char *buf, int len)
 {
@@ -687,7 +687,7 @@ out:
 int ceph_monc_create_snapid(struct ceph_mon_client *monc,
 u32 pool, u64 *snapid)
 {
-return ceph_monc_do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP,
+return do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP,
 pool, 0, (char *)snapid, sizeof(*snapid));
 
 }
@@ -696,7 +696,7 @@ EXPORT_SYMBOL(ceph_monc_create_snapid);
 int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
 u32 pool, u64 snapid)
 {
-return ceph_monc_do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP,
+return do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP,
 pool, snapid, 0, 0);
 
 }
@@ -769,7 +769,6 @@ static int build_initial_monmap(struct ceph_mon_client *monc)
 monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
 }
 monc->monmap->num_mon = num_mon;
-monc->have_fsid = false;
 return 0;
 }
 
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 42119c05e82c..c1d756cc7448 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -52,7 +52,7 @@ static int op_has_extent(int op)
 op == CEPH_OSD_OP_WRITE);
 }
 
-void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
+int ceph_calc_raw_layout(struct ceph_osd_client *osdc,
 struct ceph_file_layout *layout,
 u64 snapid,
 u64 off, u64 *plen, u64 *bno,
@@ -62,12 +62,15 @@ void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
 struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
 u64 orig_len = *plen;
 u64 objoff, objlen; /* extent in object */
+int r;
 
 reqhead->snapid = cpu_to_le64(snapid);
 
 /* object extent? */
-ceph_calc_file_object_mapping(layout, off, plen, bno,
+r = ceph_calc_file_object_mapping(layout, off, plen, bno,
 &objoff, &objlen);
+if (r < 0)
+return r;
 if (*plen < orig_len)
 dout(" skipping last %llu, final file extent %llu~%llu\n",
 orig_len - *plen, off, *plen);
@@ -83,7 +86,7 @@ void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
 
 dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
 *bno, objoff, objlen, req->r_num_pages);
-
+return 0;
 }
 EXPORT_SYMBOL(ceph_calc_raw_layout);
 
@@ -112,20 +115,25 @@ EXPORT_SYMBOL(ceph_calc_raw_layout);
 *
 * fill osd op in request message.
 */
-static void calc_layout(struct ceph_osd_client *osdc,
+static int calc_layout(struct ceph_osd_client *osdc,
 struct ceph_vino vino,
 struct ceph_file_layout *layout,
 u64 off, u64 *plen,
 struct ceph_osd_request *req,
 struct ceph_osd_req_op *op)
 {
 u64 bno;
+int r;
 
-ceph_calc_raw_layout(osdc, layout, vino.snap, off,
+r = ceph_calc_raw_layout(osdc, layout, vino.snap, off,
 plen, &bno, req, op);
+if (r < 0)
+return r;
 
 snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno);
 req->r_oid_len = strlen(req->r_oid);
+
+return r;
 }
 
 /*
@@ -213,7 +221,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 kref_init(&req->r_kref);
 init_completion(&req->r_completion);
 init_completion(&req->r_safe_completion);
-rb_init_node(&req->r_node);
 INIT_LIST_HEAD(&req->r_unsafe_item);
 INIT_LIST_HEAD(&req->r_linger_item);
 INIT_LIST_HEAD(&req->r_linger_osd);
@@ -456,6 +463,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 {
 struct ceph_osd_req_op ops[3];
 struct ceph_osd_request *req;
+int r;
 
 ops[0].op = opcode;
 ops[0].extent.truncate_seq = truncate_seq;
@@ -474,10 +482,12 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 use_mempool,
 GFP_NOFS, NULL, NULL);
 if (!req)
-return NULL;
+return ERR_PTR(-ENOMEM);
 
 /* calculate max write size */
-calc_layout(osdc, vino, layout, off, plen, req, ops);
+r = calc_layout(osdc, vino, layout, off, plen, req, ops);
+if (r < 0)
+return ERR_PTR(r);
 req->r_file_layout = *layout; /* keep a copy */
 
 /* in case it differs from natural (file) alignment that
@@ -1920,8 +1930,8 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 NULL, 0, truncate_seq, truncate_size, NULL,
 false, 1, page_align);
-if (!req)
-return -ENOMEM;
+if (IS_ERR(req))
+return PTR_ERR(req);
 
 /* it may be a short read due to an object boundary */
 req->r_pages = pages;
@@ -1963,8 +1973,8 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
 snapc, do_sync,
 truncate_seq, truncate_size, mtime,
 nofail, 1, page_align);
-if (!req)
-return -ENOMEM;
+if (IS_ERR(req))
+return PTR_ERR(req);
 
 /* it may be a short write due to an object boundary */
 req->r_pages = pages;
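The osd_client changes above switch ceph_osdc_new_request() from returning NULL to returning ERR_PTR(-ENOMEM) or ERR_PTR(r), so callers such as ceph_osdc_readpages() can report the real cause with PTR_ERR() instead of a blanket -ENOMEM. A simplified userspace sketch of the pointer-encoded error convention (local reimplementation for illustration only, not the kernel's err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified ERR_PTR()/IS_ERR()/PTR_ERR(): small negative errno values
 * are stored in the last page of the address space. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int placeholder_request;		/* stands in for a real request object */

static void *new_request(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);	/* was: return NULL */
	return &placeholder_request;
}

int main(void)
{
	void *req = new_request(1);

	if (IS_ERR(req))
		printf("request failed: %ld\n", PTR_ERR(req));
	return 0;
}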
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 3124b71a8883..5433fb0eb3c6 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -984,7 +984,7 @@ bad:
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
-void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
+int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
 u64 off, u64 *plen,
 u64 *ono,
 u64 *oxoff, u64 *oxlen)
@@ -998,11 +998,17 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
 
 dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
 osize, su);
+if (su == 0 || sc == 0)
+goto invalid;
 su_per_object = osize / su;
+if (su_per_object == 0)
+goto invalid;
 dout("osize %u / su %u = su_per_object %u\n", osize, su,
 su_per_object);
 
-BUG_ON((su & ~PAGE_MASK) != 0);
+if ((su & ~PAGE_MASK) != 0)
+goto invalid;
+
 /* bl = *off / su; */
 t = off;
 do_div(t, su);
@@ -1030,6 +1036,14 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
 *plen = *oxlen;
 
 dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
+return 0;
+
+invalid:
+dout(" invalid layout\n");
+*ono = 0;
+*oxoff = 0;
+*oxlen = 0;
+return -EINVAL;
 }
 EXPORT_SYMBOL(ceph_calc_file_object_mapping);
 
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
index 665cd23020ff..92866bebb65f 100644
--- a/net/ceph/pagelist.c
+++ b/net/ceph/pagelist.c
@@ -1,4 +1,3 @@
-
 #include <linux/module.h>
 #include <linux/gfp.h>
 #include <linux/pagemap.h>
@@ -134,8 +133,8 @@ int ceph_pagelist_truncate(struct ceph_pagelist *pl,
 ceph_pagelist_unmap_tail(pl);
 while (pl->head.prev != c->page_lru) {
 page = list_entry(pl->head.prev, struct page, lru);
-list_del(&page->lru); /* remove from pagelist */
-list_add_tail(&page->lru, &pl->free_list); /* add to reserve */
+/* move from pagelist to reserve */
+list_move_tail(&page->lru, &pl->free_list);
 ++pl->num_pages_free;
 }
 pl->room = c->room;
diff --git a/net/core/dev.c b/net/core/dev.c
index 1e0a1847c3bb..09cb3f6dc40c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3300,18 +3300,18 @@ ncls:
 && !skb_pfmemalloc_protocol(skb))
 goto drop;
 
-rx_handler = rcu_dereference(skb->dev->rx_handler);
 if (vlan_tx_tag_present(skb)) {
 if (pt_prev) {
 ret = deliver_skb(skb, pt_prev, orig_dev);
 pt_prev = NULL;
 }
-if (vlan_do_receive(&skb, !rx_handler))
+if (vlan_do_receive(&skb))
 goto another_round;
 else if (unlikely(!skb))
 goto unlock;
 }
 
+rx_handler = rcu_dereference(skb->dev->rx_handler);
 if (rx_handler) {
 if (pt_prev) {
 ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -3331,6 +3331,9 @@ ncls:
 }
 }
 
+if (vlan_tx_nonzero_tag_present(skb))
+skb->pkt_type = PACKET_OTHERHOST;
+
 /* deliver only exact match when indicated */
 null_or_dev = deliver_exact ? skb->dev : NULL;
 
@@ -3471,17 +3474,31 @@ out:
 return netif_receive_skb(skb);
 }
 
-inline void napi_gro_flush(struct napi_struct *napi)
+/* napi->gro_list contains packets ordered by age.
+* youngest packets at the head of it.
+* Complete skbs in reverse order to reduce latencies.
+*/
+void napi_gro_flush(struct napi_struct *napi, bool flush_old)
 {
-struct sk_buff *skb, *next;
+struct sk_buff *skb, *prev = NULL;
 
-for (skb = napi->gro_list; skb; skb = next) {
-next = skb->next;
+/* scan list and build reverse chain */
+for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
+skb->prev = prev;
+prev = skb;
+}
+
+for (skb = prev; skb; skb = prev) {
 skb->next = NULL;
+
+if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
+return;
+
+prev = skb->prev;
 napi_gro_complete(skb);
+napi->gro_count--;
 }
 
-napi->gro_count = 0;
 napi->gro_list = NULL;
 }
 EXPORT_SYMBOL(napi_gro_flush);
@@ -3542,6 +3559,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 
 napi->gro_count++;
 NAPI_GRO_CB(skb)->count = 1;
+NAPI_GRO_CB(skb)->age = jiffies;
 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
 skb->next = napi->gro_list;
 napi->gro_list = skb;
@@ -3631,20 +3649,22 @@ gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_skb_finish);
 
-void skb_gro_reset_offset(struct sk_buff *skb)
+static void skb_gro_reset_offset(struct sk_buff *skb)
 {
+const struct skb_shared_info *pinfo = skb_shinfo(skb);
+const skb_frag_t *frag0 = &pinfo->frags[0];
+
 NAPI_GRO_CB(skb)->data_offset = 0;
 NAPI_GRO_CB(skb)->frag0 = NULL;
 NAPI_GRO_CB(skb)->frag0_len = 0;
 
 if (skb->mac_header == skb->tail &&
-!PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
-NAPI_GRO_CB(skb)->frag0 =
-skb_frag_address(&skb_shinfo(skb)->frags[0]);
-NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
+pinfo->nr_frags &&
+!PageHighMem(skb_frag_page(frag0))) {
+NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
 }
 }
-EXPORT_SYMBOL(skb_gro_reset_offset);
 
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
@@ -3876,7 +3896,7 @@ void napi_complete(struct napi_struct *n)
 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
 return;
 
-napi_gro_flush(n);
+napi_gro_flush(n, false);
 local_irq_save(flags);
 __napi_complete(n);
 local_irq_restore(flags);
@@ -3981,8 +4001,17 @@ static void net_rx_action(struct softirq_action *h)
 local_irq_enable();
 napi_complete(n);
 local_irq_disable();
-} else
+} else {
+if (n->gro_list) {
+/* flush too old packets
+* If HZ < 1000, flush all packets.
+*/
+local_irq_enable();
+napi_gro_flush(n, HZ >= 1000);
+local_irq_disable();
+}
 list_move_tail(&n->poll_list, &sd->poll_list);
+}
 }
 
 netpoll_poll_unlock(have);
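napi_gro_flush() above now completes the held GRO packets oldest-first: it threads a temporary prev pointer through the singly linked gro_list (which is kept youngest-first), walks the reversed chain, and, when flush_old is set, stops at the first packet whose age stamp matches the current jiffies so fresh packets stay queued. A small standalone sketch of that reverse-then-flush walk (illustrative struct and names, not struct sk_buff):

#include <stdbool.h>
#include <stdio.h>

struct pkt {
	struct pkt *next;	/* list is ordered youngest -> oldest */
	struct pkt *prev;	/* temporary back-link for the reverse walk */
	unsigned long age;	/* tick recorded when the packet was queued */
};

/* Complete packets oldest-first; with flush_old set, stop at the first
 * packet stamped in the current tick. */
static void flush(struct pkt **list, unsigned long now, bool flush_old)
{
	struct pkt *p, *prev = NULL;

	for (p = *list; p; p = p->next) {	/* build the reverse chain */
		p->prev = prev;
		prev = p;
	}
	for (p = prev; p; p = prev) {
		p->next = NULL;
		if (flush_old && p->age == now)
			return;			/* keep the young ones queued */
		prev = p->prev;
		printf("completing packet aged %lu\n", p->age);
	}
	*list = NULL;
}

int main(void)
{
	struct pkt old2 = { NULL, NULL, 1 };
	struct pkt old1 = { &old2, NULL, 2 };
	struct pkt young = { &old1, NULL, 3 };
	struct pkt *list = &young;

	flush(&list, 3, true);	/* completes ages 1 and 2, keeps age 3 */
	return 0;
}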
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index baca771caae2..22571488730a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1301,8 +1301,6 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
 if (!dst)
 goto discard;
 
-__skb_pull(skb, skb_network_offset(skb));
-
 if (!neigh_event_send(neigh, skb)) {
 int err;
 struct net_device *dev = neigh->dev;
@@ -1312,6 +1310,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
 neigh_hh_init(neigh, dst);
 
 do {
+__skb_pull(skb, skb_network_offset(skb));
 seq = read_seqbegin(&neigh->ha_lock);
 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
 neigh->ha, NULL, skb->len);
@@ -1342,9 +1341,8 @@ int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
 unsigned int seq;
 int err;
 
-__skb_pull(skb, skb_network_offset(skb));
-
 do {
+__skb_pull(skb, skb_network_offset(skb));
 seq = read_seqbegin(&neigh->ha_lock);
 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
 neigh->ha, NULL, skb->len);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 148e73d2c451..d1dc14c2aac4 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -248,8 +248,8 @@ struct pktgen_dev {
 int removal_mark; /* non-zero => the device is marked for
 * removal by worker thread */
 
-int min_pkt_size; /* = ETH_ZLEN; */
-int max_pkt_size; /* = ETH_ZLEN; */
+int min_pkt_size;
+int max_pkt_size;
 int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */
 int nfrags;
 struct page *page;
@@ -449,8 +449,6 @@ static void pktgen_stop_all_threads_ifs(void);
 static void pktgen_stop(struct pktgen_thread *t);
 static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
 
-static unsigned int scan_ip6(const char *s, char ip[16]);
-
 /* Module parameters, defaults. */
 static int pg_count_d __read_mostly = 1000;
 static int pg_delay_d __read_mostly;
@@ -702,8 +700,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 &pkt_dev->cur_in6_saddr,
 &pkt_dev->cur_in6_daddr);
 } else
-seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n",
-pkt_dev->cur_saddr, pkt_dev->cur_daddr);
+seq_printf(seq, " cur_saddr: %pI4 cur_daddr: %pI4\n",
+&pkt_dev->cur_saddr, &pkt_dev->cur_daddr);
 
 seq_printf(seq, " cur_udp_dst: %d cur_udp_src: %d\n",
 pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src);
@@ -1299,7 +1297,7 @@ static ssize_t pktgen_if_write(struct file *file,
 return -EFAULT;
 buf[len] = 0;
 
-scan_ip6(buf, pkt_dev->in6_daddr.s6_addr);
+in6_pton(buf, -1, pkt_dev->in6_daddr.s6_addr, -1, NULL);
 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);
 
 pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;
@@ -1322,7 +1320,7 @@ static ssize_t pktgen_if_write(struct file *file,
 return -EFAULT;
 buf[len] = 0;
 
-scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr);
+in6_pton(buf, -1, pkt_dev->min_in6_daddr.s6_addr, -1, NULL);
 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);
 
 pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
@@ -1344,7 +1342,7 @@ static ssize_t pktgen_if_write(struct file *file,
 return -EFAULT;
 buf[len] = 0;
 
-scan_ip6(buf, pkt_dev->max_in6_daddr.s6_addr);
+in6_pton(buf, -1, pkt_dev->max_in6_daddr.s6_addr, -1, NULL);
 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr);
 
 if (debug)
@@ -1365,7 +1363,7 @@ static ssize_t pktgen_if_write(struct file *file,
 return -EFAULT;
 buf[len] = 0;
 
-scan_ip6(buf, pkt_dev->in6_saddr.s6_addr);
+in6_pton(buf, -1, pkt_dev->in6_saddr.s6_addr, -1, NULL);
 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr);
 
 pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;
@@ -2036,19 +2034,17 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 /* Set up Dest MAC */
 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
 
-/* Set up pkt size */
-pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
-
 if (pkt_dev->flags & F_IPV6) {
-/*
-* Skip this automatic address setting until locks or functions
-* gets exported
-*/
-
-#ifdef NOTNOW
 int i, set = 0, err = 1;
 struct inet6_dev *idev;
 
+if (pkt_dev->min_pkt_size == 0) {
+pkt_dev->min_pkt_size = 14 + sizeof(struct ipv6hdr)
++ sizeof(struct udphdr)
++ sizeof(struct pktgen_hdr)
++ pkt_dev->pkt_overhead;
+}
+
 for (i = 0; i < IN6_ADDR_HSIZE; i++)
 if (pkt_dev->cur_in6_saddr.s6_addr[i]) {
 set = 1;
@@ -2069,9 +2065,8 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 struct inet6_ifaddr *ifp;
 
 read_lock_bh(&idev->lock);
-for (ifp = idev->addr_list; ifp;
-ifp = ifp->if_next) {
-if (ifp->scope == IFA_LINK &&
+list_for_each_entry(ifp, &idev->addr_list, if_list) {
+if ((ifp->scope & IFA_LINK) &&
 !(ifp->flags & IFA_F_TENTATIVE)) {
 pkt_dev->cur_in6_saddr = ifp->addr;
 err = 0;
@@ -2084,8 +2079,14 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 if (err)
 pr_err("ERROR: IPv6 link address not available\n");
 }
-#endif
 } else {
+if (pkt_dev->min_pkt_size == 0) {
+pkt_dev->min_pkt_size = 14 + sizeof(struct iphdr)
++ sizeof(struct udphdr)
++ sizeof(struct pktgen_hdr)
++ pkt_dev->pkt_overhead;
+}
+
 pkt_dev->saddr_min = 0;
 pkt_dev->saddr_max = 0;
 if (strlen(pkt_dev->src_min) == 0) {
@@ -2111,6 +2112,10 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
 }
 /* Initialize current values. */
+pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
+if (pkt_dev->min_pkt_size > pkt_dev->max_pkt_size)
+pkt_dev->max_pkt_size = pkt_dev->min_pkt_size;
+
 pkt_dev->cur_dst_mac_offset = 0;
 pkt_dev->cur_src_mac_offset = 0;
 pkt_dev->cur_saddr = pkt_dev->saddr_min;
@@ -2758,97 +2763,6 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 return skb;
 }
 
-/*
-* scan_ip6, fmt_ip taken from dietlibc-0.21
-* Author Felix von Leitner <felix-dietlibc@fefe.de>
-*
-* Slightly modified for kernel.
-* Should be candidate for net/ipv4/utils.c
-* --ro
-*/
-
-static unsigned int scan_ip6(const char *s, char ip[16])
-{
-unsigned int i;
-unsigned int len = 0;
-unsigned long u;
-char suffix[16];
-unsigned int prefixlen = 0;
-unsigned int suffixlen = 0;
-__be32 tmp;
-char *pos;
-
-for (i = 0; i < 16; i++)
-ip[i] = 0;
-
-for (;;) {
-if (*s == ':') {
-len++;
-if (s[1] == ':') { /* Found "::", skip to part 2 */
-s += 2;
-len++;
-break;
-}
-s++;
-}
-
-u = simple_strtoul(s, &pos, 16);
-i = pos - s;
-if (!i)
-return 0;
-if (prefixlen == 12 && s[i] == '.') {
-
-/* the last 4 bytes may be written as IPv4 address */
-
-tmp = in_aton(s);
-memcpy((struct in_addr *)(ip + 12), &tmp, sizeof(tmp));
-return i + len;
-}
-ip[prefixlen++] = (u >> 8);
-ip[prefixlen++] = (u & 255);
-s += i;
-len += i;
-if (prefixlen == 16)
-return len;
-}
-
-/* part 2, after "::" */
-for (;;) {
-if (*s == ':') {
-if (suffixlen == 0)
-break;
-s++;
-len++;
-} else if (suffixlen != 0)
-break;
-
-u = simple_strtol(s, &pos, 16);
-i = pos - s;
-if (!i) {
-if (*s)
-len--;
-break;
-}
-if (suffixlen + prefixlen <= 12 && s[i] == '.') {
-tmp = in_aton(s);
-memcpy((struct in_addr *)(suffix + suffixlen), &tmp,
-sizeof(tmp));
-suffixlen += 4;
-len += strlen(s);
-break;
-}
-suffix[suffixlen++] = (u >> 8);
-suffix[suffixlen++] = (u & 255);
-s += i;
-len += i;
-if (prefixlen + suffixlen == 16)
-break;
-}
-for (i = 0; i < suffixlen; i++)
-ip[16 - suffixlen + i] = suffix[i];
-return len;
-}
-
 static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 struct pktgen_dev *pkt_dev)
 {
@@ -2927,7 +2841,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
 pkt_dev->pkt_overhead;
 
-if (datalen < sizeof(struct pktgen_hdr)) {
+if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
 datalen = sizeof(struct pktgen_hdr);
 net_info_ratelimited("increased datalen to %d\n", datalen);
 }
@@ -3548,8 +3462,6 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
 }
 
 pkt_dev->removal_mark = 0;
-pkt_dev->min_pkt_size = ETH_ZLEN;
-pkt_dev->max_pkt_size = ETH_ZLEN;
 pkt_dev->nfrags = 0;
 pkt_dev->delay = pg_delay_d;
 pkt_dev->count = pg_count_d;
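pktgen's private scan_ip6() parser is dropped above in favour of the shared in6_pton() helper from net/core/utils.c, which performs the same literal-to-binary conversion that userspace gets from inet_pton(). A hedged userspace sketch of the equivalent call (the example address is illustrative, not taken from the patch):

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	unsigned char addr[16];
	const char *literal = "fec0::1";	/* placeholder IPv6 literal */

	/* inet_pton() is the userspace analogue of in6_pton(buf, -1, dst, -1, NULL). */
	if (inet_pton(AF_INET6, literal, addr) != 1) {
		fprintf(stderr, "bad IPv6 literal: %s\n", literal);
		return 1;
	}
	printf("first two bytes: %02x %02x\n", addr[0], addr[1]);
	return 0;
}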
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index cdc28598f4ef..6e04b1fa11f2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -655,53 +655,6 @@ void consume_skb(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(consume_skb);
 
-/**
-* skb_recycle - clean up an skb for reuse
-* @skb: buffer
-*
-* Recycles the skb to be reused as a receive buffer. This
-* function does any necessary reference count dropping, and
-* cleans up the skbuff as if it just came from __alloc_skb().
-*/
-void skb_recycle(struct sk_buff *skb)
-{
-struct skb_shared_info *shinfo;
-
-skb_release_head_state(skb);
-
-shinfo = skb_shinfo(skb);
-memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
-atomic_set(&shinfo->dataref, 1);
-
-memset(skb, 0, offsetof(struct sk_buff, tail));
-skb->data = skb->head + NET_SKB_PAD;
-skb_reset_tail_pointer(skb);
-}
-EXPORT_SYMBOL(skb_recycle);
-
-/**
-* skb_recycle_check - check if skb can be reused for receive
-* @skb: buffer
-* @skb_size: minimum receive buffer size
-*
-* Checks that the skb passed in is not shared or cloned, and
-* that it is linear and its head portion at least as large as
-* skb_size so that it can be recycled as a receive buffer.
-* If these conditions are met, this function does any necessary
-* reference count dropping and cleans up the skbuff as if it
-* just came from __alloc_skb().
-*/
-bool skb_recycle_check(struct sk_buff *skb, int skb_size)
-{
-if (!skb_is_recycleable(skb, skb_size))
-return false;
-
-skb_recycle(skb);
-
-return true;
-}
-EXPORT_SYMBOL(skb_recycle_check);
-
 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
 new->tstamp = old->tstamp;
diff --git a/net/core/utils.c b/net/core/utils.c
index f5613d569c23..e3487e461939 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -107,6 +107,18 @@ static inline int xdigit2bin(char c, int delim)
 return IN6PTON_UNKNOWN;
 }
 
+/**
+* in4_pton - convert an IPv4 address from literal to binary representation
+* @src: the start of the IPv4 address string
+* @srclen: the length of the string, -1 means strlen(src)
+* @dst: the binary (u8[4] array) representation of the IPv4 address
+* @delim: the delimiter of the IPv4 address in @src, -1 means no delimiter
+* @end: A pointer to the end of the parsed string will be placed here
+*
+* Return one on success, return zero when any error occurs
+* and @end will point to the end of the parsed string.
+*
+*/
 int in4_pton(const char *src, int srclen,
 u8 *dst,
 int delim, const char **end)
@@ -161,6 +173,18 @@ out:
 }
 EXPORT_SYMBOL(in4_pton);
 
+/**
+* in6_pton - convert an IPv6 address from literal to binary representation
+* @src: the start of the IPv6 address string
+* @srclen: the length of the string, -1 means strlen(src)
+* @dst: the binary (u8[16] array) representation of the IPv6 address
+* @delim: the delimiter of the IPv6 address in @src, -1 means no delimiter
+* @end: A pointer to the end of the parsed string will be placed here
+*
+* Return one on success, return zero when any error occurs
+* and @end will point to the end of the parsed string.
+*
+*/
 int in6_pton(const char *src, int srclen,
 u8 *dst,
 int delim, const char **end)
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index e65f2c856e06..faf7cc3483fe 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -220,7 +220,7 @@ static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops)
 dn_rt_cache_flush(-1);
 }
 
-static const struct fib_rules_ops __net_initdata dn_fib_rules_ops_template = {
+static const struct fib_rules_ops __net_initconst dn_fib_rules_ops_template = {
 .family = AF_DECnet,
 .rule_size = sizeof(struct dn_fib_rule),
 .addr_size = sizeof(u16),
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 9807945a56d9..8aa4b1115384 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -59,13 +59,13 @@ const struct cred *dns_resolver_cache;
 * "ip1,ip2,...#foo=bar"
 */
 static int
-dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
+dns_resolver_instantiate(struct key *key, struct key_preparsed_payload *prep)
 {
 struct user_key_payload *upayload;
 unsigned long derrno;
 int ret;
-size_t result_len = 0;
-const char *data = _data, *end, *opt;
+size_t datalen = prep->datalen, result_len = 0;
+const char *data = prep->data, *end, *opt;
 
 kenter("%%%d,%s,'%*.*s',%zu",
 key->serial, key->description,
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 68c93d1bb03a..825c608826de 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -322,7 +322,8 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
 {
 int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);
 
-if (!r && !fib_num_tclassid_users(dev_net(dev))) {
+if (!r && !fib_num_tclassid_users(dev_net(dev)) &&
+(dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) {
 *itag = 0;
 return 0;
 }
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 274309d3aded..26aa65d1fce4 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -262,7 +262,7 @@ static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
 rt_cache_flush(ops->fro_net);
 }
 
-static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = {
+static const struct fib_rules_ops __net_initconst fib4_rules_ops_template = {
 .family = AF_INET,
 .rule_size = sizeof(struct fib4_rule),
 .addr_size = sizeof(u32),
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 3509065e409a..71b125cd5db1 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -314,6 +314,7 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi)
 nfi->fib_scope == fi->fib_scope &&
 nfi->fib_prefsrc == fi->fib_prefsrc &&
 nfi->fib_priority == fi->fib_priority &&
+nfi->fib_type == fi->fib_type &&
 memcmp(nfi->fib_metrics, fi->fib_metrics,
 sizeof(u32) * RTAX_MAX) == 0 &&
 ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
@@ -833,11 +834,14 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 fi->fib_flags = cfg->fc_flags;
 fi->fib_priority = cfg->fc_priority;
 fi->fib_prefsrc = cfg->fc_prefsrc;
+fi->fib_type = cfg->fc_type;
 
 fi->fib_nhs = nhs;
 change_nexthops(fi) {
 nexthop_nh->nh_parent = fi;
 nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
+if (!nexthop_nh->nh_pcpu_rth_output)
+goto failure;
 } endfor_nexthops(fi)
 
 if (cfg->fc_mx) {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index f0c5b9c1a957..d34ce2972c8f 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -406,7 +406,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk, | |||
406 | rt = ip_route_output_flow(net, fl4, sk); | 406 | rt = ip_route_output_flow(net, fl4, sk); |
407 | if (IS_ERR(rt)) | 407 | if (IS_ERR(rt)) |
408 | goto no_route; | 408 | goto no_route; |
409 | if (opt && opt->opt.is_strictroute && rt->rt_gateway) | 409 | if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) |
410 | goto route_err; | 410 | goto route_err; |
411 | return &rt->dst; | 411 | return &rt->dst; |
412 | 412 | ||
@@ -442,7 +442,7 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk, | |||
442 | rt = ip_route_output_flow(net, fl4, sk); | 442 | rt = ip_route_output_flow(net, fl4, sk); |
443 | if (IS_ERR(rt)) | 443 | if (IS_ERR(rt)) |
444 | goto no_route; | 444 | goto no_route; |
445 | if (opt && opt->opt.is_strictroute && rt->rt_gateway) | 445 | if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) |
446 | goto route_err; | 446 | goto route_err; |
447 | rcu_read_unlock(); | 447 | rcu_read_unlock(); |
448 | return &rt->dst; | 448 | return &rt->dst; |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index ab09b126423c..694de3b7aebf 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -85,7 +85,7 @@ int ip_forward(struct sk_buff *skb) | |||
85 | 85 | ||
86 | rt = skb_rtable(skb); | 86 | rt = skb_rtable(skb); |
87 | 87 | ||
88 | if (opt->is_strictroute && opt->nexthop != rt->rt_gateway) | 88 | if (opt->is_strictroute && rt->rt_uses_gateway) |
89 | goto sr_failed; | 89 | goto sr_failed; |
90 | 90 | ||
91 | if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) && | 91 | if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) && |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 24a29a39e9a8..6537a408a4fb 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -193,7 +193,7 @@ static inline int ip_finish_output2(struct sk_buff *skb) | |||
193 | } | 193 | } |
194 | 194 | ||
195 | rcu_read_lock_bh(); | 195 | rcu_read_lock_bh(); |
196 | nexthop = rt->rt_gateway ? rt->rt_gateway : ip_hdr(skb)->daddr; | 196 | nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr); |
197 | neigh = __ipv4_neigh_lookup_noref(dev, nexthop); | 197 | neigh = __ipv4_neigh_lookup_noref(dev, nexthop); |
198 | if (unlikely(!neigh)) | 198 | if (unlikely(!neigh)) |
199 | neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); | 199 | neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); |
@@ -371,7 +371,7 @@ int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl) | |||
371 | skb_dst_set_noref(skb, &rt->dst); | 371 | skb_dst_set_noref(skb, &rt->dst); |
372 | 372 | ||
373 | packet_routed: | 373 | packet_routed: |
374 | if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_gateway) | 374 | if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway) |
375 | goto no_route; | 375 | goto no_route; |
376 | 376 | ||
377 | /* OK, we know where to send it, allocate and build IP header. */ | 377 | /* OK, we know where to send it, allocate and build IP header. */ |
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 978bca4818ae..1831092f999f 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c | |||
@@ -374,7 +374,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
374 | 374 | ||
375 | memset(&fl4, 0, sizeof(fl4)); | 375 | memset(&fl4, 0, sizeof(fl4)); |
376 | flowi4_init_output(&fl4, tunnel->parms.link, | 376 | flowi4_init_output(&fl4, tunnel->parms.link, |
377 | htonl(tunnel->parms.i_key), RT_TOS(tos), | 377 | be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos), |
378 | RT_SCOPE_UNIVERSE, | 378 | RT_SCOPE_UNIVERSE, |
379 | IPPROTO_IPIP, 0, | 379 | IPPROTO_IPIP, 0, |
380 | dst, tiph->saddr, 0, 0); | 380 | dst, tiph->saddr, 0, 0); |
@@ -441,7 +441,7 @@ static int vti_tunnel_bind_dev(struct net_device *dev) | |||
441 | struct flowi4 fl4; | 441 | struct flowi4 fl4; |
442 | memset(&fl4, 0, sizeof(fl4)); | 442 | memset(&fl4, 0, sizeof(fl4)); |
443 | flowi4_init_output(&fl4, tunnel->parms.link, | 443 | flowi4_init_output(&fl4, tunnel->parms.link, |
444 | htonl(tunnel->parms.i_key), | 444 | be32_to_cpu(tunnel->parms.i_key), |
445 | RT_TOS(iph->tos), RT_SCOPE_UNIVERSE, | 445 | RT_TOS(iph->tos), RT_SCOPE_UNIVERSE, |
446 | IPPROTO_IPIP, 0, | 446 | IPPROTO_IPIP, 0, |
447 | iph->daddr, iph->saddr, 0, 0); | 447 | iph->daddr, iph->saddr, 0, 0); |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 1daa95c2a0ba..6168c4dc58b1 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -221,7 +221,7 @@ static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, | |||
221 | return 0; | 221 | return 0; |
222 | } | 222 | } |
223 | 223 | ||
224 | static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = { | 224 | static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = { |
225 | .family = RTNL_FAMILY_IPMR, | 225 | .family = RTNL_FAMILY_IPMR, |
226 | .rule_size = sizeof(struct ipmr_rule), | 226 | .rule_size = sizeof(struct ipmr_rule), |
227 | .addr_size = sizeof(u32), | 227 | .addr_size = sizeof(u32), |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ff622069fcef..432f4bb77238 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -802,7 +802,8 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
802 | net = dev_net(rt->dst.dev); | 802 | net = dev_net(rt->dst.dev); |
803 | peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1); | 803 | peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1); |
804 | if (!peer) { | 804 | if (!peer) { |
805 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway); | 805 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, |
806 | rt_nexthop(rt, ip_hdr(skb)->daddr)); | ||
806 | return; | 807 | return; |
807 | } | 808 | } |
808 | 809 | ||
@@ -827,7 +828,9 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
827 | time_after(jiffies, | 828 | time_after(jiffies, |
828 | (peer->rate_last + | 829 | (peer->rate_last + |
829 | (ip_rt_redirect_load << peer->rate_tokens)))) { | 830 | (ip_rt_redirect_load << peer->rate_tokens)))) { |
830 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway); | 831 | __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr); |
832 | |||
833 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); | ||
831 | peer->rate_last = jiffies; | 834 | peer->rate_last = jiffies; |
832 | ++peer->rate_tokens; | 835 | ++peer->rate_tokens; |
833 | #ifdef CONFIG_IP_ROUTE_VERBOSE | 836 | #ifdef CONFIG_IP_ROUTE_VERBOSE |
@@ -835,7 +838,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
835 | peer->rate_tokens == ip_rt_redirect_number) | 838 | peer->rate_tokens == ip_rt_redirect_number) |
836 | net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n", | 839 | net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n", |
837 | &ip_hdr(skb)->saddr, inet_iif(skb), | 840 | &ip_hdr(skb)->saddr, inet_iif(skb), |
838 | &ip_hdr(skb)->daddr, &rt->rt_gateway); | 841 | &ip_hdr(skb)->daddr, &gw); |
839 | #endif | 842 | #endif |
840 | } | 843 | } |
841 | out_put_peer: | 844 | out_put_peer: |
@@ -904,22 +907,32 @@ out: kfree_skb(skb); | |||
904 | return 0; | 907 | return 0; |
905 | } | 908 | } |
906 | 909 | ||
907 | static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) | 910 | static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) |
908 | { | 911 | { |
912 | struct dst_entry *dst = &rt->dst; | ||
909 | struct fib_result res; | 913 | struct fib_result res; |
910 | 914 | ||
915 | if (dst->dev->mtu < mtu) | ||
916 | return; | ||
917 | |||
911 | if (mtu < ip_rt_min_pmtu) | 918 | if (mtu < ip_rt_min_pmtu) |
912 | mtu = ip_rt_min_pmtu; | 919 | mtu = ip_rt_min_pmtu; |
913 | 920 | ||
921 | if (!rt->rt_pmtu) { | ||
922 | dst->obsolete = DST_OBSOLETE_KILL; | ||
923 | } else { | ||
924 | rt->rt_pmtu = mtu; | ||
925 | dst->expires = max(1UL, jiffies + ip_rt_mtu_expires); | ||
926 | } | ||
927 | |||
914 | rcu_read_lock(); | 928 | rcu_read_lock(); |
915 | if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) { | 929 | if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) { |
916 | struct fib_nh *nh = &FIB_RES_NH(res); | 930 | struct fib_nh *nh = &FIB_RES_NH(res); |
917 | 931 | ||
918 | update_or_create_fnhe(nh, fl4->daddr, 0, mtu, | 932 | update_or_create_fnhe(nh, fl4->daddr, 0, mtu, |
919 | jiffies + ip_rt_mtu_expires); | 933 | jiffies + ip_rt_mtu_expires); |
920 | } | 934 | } |
921 | rcu_read_unlock(); | 935 | rcu_read_unlock(); |
922 | return mtu; | ||
923 | } | 936 | } |
924 | 937 | ||
925 | static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, | 938 | static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, |
@@ -929,14 +942,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, | |||
929 | struct flowi4 fl4; | 942 | struct flowi4 fl4; |
930 | 943 | ||
931 | ip_rt_build_flow_key(&fl4, sk, skb); | 944 | ip_rt_build_flow_key(&fl4, sk, skb); |
932 | mtu = __ip_rt_update_pmtu(rt, &fl4, mtu); | 945 | __ip_rt_update_pmtu(rt, &fl4, mtu); |
933 | |||
934 | if (!rt->rt_pmtu) { | ||
935 | dst->obsolete = DST_OBSOLETE_KILL; | ||
936 | } else { | ||
937 | rt->rt_pmtu = mtu; | ||
938 | rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires); | ||
939 | } | ||
940 | } | 946 | } |
941 | 947 | ||
942 | void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, | 948 | void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, |
@@ -1120,7 +1126,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) | |||
1120 | mtu = dst->dev->mtu; | 1126 | mtu = dst->dev->mtu; |
1121 | 1127 | ||
1122 | if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { | 1128 | if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { |
1123 | if (rt->rt_gateway && mtu > 576) | 1129 | if (rt->rt_uses_gateway && mtu > 576) |
1124 | mtu = 576; | 1130 | mtu = 576; |
1125 | } | 1131 | } |
1126 | 1132 | ||
@@ -1171,7 +1177,9 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe, | |||
1171 | if (fnhe->fnhe_gw) { | 1177 | if (fnhe->fnhe_gw) { |
1172 | rt->rt_flags |= RTCF_REDIRECTED; | 1178 | rt->rt_flags |= RTCF_REDIRECTED; |
1173 | rt->rt_gateway = fnhe->fnhe_gw; | 1179 | rt->rt_gateway = fnhe->fnhe_gw; |
1174 | } | 1180 | rt->rt_uses_gateway = 1; |
1181 | } else if (!rt->rt_gateway) | ||
1182 | rt->rt_gateway = daddr; | ||
1175 | 1183 | ||
1176 | orig = rcu_dereference(fnhe->fnhe_rth); | 1184 | orig = rcu_dereference(fnhe->fnhe_rth); |
1177 | rcu_assign_pointer(fnhe->fnhe_rth, rt); | 1185 | rcu_assign_pointer(fnhe->fnhe_rth, rt); |
@@ -1180,13 +1188,6 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe, | |||
1180 | 1188 | ||
1181 | fnhe->fnhe_stamp = jiffies; | 1189 | fnhe->fnhe_stamp = jiffies; |
1182 | ret = true; | 1190 | ret = true; |
1183 | } else { | ||
1184 | /* Routes we intend to cache in nexthop exception have | ||
1185 | * the DST_NOCACHE bit clear. However, if we are | ||
1186 | * unsuccessful at storing this route into the cache | ||
1187 | * we really need to set it. | ||
1188 | */ | ||
1189 | rt->dst.flags |= DST_NOCACHE; | ||
1190 | } | 1191 | } |
1191 | spin_unlock_bh(&fnhe_lock); | 1192 | spin_unlock_bh(&fnhe_lock); |
1192 | 1193 | ||
@@ -1201,8 +1202,6 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt) | |||
1201 | if (rt_is_input_route(rt)) { | 1202 | if (rt_is_input_route(rt)) { |
1202 | p = (struct rtable **)&nh->nh_rth_input; | 1203 | p = (struct rtable **)&nh->nh_rth_input; |
1203 | } else { | 1204 | } else { |
1204 | if (!nh->nh_pcpu_rth_output) | ||
1205 | goto nocache; | ||
1206 | p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output); | 1205 | p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output); |
1207 | } | 1206 | } |
1208 | orig = *p; | 1207 | orig = *p; |
@@ -1211,16 +1210,8 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt) | |||
1211 | if (prev == orig) { | 1210 | if (prev == orig) { |
1212 | if (orig) | 1211 | if (orig) |
1213 | rt_free(orig); | 1212 | rt_free(orig); |
1214 | } else { | 1213 | } else |
1215 | /* Routes we intend to cache in the FIB nexthop have | ||
1216 | * the DST_NOCACHE bit clear. However, if we are | ||
1217 | * unsuccessful at storing this route into the cache | ||
1218 | * we really need to set it. | ||
1219 | */ | ||
1220 | nocache: | ||
1221 | rt->dst.flags |= DST_NOCACHE; | ||
1222 | ret = false; | 1214 | ret = false; |
1223 | } | ||
1224 | 1215 | ||
1225 | return ret; | 1216 | return ret; |
1226 | } | 1217 | } |
@@ -1281,8 +1272,10 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr, | |||
1281 | if (fi) { | 1272 | if (fi) { |
1282 | struct fib_nh *nh = &FIB_RES_NH(*res); | 1273 | struct fib_nh *nh = &FIB_RES_NH(*res); |
1283 | 1274 | ||
1284 | if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) | 1275 | if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) { |
1285 | rt->rt_gateway = nh->nh_gw; | 1276 | rt->rt_gateway = nh->nh_gw; |
1277 | rt->rt_uses_gateway = 1; | ||
1278 | } | ||
1286 | dst_init_metrics(&rt->dst, fi->fib_metrics, true); | 1279 | dst_init_metrics(&rt->dst, fi->fib_metrics, true); |
1287 | #ifdef CONFIG_IP_ROUTE_CLASSID | 1280 | #ifdef CONFIG_IP_ROUTE_CLASSID |
1288 | rt->dst.tclassid = nh->nh_tclassid; | 1281 | rt->dst.tclassid = nh->nh_tclassid; |
@@ -1291,8 +1284,18 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr, | |||
1291 | cached = rt_bind_exception(rt, fnhe, daddr); | 1284 | cached = rt_bind_exception(rt, fnhe, daddr); |
1292 | else if (!(rt->dst.flags & DST_NOCACHE)) | 1285 | else if (!(rt->dst.flags & DST_NOCACHE)) |
1293 | cached = rt_cache_route(nh, rt); | 1286 | cached = rt_cache_route(nh, rt); |
1294 | } | 1287 | if (unlikely(!cached)) { |
1295 | if (unlikely(!cached)) | 1288 | /* Routes we intend to cache in nexthop exception or |
1289 | * FIB nexthop have the DST_NOCACHE bit clear. | ||
1290 | * However, if we are unsuccessful at storing this | ||
1291 | * route into the cache we really need to set it. | ||
1292 | */ | ||
1293 | rt->dst.flags |= DST_NOCACHE; | ||
1294 | if (!rt->rt_gateway) | ||
1295 | rt->rt_gateway = daddr; | ||
1296 | rt_add_uncached_list(rt); | ||
1297 | } | ||
1298 | } else | ||
1296 | rt_add_uncached_list(rt); | 1299 | rt_add_uncached_list(rt); |
1297 | 1300 | ||
1298 | #ifdef CONFIG_IP_ROUTE_CLASSID | 1301 | #ifdef CONFIG_IP_ROUTE_CLASSID |
@@ -1360,6 +1363,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1360 | rth->rt_iif = 0; | 1363 | rth->rt_iif = 0; |
1361 | rth->rt_pmtu = 0; | 1364 | rth->rt_pmtu = 0; |
1362 | rth->rt_gateway = 0; | 1365 | rth->rt_gateway = 0; |
1366 | rth->rt_uses_gateway = 0; | ||
1363 | INIT_LIST_HEAD(&rth->rt_uncached); | 1367 | INIT_LIST_HEAD(&rth->rt_uncached); |
1364 | if (our) { | 1368 | if (our) { |
1365 | rth->dst.input= ip_local_deliver; | 1369 | rth->dst.input= ip_local_deliver; |
@@ -1429,7 +1433,6 @@ static int __mkroute_input(struct sk_buff *skb, | |||
1429 | return -EINVAL; | 1433 | return -EINVAL; |
1430 | } | 1434 | } |
1431 | 1435 | ||
1432 | |||
1433 | err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res), | 1436 | err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res), |
1434 | in_dev->dev, in_dev, &itag); | 1437 | in_dev->dev, in_dev, &itag); |
1435 | if (err < 0) { | 1438 | if (err < 0) { |
@@ -1439,10 +1442,13 @@ static int __mkroute_input(struct sk_buff *skb, | |||
1439 | goto cleanup; | 1442 | goto cleanup; |
1440 | } | 1443 | } |
1441 | 1444 | ||
1442 | if (out_dev == in_dev && err && | 1445 | do_cache = res->fi && !itag; |
1446 | if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) && | ||
1443 | (IN_DEV_SHARED_MEDIA(out_dev) || | 1447 | (IN_DEV_SHARED_MEDIA(out_dev) || |
1444 | inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) | 1448 | inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) { |
1445 | flags |= RTCF_DOREDIRECT; | 1449 | flags |= RTCF_DOREDIRECT; |
1450 | do_cache = false; | ||
1451 | } | ||
1446 | 1452 | ||
1447 | if (skb->protocol != htons(ETH_P_IP)) { | 1453 | if (skb->protocol != htons(ETH_P_IP)) { |
1448 | /* Not IP (i.e. ARP). Do not create route, if it is | 1454 | /* Not IP (i.e. ARP). Do not create route, if it is |
@@ -1459,15 +1465,11 @@ static int __mkroute_input(struct sk_buff *skb, | |||
1459 | } | 1465 | } |
1460 | } | 1466 | } |
1461 | 1467 | ||
1462 | do_cache = false; | 1468 | if (do_cache) { |
1463 | if (res->fi) { | 1469 | rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); |
1464 | if (!itag) { | 1470 | if (rt_cache_valid(rth)) { |
1465 | rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); | 1471 | skb_dst_set_noref(skb, &rth->dst); |
1466 | if (rt_cache_valid(rth)) { | 1472 | goto out; |
1467 | skb_dst_set_noref(skb, &rth->dst); | ||
1468 | goto out; | ||
1469 | } | ||
1470 | do_cache = true; | ||
1471 | } | 1473 | } |
1472 | } | 1474 | } |
1473 | 1475 | ||
@@ -1486,6 +1488,7 @@ static int __mkroute_input(struct sk_buff *skb, | |||
1486 | rth->rt_iif = 0; | 1488 | rth->rt_iif = 0; |
1487 | rth->rt_pmtu = 0; | 1489 | rth->rt_pmtu = 0; |
1488 | rth->rt_gateway = 0; | 1490 | rth->rt_gateway = 0; |
1491 | rth->rt_uses_gateway = 0; | ||
1489 | INIT_LIST_HEAD(&rth->rt_uncached); | 1492 | INIT_LIST_HEAD(&rth->rt_uncached); |
1490 | 1493 | ||
1491 | rth->dst.input = ip_forward; | 1494 | rth->dst.input = ip_forward; |
@@ -1656,6 +1659,7 @@ local_input: | |||
1656 | rth->rt_iif = 0; | 1659 | rth->rt_iif = 0; |
1657 | rth->rt_pmtu = 0; | 1660 | rth->rt_pmtu = 0; |
1658 | rth->rt_gateway = 0; | 1661 | rth->rt_gateway = 0; |
1662 | rth->rt_uses_gateway = 0; | ||
1659 | INIT_LIST_HEAD(&rth->rt_uncached); | 1663 | INIT_LIST_HEAD(&rth->rt_uncached); |
1660 | if (res.type == RTN_UNREACHABLE) { | 1664 | if (res.type == RTN_UNREACHABLE) { |
1661 | rth->dst.input= ip_error; | 1665 | rth->dst.input= ip_error; |
@@ -1758,6 +1762,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res, | |||
1758 | struct in_device *in_dev; | 1762 | struct in_device *in_dev; |
1759 | u16 type = res->type; | 1763 | u16 type = res->type; |
1760 | struct rtable *rth; | 1764 | struct rtable *rth; |
1765 | bool do_cache; | ||
1761 | 1766 | ||
1762 | in_dev = __in_dev_get_rcu(dev_out); | 1767 | in_dev = __in_dev_get_rcu(dev_out); |
1763 | if (!in_dev) | 1768 | if (!in_dev) |
@@ -1794,24 +1799,36 @@ static struct rtable *__mkroute_output(const struct fib_result *res, | |||
1794 | } | 1799 | } |
1795 | 1800 | ||
1796 | fnhe = NULL; | 1801 | fnhe = NULL; |
1802 | do_cache = fi != NULL; | ||
1797 | if (fi) { | 1803 | if (fi) { |
1798 | struct rtable __rcu **prth; | 1804 | struct rtable __rcu **prth; |
1805 | struct fib_nh *nh = &FIB_RES_NH(*res); | ||
1799 | 1806 | ||
1800 | fnhe = find_exception(&FIB_RES_NH(*res), fl4->daddr); | 1807 | fnhe = find_exception(nh, fl4->daddr); |
1801 | if (fnhe) | 1808 | if (fnhe) |
1802 | prth = &fnhe->fnhe_rth; | 1809 | prth = &fnhe->fnhe_rth; |
1803 | else | 1810 | else { |
1804 | prth = __this_cpu_ptr(FIB_RES_NH(*res).nh_pcpu_rth_output); | 1811 | if (unlikely(fl4->flowi4_flags & |
1812 | FLOWI_FLAG_KNOWN_NH && | ||
1813 | !(nh->nh_gw && | ||
1814 | nh->nh_scope == RT_SCOPE_LINK))) { | ||
1815 | do_cache = false; | ||
1816 | goto add; | ||
1817 | } | ||
1818 | prth = __this_cpu_ptr(nh->nh_pcpu_rth_output); | ||
1819 | } | ||
1805 | rth = rcu_dereference(*prth); | 1820 | rth = rcu_dereference(*prth); |
1806 | if (rt_cache_valid(rth)) { | 1821 | if (rt_cache_valid(rth)) { |
1807 | dst_hold(&rth->dst); | 1822 | dst_hold(&rth->dst); |
1808 | return rth; | 1823 | return rth; |
1809 | } | 1824 | } |
1810 | } | 1825 | } |
1826 | |||
1827 | add: | ||
1811 | rth = rt_dst_alloc(dev_out, | 1828 | rth = rt_dst_alloc(dev_out, |
1812 | IN_DEV_CONF_GET(in_dev, NOPOLICY), | 1829 | IN_DEV_CONF_GET(in_dev, NOPOLICY), |
1813 | IN_DEV_CONF_GET(in_dev, NOXFRM), | 1830 | IN_DEV_CONF_GET(in_dev, NOXFRM), |
1814 | fi); | 1831 | do_cache); |
1815 | if (!rth) | 1832 | if (!rth) |
1816 | return ERR_PTR(-ENOBUFS); | 1833 | return ERR_PTR(-ENOBUFS); |
1817 | 1834 | ||
@@ -1824,6 +1841,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res, | |||
1824 | rth->rt_iif = orig_oif ? : 0; | 1841 | rth->rt_iif = orig_oif ? : 0; |
1825 | rth->rt_pmtu = 0; | 1842 | rth->rt_pmtu = 0; |
1826 | rth->rt_gateway = 0; | 1843 | rth->rt_gateway = 0; |
1844 | rth->rt_uses_gateway = 0; | ||
1827 | INIT_LIST_HEAD(&rth->rt_uncached); | 1845 | INIT_LIST_HEAD(&rth->rt_uncached); |
1828 | 1846 | ||
1829 | RT_CACHE_STAT_INC(out_slow_tot); | 1847 | RT_CACHE_STAT_INC(out_slow_tot); |
@@ -2102,6 +2120,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or | |||
2102 | rt->rt_flags = ort->rt_flags; | 2120 | rt->rt_flags = ort->rt_flags; |
2103 | rt->rt_type = ort->rt_type; | 2121 | rt->rt_type = ort->rt_type; |
2104 | rt->rt_gateway = ort->rt_gateway; | 2122 | rt->rt_gateway = ort->rt_gateway; |
2123 | rt->rt_uses_gateway = ort->rt_uses_gateway; | ||
2105 | 2124 | ||
2106 | INIT_LIST_HEAD(&rt->rt_uncached); | 2125 | INIT_LIST_HEAD(&rt->rt_uncached); |
2107 | 2126 | ||
@@ -2180,28 +2199,31 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, | |||
2180 | if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr)) | 2199 | if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr)) |
2181 | goto nla_put_failure; | 2200 | goto nla_put_failure; |
2182 | } | 2201 | } |
2183 | if (rt->rt_gateway && | 2202 | if (rt->rt_uses_gateway && |
2184 | nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway)) | 2203 | nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway)) |
2185 | goto nla_put_failure; | 2204 | goto nla_put_failure; |
2186 | 2205 | ||
2206 | expires = rt->dst.expires; | ||
2207 | if (expires) { | ||
2208 | unsigned long now = jiffies; | ||
2209 | |||
2210 | if (time_before(now, expires)) | ||
2211 | expires -= now; | ||
2212 | else | ||
2213 | expires = 0; | ||
2214 | } | ||
2215 | |||
2187 | memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); | 2216 | memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); |
2188 | if (rt->rt_pmtu) | 2217 | if (rt->rt_pmtu && expires) |
2189 | metrics[RTAX_MTU - 1] = rt->rt_pmtu; | 2218 | metrics[RTAX_MTU - 1] = rt->rt_pmtu; |
2190 | if (rtnetlink_put_metrics(skb, metrics) < 0) | 2219 | if (rtnetlink_put_metrics(skb, metrics) < 0) |
2191 | goto nla_put_failure; | 2220 | goto nla_put_failure; |
2192 | 2221 | ||
2193 | if (fl4->flowi4_mark && | 2222 | if (fl4->flowi4_mark && |
2194 | nla_put_be32(skb, RTA_MARK, fl4->flowi4_mark)) | 2223 | nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark)) |
2195 | goto nla_put_failure; | 2224 | goto nla_put_failure; |
2196 | 2225 | ||
2197 | error = rt->dst.error; | 2226 | error = rt->dst.error; |
2198 | expires = rt->dst.expires; | ||
2199 | if (expires) { | ||
2200 | if (time_before(jiffies, expires)) | ||
2201 | expires -= jiffies; | ||
2202 | else | ||
2203 | expires = 0; | ||
2204 | } | ||
2205 | 2227 | ||
2206 | if (rt_is_input_route(rt)) { | 2228 | if (rt_is_input_route(rt)) { |
2207 | if (nla_put_u32(skb, RTA_IIF, rt->rt_iif)) | 2229 | if (nla_put_u32(skb, RTA_IIF, rt->rt_iif)) |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 9205e492dc9d..63d4eccc674d 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -248,6 +248,8 @@ int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer, | |||
248 | ctxt = rcu_dereference(tcp_fastopen_ctx); | 248 | ctxt = rcu_dereference(tcp_fastopen_ctx); |
249 | if (ctxt) | 249 | if (ctxt) |
250 | memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); | 250 | memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); |
251 | else | ||
252 | memset(user_key, 0, sizeof(user_key)); | ||
251 | rcu_read_unlock(); | 253 | rcu_read_unlock(); |
252 | 254 | ||
253 | snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x", | 255 | snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x", |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 75735c9a6a9d..ef998b008a57 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -708,10 +708,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) | |||
708 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; | 708 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; |
709 | arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; | 709 | arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; |
710 | /* When socket is gone, all binding information is lost. | 710 | /* When socket is gone, all binding information is lost. |
711 | * routing might fail in this case. using iif for oif to | 711 | * routing might fail in this case. No choice here, if we choose to force |
712 | * make sure we can deliver it | 712 | * input interface, we will misroute in case of asymmetric route. |
713 | */ | 713 | */ |
714 | arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb); | 714 | if (sk) |
715 | arg.bound_dev_if = sk->sk_bound_dev_if; | ||
715 | 716 | ||
716 | net = dev_net(skb_dst(skb)->dev); | 717 | net = dev_net(skb_dst(skb)->dev); |
717 | arg.tos = ip_hdr(skb)->tos; | 718 | arg.tos = ip_hdr(skb)->tos; |
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 681ea2f413e2..05c5ab8d983c 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -91,6 +91,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, | |||
91 | RTCF_LOCAL); | 91 | RTCF_LOCAL); |
92 | xdst->u.rt.rt_type = rt->rt_type; | 92 | xdst->u.rt.rt_type = rt->rt_type; |
93 | xdst->u.rt.rt_gateway = rt->rt_gateway; | 93 | xdst->u.rt.rt_gateway = rt->rt_gateway; |
94 | xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway; | ||
94 | xdst->u.rt.rt_pmtu = rt->rt_pmtu; | 95 | xdst->u.rt.rt_pmtu = rt->rt_pmtu; |
95 | INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); | 96 | INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); |
96 | 97 | ||
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 480e68422efb..d7c56f8a5b4e 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1769,14 +1769,6 @@ static void sit_route_add(struct net_device *dev) | |||
1769 | } | 1769 | } |
1770 | #endif | 1770 | #endif |
1771 | 1771 | ||
1772 | static void addrconf_add_lroute(struct net_device *dev) | ||
1773 | { | ||
1774 | struct in6_addr addr; | ||
1775 | |||
1776 | ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); | ||
1777 | addrconf_prefix_route(&addr, 64, dev, 0, 0); | ||
1778 | } | ||
1779 | |||
1780 | static struct inet6_dev *addrconf_add_dev(struct net_device *dev) | 1772 | static struct inet6_dev *addrconf_add_dev(struct net_device *dev) |
1781 | { | 1773 | { |
1782 | struct inet6_dev *idev; | 1774 | struct inet6_dev *idev; |
@@ -1794,8 +1786,6 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev) | |||
1794 | if (!(dev->flags & IFF_LOOPBACK)) | 1786 | if (!(dev->flags & IFF_LOOPBACK)) |
1795 | addrconf_add_mroute(dev); | 1787 | addrconf_add_mroute(dev); |
1796 | 1788 | ||
1797 | /* Add link local route */ | ||
1798 | addrconf_add_lroute(dev); | ||
1799 | return idev; | 1789 | return idev; |
1800 | } | 1790 | } |
1801 | 1791 | ||
@@ -2474,10 +2464,9 @@ static void addrconf_sit_config(struct net_device *dev) | |||
2474 | 2464 | ||
2475 | sit_add_v4_addrs(idev); | 2465 | sit_add_v4_addrs(idev); |
2476 | 2466 | ||
2477 | if (dev->flags&IFF_POINTOPOINT) { | 2467 | if (dev->flags&IFF_POINTOPOINT) |
2478 | addrconf_add_mroute(dev); | 2468 | addrconf_add_mroute(dev); |
2479 | addrconf_add_lroute(dev); | 2469 | else |
2480 | } else | ||
2481 | sit_route_add(dev); | 2470 | sit_route_add(dev); |
2482 | } | 2471 | } |
2483 | #endif | 2472 | #endif |
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index 4be23da32b89..ff76eecfd622 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c | |||
@@ -79,7 +79,7 @@ struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl) | |||
79 | 79 | ||
80 | #define IPV6_ADDR_LABEL_DEFAULT 0xffffffffUL | 80 | #define IPV6_ADDR_LABEL_DEFAULT 0xffffffffUL |
81 | 81 | ||
82 | static const __net_initdata struct ip6addrlbl_init_table | 82 | static const __net_initconst struct ip6addrlbl_init_table |
83 | { | 83 | { |
84 | const struct in6_addr *prefix; | 84 | const struct in6_addr *prefix; |
85 | int prefixlen; | 85 | int prefixlen; |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index e22e6d88bac6..a974247a9ae4 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -822,13 +822,6 @@ out: | |||
822 | return segs; | 822 | return segs; |
823 | } | 823 | } |
824 | 824 | ||
825 | struct ipv6_gro_cb { | ||
826 | struct napi_gro_cb napi; | ||
827 | int proto; | ||
828 | }; | ||
829 | |||
830 | #define IPV6_GRO_CB(skb) ((struct ipv6_gro_cb *)(skb)->cb) | ||
831 | |||
832 | static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, | 825 | static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, |
833 | struct sk_buff *skb) | 826 | struct sk_buff *skb) |
834 | { | 827 | { |
@@ -874,28 +867,31 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, | |||
874 | iph = ipv6_hdr(skb); | 867 | iph = ipv6_hdr(skb); |
875 | } | 868 | } |
876 | 869 | ||
877 | IPV6_GRO_CB(skb)->proto = proto; | 870 | NAPI_GRO_CB(skb)->proto = proto; |
878 | 871 | ||
879 | flush--; | 872 | flush--; |
880 | nlen = skb_network_header_len(skb); | 873 | nlen = skb_network_header_len(skb); |
881 | 874 | ||
882 | for (p = *head; p; p = p->next) { | 875 | for (p = *head; p; p = p->next) { |
883 | struct ipv6hdr *iph2; | 876 | const struct ipv6hdr *iph2; |
877 | __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */ | ||
884 | 878 | ||
885 | if (!NAPI_GRO_CB(p)->same_flow) | 879 | if (!NAPI_GRO_CB(p)->same_flow) |
886 | continue; | 880 | continue; |
887 | 881 | ||
888 | iph2 = ipv6_hdr(p); | 882 | iph2 = ipv6_hdr(p); |
883 | first_word = *(__be32 *)iph ^ *(__be32 *)iph2 ; | ||
889 | 884 | ||
890 | /* All fields must match except length. */ | 885 | /* All fields must match except length and Traffic Class. */ |
891 | if (nlen != skb_network_header_len(p) || | 886 | if (nlen != skb_network_header_len(p) || |
892 | memcmp(iph, iph2, offsetof(struct ipv6hdr, payload_len)) || | 887 | (first_word & htonl(0xF00FFFFF)) || |
893 | memcmp(&iph->nexthdr, &iph2->nexthdr, | 888 | memcmp(&iph->nexthdr, &iph2->nexthdr, |
894 | nlen - offsetof(struct ipv6hdr, nexthdr))) { | 889 | nlen - offsetof(struct ipv6hdr, nexthdr))) { |
895 | NAPI_GRO_CB(p)->same_flow = 0; | 890 | NAPI_GRO_CB(p)->same_flow = 0; |
896 | continue; | 891 | continue; |
897 | } | 892 | } |
898 | 893 | /* flush if Traffic Class fields are different */ | |
894 | NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000)); | ||
899 | NAPI_GRO_CB(p)->flush |= flush; | 895 | NAPI_GRO_CB(p)->flush |= flush; |
900 | } | 896 | } |
901 | 897 | ||
@@ -927,7 +923,7 @@ static int ipv6_gro_complete(struct sk_buff *skb) | |||
927 | sizeof(*iph)); | 923 | sizeof(*iph)); |
928 | 924 | ||
929 | rcu_read_lock(); | 925 | rcu_read_lock(); |
930 | ops = rcu_dereference(inet6_protos[IPV6_GRO_CB(skb)->proto]); | 926 | ops = rcu_dereference(inet6_protos[NAPI_GRO_CB(skb)->proto]); |
931 | if (WARN_ON(!ops || !ops->gro_complete)) | 927 | if (WARN_ON(!ops || !ops->gro_complete)) |
932 | goto out_unlock; | 928 | goto out_unlock; |
933 | 929 | ||
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 0ff1cfd55bc4..d9fb9110f607 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c | |||
@@ -238,7 +238,7 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule) | |||
238 | + nla_total_size(16); /* src */ | 238 | + nla_total_size(16); /* src */ |
239 | } | 239 | } |
240 | 240 | ||
241 | static const struct fib_rules_ops __net_initdata fib6_rules_ops_template = { | 241 | static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = { |
242 | .family = AF_INET6, | 242 | .family = AF_INET6, |
243 | .rule_size = sizeof(struct fib6_rule), | 243 | .rule_size = sizeof(struct fib6_rule), |
244 | .addr_size = sizeof(struct in6_addr), | 244 | .addr_size = sizeof(struct in6_addr), |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 08ea3f0b6e55..f7c7c6319720 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -205,7 +205,7 @@ static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, | |||
205 | return 0; | 205 | return 0; |
206 | } | 206 | } |
207 | 207 | ||
208 | static const struct fib_rules_ops __net_initdata ip6mr_rules_ops_template = { | 208 | static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = { |
209 | .family = RTNL_FAMILY_IP6MR, | 209 | .family = RTNL_FAMILY_IP6MR, |
210 | .rule_size = sizeof(struct ip6mr_rule), | 210 | .rule_size = sizeof(struct ip6mr_rule), |
211 | .addr_size = sizeof(struct in6_addr), | 211 | .addr_size = sizeof(struct in6_addr), |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index d1ddbc6ddac5..7c7e963260e1 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1593,17 +1593,18 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) | |||
1593 | struct fib6_table *table; | 1593 | struct fib6_table *table; |
1594 | struct net *net = dev_net(rt->dst.dev); | 1594 | struct net *net = dev_net(rt->dst.dev); |
1595 | 1595 | ||
1596 | if (rt == net->ipv6.ip6_null_entry) | 1596 | if (rt == net->ipv6.ip6_null_entry) { |
1597 | return -ENOENT; | 1597 | err = -ENOENT; |
1598 | goto out; | ||
1599 | } | ||
1598 | 1600 | ||
1599 | table = rt->rt6i_table; | 1601 | table = rt->rt6i_table; |
1600 | write_lock_bh(&table->tb6_lock); | 1602 | write_lock_bh(&table->tb6_lock); |
1601 | |||
1602 | err = fib6_del(rt, info); | 1603 | err = fib6_del(rt, info); |
1603 | dst_release(&rt->dst); | ||
1604 | |||
1605 | write_unlock_bh(&table->tb6_lock); | 1604 | write_unlock_bh(&table->tb6_lock); |
1606 | 1605 | ||
1606 | out: | ||
1607 | dst_release(&rt->dst); | ||
1607 | return err; | 1608 | return err; |
1608 | } | 1609 | } |
1609 | 1610 | ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 49c890386ce9..26175bffbaa0 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -877,7 +877,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | |||
877 | __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); | 877 | __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); |
878 | 878 | ||
879 | fl6.flowi6_proto = IPPROTO_TCP; | 879 | fl6.flowi6_proto = IPPROTO_TCP; |
880 | fl6.flowi6_oif = inet6_iif(skb); | 880 | if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) |
881 | fl6.flowi6_oif = inet6_iif(skb); | ||
881 | fl6.fl6_dport = t1->dest; | 882 | fl6.fl6_dport = t1->dest; |
882 | fl6.fl6_sport = t1->source; | 883 | fl6.fl6_sport = t1->source; |
883 | security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); | 884 | security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); |
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index bb738c9f9146..b833677d83d6 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
@@ -468,7 +468,7 @@ static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name) | |||
468 | notify_t notify; | 468 | notify_t notify; |
469 | 469 | ||
470 | if (self->tsap) { | 470 | if (self->tsap) { |
471 | IRDA_WARNING("%s: busy!\n", __func__); | 471 | IRDA_DEBUG(0, "%s: busy!\n", __func__); |
472 | return -EBUSY; | 472 | return -EBUSY; |
473 | } | 473 | } |
474 | 474 | ||
diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 5c93f2952b08..1002e3396f72 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c | |||
@@ -440,7 +440,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify) | |||
440 | */ | 440 | */ |
441 | lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0); | 441 | lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0); |
442 | if (lsap == NULL) { | 442 | if (lsap == NULL) { |
443 | IRDA_WARNING("%s: unable to allocate LSAP!!\n", __func__); | 443 | IRDA_DEBUG(0, "%s: unable to allocate LSAP!!\n", __func__); |
444 | return NULL; | 444 | return NULL; |
445 | } | 445 | } |
446 | 446 | ||
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c index accfa00ffcdf..a16b7b4b1e02 100644 --- a/net/mac80211/mesh_sync.c +++ b/net/mac80211/mesh_sync.c | |||
@@ -56,7 +56,6 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata) | |||
56 | u64 tsfdelta; | 56 | u64 tsfdelta; |
57 | 57 | ||
58 | spin_lock_bh(&ifmsh->sync_offset_lock); | 58 | spin_lock_bh(&ifmsh->sync_offset_lock); |
59 | |||
60 | if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) { | 59 | if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) { |
61 | msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting\n", | 60 | msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting\n", |
62 | (long long) ifmsh->sync_offset_clockdrift_max); | 61 | (long long) ifmsh->sync_offset_clockdrift_max); |
@@ -69,11 +68,11 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata) | |||
69 | tsfdelta = -beacon_int_fraction; | 68 | tsfdelta = -beacon_int_fraction; |
70 | ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction; | 69 | ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction; |
71 | } | 70 | } |
71 | spin_unlock_bh(&ifmsh->sync_offset_lock); | ||
72 | 72 | ||
73 | tsf = drv_get_tsf(local, sdata); | 73 | tsf = drv_get_tsf(local, sdata); |
74 | if (tsf != -1ULL) | 74 | if (tsf != -1ULL) |
75 | drv_set_tsf(local, sdata, tsf + tsfdelta); | 75 | drv_set_tsf(local, sdata, tsf + tsfdelta); |
76 | spin_unlock_bh(&ifmsh->sync_offset_lock); | ||
77 | } | 76 | } |
78 | 77 | ||
79 | static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, | 78 | static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, |
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 2ce89732d0f2..3af0cc4130f1 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
@@ -34,7 +34,7 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, | |||
34 | skb_queue_len(&local->skb_queue_unreliable); | 34 | skb_queue_len(&local->skb_queue_unreliable); |
35 | while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT && | 35 | while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT && |
36 | (skb = skb_dequeue(&local->skb_queue_unreliable))) { | 36 | (skb = skb_dequeue(&local->skb_queue_unreliable))) { |
37 | dev_kfree_skb_irq(skb); | 37 | ieee80211_free_txskb(hw, skb); |
38 | tmp--; | 38 | tmp--; |
39 | I802_DEBUG_INC(local->tx_status_drop); | 39 | I802_DEBUG_INC(local->tx_status_drop); |
40 | } | 40 | } |
@@ -159,7 +159,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | |||
159 | "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n", | 159 | "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n", |
160 | skb_queue_len(&sta->tx_filtered[ac]), | 160 | skb_queue_len(&sta->tx_filtered[ac]), |
161 | !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies); | 161 | !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies); |
162 | dev_kfree_skb(skb); | 162 | ieee80211_free_txskb(&local->hw, skb); |
163 | } | 163 | } |
164 | 164 | ||
165 | static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid) | 165 | static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid) |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index e0e0d1d0e830..c9bf83f36657 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -354,7 +354,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local) | |||
354 | total += skb_queue_len(&sta->ps_tx_buf[ac]); | 354 | total += skb_queue_len(&sta->ps_tx_buf[ac]); |
355 | if (skb) { | 355 | if (skb) { |
356 | purged++; | 356 | purged++; |
357 | dev_kfree_skb(skb); | 357 | ieee80211_free_txskb(&local->hw, skb); |
358 | break; | 358 | break; |
359 | } | 359 | } |
360 | } | 360 | } |
@@ -466,7 +466,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
466 | ps_dbg(tx->sdata, | 466 | ps_dbg(tx->sdata, |
467 | "STA %pM TX buffer for AC %d full - dropping oldest frame\n", | 467 | "STA %pM TX buffer for AC %d full - dropping oldest frame\n", |
468 | sta->sta.addr, ac); | 468 | sta->sta.addr, ac); |
469 | dev_kfree_skb(old); | 469 | ieee80211_free_txskb(&local->hw, old); |
470 | } else | 470 | } else |
471 | tx->local->total_ps_buffered++; | 471 | tx->local->total_ps_buffered++; |
472 | 472 | ||
@@ -1103,7 +1103,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, | |||
1103 | spin_unlock(&tx->sta->lock); | 1103 | spin_unlock(&tx->sta->lock); |
1104 | 1104 | ||
1105 | if (purge_skb) | 1105 | if (purge_skb) |
1106 | dev_kfree_skb(purge_skb); | 1106 | ieee80211_free_txskb(&tx->local->hw, purge_skb); |
1107 | } | 1107 | } |
1108 | 1108 | ||
1109 | /* reset session timer */ | 1109 | /* reset session timer */ |
@@ -1214,7 +1214,7 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local, | |||
1214 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 1214 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
1215 | if (WARN_ON_ONCE(q >= local->hw.queues)) { | 1215 | if (WARN_ON_ONCE(q >= local->hw.queues)) { |
1216 | __skb_unlink(skb, skbs); | 1216 | __skb_unlink(skb, skbs); |
1217 | dev_kfree_skb(skb); | 1217 | ieee80211_free_txskb(&local->hw, skb); |
1218 | continue; | 1218 | continue; |
1219 | } | 1219 | } |
1220 | #endif | 1220 | #endif |
@@ -1356,7 +1356,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) | |||
1356 | if (unlikely(res == TX_DROP)) { | 1356 | if (unlikely(res == TX_DROP)) { |
1357 | I802_DEBUG_INC(tx->local->tx_handlers_drop); | 1357 | I802_DEBUG_INC(tx->local->tx_handlers_drop); |
1358 | if (tx->skb) | 1358 | if (tx->skb) |
1359 | dev_kfree_skb(tx->skb); | 1359 | ieee80211_free_txskb(&tx->local->hw, tx->skb); |
1360 | else | 1360 | else |
1361 | __skb_queue_purge(&tx->skbs); | 1361 | __skb_queue_purge(&tx->skbs); |
1362 | return -1; | 1362 | return -1; |
@@ -1393,7 +1393,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, | |||
1393 | res_prepare = ieee80211_tx_prepare(sdata, &tx, skb); | 1393 | res_prepare = ieee80211_tx_prepare(sdata, &tx, skb); |
1394 | 1394 | ||
1395 | if (unlikely(res_prepare == TX_DROP)) { | 1395 | if (unlikely(res_prepare == TX_DROP)) { |
1396 | dev_kfree_skb(skb); | 1396 | ieee80211_free_txskb(&local->hw, skb); |
1397 | goto out; | 1397 | goto out; |
1398 | } else if (unlikely(res_prepare == TX_QUEUED)) { | 1398 | } else if (unlikely(res_prepare == TX_QUEUED)) { |
1399 | goto out; | 1399 | goto out; |
@@ -1465,7 +1465,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | |||
1465 | headroom = max_t(int, 0, headroom); | 1465 | headroom = max_t(int, 0, headroom); |
1466 | 1466 | ||
1467 | if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) { | 1467 | if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) { |
1468 | dev_kfree_skb(skb); | 1468 | ieee80211_free_txskb(&local->hw, skb); |
1469 | rcu_read_unlock(); | 1469 | rcu_read_unlock(); |
1470 | return; | 1470 | return; |
1471 | } | 1471 | } |
@@ -2050,8 +2050,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
2050 | head_need += IEEE80211_ENCRYPT_HEADROOM; | 2050 | head_need += IEEE80211_ENCRYPT_HEADROOM; |
2051 | head_need += local->tx_headroom; | 2051 | head_need += local->tx_headroom; |
2052 | head_need = max_t(int, 0, head_need); | 2052 | head_need = max_t(int, 0, head_need); |
2053 | if (ieee80211_skb_resize(sdata, skb, head_need, true)) | 2053 | if (ieee80211_skb_resize(sdata, skb, head_need, true)) { |
2054 | goto fail; | 2054 | ieee80211_free_txskb(&local->hw, skb); |
2055 | return NETDEV_TX_OK; | ||
2056 | } | ||
2055 | } | 2057 | } |
2056 | 2058 | ||
2057 | if (encaps_data) { | 2059 | if (encaps_data) { |
@@ -2184,7 +2186,7 @@ void ieee80211_tx_pending(unsigned long data) | |||
2184 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 2186 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
2185 | 2187 | ||
2186 | if (WARN_ON(!info->control.vif)) { | 2188 | if (WARN_ON(!info->control.vif)) { |
2187 | kfree_skb(skb); | 2189 | ieee80211_free_txskb(&local->hw, skb); |
2188 | continue; | 2190 | continue; |
2189 | } | 2191 | } |
2190 | 2192 | ||
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 56f6d5d81a77..cc4c8095681a 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
@@ -50,6 +50,7 @@ enum { | |||
50 | * local | 50 | * local |
51 | */ | 51 | */ |
52 | IP_VS_RT_MODE_CONNECT = 8, /* Always bind route to saddr */ | 52 | IP_VS_RT_MODE_CONNECT = 8, /* Always bind route to saddr */ |
53 | IP_VS_RT_MODE_KNOWN_NH = 16,/* Route via remote addr */ | ||
53 | }; | 54 | }; |
54 | 55 | ||
55 | /* | 56 | /* |
@@ -113,6 +114,8 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr, | |||
113 | fl4.daddr = daddr; | 114 | fl4.daddr = daddr; |
114 | fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0; | 115 | fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0; |
115 | fl4.flowi4_tos = rtos; | 116 | fl4.flowi4_tos = rtos; |
117 | fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ? | ||
118 | FLOWI_FLAG_KNOWN_NH : 0; | ||
116 | 119 | ||
117 | retry: | 120 | retry: |
118 | rt = ip_route_output_key(net, &fl4); | 121 | rt = ip_route_output_key(net, &fl4); |
@@ -1061,7 +1064,8 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
1061 | if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, | 1064 | if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, |
1062 | RT_TOS(iph->tos), | 1065 | RT_TOS(iph->tos), |
1063 | IP_VS_RT_MODE_LOCAL | | 1066 | IP_VS_RT_MODE_LOCAL | |
1064 | IP_VS_RT_MODE_NON_LOCAL, NULL))) | 1067 | IP_VS_RT_MODE_NON_LOCAL | |
1068 | IP_VS_RT_MODE_KNOWN_NH, NULL))) | ||
1065 | goto tx_error_icmp; | 1069 | goto tx_error_icmp; |
1066 | if (rt->rt_flags & RTCF_LOCAL) { | 1070 | if (rt->rt_flags & RTCF_LOCAL) { |
1067 | ip_rt_put(rt); | 1071 | ip_rt_put(rt); |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 0f2e3ad69c47..01e944a017a4 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -169,6 +169,8 @@ static void netlink_sock_destruct(struct sock *sk) | |||
169 | if (nlk->cb) { | 169 | if (nlk->cb) { |
170 | if (nlk->cb->done) | 170 | if (nlk->cb->done) |
171 | nlk->cb->done(nlk->cb); | 171 | nlk->cb->done(nlk->cb); |
172 | |||
173 | module_put(nlk->cb->module); | ||
172 | netlink_destroy_callback(nlk->cb); | 174 | netlink_destroy_callback(nlk->cb); |
173 | } | 175 | } |
174 | 176 | ||
@@ -1758,6 +1760,7 @@ static int netlink_dump(struct sock *sk) | |||
1758 | nlk->cb = NULL; | 1760 | nlk->cb = NULL; |
1759 | mutex_unlock(nlk->cb_mutex); | 1761 | mutex_unlock(nlk->cb_mutex); |
1760 | 1762 | ||
1763 | module_put(cb->module); | ||
1761 | netlink_consume_callback(cb); | 1764 | netlink_consume_callback(cb); |
1762 | return 0; | 1765 | return 0; |
1763 | 1766 | ||
@@ -1767,9 +1770,9 @@ errout_skb: | |||
1767 | return err; | 1770 | return err; |
1768 | } | 1771 | } |
1769 | 1772 | ||
1770 | int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | 1773 | int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, |
1771 | const struct nlmsghdr *nlh, | 1774 | const struct nlmsghdr *nlh, |
1772 | struct netlink_dump_control *control) | 1775 | struct netlink_dump_control *control) |
1773 | { | 1776 | { |
1774 | struct netlink_callback *cb; | 1777 | struct netlink_callback *cb; |
1775 | struct sock *sk; | 1778 | struct sock *sk; |
@@ -1784,6 +1787,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
1784 | cb->done = control->done; | 1787 | cb->done = control->done; |
1785 | cb->nlh = nlh; | 1788 | cb->nlh = nlh; |
1786 | cb->data = control->data; | 1789 | cb->data = control->data; |
1790 | cb->module = control->module; | ||
1787 | cb->min_dump_alloc = control->min_dump_alloc; | 1791 | cb->min_dump_alloc = control->min_dump_alloc; |
1788 | atomic_inc(&skb->users); | 1792 | atomic_inc(&skb->users); |
1789 | cb->skb = skb; | 1793 | cb->skb = skb; |
@@ -1794,19 +1798,28 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
1794 | return -ECONNREFUSED; | 1798 | return -ECONNREFUSED; |
1795 | } | 1799 | } |
1796 | nlk = nlk_sk(sk); | 1800 | nlk = nlk_sk(sk); |
1797 | /* A dump is in progress... */ | 1801 | |
1798 | mutex_lock(nlk->cb_mutex); | 1802 | mutex_lock(nlk->cb_mutex); |
1803 | /* A dump is in progress... */ | ||
1799 | if (nlk->cb) { | 1804 | if (nlk->cb) { |
1800 | mutex_unlock(nlk->cb_mutex); | 1805 | mutex_unlock(nlk->cb_mutex); |
1801 | netlink_destroy_callback(cb); | 1806 | netlink_destroy_callback(cb); |
1802 | sock_put(sk); | 1807 | ret = -EBUSY; |
1803 | return -EBUSY; | 1808 | goto out; |
1804 | } | 1809 | } |
1810 | /* add reference of module which cb->dump belongs to */ | ||
1811 | if (!try_module_get(cb->module)) { | ||
1812 | mutex_unlock(nlk->cb_mutex); | ||
1813 | netlink_destroy_callback(cb); | ||
1814 | ret = -EPROTONOSUPPORT; | ||
1815 | goto out; | ||
1816 | } | ||
1817 | |||
1805 | nlk->cb = cb; | 1818 | nlk->cb = cb; |
1806 | mutex_unlock(nlk->cb_mutex); | 1819 | mutex_unlock(nlk->cb_mutex); |
1807 | 1820 | ||
1808 | ret = netlink_dump(sk); | 1821 | ret = netlink_dump(sk); |
1809 | 1822 | out: | |
1810 | sock_put(sk); | 1823 | sock_put(sk); |
1811 | 1824 | ||
1812 | if (ret) | 1825 | if (ret) |
@@ -1817,7 +1830,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
1817 | */ | 1830 | */ |
1818 | return -EINTR; | 1831 | return -EINTR; |
1819 | } | 1832 | } |
1820 | EXPORT_SYMBOL(netlink_dump_start); | 1833 | EXPORT_SYMBOL(__netlink_dump_start); |
1821 | 1834 | ||
1822 | void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) | 1835 | void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) |
1823 | { | 1836 | { |
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c index 40f056debf9a..63e4cdc92376 100644 --- a/net/nfc/llcp/sock.c +++ b/net/nfc/llcp/sock.c | |||
@@ -497,15 +497,11 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, | |||
497 | pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags); | 497 | pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags); |
498 | 498 | ||
499 | if (!addr || len < sizeof(struct sockaddr_nfc) || | 499 | if (!addr || len < sizeof(struct sockaddr_nfc) || |
500 | addr->sa_family != AF_NFC) { | 500 | addr->sa_family != AF_NFC) |
501 | pr_err("Invalid socket\n"); | ||
502 | return -EINVAL; | 501 | return -EINVAL; |
503 | } | ||
504 | 502 | ||
505 | if (addr->service_name_len == 0 && addr->dsap == 0) { | 503 | if (addr->service_name_len == 0 && addr->dsap == 0) |
506 | pr_err("Missing service name or dsap\n"); | ||
507 | return -EINVAL; | 504 | return -EINVAL; |
508 | } | ||
509 | 505 | ||
510 | pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx, | 506 | pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx, |
511 | addr->target_idx, addr->nfc_protocol); | 507 | addr->target_idx, addr->nfc_protocol); |
diff --git a/net/rds/send.c b/net/rds/send.c index 96531d4033a2..88eace57dd6b 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
@@ -1122,7 +1122,7 @@ rds_send_pong(struct rds_connection *conn, __be16 dport) | |||
1122 | rds_stats_inc(s_send_pong); | 1122 | rds_stats_inc(s_send_pong); |
1123 | 1123 | ||
1124 | if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags)) | 1124 | if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags)) |
1125 | rds_send_xmit(conn); | 1125 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); |
1126 | 1126 | ||
1127 | rds_message_put(rm); | 1127 | rds_message_put(rm); |
1128 | return 0; | 1128 | return 0; |
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index 011d2384b115..7633a752c65e 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c | |||
@@ -26,8 +26,8 @@ | |||
26 | #include "ar-internal.h" | 26 | #include "ar-internal.h" |
27 | 27 | ||
28 | static int rxrpc_vet_description_s(const char *); | 28 | static int rxrpc_vet_description_s(const char *); |
29 | static int rxrpc_instantiate(struct key *, const void *, size_t); | 29 | static int rxrpc_instantiate(struct key *, struct key_preparsed_payload *); |
30 | static int rxrpc_instantiate_s(struct key *, const void *, size_t); | 30 | static int rxrpc_instantiate_s(struct key *, struct key_preparsed_payload *); |
31 | static void rxrpc_destroy(struct key *); | 31 | static void rxrpc_destroy(struct key *); |
32 | static void rxrpc_destroy_s(struct key *); | 32 | static void rxrpc_destroy_s(struct key *); |
33 | static void rxrpc_describe(const struct key *, struct seq_file *); | 33 | static void rxrpc_describe(const struct key *, struct seq_file *); |
@@ -678,7 +678,7 @@ error: | |||
678 | * | 678 | * |
679 | * if no data is provided, then a no-security key is made | 679 | * if no data is provided, then a no-security key is made |
680 | */ | 680 | */ |
681 | static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen) | 681 | static int rxrpc_instantiate(struct key *key, struct key_preparsed_payload *prep) |
682 | { | 682 | { |
683 | const struct rxrpc_key_data_v1 *v1; | 683 | const struct rxrpc_key_data_v1 *v1; |
684 | struct rxrpc_key_token *token, **pp; | 684 | struct rxrpc_key_token *token, **pp; |
@@ -686,26 +686,26 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen) | |||
686 | u32 kver; | 686 | u32 kver; |
687 | int ret; | 687 | int ret; |
688 | 688 | ||
689 | _enter("{%x},,%zu", key_serial(key), datalen); | 689 | _enter("{%x},,%zu", key_serial(key), prep->datalen); |
690 | 690 | ||
691 | /* handle a no-security key */ | 691 | /* handle a no-security key */ |
692 | if (!data && datalen == 0) | 692 | if (!prep->data && prep->datalen == 0) |
693 | return 0; | 693 | return 0; |
694 | 694 | ||
695 | /* determine if the XDR payload format is being used */ | 695 | /* determine if the XDR payload format is being used */ |
696 | if (datalen > 7 * 4) { | 696 | if (prep->datalen > 7 * 4) { |
697 | ret = rxrpc_instantiate_xdr(key, data, datalen); | 697 | ret = rxrpc_instantiate_xdr(key, prep->data, prep->datalen); |
698 | if (ret != -EPROTO) | 698 | if (ret != -EPROTO) |
699 | return ret; | 699 | return ret; |
700 | } | 700 | } |
701 | 701 | ||
702 | /* get the key interface version number */ | 702 | /* get the key interface version number */ |
703 | ret = -EINVAL; | 703 | ret = -EINVAL; |
704 | if (datalen <= 4 || !data) | 704 | if (prep->datalen <= 4 || !prep->data) |
705 | goto error; | 705 | goto error; |
706 | memcpy(&kver, data, sizeof(kver)); | 706 | memcpy(&kver, prep->data, sizeof(kver)); |
707 | data += sizeof(kver); | 707 | prep->data += sizeof(kver); |
708 | datalen -= sizeof(kver); | 708 | prep->datalen -= sizeof(kver); |
709 | 709 | ||
710 | _debug("KEY I/F VERSION: %u", kver); | 710 | _debug("KEY I/F VERSION: %u", kver); |
711 | 711 | ||
@@ -715,11 +715,11 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen) | |||
715 | 715 | ||
716 | /* deal with a version 1 key */ | 716 | /* deal with a version 1 key */ |
717 | ret = -EINVAL; | 717 | ret = -EINVAL; |
718 | if (datalen < sizeof(*v1)) | 718 | if (prep->datalen < sizeof(*v1)) |
719 | goto error; | 719 | goto error; |
720 | 720 | ||
721 | v1 = data; | 721 | v1 = prep->data; |
722 | if (datalen != sizeof(*v1) + v1->ticket_length) | 722 | if (prep->datalen != sizeof(*v1) + v1->ticket_length) |
723 | goto error; | 723 | goto error; |
724 | 724 | ||
725 | _debug("SCIX: %u", v1->security_index); | 725 | _debug("SCIX: %u", v1->security_index); |
@@ -784,17 +784,17 @@ error: | |||
784 | * instantiate a server secret key | 784 | * instantiate a server secret key |
785 | * data should be a pointer to the 8-byte secret key | 785 | * data should be a pointer to the 8-byte secret key |
786 | */ | 786 | */ |
787 | static int rxrpc_instantiate_s(struct key *key, const void *data, | 787 | static int rxrpc_instantiate_s(struct key *key, |
788 | size_t datalen) | 788 | struct key_preparsed_payload *prep) |
789 | { | 789 | { |
790 | struct crypto_blkcipher *ci; | 790 | struct crypto_blkcipher *ci; |
791 | 791 | ||
792 | _enter("{%x},,%zu", key_serial(key), datalen); | 792 | _enter("{%x},,%zu", key_serial(key), prep->datalen); |
793 | 793 | ||
794 | if (datalen != 8) | 794 | if (prep->datalen != 8) |
795 | return -EINVAL; | 795 | return -EINVAL; |
796 | 796 | ||
797 | memcpy(&key->type_data, data, 8); | 797 | memcpy(&key->type_data, prep->data, 8); |
798 | 798 | ||
799 | ci = crypto_alloc_blkcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC); | 799 | ci = crypto_alloc_blkcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC); |
800 | if (IS_ERR(ci)) { | 800 | if (IS_ERR(ci)) { |
@@ -802,7 +802,7 @@ static int rxrpc_instantiate_s(struct key *key, const void *data, | |||
802 | return PTR_ERR(ci); | 802 | return PTR_ERR(ci); |
803 | } | 803 | } |
804 | 804 | ||
805 | if (crypto_blkcipher_setkey(ci, data, 8) < 0) | 805 | if (crypto_blkcipher_setkey(ci, prep->data, 8) < 0) |
806 | BUG(); | 806 | BUG(); |
807 | 807 | ||
808 | key->payload.data = ci; | 808 | key->payload.data = ci; |
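The rxrpc hunks above switch key instantiation from a raw (data, datalen) pair to the fields of a preparsed payload, but the parsing discipline is unchanged: reject short buffers, peel off a 4-byte interface version, then validate the remaining length against the version-1 structure before trusting any field in it. The standalone sketch below is plain user-space C with hypothetical field names, not the kernel code; it only illustrates that bounds-checked walk.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical fixed part of a version-1 key payload. */
struct v1_key {
	uint32_t security_index;
	uint32_t ticket_length;
	/* variable-length ticket bytes follow */
};

/* Returns 0 on success, -1 on a malformed payload. */
static int parse_key_payload(const unsigned char *data, size_t datalen)
{
	uint32_t kver;
	struct v1_key v1;

	/* Need at least the 4-byte interface version. */
	if (!data || datalen <= sizeof(kver))
		return -1;
	memcpy(&kver, data, sizeof(kver));
	data += sizeof(kver);
	datalen -= sizeof(kver);

	if (kver != 1)
		return -1;

	/* The fixed header must fit before we trust any length field in it. */
	if (datalen < sizeof(v1))
		return -1;
	memcpy(&v1, data, sizeof(v1));

	/* The remaining bytes must match the declared ticket length exactly. */
	if (datalen - sizeof(v1) != v1.ticket_length)
		return -1;

	printf("security index %u, ticket %u bytes\n",
	       v1.security_index, v1.ticket_length);
	return 0;
}

int main(void)
{
	unsigned char buf[4 + sizeof(struct v1_key)];
	uint32_t kver = 1;
	struct v1_key v1 = { .security_index = 2, .ticket_length = 0 };

	memcpy(buf, &kver, sizeof(kver));
	memcpy(buf + sizeof(kver), &v1, sizeof(v1));
	return parse_key_payload(buf, sizeof(buf)) ? 1 : 0;
}
```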
diff --git a/net/sctp/input.c b/net/sctp/input.c index 25dfe7380479..8bd3c279427e 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -68,8 +68,8 @@ | |||
68 | static int sctp_rcv_ootb(struct sk_buff *); | 68 | static int sctp_rcv_ootb(struct sk_buff *); |
69 | static struct sctp_association *__sctp_rcv_lookup(struct net *net, | 69 | static struct sctp_association *__sctp_rcv_lookup(struct net *net, |
70 | struct sk_buff *skb, | 70 | struct sk_buff *skb, |
71 | const union sctp_addr *laddr, | ||
72 | const union sctp_addr *paddr, | 71 | const union sctp_addr *paddr, |
72 | const union sctp_addr *laddr, | ||
73 | struct sctp_transport **transportp); | 73 | struct sctp_transport **transportp); |
74 | static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net, | 74 | static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net, |
75 | const union sctp_addr *laddr); | 75 | const union sctp_addr *laddr); |
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index d16632e1503a..1b4a7f8ec3fd 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -63,6 +63,7 @@ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn); | |||
63 | static void sctp_check_transmitted(struct sctp_outq *q, | 63 | static void sctp_check_transmitted(struct sctp_outq *q, |
64 | struct list_head *transmitted_queue, | 64 | struct list_head *transmitted_queue, |
65 | struct sctp_transport *transport, | 65 | struct sctp_transport *transport, |
66 | union sctp_addr *saddr, | ||
66 | struct sctp_sackhdr *sack, | 67 | struct sctp_sackhdr *sack, |
67 | __u32 *highest_new_tsn); | 68 | __u32 *highest_new_tsn); |
68 | 69 | ||
@@ -1139,9 +1140,10 @@ static void sctp_sack_update_unack_data(struct sctp_association *assoc, | |||
1139 | * Process the SACK against the outqueue. Mostly, this just frees | 1140 | * Process the SACK against the outqueue. Mostly, this just frees |
1140 | * things off the transmitted queue. | 1141 | * things off the transmitted queue. |
1141 | */ | 1142 | */ |
1142 | int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) | 1143 | int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) |
1143 | { | 1144 | { |
1144 | struct sctp_association *asoc = q->asoc; | 1145 | struct sctp_association *asoc = q->asoc; |
1146 | struct sctp_sackhdr *sack = chunk->subh.sack_hdr; | ||
1145 | struct sctp_transport *transport; | 1147 | struct sctp_transport *transport; |
1146 | struct sctp_chunk *tchunk = NULL; | 1148 | struct sctp_chunk *tchunk = NULL; |
1147 | struct list_head *lchunk, *transport_list, *temp; | 1149 | struct list_head *lchunk, *transport_list, *temp; |
@@ -1210,7 +1212,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) | |||
1210 | /* Run through the retransmit queue. Credit bytes received | 1212 | /* Run through the retransmit queue. Credit bytes received |
1211 | * and free those chunks that we can. | 1213 | * and free those chunks that we can. |
1212 | */ | 1214 | */ |
1213 | sctp_check_transmitted(q, &q->retransmit, NULL, sack, &highest_new_tsn); | 1215 | sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn); |
1214 | 1216 | ||
1215 | /* Run through the transmitted queue. | 1217 | /* Run through the transmitted queue. |
1216 | * Credit bytes received and free those chunks which we can. | 1218 | * Credit bytes received and free those chunks which we can. |
@@ -1219,7 +1221,8 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) | |||
1219 | */ | 1221 | */ |
1220 | list_for_each_entry(transport, transport_list, transports) { | 1222 | list_for_each_entry(transport, transport_list, transports) { |
1221 | sctp_check_transmitted(q, &transport->transmitted, | 1223 | sctp_check_transmitted(q, &transport->transmitted, |
1222 | transport, sack, &highest_new_tsn); | 1224 | transport, &chunk->source, sack, |
1225 | &highest_new_tsn); | ||
1223 | /* | 1226 | /* |
1224 | * SFR-CACC algorithm: | 1227 | * SFR-CACC algorithm: |
1225 | * C) Let count_of_newacks be the number of | 1228 | * C) Let count_of_newacks be the number of |
@@ -1326,6 +1329,7 @@ int sctp_outq_is_empty(const struct sctp_outq *q) | |||
1326 | static void sctp_check_transmitted(struct sctp_outq *q, | 1329 | static void sctp_check_transmitted(struct sctp_outq *q, |
1327 | struct list_head *transmitted_queue, | 1330 | struct list_head *transmitted_queue, |
1328 | struct sctp_transport *transport, | 1331 | struct sctp_transport *transport, |
1332 | union sctp_addr *saddr, | ||
1329 | struct sctp_sackhdr *sack, | 1333 | struct sctp_sackhdr *sack, |
1330 | __u32 *highest_new_tsn_in_sack) | 1334 | __u32 *highest_new_tsn_in_sack) |
1331 | { | 1335 | { |
@@ -1633,8 +1637,9 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1633 | /* Mark the destination transport address as | 1637 | /* Mark the destination transport address as |
1634 | * active if it is not so marked. | 1638 | * active if it is not so marked. |
1635 | */ | 1639 | */ |
1636 | if ((transport->state == SCTP_INACTIVE) || | 1640 | if ((transport->state == SCTP_INACTIVE || |
1637 | (transport->state == SCTP_UNCONFIRMED)) { | 1641 | transport->state == SCTP_UNCONFIRMED) && |
1642 | sctp_cmp_addr_exact(&transport->ipaddr, saddr)) { | ||
1638 | sctp_assoc_control_transport( | 1643 | sctp_assoc_control_transport( |
1639 | transport->asoc, | 1644 | transport->asoc, |
1640 | transport, | 1645 | transport, |
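The sctp_outq_sack() change above passes the whole SACK chunk down so that sctp_check_transmitted() can compare each transport's address against the SACK's source address and only mark the matching inactive or unconfirmed transport active again, instead of reviving every path. As a rough illustration of that gating condition (standalone C with simplified types, not the kernel structures):

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum transport_state { ACTIVE, INACTIVE, UNCONFIRMED };

struct transport {
	const char *name;
	char addr[16];              /* simplified peer address */
	enum transport_state state;
};

/* Mark a transport active only if it is currently down *and* it is the
 * peer address the SACK was actually received from. */
static bool maybe_reactivate(struct transport *t, const char *sack_saddr)
{
	if ((t->state == INACTIVE || t->state == UNCONFIRMED) &&
	    strcmp(t->addr, sack_saddr) == 0) {
		t->state = ACTIVE;
		return true;
	}
	return false;
}

int main(void)
{
	struct transport paths[2] = {
		{ "path0", "10.0.0.1", INACTIVE },
		{ "path1", "10.0.0.2", INACTIVE },
	};

	/* A SACK arriving from 10.0.0.2 revives only the second path. */
	for (int i = 0; i < 2; i++)
		printf("%s %s\n", paths[i].name,
		       maybe_reactivate(&paths[i], "10.0.0.2") ?
		       "reactivated" : "left alone");
	return 0;
}
```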
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index bcfebb91559d..57f7de839b03 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -752,11 +752,11 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, | |||
752 | /* Helper function to process the process SACK command. */ | 752 | /* Helper function to process the process SACK command. */ |
753 | static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds, | 753 | static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds, |
754 | struct sctp_association *asoc, | 754 | struct sctp_association *asoc, |
755 | struct sctp_sackhdr *sackh) | 755 | struct sctp_chunk *chunk) |
756 | { | 756 | { |
757 | int err = 0; | 757 | int err = 0; |
758 | 758 | ||
759 | if (sctp_outq_sack(&asoc->outqueue, sackh)) { | 759 | if (sctp_outq_sack(&asoc->outqueue, chunk)) { |
760 | struct net *net = sock_net(asoc->base.sk); | 760 | struct net *net = sock_net(asoc->base.sk); |
761 | 761 | ||
762 | /* There are no more TSNs awaiting SACK. */ | 762 | /* There are no more TSNs awaiting SACK. */ |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 094813b6c3c3..b6adef8a1e93 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -3179,7 +3179,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net, | |||
3179 | return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands); | 3179 | return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands); |
3180 | 3180 | ||
3181 | /* Return this SACK for further processing. */ | 3181 | /* Return this SACK for further processing. */ |
3182 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_SACKH(sackh)); | 3182 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_CHUNK(chunk)); |
3183 | 3183 | ||
3184 | /* Note: We do the rest of the work on the PROCESS_SACK | 3184 | /* Note: We do the rest of the work on the PROCESS_SACK |
3185 | * sideeffect. | 3185 | * sideeffect. |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 34c522021004..909dc0c31aab 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -239,7 +239,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct | |||
239 | } | 239 | } |
240 | return q; | 240 | return q; |
241 | err: | 241 | err: |
242 | dprintk("RPC: gss_fill_context returning %ld\n", -PTR_ERR(p)); | 242 | dprintk("RPC: %s returning %ld\n", __func__, -PTR_ERR(p)); |
243 | return p; | 243 | return p; |
244 | } | 244 | } |
245 | 245 | ||
@@ -301,10 +301,10 @@ __gss_find_upcall(struct rpc_pipe *pipe, uid_t uid) | |||
301 | if (pos->uid != uid) | 301 | if (pos->uid != uid) |
302 | continue; | 302 | continue; |
303 | atomic_inc(&pos->count); | 303 | atomic_inc(&pos->count); |
304 | dprintk("RPC: gss_find_upcall found msg %p\n", pos); | 304 | dprintk("RPC: %s found msg %p\n", __func__, pos); |
305 | return pos; | 305 | return pos; |
306 | } | 306 | } |
307 | dprintk("RPC: gss_find_upcall found nothing\n"); | 307 | dprintk("RPC: %s found nothing\n", __func__); |
308 | return NULL; | 308 | return NULL; |
309 | } | 309 | } |
310 | 310 | ||
@@ -507,8 +507,8 @@ gss_refresh_upcall(struct rpc_task *task) | |||
507 | struct rpc_pipe *pipe; | 507 | struct rpc_pipe *pipe; |
508 | int err = 0; | 508 | int err = 0; |
509 | 509 | ||
510 | dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, | 510 | dprintk("RPC: %5u %s for uid %u\n", |
511 | cred->cr_uid); | 511 | task->tk_pid, __func__, cred->cr_uid); |
512 | gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); | 512 | gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); |
513 | if (PTR_ERR(gss_msg) == -EAGAIN) { | 513 | if (PTR_ERR(gss_msg) == -EAGAIN) { |
514 | /* XXX: warning on the first, under the assumption we | 514 | /* XXX: warning on the first, under the assumption we |
@@ -539,8 +539,8 @@ gss_refresh_upcall(struct rpc_task *task) | |||
539 | spin_unlock(&pipe->lock); | 539 | spin_unlock(&pipe->lock); |
540 | gss_release_msg(gss_msg); | 540 | gss_release_msg(gss_msg); |
541 | out: | 541 | out: |
542 | dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n", | 542 | dprintk("RPC: %5u %s for uid %u result %d\n", |
543 | task->tk_pid, cred->cr_uid, err); | 543 | task->tk_pid, __func__, cred->cr_uid, err); |
544 | return err; | 544 | return err; |
545 | } | 545 | } |
546 | 546 | ||
@@ -553,7 +553,7 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) | |||
553 | DEFINE_WAIT(wait); | 553 | DEFINE_WAIT(wait); |
554 | int err = 0; | 554 | int err = 0; |
555 | 555 | ||
556 | dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid); | 556 | dprintk("RPC: %s for uid %u\n", __func__, cred->cr_uid); |
557 | retry: | 557 | retry: |
558 | gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred); | 558 | gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred); |
559 | if (PTR_ERR(gss_msg) == -EAGAIN) { | 559 | if (PTR_ERR(gss_msg) == -EAGAIN) { |
@@ -594,8 +594,8 @@ out_intr: | |||
594 | finish_wait(&gss_msg->waitqueue, &wait); | 594 | finish_wait(&gss_msg->waitqueue, &wait); |
595 | gss_release_msg(gss_msg); | 595 | gss_release_msg(gss_msg); |
596 | out: | 596 | out: |
597 | dprintk("RPC: gss_create_upcall for uid %u result %d\n", | 597 | dprintk("RPC: %s for uid %u result %d\n", |
598 | cred->cr_uid, err); | 598 | __func__, cred->cr_uid, err); |
599 | return err; | 599 | return err; |
600 | } | 600 | } |
601 | 601 | ||
@@ -681,7 +681,7 @@ err_put_ctx: | |||
681 | err: | 681 | err: |
682 | kfree(buf); | 682 | kfree(buf); |
683 | out: | 683 | out: |
684 | dprintk("RPC: gss_pipe_downcall returning %Zd\n", err); | 684 | dprintk("RPC: %s returning %Zd\n", __func__, err); |
685 | return err; | 685 | return err; |
686 | } | 686 | } |
687 | 687 | ||
@@ -747,8 +747,8 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg) | |||
747 | struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg); | 747 | struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg); |
748 | 748 | ||
749 | if (msg->errno < 0) { | 749 | if (msg->errno < 0) { |
750 | dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n", | 750 | dprintk("RPC: %s releasing msg %p\n", |
751 | gss_msg); | 751 | __func__, gss_msg); |
752 | atomic_inc(&gss_msg->count); | 752 | atomic_inc(&gss_msg->count); |
753 | gss_unhash_msg(gss_msg); | 753 | gss_unhash_msg(gss_msg); |
754 | if (msg->errno == -ETIMEDOUT) | 754 | if (msg->errno == -ETIMEDOUT) |
@@ -976,7 +976,7 @@ gss_destroying_context(struct rpc_cred *cred) | |||
976 | static void | 976 | static void |
977 | gss_do_free_ctx(struct gss_cl_ctx *ctx) | 977 | gss_do_free_ctx(struct gss_cl_ctx *ctx) |
978 | { | 978 | { |
979 | dprintk("RPC: gss_free_ctx\n"); | 979 | dprintk("RPC: %s\n", __func__); |
980 | 980 | ||
981 | gss_delete_sec_context(&ctx->gc_gss_ctx); | 981 | gss_delete_sec_context(&ctx->gc_gss_ctx); |
982 | kfree(ctx->gc_wire_ctx.data); | 982 | kfree(ctx->gc_wire_ctx.data); |
@@ -999,7 +999,7 @@ gss_free_ctx(struct gss_cl_ctx *ctx) | |||
999 | static void | 999 | static void |
1000 | gss_free_cred(struct gss_cred *gss_cred) | 1000 | gss_free_cred(struct gss_cred *gss_cred) |
1001 | { | 1001 | { |
1002 | dprintk("RPC: gss_free_cred %p\n", gss_cred); | 1002 | dprintk("RPC: %s cred=%p\n", __func__, gss_cred); |
1003 | kfree(gss_cred); | 1003 | kfree(gss_cred); |
1004 | } | 1004 | } |
1005 | 1005 | ||
@@ -1049,8 +1049,8 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
1049 | struct gss_cred *cred = NULL; | 1049 | struct gss_cred *cred = NULL; |
1050 | int err = -ENOMEM; | 1050 | int err = -ENOMEM; |
1051 | 1051 | ||
1052 | dprintk("RPC: gss_create_cred for uid %d, flavor %d\n", | 1052 | dprintk("RPC: %s for uid %d, flavor %d\n", |
1053 | acred->uid, auth->au_flavor); | 1053 | __func__, acred->uid, auth->au_flavor); |
1054 | 1054 | ||
1055 | if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS))) | 1055 | if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS))) |
1056 | goto out_err; | 1056 | goto out_err; |
@@ -1069,7 +1069,7 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
1069 | return &cred->gc_base; | 1069 | return &cred->gc_base; |
1070 | 1070 | ||
1071 | out_err: | 1071 | out_err: |
1072 | dprintk("RPC: gss_create_cred failed with error %d\n", err); | 1072 | dprintk("RPC: %s failed with error %d\n", __func__, err); |
1073 | return ERR_PTR(err); | 1073 | return ERR_PTR(err); |
1074 | } | 1074 | } |
1075 | 1075 | ||
@@ -1127,7 +1127,7 @@ gss_marshal(struct rpc_task *task, __be32 *p) | |||
1127 | struct kvec iov; | 1127 | struct kvec iov; |
1128 | struct xdr_buf verf_buf; | 1128 | struct xdr_buf verf_buf; |
1129 | 1129 | ||
1130 | dprintk("RPC: %5u gss_marshal\n", task->tk_pid); | 1130 | dprintk("RPC: %5u %s\n", task->tk_pid, __func__); |
1131 | 1131 | ||
1132 | *p++ = htonl(RPC_AUTH_GSS); | 1132 | *p++ = htonl(RPC_AUTH_GSS); |
1133 | cred_len = p++; | 1133 | cred_len = p++; |
@@ -1253,7 +1253,7 @@ gss_validate(struct rpc_task *task, __be32 *p) | |||
1253 | u32 flav,len; | 1253 | u32 flav,len; |
1254 | u32 maj_stat; | 1254 | u32 maj_stat; |
1255 | 1255 | ||
1256 | dprintk("RPC: %5u gss_validate\n", task->tk_pid); | 1256 | dprintk("RPC: %5u %s\n", task->tk_pid, __func__); |
1257 | 1257 | ||
1258 | flav = ntohl(*p++); | 1258 | flav = ntohl(*p++); |
1259 | if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE) | 1259 | if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE) |
@@ -1271,20 +1271,20 @@ gss_validate(struct rpc_task *task, __be32 *p) | |||
1271 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | 1271 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) |
1272 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); | 1272 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
1273 | if (maj_stat) { | 1273 | if (maj_stat) { |
1274 | dprintk("RPC: %5u gss_validate: gss_verify_mic returned " | 1274 | dprintk("RPC: %5u %s: gss_verify_mic returned error 0x%08x\n", |
1275 | "error 0x%08x\n", task->tk_pid, maj_stat); | 1275 | task->tk_pid, __func__, maj_stat); |
1276 | goto out_bad; | 1276 | goto out_bad; |
1277 | } | 1277 | } |
1278 | /* We leave it to unwrap to calculate au_rslack. For now we just | 1278 | /* We leave it to unwrap to calculate au_rslack. For now we just |
1279 | * calculate the length of the verifier: */ | 1279 | * calculate the length of the verifier: */ |
1280 | cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2; | 1280 | cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2; |
1281 | gss_put_ctx(ctx); | 1281 | gss_put_ctx(ctx); |
1282 | dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n", | 1282 | dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n", |
1283 | task->tk_pid); | 1283 | task->tk_pid, __func__); |
1284 | return p + XDR_QUADLEN(len); | 1284 | return p + XDR_QUADLEN(len); |
1285 | out_bad: | 1285 | out_bad: |
1286 | gss_put_ctx(ctx); | 1286 | gss_put_ctx(ctx); |
1287 | dprintk("RPC: %5u gss_validate failed.\n", task->tk_pid); | 1287 | dprintk("RPC: %5u %s failed.\n", task->tk_pid, __func__); |
1288 | return NULL; | 1288 | return NULL; |
1289 | } | 1289 | } |
1290 | 1290 | ||
@@ -1466,7 +1466,7 @@ gss_wrap_req(struct rpc_task *task, | |||
1466 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); | 1466 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); |
1467 | int status = -EIO; | 1467 | int status = -EIO; |
1468 | 1468 | ||
1469 | dprintk("RPC: %5u gss_wrap_req\n", task->tk_pid); | 1469 | dprintk("RPC: %5u %s\n", task->tk_pid, __func__); |
1470 | if (ctx->gc_proc != RPC_GSS_PROC_DATA) { | 1470 | if (ctx->gc_proc != RPC_GSS_PROC_DATA) { |
1471 | /* The spec seems a little ambiguous here, but I think that not | 1471 | /* The spec seems a little ambiguous here, but I think that not |
1472 | * wrapping context destruction requests makes the most sense. | 1472 | * wrapping context destruction requests makes the most sense. |
@@ -1489,7 +1489,7 @@ gss_wrap_req(struct rpc_task *task, | |||
1489 | } | 1489 | } |
1490 | out: | 1490 | out: |
1491 | gss_put_ctx(ctx); | 1491 | gss_put_ctx(ctx); |
1492 | dprintk("RPC: %5u gss_wrap_req returning %d\n", task->tk_pid, status); | 1492 | dprintk("RPC: %5u %s returning %d\n", task->tk_pid, __func__, status); |
1493 | return status; | 1493 | return status; |
1494 | } | 1494 | } |
1495 | 1495 | ||
@@ -1604,8 +1604,8 @@ out_decode: | |||
1604 | status = gss_unwrap_req_decode(decode, rqstp, p, obj); | 1604 | status = gss_unwrap_req_decode(decode, rqstp, p, obj); |
1605 | out: | 1605 | out: |
1606 | gss_put_ctx(ctx); | 1606 | gss_put_ctx(ctx); |
1607 | dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, | 1607 | dprintk("RPC: %5u %s returning %d\n", |
1608 | status); | 1608 | task->tk_pid, __func__, status); |
1609 | return status; | 1609 | return status; |
1610 | } | 1610 | } |
1611 | 1611 | ||
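Every auth_gss change in this block is the same mechanical substitution: hard-coded function names inside dprintk() format strings are replaced with the compiler-provided __func__, so the messages stay accurate if a function is ever renamed. A minimal user-space equivalent of that logging pattern (illustrative macro name only):

```c
#include <stdio.h>

/* Debug printf that always prefixes the current function name.
 * The GNU ##__VA_ARGS__ form keeps it usable with no extra arguments. */
#define dbg(fmt, ...) \
	fprintf(stderr, "RPC: %s " fmt "\n", __func__, ##__VA_ARGS__)

static int refresh_upcall(unsigned int uid)
{
	int err = 0;

	dbg("for uid %u", uid);
	/* ... do the actual upcall work here ... */
	dbg("for uid %u result %d", uid, err);
	return err;
}

int main(void)
{
	return refresh_upcall(1000);
}
```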
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index fa48c60aef23..cdc7564b4512 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -490,61 +490,86 @@ EXPORT_SYMBOL_GPL(rpc_create); | |||
490 | * same transport while varying parameters such as the authentication | 490 | * same transport while varying parameters such as the authentication |
491 | * flavour. | 491 | * flavour. |
492 | */ | 492 | */ |
493 | struct rpc_clnt * | 493 | static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, |
494 | rpc_clone_client(struct rpc_clnt *clnt) | 494 | struct rpc_clnt *clnt) |
495 | { | 495 | { |
496 | struct rpc_clnt *new; | ||
497 | struct rpc_xprt *xprt; | 496 | struct rpc_xprt *xprt; |
498 | int err = -ENOMEM; | 497 | struct rpc_clnt *new; |
498 | int err; | ||
499 | 499 | ||
500 | new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); | 500 | err = -ENOMEM; |
501 | if (!new) | ||
502 | goto out_no_clnt; | ||
503 | new->cl_parent = clnt; | ||
504 | /* Turn off autobind on clones */ | ||
505 | new->cl_autobind = 0; | ||
506 | INIT_LIST_HEAD(&new->cl_tasks); | ||
507 | spin_lock_init(&new->cl_lock); | ||
508 | rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval); | ||
509 | new->cl_metrics = rpc_alloc_iostats(clnt); | ||
510 | if (new->cl_metrics == NULL) | ||
511 | goto out_no_stats; | ||
512 | if (clnt->cl_principal) { | ||
513 | new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL); | ||
514 | if (new->cl_principal == NULL) | ||
515 | goto out_no_principal; | ||
516 | } | ||
517 | rcu_read_lock(); | 501 | rcu_read_lock(); |
518 | xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); | 502 | xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); |
519 | rcu_read_unlock(); | 503 | rcu_read_unlock(); |
520 | if (xprt == NULL) | 504 | if (xprt == NULL) |
521 | goto out_no_transport; | 505 | goto out_err; |
522 | rcu_assign_pointer(new->cl_xprt, xprt); | 506 | args->servername = xprt->servername; |
523 | atomic_set(&new->cl_count, 1); | 507 | |
524 | err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); | 508 | new = rpc_new_client(args, xprt); |
525 | if (err != 0) | 509 | if (IS_ERR(new)) { |
526 | goto out_no_path; | 510 | err = PTR_ERR(new); |
527 | rpc_clnt_set_nodename(new, utsname()->nodename); | 511 | goto out_put; |
528 | if (new->cl_auth) | 512 | } |
529 | atomic_inc(&new->cl_auth->au_count); | 513 | |
530 | atomic_inc(&clnt->cl_count); | 514 | atomic_inc(&clnt->cl_count); |
531 | rpc_register_client(new); | 515 | new->cl_parent = clnt; |
532 | rpciod_up(); | 516 | |
517 | /* Turn off autobind on clones */ | ||
518 | new->cl_autobind = 0; | ||
519 | new->cl_softrtry = clnt->cl_softrtry; | ||
520 | new->cl_discrtry = clnt->cl_discrtry; | ||
521 | new->cl_chatty = clnt->cl_chatty; | ||
533 | return new; | 522 | return new; |
534 | out_no_path: | 523 | |
524 | out_put: | ||
535 | xprt_put(xprt); | 525 | xprt_put(xprt); |
536 | out_no_transport: | 526 | out_err: |
537 | kfree(new->cl_principal); | ||
538 | out_no_principal: | ||
539 | rpc_free_iostats(new->cl_metrics); | ||
540 | out_no_stats: | ||
541 | kfree(new); | ||
542 | out_no_clnt: | ||
543 | dprintk("RPC: %s: returned error %d\n", __func__, err); | 527 | dprintk("RPC: %s: returned error %d\n", __func__, err); |
544 | return ERR_PTR(err); | 528 | return ERR_PTR(err); |
545 | } | 529 | } |
530 | |||
531 | /** | ||
532 | * rpc_clone_client - Clone an RPC client structure | ||
533 | * | ||
534 | * @clnt: RPC client whose parameters are copied | ||
535 | * | ||
536 | * Returns a fresh RPC client or an ERR_PTR. | ||
537 | */ | ||
538 | struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt) | ||
539 | { | ||
540 | struct rpc_create_args args = { | ||
541 | .program = clnt->cl_program, | ||
542 | .prognumber = clnt->cl_prog, | ||
543 | .version = clnt->cl_vers, | ||
544 | .authflavor = clnt->cl_auth->au_flavor, | ||
545 | .client_name = clnt->cl_principal, | ||
546 | }; | ||
547 | return __rpc_clone_client(&args, clnt); | ||
548 | } | ||
546 | EXPORT_SYMBOL_GPL(rpc_clone_client); | 549 | EXPORT_SYMBOL_GPL(rpc_clone_client); |
547 | 550 | ||
551 | /** | ||
552 | * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth | ||
553 | * | ||
554 | * @clnt: RPC client whose parameters are copied | ||
555 | * @auth: security flavor for new client | ||
556 | * | ||
557 | * Returns a fresh RPC client or an ERR_PTR. | ||
558 | */ | ||
559 | struct rpc_clnt * | ||
560 | rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor) | ||
561 | { | ||
562 | struct rpc_create_args args = { | ||
563 | .program = clnt->cl_program, | ||
564 | .prognumber = clnt->cl_prog, | ||
565 | .version = clnt->cl_vers, | ||
566 | .authflavor = flavor, | ||
567 | .client_name = clnt->cl_principal, | ||
568 | }; | ||
569 | return __rpc_clone_client(&args, clnt); | ||
570 | } | ||
571 | EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth); | ||
572 | |||
548 | /* | 573 | /* |
549 | * Kill all tasks for the given client. | 574 | * Kill all tasks for the given client. |
550 | * XXX: kill their descendants as well? | 575 | * XXX: kill their descendants as well? |
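After this refactoring, rpc_clone_client() and the new rpc_clone_client_set_auth() are thin wrappers that fill an rpc_create_args block and hand it to a single __rpc_clone_client() helper, rather than duplicating the whole client construction by hand. The shape of that pattern, sketched with made-up types in plain C:

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the real client and argument structures. */
struct client {
	int prog, vers, flavor;
	struct client *parent;
};

struct create_args {
	int prog, vers, flavor;
};

/* One place that knows how to build a clone from a parameter block. */
static struct client *clone_common(const struct create_args *args,
				   struct client *parent)
{
	struct client *new = calloc(1, sizeof(*new));

	if (!new)
		return NULL;
	new->prog = args->prog;
	new->vers = args->vers;
	new->flavor = args->flavor;
	new->parent = parent;
	return new;
}

/* Plain clone: copy every parameter from the parent. */
static struct client *clone_client(struct client *clnt)
{
	struct create_args args = {
		.prog = clnt->prog, .vers = clnt->vers, .flavor = clnt->flavor,
	};
	return clone_common(&args, clnt);
}

/* Clone that overrides exactly one parameter, the auth flavor. */
static struct client *clone_client_set_auth(struct client *clnt, int flavor)
{
	struct create_args args = {
		.prog = clnt->prog, .vers = clnt->vers, .flavor = flavor,
	};
	return clone_common(&args, clnt);
}

int main(void)
{
	struct client parent = { .prog = 100003, .vers = 4, .flavor = 1 };
	struct client *a = clone_client(&parent);
	struct client *b = clone_client_set_auth(&parent, 6);

	if (!a || !b)
		return 1;
	printf("flavors: %d %d\n", a->flavor, b->flavor);
	free(a);
	free(b);
	return 0;
}
```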
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 21fde99e5c56..80f5dd23417d 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -1119,8 +1119,8 @@ rpc_fill_super(struct super_block *sb, void *data, int silent) | |||
1119 | return -ENOMEM; | 1119 | return -ENOMEM; |
1120 | if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL)) | 1120 | if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL)) |
1121 | return -ENOMEM; | 1121 | return -ENOMEM; |
1122 | dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", net, | 1122 | dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", |
1123 | NET_NAME(net)); | 1123 | net, NET_NAME(net)); |
1124 | sn->pipefs_sb = sb; | 1124 | sn->pipefs_sb = sb; |
1125 | err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list, | 1125 | err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list, |
1126 | RPC_PIPEFS_MOUNT, | 1126 | RPC_PIPEFS_MOUNT, |
@@ -1155,8 +1155,8 @@ static void rpc_kill_sb(struct super_block *sb) | |||
1155 | sn->pipefs_sb = NULL; | 1155 | sn->pipefs_sb = NULL; |
1156 | mutex_unlock(&sn->pipefs_sb_lock); | 1156 | mutex_unlock(&sn->pipefs_sb_lock); |
1157 | put_net(net); | 1157 | put_net(net); |
1158 | dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n", net, | 1158 | dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n", |
1159 | NET_NAME(net)); | 1159 | net, NET_NAME(net)); |
1160 | blocking_notifier_call_chain(&rpc_pipefs_notifier_list, | 1160 | blocking_notifier_call_chain(&rpc_pipefs_notifier_list, |
1161 | RPC_PIPEFS_UMOUNT, | 1161 | RPC_PIPEFS_UMOUNT, |
1162 | sb); | 1162 | sb); |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 128494ec9a64..6357fcb00c7e 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -1022,7 +1022,7 @@ static int rpciod_start(void) | |||
1022 | * Create the rpciod thread and wait for it to start. | 1022 | * Create the rpciod thread and wait for it to start. |
1023 | */ | 1023 | */ |
1024 | dprintk("RPC: creating workqueue rpciod\n"); | 1024 | dprintk("RPC: creating workqueue rpciod\n"); |
1025 | wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0); | 1025 | wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 1); |
1026 | rpciod_workqueue = wq; | 1026 | rpciod_workqueue = wq; |
1027 | return rpciod_workqueue != NULL; | 1027 | return rpciod_workqueue != NULL; |
1028 | } | 1028 | } |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index bac973a31367..194d865fae72 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -208,6 +208,35 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl, | |||
208 | return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags); | 208 | return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags); |
209 | } | 209 | } |
210 | 210 | ||
211 | /* | ||
212 | * svc_xprt_received conditionally queues the transport for processing | ||
213 | * by another thread. The caller must hold the XPT_BUSY bit and must | ||
214 | * not thereafter touch transport data. | ||
215 | * | ||
216 | * Note: XPT_DATA only gets cleared when a read-attempt finds no (or | ||
217 | * insufficient) data. | ||
218 | */ | ||
219 | static void svc_xprt_received(struct svc_xprt *xprt) | ||
220 | { | ||
221 | BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); | ||
222 | /* As soon as we clear busy, the xprt could be closed and | ||
223 | * 'put', so we need a reference to call svc_xprt_enqueue with: | ||
224 | */ | ||
225 | svc_xprt_get(xprt); | ||
226 | clear_bit(XPT_BUSY, &xprt->xpt_flags); | ||
227 | svc_xprt_enqueue(xprt); | ||
228 | svc_xprt_put(xprt); | ||
229 | } | ||
230 | |||
231 | void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new) | ||
232 | { | ||
233 | clear_bit(XPT_TEMP, &new->xpt_flags); | ||
234 | spin_lock_bh(&serv->sv_lock); | ||
235 | list_add(&new->xpt_list, &serv->sv_permsocks); | ||
236 | spin_unlock_bh(&serv->sv_lock); | ||
237 | svc_xprt_received(new); | ||
238 | } | ||
239 | |||
211 | int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, | 240 | int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, |
212 | struct net *net, const int family, | 241 | struct net *net, const int family, |
213 | const unsigned short port, int flags) | 242 | const unsigned short port, int flags) |
@@ -232,13 +261,8 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, | |||
232 | module_put(xcl->xcl_owner); | 261 | module_put(xcl->xcl_owner); |
233 | return PTR_ERR(newxprt); | 262 | return PTR_ERR(newxprt); |
234 | } | 263 | } |
235 | 264 | svc_add_new_perm_xprt(serv, newxprt); | |
236 | clear_bit(XPT_TEMP, &newxprt->xpt_flags); | ||
237 | spin_lock_bh(&serv->sv_lock); | ||
238 | list_add(&newxprt->xpt_list, &serv->sv_permsocks); | ||
239 | spin_unlock_bh(&serv->sv_lock); | ||
240 | newport = svc_xprt_local_port(newxprt); | 265 | newport = svc_xprt_local_port(newxprt); |
241 | clear_bit(XPT_BUSY, &newxprt->xpt_flags); | ||
242 | return newport; | 266 | return newport; |
243 | } | 267 | } |
244 | err: | 268 | err: |
@@ -394,27 +418,6 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool) | |||
394 | return xprt; | 418 | return xprt; |
395 | } | 419 | } |
396 | 420 | ||
397 | /* | ||
398 | * svc_xprt_received conditionally queues the transport for processing | ||
399 | * by another thread. The caller must hold the XPT_BUSY bit and must | ||
400 | * not thereafter touch transport data. | ||
401 | * | ||
402 | * Note: XPT_DATA only gets cleared when a read-attempt finds no (or | ||
403 | * insufficient) data. | ||
404 | */ | ||
405 | void svc_xprt_received(struct svc_xprt *xprt) | ||
406 | { | ||
407 | BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); | ||
408 | /* As soon as we clear busy, the xprt could be closed and | ||
409 | * 'put', so we need a reference to call svc_xprt_enqueue with: | ||
410 | */ | ||
411 | svc_xprt_get(xprt); | ||
412 | clear_bit(XPT_BUSY, &xprt->xpt_flags); | ||
413 | svc_xprt_enqueue(xprt); | ||
414 | svc_xprt_put(xprt); | ||
415 | } | ||
416 | EXPORT_SYMBOL_GPL(svc_xprt_received); | ||
417 | |||
418 | /** | 421 | /** |
419 | * svc_reserve - change the space reserved for the reply to a request. | 422 | * svc_reserve - change the space reserved for the reply to a request. |
420 | * @rqstp: The request in question | 423 | * @rqstp: The request in question |
@@ -565,33 +568,12 @@ static void svc_check_conn_limits(struct svc_serv *serv) | |||
565 | } | 568 | } |
566 | } | 569 | } |
567 | 570 | ||
568 | /* | 571 | int svc_alloc_arg(struct svc_rqst *rqstp) |
569 | * Receive the next request on any transport. This code is carefully | ||
570 | * organised not to touch any cachelines in the shared svc_serv | ||
571 | * structure, only cachelines in the local svc_pool. | ||
572 | */ | ||
573 | int svc_recv(struct svc_rqst *rqstp, long timeout) | ||
574 | { | 572 | { |
575 | struct svc_xprt *xprt = NULL; | 573 | struct svc_serv *serv = rqstp->rq_server; |
576 | struct svc_serv *serv = rqstp->rq_server; | 574 | struct xdr_buf *arg; |
577 | struct svc_pool *pool = rqstp->rq_pool; | 575 | int pages; |
578 | int len, i; | 576 | int i; |
579 | int pages; | ||
580 | struct xdr_buf *arg; | ||
581 | DECLARE_WAITQUEUE(wait, current); | ||
582 | long time_left; | ||
583 | |||
584 | dprintk("svc: server %p waiting for data (to = %ld)\n", | ||
585 | rqstp, timeout); | ||
586 | |||
587 | if (rqstp->rq_xprt) | ||
588 | printk(KERN_ERR | ||
589 | "svc_recv: service %p, transport not NULL!\n", | ||
590 | rqstp); | ||
591 | if (waitqueue_active(&rqstp->rq_wait)) | ||
592 | printk(KERN_ERR | ||
593 | "svc_recv: service %p, wait queue active!\n", | ||
594 | rqstp); | ||
595 | 577 | ||
596 | /* now allocate needed pages. If we get a failure, sleep briefly */ | 578 | /* now allocate needed pages. If we get a failure, sleep briefly */ |
597 | pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; | 579 | pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; |
@@ -621,11 +603,15 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
621 | arg->page_len = (pages-2)*PAGE_SIZE; | 603 | arg->page_len = (pages-2)*PAGE_SIZE; |
622 | arg->len = (pages-1)*PAGE_SIZE; | 604 | arg->len = (pages-1)*PAGE_SIZE; |
623 | arg->tail[0].iov_len = 0; | 605 | arg->tail[0].iov_len = 0; |
606 | return 0; | ||
607 | } | ||
624 | 608 | ||
625 | try_to_freeze(); | 609 | struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) |
626 | cond_resched(); | 610 | { |
627 | if (signalled() || kthread_should_stop()) | 611 | struct svc_xprt *xprt; |
628 | return -EINTR; | 612 | struct svc_pool *pool = rqstp->rq_pool; |
613 | DECLARE_WAITQUEUE(wait, current); | ||
614 | long time_left; | ||
629 | 615 | ||
630 | /* Normally we will wait up to 5 seconds for any required | 616 | /* Normally we will wait up to 5 seconds for any required |
631 | * cache information to be provided. | 617 | * cache information to be provided. |
@@ -663,7 +649,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
663 | if (kthread_should_stop()) { | 649 | if (kthread_should_stop()) { |
664 | set_current_state(TASK_RUNNING); | 650 | set_current_state(TASK_RUNNING); |
665 | spin_unlock_bh(&pool->sp_lock); | 651 | spin_unlock_bh(&pool->sp_lock); |
666 | return -EINTR; | 652 | return ERR_PTR(-EINTR); |
667 | } | 653 | } |
668 | 654 | ||
669 | add_wait_queue(&rqstp->rq_wait, &wait); | 655 | add_wait_queue(&rqstp->rq_wait, &wait); |
@@ -684,48 +670,58 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
684 | spin_unlock_bh(&pool->sp_lock); | 670 | spin_unlock_bh(&pool->sp_lock); |
685 | dprintk("svc: server %p, no data yet\n", rqstp); | 671 | dprintk("svc: server %p, no data yet\n", rqstp); |
686 | if (signalled() || kthread_should_stop()) | 672 | if (signalled() || kthread_should_stop()) |
687 | return -EINTR; | 673 | return ERR_PTR(-EINTR); |
688 | else | 674 | else |
689 | return -EAGAIN; | 675 | return ERR_PTR(-EAGAIN); |
690 | } | 676 | } |
691 | } | 677 | } |
692 | spin_unlock_bh(&pool->sp_lock); | 678 | spin_unlock_bh(&pool->sp_lock); |
679 | return xprt; | ||
680 | } | ||
681 | |||
682 | void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt) | ||
683 | { | ||
684 | spin_lock_bh(&serv->sv_lock); | ||
685 | set_bit(XPT_TEMP, &newxpt->xpt_flags); | ||
686 | list_add(&newxpt->xpt_list, &serv->sv_tempsocks); | ||
687 | serv->sv_tmpcnt++; | ||
688 | if (serv->sv_temptimer.function == NULL) { | ||
689 | /* setup timer to age temp transports */ | ||
690 | setup_timer(&serv->sv_temptimer, svc_age_temp_xprts, | ||
691 | (unsigned long)serv); | ||
692 | mod_timer(&serv->sv_temptimer, | ||
693 | jiffies + svc_conn_age_period * HZ); | ||
694 | } | ||
695 | spin_unlock_bh(&serv->sv_lock); | ||
696 | svc_xprt_received(newxpt); | ||
697 | } | ||
698 | |||
699 | static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt) | ||
700 | { | ||
701 | struct svc_serv *serv = rqstp->rq_server; | ||
702 | int len = 0; | ||
693 | 703 | ||
694 | len = 0; | ||
695 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { | 704 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { |
696 | dprintk("svc_recv: found XPT_CLOSE\n"); | 705 | dprintk("svc_recv: found XPT_CLOSE\n"); |
697 | svc_delete_xprt(xprt); | 706 | svc_delete_xprt(xprt); |
698 | /* Leave XPT_BUSY set on the dead xprt: */ | 707 | /* Leave XPT_BUSY set on the dead xprt: */ |
699 | goto out; | 708 | return 0; |
700 | } | 709 | } |
701 | if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { | 710 | if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { |
702 | struct svc_xprt *newxpt; | 711 | struct svc_xprt *newxpt; |
712 | /* | ||
713 | * We know this module_get will succeed because the | ||
714 | * listener holds a reference too | ||
715 | */ | ||
716 | __module_get(xprt->xpt_class->xcl_owner); | ||
717 | svc_check_conn_limits(xprt->xpt_server); | ||
703 | newxpt = xprt->xpt_ops->xpo_accept(xprt); | 718 | newxpt = xprt->xpt_ops->xpo_accept(xprt); |
704 | if (newxpt) { | 719 | if (newxpt) |
705 | /* | 720 | svc_add_new_temp_xprt(serv, newxpt); |
706 | * We know this module_get will succeed because the | ||
707 | * listener holds a reference too | ||
708 | */ | ||
709 | __module_get(newxpt->xpt_class->xcl_owner); | ||
710 | svc_check_conn_limits(xprt->xpt_server); | ||
711 | spin_lock_bh(&serv->sv_lock); | ||
712 | set_bit(XPT_TEMP, &newxpt->xpt_flags); | ||
713 | list_add(&newxpt->xpt_list, &serv->sv_tempsocks); | ||
714 | serv->sv_tmpcnt++; | ||
715 | if (serv->sv_temptimer.function == NULL) { | ||
716 | /* setup timer to age temp transports */ | ||
717 | setup_timer(&serv->sv_temptimer, | ||
718 | svc_age_temp_xprts, | ||
719 | (unsigned long)serv); | ||
720 | mod_timer(&serv->sv_temptimer, | ||
721 | jiffies + svc_conn_age_period * HZ); | ||
722 | } | ||
723 | spin_unlock_bh(&serv->sv_lock); | ||
724 | svc_xprt_received(newxpt); | ||
725 | } | ||
726 | } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) { | 721 | } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) { |
722 | /* XPT_DATA|XPT_DEFERRED case: */ | ||
727 | dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", | 723 | dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", |
728 | rqstp, pool->sp_id, xprt, | 724 | rqstp, rqstp->rq_pool->sp_id, xprt, |
729 | atomic_read(&xprt->xpt_ref.refcount)); | 725 | atomic_read(&xprt->xpt_ref.refcount)); |
730 | rqstp->rq_deferred = svc_deferred_dequeue(xprt); | 726 | rqstp->rq_deferred = svc_deferred_dequeue(xprt); |
731 | if (rqstp->rq_deferred) | 727 | if (rqstp->rq_deferred) |
@@ -736,10 +732,51 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
736 | rqstp->rq_reserved = serv->sv_max_mesg; | 732 | rqstp->rq_reserved = serv->sv_max_mesg; |
737 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); | 733 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); |
738 | } | 734 | } |
735 | /* clear XPT_BUSY: */ | ||
739 | svc_xprt_received(xprt); | 736 | svc_xprt_received(xprt); |
737 | return len; | ||
738 | } | ||
739 | |||
740 | /* | ||
741 | * Receive the next request on any transport. This code is carefully | ||
742 | * organised not to touch any cachelines in the shared svc_serv | ||
743 | * structure, only cachelines in the local svc_pool. | ||
744 | */ | ||
745 | int svc_recv(struct svc_rqst *rqstp, long timeout) | ||
746 | { | ||
747 | struct svc_xprt *xprt = NULL; | ||
748 | struct svc_serv *serv = rqstp->rq_server; | ||
749 | int len, err; | ||
750 | |||
751 | dprintk("svc: server %p waiting for data (to = %ld)\n", | ||
752 | rqstp, timeout); | ||
753 | |||
754 | if (rqstp->rq_xprt) | ||
755 | printk(KERN_ERR | ||
756 | "svc_recv: service %p, transport not NULL!\n", | ||
757 | rqstp); | ||
758 | if (waitqueue_active(&rqstp->rq_wait)) | ||
759 | printk(KERN_ERR | ||
760 | "svc_recv: service %p, wait queue active!\n", | ||
761 | rqstp); | ||
762 | |||
763 | err = svc_alloc_arg(rqstp); | ||
764 | if (err) | ||
765 | return err; | ||
766 | |||
767 | try_to_freeze(); | ||
768 | cond_resched(); | ||
769 | if (signalled() || kthread_should_stop()) | ||
770 | return -EINTR; | ||
771 | |||
772 | xprt = svc_get_next_xprt(rqstp, timeout); | ||
773 | if (IS_ERR(xprt)) | ||
774 | return PTR_ERR(xprt); | ||
775 | |||
776 | len = svc_handle_xprt(rqstp, xprt); | ||
740 | 777 | ||
741 | /* No data, incomplete (TCP) read, or accept() */ | 778 | /* No data, incomplete (TCP) read, or accept() */ |
742 | if (len == 0 || len == -EAGAIN) | 779 | if (len <= 0) |
743 | goto out; | 780 | goto out; |
744 | 781 | ||
745 | clear_bit(XPT_OLD, &xprt->xpt_flags); | 782 | clear_bit(XPT_OLD, &xprt->xpt_flags); |
@@ -917,16 +954,18 @@ void svc_close_xprt(struct svc_xprt *xprt) | |||
917 | } | 954 | } |
918 | EXPORT_SYMBOL_GPL(svc_close_xprt); | 955 | EXPORT_SYMBOL_GPL(svc_close_xprt); |
919 | 956 | ||
920 | static void svc_close_list(struct list_head *xprt_list, struct net *net) | 957 | static void svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net) |
921 | { | 958 | { |
922 | struct svc_xprt *xprt; | 959 | struct svc_xprt *xprt; |
923 | 960 | ||
961 | spin_lock(&serv->sv_lock); | ||
924 | list_for_each_entry(xprt, xprt_list, xpt_list) { | 962 | list_for_each_entry(xprt, xprt_list, xpt_list) { |
925 | if (xprt->xpt_net != net) | 963 | if (xprt->xpt_net != net) |
926 | continue; | 964 | continue; |
927 | set_bit(XPT_CLOSE, &xprt->xpt_flags); | 965 | set_bit(XPT_CLOSE, &xprt->xpt_flags); |
928 | set_bit(XPT_BUSY, &xprt->xpt_flags); | 966 | set_bit(XPT_BUSY, &xprt->xpt_flags); |
929 | } | 967 | } |
968 | spin_unlock(&serv->sv_lock); | ||
930 | } | 969 | } |
931 | 970 | ||
932 | static void svc_clear_pools(struct svc_serv *serv, struct net *net) | 971 | static void svc_clear_pools(struct svc_serv *serv, struct net *net) |
@@ -949,24 +988,28 @@ static void svc_clear_pools(struct svc_serv *serv, struct net *net) | |||
949 | } | 988 | } |
950 | } | 989 | } |
951 | 990 | ||
952 | static void svc_clear_list(struct list_head *xprt_list, struct net *net) | 991 | static void svc_clear_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net) |
953 | { | 992 | { |
954 | struct svc_xprt *xprt; | 993 | struct svc_xprt *xprt; |
955 | struct svc_xprt *tmp; | 994 | struct svc_xprt *tmp; |
995 | LIST_HEAD(victims); | ||
956 | 996 | ||
997 | spin_lock(&serv->sv_lock); | ||
957 | list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { | 998 | list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { |
958 | if (xprt->xpt_net != net) | 999 | if (xprt->xpt_net != net) |
959 | continue; | 1000 | continue; |
960 | svc_delete_xprt(xprt); | 1001 | list_move(&xprt->xpt_list, &victims); |
961 | } | 1002 | } |
962 | list_for_each_entry(xprt, xprt_list, xpt_list) | 1003 | spin_unlock(&serv->sv_lock); |
963 | BUG_ON(xprt->xpt_net == net); | 1004 | |
1005 | list_for_each_entry_safe(xprt, tmp, &victims, xpt_list) | ||
1006 | svc_delete_xprt(xprt); | ||
964 | } | 1007 | } |
965 | 1008 | ||
966 | void svc_close_net(struct svc_serv *serv, struct net *net) | 1009 | void svc_close_net(struct svc_serv *serv, struct net *net) |
967 | { | 1010 | { |
968 | svc_close_list(&serv->sv_tempsocks, net); | 1011 | svc_close_list(serv, &serv->sv_tempsocks, net); |
969 | svc_close_list(&serv->sv_permsocks, net); | 1012 | svc_close_list(serv, &serv->sv_permsocks, net); |
970 | 1013 | ||
971 | svc_clear_pools(serv, net); | 1014 | svc_clear_pools(serv, net); |
972 | /* | 1015 | /* |
@@ -974,8 +1017,8 @@ void svc_close_net(struct svc_serv *serv, struct net *net) | |||
974 | * svc_xprt_enqueue will not add new entries without taking the | 1017 | * svc_xprt_enqueue will not add new entries without taking the |
975 | * sp_lock and checking XPT_BUSY. | 1018 | * sp_lock and checking XPT_BUSY. |
976 | */ | 1019 | */ |
977 | svc_clear_list(&serv->sv_tempsocks, net); | 1020 | svc_clear_list(serv, &serv->sv_tempsocks, net); |
978 | svc_clear_list(&serv->sv_permsocks, net); | 1021 | svc_clear_list(serv, &serv->sv_permsocks, net); |
979 | } | 1022 | } |
980 | 1023 | ||
981 | /* | 1024 | /* |
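The svc_close_list()/svc_clear_list() changes take serv->sv_lock while walking the socket lists and, in svc_clear_list(), move the matching transports onto a private victims list so the heavier teardown runs only after the lock is dropped. A compact pthread version of that move-then-free pattern (illustrative only, build with -pthread):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct xprt {
	int net;                  /* which namespace this transport belongs to */
	struct xprt *next;
};

static struct xprt *socks;                       /* shared list */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Detach everything belonging to @net under the lock, free it afterwards. */
static void clear_list(int net)
{
	struct xprt *victims = NULL, **pp, *x;

	pthread_mutex_lock(&lock);
	pp = &socks;
	while ((x = *pp) != NULL) {
		if (x->net == net) {
			*pp = x->next;           /* unlink from the shared list */
			x->next = victims;       /* park it on the private list */
			victims = x;
		} else {
			pp = &x->next;
		}
	}
	pthread_mutex_unlock(&lock);

	/* Teardown happens outside the lock, so it can sleep or take
	 * other locks without risking deadlock. */
	while ((x = victims) != NULL) {
		victims = x->next;
		printf("deleting transport in net %d\n", x->net);
		free(x);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct xprt *x = malloc(sizeof(*x));
		x->net = i % 2;
		x->next = socks;
		socks = x;
	}
	clear_list(0);
	return 0;
}
```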
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 998aa8c1807c..03827cef1fa7 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -59,7 +59,7 @@ | |||
59 | 59 | ||
60 | 60 | ||
61 | static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, | 61 | static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, |
62 | int *errp, int flags); | 62 | int flags); |
63 | static void svc_udp_data_ready(struct sock *, int); | 63 | static void svc_udp_data_ready(struct sock *, int); |
64 | static int svc_udp_recvfrom(struct svc_rqst *); | 64 | static int svc_udp_recvfrom(struct svc_rqst *); |
65 | static int svc_udp_sendto(struct svc_rqst *); | 65 | static int svc_udp_sendto(struct svc_rqst *); |
@@ -305,57 +305,6 @@ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining) | |||
305 | return len; | 305 | return len; |
306 | } | 306 | } |
307 | 307 | ||
308 | /** | ||
309 | * svc_sock_names - construct a list of listener names in a string | ||
310 | * @serv: pointer to RPC service | ||
311 | * @buf: pointer to a buffer to fill in with socket names | ||
312 | * @buflen: size of the buffer to be filled | ||
313 | * @toclose: pointer to '\0'-terminated C string containing the name | ||
314 | * of a listener to be closed | ||
315 | * | ||
316 | * Fills in @buf with a '\n'-separated list of names of listener | ||
317 | * sockets. If @toclose is not NULL, the socket named by @toclose | ||
318 | * is closed, and is not included in the output list. | ||
319 | * | ||
320 | * Returns positive length of the socket name string, or a negative | ||
321 | * errno value on error. | ||
322 | */ | ||
323 | int svc_sock_names(struct svc_serv *serv, char *buf, const size_t buflen, | ||
324 | const char *toclose) | ||
325 | { | ||
326 | struct svc_sock *svsk, *closesk = NULL; | ||
327 | int len = 0; | ||
328 | |||
329 | if (!serv) | ||
330 | return 0; | ||
331 | |||
332 | spin_lock_bh(&serv->sv_lock); | ||
333 | list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) { | ||
334 | int onelen = svc_one_sock_name(svsk, buf + len, buflen - len); | ||
335 | if (onelen < 0) { | ||
336 | len = onelen; | ||
337 | break; | ||
338 | } | ||
339 | if (toclose && strcmp(toclose, buf + len) == 0) { | ||
340 | closesk = svsk; | ||
341 | svc_xprt_get(&closesk->sk_xprt); | ||
342 | } else | ||
343 | len += onelen; | ||
344 | } | ||
345 | spin_unlock_bh(&serv->sv_lock); | ||
346 | |||
347 | if (closesk) { | ||
348 | /* Should unregister with portmap, but you cannot | ||
349 | * unregister just one protocol... | ||
350 | */ | ||
351 | svc_close_xprt(&closesk->sk_xprt); | ||
352 | svc_xprt_put(&closesk->sk_xprt); | ||
353 | } else if (toclose) | ||
354 | return -ENOENT; | ||
355 | return len; | ||
356 | } | ||
357 | EXPORT_SYMBOL_GPL(svc_sock_names); | ||
358 | |||
359 | /* | 308 | /* |
360 | * Check input queue length | 309 | * Check input queue length |
361 | */ | 310 | */ |
@@ -598,11 +547,9 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
598 | dprintk("svc: recvfrom returned error %d\n", -err); | 547 | dprintk("svc: recvfrom returned error %d\n", -err); |
599 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); | 548 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); |
600 | } | 549 | } |
601 | return -EAGAIN; | 550 | return 0; |
602 | } | 551 | } |
603 | len = svc_addr_len(svc_addr(rqstp)); | 552 | len = svc_addr_len(svc_addr(rqstp)); |
604 | if (len == 0) | ||
605 | return -EAFNOSUPPORT; | ||
606 | rqstp->rq_addrlen = len; | 553 | rqstp->rq_addrlen = len; |
607 | if (skb->tstamp.tv64 == 0) { | 554 | if (skb->tstamp.tv64 == 0) { |
608 | skb->tstamp = ktime_get_real(); | 555 | skb->tstamp = ktime_get_real(); |
@@ -620,10 +567,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
620 | if (!svc_udp_get_dest_address(rqstp, cmh)) { | 567 | if (!svc_udp_get_dest_address(rqstp, cmh)) { |
621 | net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n", | 568 | net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n", |
622 | cmh->cmsg_level, cmh->cmsg_type); | 569 | cmh->cmsg_level, cmh->cmsg_type); |
623 | out_free: | 570 | goto out_free; |
624 | trace_kfree_skb(skb, svc_udp_recvfrom); | ||
625 | skb_free_datagram_locked(svsk->sk_sk, skb); | ||
626 | return 0; | ||
627 | } | 571 | } |
628 | rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp)); | 572 | rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp)); |
629 | 573 | ||
@@ -662,6 +606,10 @@ out_free: | |||
662 | serv->sv_stats->netudpcnt++; | 606 | serv->sv_stats->netudpcnt++; |
663 | 607 | ||
664 | return len; | 608 | return len; |
609 | out_free: | ||
610 | trace_kfree_skb(skb, svc_udp_recvfrom); | ||
611 | skb_free_datagram_locked(svsk->sk_sk, skb); | ||
612 | return 0; | ||
665 | } | 613 | } |
666 | 614 | ||
667 | static int | 615 | static int |
@@ -900,8 +848,9 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt) | |||
900 | */ | 848 | */ |
901 | newsock->sk->sk_sndtimeo = HZ*30; | 849 | newsock->sk->sk_sndtimeo = HZ*30; |
902 | 850 | ||
903 | if (!(newsvsk = svc_setup_socket(serv, newsock, &err, | 851 | newsvsk = svc_setup_socket(serv, newsock, |
904 | (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY)))) | 852 | (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY)); |
853 | if (IS_ERR(newsvsk)) | ||
905 | goto failed; | 854 | goto failed; |
906 | svc_xprt_set_remote(&newsvsk->sk_xprt, sin, slen); | 855 | svc_xprt_set_remote(&newsvsk->sk_xprt, sin, slen); |
907 | err = kernel_getsockname(newsock, sin, &slen); | 856 | err = kernel_getsockname(newsock, sin, &slen); |
@@ -1174,13 +1123,13 @@ error: | |||
1174 | if (len != -EAGAIN) | 1123 | if (len != -EAGAIN) |
1175 | goto err_other; | 1124 | goto err_other; |
1176 | dprintk("RPC: TCP recvfrom got EAGAIN\n"); | 1125 | dprintk("RPC: TCP recvfrom got EAGAIN\n"); |
1177 | return -EAGAIN; | 1126 | return 0; |
1178 | err_other: | 1127 | err_other: |
1179 | printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", | 1128 | printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", |
1180 | svsk->sk_xprt.xpt_server->sv_name, -len); | 1129 | svsk->sk_xprt.xpt_server->sv_name, -len); |
1181 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); | 1130 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); |
1182 | err_noclose: | 1131 | err_noclose: |
1183 | return -EAGAIN; /* record not complete */ | 1132 | return 0; /* record not complete */ |
1184 | } | 1133 | } |
1185 | 1134 | ||
1186 | /* | 1135 | /* |
@@ -1383,29 +1332,29 @@ EXPORT_SYMBOL_GPL(svc_sock_update_bufs); | |||
1383 | */ | 1332 | */ |
1384 | static struct svc_sock *svc_setup_socket(struct svc_serv *serv, | 1333 | static struct svc_sock *svc_setup_socket(struct svc_serv *serv, |
1385 | struct socket *sock, | 1334 | struct socket *sock, |
1386 | int *errp, int flags) | 1335 | int flags) |
1387 | { | 1336 | { |
1388 | struct svc_sock *svsk; | 1337 | struct svc_sock *svsk; |
1389 | struct sock *inet; | 1338 | struct sock *inet; |
1390 | int pmap_register = !(flags & SVC_SOCK_ANONYMOUS); | 1339 | int pmap_register = !(flags & SVC_SOCK_ANONYMOUS); |
1340 | int err = 0; | ||
1391 | 1341 | ||
1392 | dprintk("svc: svc_setup_socket %p\n", sock); | 1342 | dprintk("svc: svc_setup_socket %p\n", sock); |
1393 | if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) { | 1343 | svsk = kzalloc(sizeof(*svsk), GFP_KERNEL); |
1394 | *errp = -ENOMEM; | 1344 | if (!svsk) |
1395 | return NULL; | 1345 | return ERR_PTR(-ENOMEM); |
1396 | } | ||
1397 | 1346 | ||
1398 | inet = sock->sk; | 1347 | inet = sock->sk; |
1399 | 1348 | ||
1400 | /* Register socket with portmapper */ | 1349 | /* Register socket with portmapper */ |
1401 | if (*errp >= 0 && pmap_register) | 1350 | if (pmap_register) |
1402 | *errp = svc_register(serv, sock_net(sock->sk), inet->sk_family, | 1351 | err = svc_register(serv, sock_net(sock->sk), inet->sk_family, |
1403 | inet->sk_protocol, | 1352 | inet->sk_protocol, |
1404 | ntohs(inet_sk(inet)->inet_sport)); | 1353 | ntohs(inet_sk(inet)->inet_sport)); |
1405 | 1354 | ||
1406 | if (*errp < 0) { | 1355 | if (err < 0) { |
1407 | kfree(svsk); | 1356 | kfree(svsk); |
1408 | return NULL; | 1357 | return ERR_PTR(err); |
1409 | } | 1358 | } |
1410 | 1359 | ||
1411 | inet->sk_user_data = svsk; | 1360 | inet->sk_user_data = svsk; |
@@ -1450,42 +1399,38 @@ int svc_addsock(struct svc_serv *serv, const int fd, char *name_return, | |||
1450 | int err = 0; | 1399 | int err = 0; |
1451 | struct socket *so = sockfd_lookup(fd, &err); | 1400 | struct socket *so = sockfd_lookup(fd, &err); |
1452 | struct svc_sock *svsk = NULL; | 1401 | struct svc_sock *svsk = NULL; |
1402 | struct sockaddr_storage addr; | ||
1403 | struct sockaddr *sin = (struct sockaddr *)&addr; | ||
1404 | int salen; | ||
1453 | 1405 | ||
1454 | if (!so) | 1406 | if (!so) |
1455 | return err; | 1407 | return err; |
1408 | err = -EAFNOSUPPORT; | ||
1456 | if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6)) | 1409 | if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6)) |
1457 | err = -EAFNOSUPPORT; | 1410 | goto out; |
1458 | else if (so->sk->sk_protocol != IPPROTO_TCP && | 1411 | err = -EPROTONOSUPPORT; |
1412 | if (so->sk->sk_protocol != IPPROTO_TCP && | ||
1459 | so->sk->sk_protocol != IPPROTO_UDP) | 1413 | so->sk->sk_protocol != IPPROTO_UDP) |
1460 | err = -EPROTONOSUPPORT; | 1414 | goto out; |
1461 | else if (so->state > SS_UNCONNECTED) | 1415 | err = -EISCONN; |
1462 | err = -EISCONN; | 1416 | if (so->state > SS_UNCONNECTED) |
1463 | else { | 1417 | goto out; |
1464 | if (!try_module_get(THIS_MODULE)) | 1418 | err = -ENOENT; |
1465 | err = -ENOENT; | 1419 | if (!try_module_get(THIS_MODULE)) |
1466 | else | 1420 | goto out; |
1467 | svsk = svc_setup_socket(serv, so, &err, | 1421 | svsk = svc_setup_socket(serv, so, SVC_SOCK_DEFAULTS); |
1468 | SVC_SOCK_DEFAULTS); | 1422 | if (IS_ERR(svsk)) { |
1469 | if (svsk) { | 1423 | module_put(THIS_MODULE); |
1470 | struct sockaddr_storage addr; | 1424 | err = PTR_ERR(svsk); |
1471 | struct sockaddr *sin = (struct sockaddr *)&addr; | 1425 | goto out; |
1472 | int salen; | ||
1473 | if (kernel_getsockname(svsk->sk_sock, sin, &salen) == 0) | ||
1474 | svc_xprt_set_local(&svsk->sk_xprt, sin, salen); | ||
1475 | clear_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags); | ||
1476 | spin_lock_bh(&serv->sv_lock); | ||
1477 | list_add(&svsk->sk_xprt.xpt_list, &serv->sv_permsocks); | ||
1478 | spin_unlock_bh(&serv->sv_lock); | ||
1479 | svc_xprt_received(&svsk->sk_xprt); | ||
1480 | err = 0; | ||
1481 | } else | ||
1482 | module_put(THIS_MODULE); | ||
1483 | } | ||
1484 | if (err) { | ||
1485 | sockfd_put(so); | ||
1486 | return err; | ||
1487 | } | 1426 | } |
1427 | if (kernel_getsockname(svsk->sk_sock, sin, &salen) == 0) | ||
1428 | svc_xprt_set_local(&svsk->sk_xprt, sin, salen); | ||
1429 | svc_add_new_perm_xprt(serv, &svsk->sk_xprt); | ||
1488 | return svc_one_sock_name(svsk, name_return, len); | 1430 | return svc_one_sock_name(svsk, name_return, len); |
1431 | out: | ||
1432 | sockfd_put(so); | ||
1433 | return err; | ||
1489 | } | 1434 | } |
1490 | EXPORT_SYMBOL_GPL(svc_addsock); | 1435 | EXPORT_SYMBOL_GPL(svc_addsock); |
1491 | 1436 | ||
@@ -1563,11 +1508,13 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv, | |||
1563 | goto bummer; | 1508 | goto bummer; |
1564 | } | 1509 | } |
1565 | 1510 | ||
1566 | if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) { | 1511 | svsk = svc_setup_socket(serv, sock, flags); |
1567 | svc_xprt_set_local(&svsk->sk_xprt, newsin, newlen); | 1512 | if (IS_ERR(svsk)) { |
1568 | return (struct svc_xprt *)svsk; | 1513 | error = PTR_ERR(svsk); |
1514 | goto bummer; | ||
1569 | } | 1515 | } |
1570 | 1516 | svc_xprt_set_local(&svsk->sk_xprt, newsin, newlen); | |
1517 | return (struct svc_xprt *)svsk; | ||
1571 | bummer: | 1518 | bummer: |
1572 | dprintk("svc: svc_create_socket error = %d\n", -error); | 1519 | dprintk("svc: svc_create_socket error = %d\n", -error); |
1573 | sock_release(sock); | 1520 | sock_release(sock); |
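svc_setup_socket() above stops reporting failure through an int *errp out-parameter and instead encodes the errno in the returned pointer, which is what the new IS_ERR()/PTR_ERR() checks at its call sites rely on. A self-contained version of that encoding is sketched below; the kernel's own macros live in <linux/err.h>, so these are simplified re-implementations for illustration only.

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* Pack a small negative errno into an otherwise invalid pointer value. */
static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct sockctx { int fd; };

/* Either returns a valid context or an ERR_PTR(-errno); never NULL. */
static struct sockctx *setup_socket(int fd)
{
	struct sockctx *ctx;

	if (fd < 0)
		return ERR_PTR(-EINVAL);
	ctx = malloc(sizeof(*ctx));
	if (!ctx)
		return ERR_PTR(-ENOMEM);
	ctx->fd = fd;
	return ctx;
}

int main(void)
{
	struct sockctx *ctx = setup_socket(-1);

	if (IS_ERR(ctx)) {
		printf("setup failed: %ld\n", PTR_ERR(ctx));
		ctx = setup_socket(3);
	}
	if (!IS_ERR(ctx)) {
		printf("fd %d ready\n", ctx->fd);
		free(ctx);
	}
	return 0;
}
```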
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 0afba1b4b656..08f50afd5f2a 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
@@ -730,19 +730,24 @@ static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len) | |||
730 | 730 | ||
731 | if (xdr->nwords == 0) | 731 | if (xdr->nwords == 0) |
732 | return 0; | 732 | return 0; |
733 | if (nwords > xdr->nwords) { | ||
734 | nwords = xdr->nwords; | ||
735 | len = nwords << 2; | ||
736 | } | ||
737 | /* Realign pages to current pointer position */ | 733 | /* Realign pages to current pointer position */ |
738 | iov = buf->head; | 734 | iov = buf->head; |
739 | if (iov->iov_len > cur) | 735 | if (iov->iov_len > cur) { |
740 | xdr_shrink_bufhead(buf, iov->iov_len - cur); | 736 | xdr_shrink_bufhead(buf, iov->iov_len - cur); |
737 | xdr->nwords = XDR_QUADLEN(buf->len - cur); | ||
738 | } | ||
741 | 739 | ||
742 | /* Truncate page data and move it into the tail */ | 740 | if (nwords > xdr->nwords) { |
743 | if (buf->page_len > len) | 741 | nwords = xdr->nwords; |
742 | len = nwords << 2; | ||
743 | } | ||
744 | if (buf->page_len <= len) | ||
745 | len = buf->page_len; | ||
746 | else if (nwords < xdr->nwords) { | ||
747 | /* Truncate page data and move it into the tail */ | ||
744 | xdr_shrink_pagelen(buf, buf->page_len - len); | 748 | xdr_shrink_pagelen(buf, buf->page_len - len); |
745 | xdr->nwords = XDR_QUADLEN(buf->len - cur); | 749 | xdr->nwords = XDR_QUADLEN(buf->len - cur); |
750 | } | ||
746 | return len; | 751 | return len; |
747 | } | 752 | } |
748 | 753 | ||
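The xdr_align_pages() fix above recomputes xdr->nwords with XDR_QUADLEN() as soon as the head is shrunk, rather than only after truncating the page data, so the later clamping of the requested length works from the current buffer size. XDR_QUADLEN() simply rounds a byte count up to 4-byte XDR words; a trivial check of that arithmetic:

```c
#include <stdio.h>

/* Number of 4-byte XDR words needed to cover l bytes (round up). */
#define XDR_QUADLEN(l) (((l) + 3) >> 2)

int main(void)
{
	/* 10 bytes of data need 3 XDR words; 12 bytes fit exactly in 3. */
	printf("%u %u\n", (unsigned)XDR_QUADLEN(10), (unsigned)XDR_QUADLEN(12));
	return 0;
}
```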
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 5d7f61d7559c..bd462a532acf 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -231,7 +231,7 @@ EXPORT_SYMBOL_GPL(xprt_reserve_xprt); | |||
231 | static void xprt_clear_locked(struct rpc_xprt *xprt) | 231 | static void xprt_clear_locked(struct rpc_xprt *xprt) |
232 | { | 232 | { |
233 | xprt->snd_task = NULL; | 233 | xprt->snd_task = NULL; |
234 | if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) { | 234 | if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) { |
235 | smp_mb__before_clear_bit(); | 235 | smp_mb__before_clear_bit(); |
236 | clear_bit(XPRT_LOCKED, &xprt->state); | 236 | clear_bit(XPRT_LOCKED, &xprt->state); |
237 | smp_mb__after_clear_bit(); | 237 | smp_mb__after_clear_bit(); |
@@ -504,9 +504,6 @@ EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space); | |||
504 | */ | 504 | */ |
505 | void xprt_write_space(struct rpc_xprt *xprt) | 505 | void xprt_write_space(struct rpc_xprt *xprt) |
506 | { | 506 | { |
507 | if (unlikely(xprt->shutdown)) | ||
508 | return; | ||
509 | |||
510 | spin_lock_bh(&xprt->transport_lock); | 507 | spin_lock_bh(&xprt->transport_lock); |
511 | if (xprt->snd_task) { | 508 | if (xprt->snd_task) { |
512 | dprintk("RPC: write space: waking waiting task on " | 509 | dprintk("RPC: write space: waking waiting task on " |
@@ -679,7 +676,7 @@ xprt_init_autodisconnect(unsigned long data) | |||
679 | struct rpc_xprt *xprt = (struct rpc_xprt *)data; | 676 | struct rpc_xprt *xprt = (struct rpc_xprt *)data; |
680 | 677 | ||
681 | spin_lock(&xprt->transport_lock); | 678 | spin_lock(&xprt->transport_lock); |
682 | if (!list_empty(&xprt->recv) || xprt->shutdown) | 679 | if (!list_empty(&xprt->recv)) |
683 | goto out_abort; | 680 | goto out_abort; |
684 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) | 681 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) |
685 | goto out_abort; | 682 | goto out_abort; |
@@ -1262,7 +1259,6 @@ out: | |||
1262 | static void xprt_destroy(struct rpc_xprt *xprt) | 1259 | static void xprt_destroy(struct rpc_xprt *xprt) |
1263 | { | 1260 | { |
1264 | dprintk("RPC: destroying transport %p\n", xprt); | 1261 | dprintk("RPC: destroying transport %p\n", xprt); |
1265 | xprt->shutdown = 1; | ||
1266 | del_timer_sync(&xprt->timer); | 1262 | del_timer_sync(&xprt->timer); |
1267 | 1263 | ||
1268 | rpc_destroy_wait_queue(&xprt->binding); | 1264 | rpc_destroy_wait_queue(&xprt->binding); |
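This file and several hunks that follow drop the xprt->shutdown flag checks; with the flag gone, the callbacks no longer poll a racy "are we shutting down?" byte, and correctness instead comes from teardown ordering in xprt_destroy() (del_timer_sync() runs before the transport is released). A minimal user-space sketch of that general pattern, using a pthread in place of the kernel timer and workqueue machinery; everything below is illustrative, not RPC code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct transport_example {
	long work_done;		/* stands in for per-transport state */
};

static void *worker(void *arg)
{
	struct transport_example *t = arg;

	/* The worker just does its job; it does not poll a shutdown flag. */
	for (int i = 0; i < 1000; i++)
		t->work_done++;
	return NULL;
}

int main(void)
{
	struct transport_example *t = calloc(1, sizeof(*t));
	pthread_t tid;

	if (!t || pthread_create(&tid, NULL, worker, t))
		return 1;

	/* Teardown: synchronously wait for the worker before freeing, the way
	 * del_timer_sync() quiesces the autodisconnect timer in xprt_destroy(). */
	pthread_join(tid, NULL);
	printf("work_done=%ld\n", t->work_done);
	free(t);
	return 0;
}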
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 73b428bef598..62e4f9bcc387 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -578,10 +578,6 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird) | |||
578 | list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q); | 578 | list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q); |
579 | spin_unlock_bh(&listen_xprt->sc_lock); | 579 | spin_unlock_bh(&listen_xprt->sc_lock); |
580 | 580 | ||
581 | /* | ||
582 | * Can't use svc_xprt_received here because we are not on a | ||
583 | * rqstp thread | ||
584 | */ | ||
585 | set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags); | 581 | set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags); |
586 | svc_xprt_enqueue(&listen_xprt->sc_xprt); | 582 | svc_xprt_enqueue(&listen_xprt->sc_xprt); |
587 | } | 583 | } |
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 5d9202dc7cb1..c9aa7a35f3bf 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
@@ -199,21 +199,15 @@ xprt_rdma_connect_worker(struct work_struct *work) | |||
199 | struct rpc_xprt *xprt = &r_xprt->xprt; | 199 | struct rpc_xprt *xprt = &r_xprt->xprt; |
200 | int rc = 0; | 200 | int rc = 0; |
201 | 201 | ||
202 | if (!xprt->shutdown) { | 202 | current->flags |= PF_FSTRANS; |
203 | current->flags |= PF_FSTRANS; | 203 | xprt_clear_connected(xprt); |
204 | xprt_clear_connected(xprt); | 204 | |
205 | 205 | dprintk("RPC: %s: %sconnect\n", __func__, | |
206 | dprintk("RPC: %s: %sconnect\n", __func__, | 206 | r_xprt->rx_ep.rep_connected != 0 ? "re" : ""); |
207 | r_xprt->rx_ep.rep_connected != 0 ? "re" : ""); | 207 | rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia); |
208 | rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia); | 208 | if (rc) |
209 | if (rc) | 209 | xprt_wake_pending_tasks(xprt, rc); |
210 | goto out; | ||
211 | } | ||
212 | goto out_clear; | ||
213 | 210 | ||
214 | out: | ||
215 | xprt_wake_pending_tasks(xprt, rc); | ||
216 | out_clear: | ||
217 | dprintk("RPC: %s: exit\n", __func__); | 211 | dprintk("RPC: %s: exit\n", __func__); |
218 | xprt_clear_connecting(xprt); | 212 | xprt_clear_connecting(xprt); |
219 | current->flags &= ~PF_FSTRANS; | 213 | current->flags &= ~PF_FSTRANS; |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index a35b8e52e551..aaaadfbe36e9 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -917,9 +917,6 @@ static void xs_local_data_ready(struct sock *sk, int len) | |||
917 | if (skb == NULL) | 917 | if (skb == NULL) |
918 | goto out; | 918 | goto out; |
919 | 919 | ||
920 | if (xprt->shutdown) | ||
921 | goto dropit; | ||
922 | |||
923 | repsize = skb->len - sizeof(rpc_fraghdr); | 920 | repsize = skb->len - sizeof(rpc_fraghdr); |
924 | if (repsize < 4) { | 921 | if (repsize < 4) { |
925 | dprintk("RPC: impossible RPC reply size %d\n", repsize); | 922 | dprintk("RPC: impossible RPC reply size %d\n", repsize); |
@@ -981,9 +978,6 @@ static void xs_udp_data_ready(struct sock *sk, int len) | |||
981 | if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) | 978 | if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) |
982 | goto out; | 979 | goto out; |
983 | 980 | ||
984 | if (xprt->shutdown) | ||
985 | goto dropit; | ||
986 | |||
987 | repsize = skb->len - sizeof(struct udphdr); | 981 | repsize = skb->len - sizeof(struct udphdr); |
988 | if (repsize < 4) { | 982 | if (repsize < 4) { |
989 | dprintk("RPC: impossible RPC reply size %d!\n", repsize); | 983 | dprintk("RPC: impossible RPC reply size %d!\n", repsize); |
@@ -1025,6 +1019,16 @@ static void xs_udp_data_ready(struct sock *sk, int len) | |||
1025 | read_unlock_bh(&sk->sk_callback_lock); | 1019 | read_unlock_bh(&sk->sk_callback_lock); |
1026 | } | 1020 | } |
1027 | 1021 | ||
1022 | /* | ||
1023 | * Helper function to force a TCP close if the server is sending | ||
1024 | * junk and/or it has put us in CLOSE_WAIT | ||
1025 | */ | ||
1026 | static void xs_tcp_force_close(struct rpc_xprt *xprt) | ||
1027 | { | ||
1028 | set_bit(XPRT_CONNECTION_CLOSE, &xprt->state); | ||
1029 | xprt_force_disconnect(xprt); | ||
1030 | } | ||
1031 | |||
1028 | static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) | 1032 | static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) |
1029 | { | 1033 | { |
1030 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 1034 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
@@ -1051,7 +1055,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea | |||
1051 | /* Sanity check of the record length */ | 1055 | /* Sanity check of the record length */ |
1052 | if (unlikely(transport->tcp_reclen < 8)) { | 1056 | if (unlikely(transport->tcp_reclen < 8)) { |
1053 | dprintk("RPC: invalid TCP record fragment length\n"); | 1057 | dprintk("RPC: invalid TCP record fragment length\n"); |
1054 | xprt_force_disconnect(xprt); | 1058 | xs_tcp_force_close(xprt); |
1055 | return; | 1059 | return; |
1056 | } | 1060 | } |
1057 | dprintk("RPC: reading TCP record fragment of length %d\n", | 1061 | dprintk("RPC: reading TCP record fragment of length %d\n", |
@@ -1132,7 +1136,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport, | |||
1132 | break; | 1136 | break; |
1133 | default: | 1137 | default: |
1134 | dprintk("RPC: invalid request message type\n"); | 1138 | dprintk("RPC: invalid request message type\n"); |
1135 | xprt_force_disconnect(&transport->xprt); | 1139 | xs_tcp_force_close(&transport->xprt); |
1136 | } | 1140 | } |
1137 | xs_tcp_check_fraghdr(transport); | 1141 | xs_tcp_check_fraghdr(transport); |
1138 | } | 1142 | } |
@@ -1402,9 +1406,6 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes) | |||
1402 | read_lock_bh(&sk->sk_callback_lock); | 1406 | read_lock_bh(&sk->sk_callback_lock); |
1403 | if (!(xprt = xprt_from_sock(sk))) | 1407 | if (!(xprt = xprt_from_sock(sk))) |
1404 | goto out; | 1408 | goto out; |
1405 | if (xprt->shutdown) | ||
1406 | goto out; | ||
1407 | |||
1408 | /* Any data means we had a useful conversation, so | 1409 | /* Any data means we had a useful conversation, so |
1409 | * we don't need to delay the next reconnect | 1410 |
1410 | */ | 1411 | */ |
@@ -1455,6 +1456,8 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt) | |||
1455 | static void xs_sock_mark_closed(struct rpc_xprt *xprt) | 1456 | static void xs_sock_mark_closed(struct rpc_xprt *xprt) |
1456 | { | 1457 | { |
1457 | smp_mb__before_clear_bit(); | 1458 | smp_mb__before_clear_bit(); |
1459 | clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); | ||
1460 | clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state); | ||
1458 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); | 1461 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); |
1459 | clear_bit(XPRT_CLOSING, &xprt->state); | 1462 | clear_bit(XPRT_CLOSING, &xprt->state); |
1460 | smp_mb__after_clear_bit(); | 1463 | smp_mb__after_clear_bit(); |
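xs_sock_mark_closed() now also clears XPRT_CONNECTION_ABORT and XPRT_CONNECTION_CLOSE inside the same barrier-bracketed block; clear_bit() itself is atomic but implies no ordering, which is why the smp_mb__before/after_clear_bit() pair stays wrapped around the group. A rough user-space analogue with C11 atomics; the bit names below are stand-ins, not the kernel definitions.

#include <stdatomic.h>
#include <stdio.h>

enum { BIT_CONNECTION_ABORT, BIT_CONNECTION_CLOSE, BIT_CLOSE_WAIT, BIT_CLOSING };

static atomic_ulong state;

static void clear_bitflag(int bit)
{
	/* relaxed: atomic like clear_bit(), but no ordering on its own */
	atomic_fetch_and_explicit(&state, ~(1UL << bit), memory_order_relaxed);
}

static void mark_closed_example(void)
{
	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb__before_clear_bit() */
	clear_bitflag(BIT_CONNECTION_ABORT);
	clear_bitflag(BIT_CONNECTION_CLOSE);
	clear_bitflag(BIT_CLOSE_WAIT);
	clear_bitflag(BIT_CLOSING);
	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb__after_clear_bit() */
}

int main(void)
{
	atomic_store(&state, ~0UL);
	mark_closed_example();
	printf("state=%#lx\n", atomic_load(&state));
	return 0;
}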
@@ -1512,8 +1515,8 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1512 | break; | 1515 | break; |
1513 | case TCP_CLOSE_WAIT: | 1516 | case TCP_CLOSE_WAIT: |
1514 | /* The server initiated a shutdown of the socket */ | 1517 | /* The server initiated a shutdown of the socket */ |
1515 | xprt_force_disconnect(xprt); | ||
1516 | xprt->connect_cookie++; | 1518 | xprt->connect_cookie++; |
1519 | xs_tcp_force_close(xprt); | ||
1517 | case TCP_CLOSING: | 1520 | case TCP_CLOSING: |
1518 | /* | 1521 | /* |
1519 | * If the server closed down the connection, make sure that | 1522 | * If the server closed down the connection, make sure that |
@@ -1889,9 +1892,6 @@ static void xs_local_setup_socket(struct work_struct *work) | |||
1889 | struct socket *sock; | 1892 | struct socket *sock; |
1890 | int status = -EIO; | 1893 | int status = -EIO; |
1891 | 1894 | ||
1892 | if (xprt->shutdown) | ||
1893 | goto out; | ||
1894 | |||
1895 | current->flags |= PF_FSTRANS; | 1895 | current->flags |= PF_FSTRANS; |
1896 | 1896 | ||
1897 | clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); | 1897 | clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); |
@@ -2008,9 +2008,6 @@ static void xs_udp_setup_socket(struct work_struct *work) | |||
2008 | struct socket *sock = transport->sock; | 2008 | struct socket *sock = transport->sock; |
2009 | int status = -EIO; | 2009 | int status = -EIO; |
2010 | 2010 | ||
2011 | if (xprt->shutdown) | ||
2012 | goto out; | ||
2013 | |||
2014 | current->flags |= PF_FSTRANS; | 2011 | current->flags |= PF_FSTRANS; |
2015 | 2012 | ||
2016 | /* Start by resetting any existing state */ | 2013 | /* Start by resetting any existing state */ |
@@ -2156,9 +2153,6 @@ static void xs_tcp_setup_socket(struct work_struct *work) | |||
2156 | struct rpc_xprt *xprt = &transport->xprt; | 2153 | struct rpc_xprt *xprt = &transport->xprt; |
2157 | int status = -EIO; | 2154 | int status = -EIO; |
2158 | 2155 | ||
2159 | if (xprt->shutdown) | ||
2160 | goto out; | ||
2161 | |||
2162 | current->flags |= PF_FSTRANS; | 2156 | current->flags |= PF_FSTRANS; |
2163 | 2157 | ||
2164 | if (!sock) { | 2158 | if (!sock) { |
@@ -2199,8 +2193,7 @@ static void xs_tcp_setup_socket(struct work_struct *work) | |||
2199 | /* We're probably in TIME_WAIT. Get rid of existing socket, | 2193 | /* We're probably in TIME_WAIT. Get rid of existing socket, |
2200 | * and retry | 2194 | * and retry |
2201 | */ | 2195 | */ |
2202 | set_bit(XPRT_CONNECTION_CLOSE, &xprt->state); | 2196 | xs_tcp_force_close(xprt); |
2203 | xprt_force_disconnect(xprt); | ||
2204 | break; | 2197 | break; |
2205 | case -ECONNREFUSED: | 2198 | case -ECONNREFUSED: |
2206 | case -ECONNRESET: | 2199 | case -ECONNRESET: |
@@ -2528,6 +2521,7 @@ static struct rpc_xprt_ops xs_tcp_ops = { | |||
2528 | static struct rpc_xprt_ops bc_tcp_ops = { | 2521 | static struct rpc_xprt_ops bc_tcp_ops = { |
2529 | .reserve_xprt = xprt_reserve_xprt, | 2522 | .reserve_xprt = xprt_reserve_xprt, |
2530 | .release_xprt = xprt_release_xprt, | 2523 | .release_xprt = xprt_release_xprt, |
2524 | .alloc_slot = xprt_alloc_slot, | ||
2531 | .rpcbind = xs_local_rpcbind, | 2525 | .rpcbind = xs_local_rpcbind, |
2532 | .buf_alloc = bc_malloc, | 2526 | .buf_alloc = bc_malloc, |
2533 | .buf_free = bc_free, | 2527 | .buf_free = bc_free, |
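The one-line addition to bc_tcp_ops matters because rpc_xprt_ops is a table of function pointers that the core invokes unconditionally, so after the slot-allocation rework a backchannel transport without an .alloc_slot entry would leave a NULL callback behind. A tiny user-space illustration of the designated-initializer pattern; the struct and function names are made up.

#include <stdio.h>

struct xprt_ops_example {
	void (*alloc_slot)(void);
	void (*release_xprt)(void);
};

static void example_alloc_slot(void)   { puts("slot allocated"); }
static void example_release_xprt(void) { puts("xprt released"); }

/* Designated initializers: any member not named here stays NULL. */
static const struct xprt_ops_example bc_ops_example = {
	.alloc_slot   = example_alloc_slot,	/* the analogue of the added line */
	.release_xprt = example_release_xprt,
};

int main(void)
{
	bc_ops_example.alloc_slot();	/* would dereference NULL if omitted above */
	bc_ops_example.release_xprt();
	return 0;
}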
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 09dc5b97e079..fd5f042dbff4 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -220,6 +220,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol, | |||
220 | 220 | ||
221 | sock_init_data(sock, sk); | 221 | sock_init_data(sock, sk); |
222 | sk->sk_backlog_rcv = backlog_rcv; | 222 | sk->sk_backlog_rcv = backlog_rcv; |
223 | sk->sk_rcvbuf = TIPC_FLOW_CONTROL_WIN * 2 * TIPC_MAX_USER_MSG_SIZE * 2; | ||
223 | tipc_sk(sk)->p = tp_ptr; | 224 | tipc_sk(sk)->p = tp_ptr; |
224 | tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT; | 225 | tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT; |
225 | 226 | ||
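The added sk_rcvbuf assignment appears to size the socket receive buffer so that a full TIPC flow-control window of maximum-size user messages fits without tripping the generic per-socket limit. A back-of-the-envelope version of the same arithmetic, with placeholder constants rather than the values from the TIPC headers:

#include <stdio.h>

#define TIPC_FLOW_CONTROL_WIN	512	/* placeholder, see the TIPC headers */
#define TIPC_MAX_USER_MSG_SIZE	66000	/* placeholder, see the TIPC headers */

int main(void)
{
	unsigned long long rcvbuf =
		(unsigned long long)TIPC_FLOW_CONTROL_WIN * 2 * TIPC_MAX_USER_MSG_SIZE * 2;

	printf("sk_rcvbuf = %llu bytes\n", rcvbuf);
	return 0;
}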