author     Trond Myklebust <Trond.Myklebust@netapp.com>   2008-03-19 17:59:44 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>   2008-03-19 17:59:44 -0400
commit     c7c350e92aab1bba68f26a6027b734adcf9824ba (patch)
tree       aa99bd94c3049dd871d9c030d70a5f3d87591a95 /net
parent     2f42b5d043ee271d1e5d30ecd77186b6c4d4e534 (diff)
parent     f8512ad0da16cbe156f3a7627971cdf0b39c4138 (diff)
Merge branch 'hotfixes' into devel
Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/bnep/bnep.h                 |  2
-rw-r--r--  net/bluetooth/bnep/sock.c                 |  4
-rw-r--r--  net/bluetooth/hci_core.c                  |  4
-rw-r--r--  net/bluetooth/hci_sock.c                  |  4
-rw-r--r--  net/core/sock.c                           |  4
-rw-r--r--  net/ipv4/tcp_output.c                     | 12
-rw-r--r--  net/netfilter/nf_conntrack_expect.c       |  2
-rw-r--r--  net/netfilter/nf_conntrack_extend.c       | 19
-rw-r--r--  net/netfilter/nf_queue.c                  |  2
-rw-r--r--  net/netfilter/nfnetlink_log.c             | 32
-rw-r--r--  net/netfilter/nfnetlink_queue.c           | 17
-rw-r--r--  net/netfilter/xt_time.c                   |  7
-rw-r--r--  net/rxrpc/ar-recvmsg.c                    |  3
-rw-r--r--  net/sctp/bind_addr.c                      |  4
-rw-r--r--  net/sctp/ipv6.c                           |  4
-rw-r--r--  net/sctp/protocol.c                       |  4
-rw-r--r--  net/sctp/sm_make_chunk.c                  |  8
-rw-r--r--  net/sctp/socket.c                         | 73
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c     |  2
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c  | 96
20 files changed, 187 insertions, 116 deletions
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index a2992280c3d1..e69244dd8de8 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -174,7 +174,7 @@ struct bnep_session {
 
 void bnep_net_setup(struct net_device *dev);
 int bnep_sock_init(void);
-int bnep_sock_cleanup(void);
+void bnep_sock_cleanup(void);
 
 static inline int bnep_mc_hash(__u8 *addr)
 {
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 81065e548a1f..201e5b1ce473 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -257,12 +257,10 @@ error:
	return err;
 }
 
-int __exit bnep_sock_cleanup(void)
+void __exit bnep_sock_cleanup(void)
 {
	if (bt_sock_unregister(BTPROTO_BNEP) < 0)
		BT_ERR("Can't unregister BNEP socket");
 
	proto_unregister(&bnep_proto);
-
-	return 0;
 }
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 930b58e7149a..aec6929f5c16 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -902,8 +902,6 @@ int hci_unregister_dev(struct hci_dev *hdev)
 
	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
 
-	hci_unregister_sysfs(hdev);
-
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);
@@ -915,6 +913,8 @@ int hci_unregister_dev(struct hci_dev *hdev)
 
	hci_notify(hdev, HCI_DEV_UNREG);
 
+	hci_unregister_sysfs(hdev);
+
	__hci_dev_put(hdev);
 
	return 0;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 14991323c273..b5d4019d3572 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -734,7 +734,7 @@ error:
	return err;
 }
 
-int __exit hci_sock_cleanup(void)
+void __exit hci_sock_cleanup(void)
 {
	if (bt_sock_unregister(BTPROTO_HCI) < 0)
		BT_ERR("HCI socket unregistration failed");
@@ -742,6 +742,4 @@ int __exit hci_sock_cleanup(void)
	hci_unregister_notifier(&hci_sock_nblock);
 
	proto_unregister(&hci_sk_proto);
-
-	return 0;
 }
diff --git a/net/core/sock.c b/net/core/sock.c
index 09cb3a74de7f..2654c147c004 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1621,7 +1621,7 @@ static void sock_def_readable(struct sock *sk, int len)
 {
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-		wake_up_interruptible(sk->sk_sleep);
+		wake_up_interruptible_sync(sk->sk_sleep);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	read_unlock(&sk->sk_callback_lock);
 }
@@ -1635,7 +1635,7 @@ static void sock_def_write_space(struct sock *sk)
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-			wake_up_interruptible(sk->sk_sleep);
+			wake_up_interruptible_sync(sk->sk_sleep);
 
		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ed750f9ceb07..01578f544ad6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1035,6 +1035,13 @@ static void tcp_cwnd_validate(struct sock *sk)
  * introducing MSS oddities to segment boundaries. In rare cases where
  * mss_now != mss_cache, we will request caller to create a small skb
  * per input skb which could be mostly avoided here (if desired).
+ *
+ * We explicitly want to create a request for splitting write queue tail
+ * to a small skb for Nagle purposes while avoiding unnecessary modulos,
+ * thus all the complexity (cwnd_len is always MSS multiple which we
+ * return whenever allowed by the other factors). Basically we need the
+ * modulo only when the receiver window alone is the limiting factor or
+ * when we would be allowed to send the split-due-to-Nagle skb fully.
  */
 static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
					unsigned int mss_now, unsigned int cwnd)
@@ -1048,10 +1055,11 @@ static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
		return cwnd_len;
 
-	if (skb == tcp_write_queue_tail(sk) && cwnd_len <= skb->len)
+	needed = min(skb->len, window);
+
+	if (skb == tcp_write_queue_tail(sk) && cwnd_len <= needed)
		return cwnd_len;
 
-	needed = min(skb->len, window);
	return needed - needed % mss_now;
 }
 
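For illustration only (not part of the patch): the reordered tcp_mss_split_point() logic above can be modelled in a few lines of user-space C. With an assumed mss_now of 1000 and cwnd_len of 3000, a 4500-byte tail skb is sent at 3000 bytes with no modulo when the receiver window allows it, and is rounded down to an MSS multiple only when the window is the limiting factor. min() here stands in for the kernel macro.

#include <stdio.h>

/* Simplified model of the reordered tcp_mss_split_point() above.
 * "is_tail" stands for skb == tcp_write_queue_tail(sk). */
static unsigned int split_point(unsigned int skb_len, unsigned int window,
				unsigned int cwnd_len, unsigned int mss_now,
				int is_tail)
{
	unsigned int needed;

	if (cwnd_len <= window && !is_tail)
		return cwnd_len;		/* full MSS-multiple burst fits */

	needed = skb_len < window ? skb_len : window;

	if (is_tail && cwnd_len <= needed)
		return cwnd_len;		/* Nagle split, no modulo needed */

	return needed - needed % mss_now;	/* window-limited, align to MSS */
}

int main(void)
{
	/* mss_now = 1000, cwnd_len = 3000 in both cases */
	printf("%u\n", split_point(4500, 5000, 3000, 1000, 1));	/* 3000 */
	printf("%u\n", split_point(4500, 2500, 3000, 1000, 1));	/* 2000 */
	return 0;
}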
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index e06bf0028bb1..684ec9c1ad38 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -381,7 +381,7 @@ int nf_ct_expect_related(struct nf_conntrack_expect *expect)
	if (nf_ct_expect_count >= nf_ct_expect_max) {
		if (net_ratelimit())
			printk(KERN_WARNING
-			       "nf_conntrack: expectation table full");
+			       "nf_conntrack: expectation table full\n");
		ret = -EMFILE;
		goto out;
	}
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 8b9be1e978cd..2bd9963b5b3e 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -19,14 +19,6 @@
 static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM];
 static DEFINE_MUTEX(nf_ct_ext_type_mutex);
 
-/* Horrible trick to figure out smallest amount worth kmallocing. */
-#define CACHE(x) (x) + 0 *
-enum {
-	NF_CT_EXT_MIN_SIZE =
-#include <linux/kmalloc_sizes.h>
-	1 };
-#undef CACHE
-
 void __nf_ct_ext_destroy(struct nf_conn *ct)
 {
	unsigned int i;
@@ -53,7 +45,7 @@ EXPORT_SYMBOL(__nf_ct_ext_destroy);
 static void *
 nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
 {
-	unsigned int off, len, real_len;
+	unsigned int off, len;
	struct nf_ct_ext_type *t;
 
	rcu_read_lock();
@@ -61,16 +53,14 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
	BUG_ON(t == NULL);
	off = ALIGN(sizeof(struct nf_ct_ext), t->align);
	len = off + t->len;
-	real_len = t->alloc_size;
	rcu_read_unlock();
 
-	*ext = kzalloc(real_len, gfp);
+	*ext = kzalloc(t->alloc_size, gfp);
	if (!*ext)
		return NULL;
 
	(*ext)->offset[id] = off;
	(*ext)->len = len;
-	(*ext)->real_len = real_len;
 
	return (void *)(*ext) + off;
 }
@@ -95,7 +85,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
	newlen = newoff + t->len;
	rcu_read_unlock();
 
-	if (newlen >= ct->ext->real_len) {
+	if (newlen >= ksize(ct->ext)) {
		new = kmalloc(newlen, gfp);
		if (!new)
			return NULL;
@@ -114,7 +104,6 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
			rcu_read_unlock();
		}
		kfree(ct->ext);
-		new->real_len = newlen;
		ct->ext = new;
	}
 
@@ -156,8 +145,6 @@ static void update_alloc_size(struct nf_ct_ext_type *type)
			t1->alloc_size = ALIGN(t1->alloc_size, t2->align)
					 + t2->len;
		}
-		if (t1->alloc_size < NF_CT_EXT_MIN_SIZE)
-			t1->alloc_size = NF_CT_EXT_MIN_SIZE;
	}
 }
 
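For illustration only (not part of the patch): the hunks above can drop both the cached real_len and the NF_CT_EXT_MIN_SIZE trick because ksize() already reports how many bytes of a kmalloc'd object are actually usable, which may be more than was requested. A minimal kernel-style sketch of that idea, with a hypothetical helper name:

#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative only: a grow-in-place check can compare the new length
 * against ksize() directly instead of carrying the allocated size
 * around in the object itself. */
static bool fits_in_existing_alloc(const void *obj, size_t newlen)
{
	return newlen < ksize(obj);
}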
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index bfc2928c1912..ddc80ea114cd 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -51,7 +51,7 @@ int nf_unregister_queue_handler(int pf, const struct nf_queue_handler *qh)
		return -EINVAL;
 
	mutex_lock(&queue_handler_mutex);
-	if (queue_handler[pf] != qh) {
+	if (queue_handler[pf] && queue_handler[pf] != qh) {
		mutex_unlock(&queue_handler_mutex);
		return -EINVAL;
	}
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 7efa40d47393..bf3f19b21fe4 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -556,7 +556,7 @@ nfulnl_log_packet(unsigned int pf,
	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present? way more branches and checks, but more
	 * memory efficient... */
-	size = NLMSG_ALIGN(sizeof(struct nfgenmsg))
+	size = NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
@@ -702,20 +702,30 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t group_num = ntohs(nfmsg->res_id);
	struct nfulnl_instance *inst;
+	struct nfulnl_msg_config_cmd *cmd = NULL;
	int ret = 0;
 
+	if (nfula[NFULA_CFG_CMD]) {
+		u_int8_t pf = nfmsg->nfgen_family;
+		cmd = nla_data(nfula[NFULA_CFG_CMD]);
+
+		/* Commands without queue context */
+		switch (cmd->command) {
+		case NFULNL_CFG_CMD_PF_BIND:
+			return nf_log_register(pf, &nfulnl_logger);
+		case NFULNL_CFG_CMD_PF_UNBIND:
+			nf_log_unregister_pf(pf);
+			return 0;
+		}
+	}
+
	inst = instance_lookup_get(group_num);
	if (inst && inst->peer_pid != NETLINK_CB(skb).pid) {
		ret = -EPERM;
		goto out_put;
	}
 
-	if (nfula[NFULA_CFG_CMD]) {
-		u_int8_t pf = nfmsg->nfgen_family;
-		struct nfulnl_msg_config_cmd *cmd;
-
-		cmd = nla_data(nfula[NFULA_CFG_CMD]);
-
+	if (cmd != NULL) {
		switch (cmd->command) {
		case NFULNL_CFG_CMD_BIND:
			if (inst) {
@@ -738,14 +748,6 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 
			instance_destroy(inst);
			goto out;
-		case NFULNL_CFG_CMD_PF_BIND:
-			ret = nf_log_register(pf, &nfulnl_logger);
-			break;
-		case NFULNL_CFG_CMD_PF_UNBIND:
-			/* This is a bug and a feature. We cannot unregister
-			 * other handlers, like nfnetlink_inst can */
-			nf_log_unregister_pf(pf);
-			break;
		default:
			ret = -ENOTSUPP;
			break;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 0043d3a9f87e..012cb6910820 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -224,7 +224,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
	struct net_device *indev;
	struct net_device *outdev;
 
-	size = NLMSG_ALIGN(sizeof(struct nfgenmsg))
+	size = NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
@@ -703,19 +703,12 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		/* Commands without queue context - might sleep */
		switch (cmd->command) {
		case NFQNL_CFG_CMD_PF_BIND:
-			ret = nf_register_queue_handler(ntohs(cmd->pf),
+			return nf_register_queue_handler(ntohs(cmd->pf),
							&nfqh);
-			break;
		case NFQNL_CFG_CMD_PF_UNBIND:
-			ret = nf_unregister_queue_handler(ntohs(cmd->pf),
+			return nf_unregister_queue_handler(ntohs(cmd->pf),
							  &nfqh);
-			break;
-		default:
-			break;
		}
-
-		if (ret < 0)
-			return ret;
	}
 
	rcu_read_lock();
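For illustration only (not part of the patch): the NLMSG_ALIGN -> NLMSG_SPACE change in the two files above matters because NLMSG_ALIGN() only pads the payload length, while NLMSG_SPACE() also reserves room for the leading struct nlmsghdr that the allocated skb must carry. A small user-space check of the two macros, using the 4-byte size of struct nfgenmsg:

#include <stdio.h>
#include <linux/netlink.h>

int main(void)
{
	/* struct nfgenmsg is 4 bytes (family, version, res_id).
	 * NLMSG_ALIGN() only pads that payload; NLMSG_SPACE() also
	 * accounts for the netlink message header in front of it. */
	printf("NLMSG_ALIGN(4) = %d\n", (int)NLMSG_ALIGN(4));	/* 4  */
	printf("NLMSG_SPACE(4) = %d\n", (int)NLMSG_SPACE(4));	/* 20 */
	return 0;
}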
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index e9a8794bc3ab..9fa2e0824708 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -95,8 +95,11 @@ static inline void localtime_2(struct xtm *r, time_t time)
	 */
	r->dse = time / 86400;
 
-	/* 1970-01-01 (w=0) was a Thursday (4). */
-	r->weekday = (4 + r->dse) % 7;
+	/*
+	 * 1970-01-01 (w=0) was a Thursday (4).
+	 * -1 and +1 map Sunday properly onto 7.
+	 */
+	r->weekday = (4 + r->dse - 1) % 7 + 1;
 }
 
 static void localtime_3(struct xtm *r, time_t time)
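For illustration only (not part of the patch): the old formula returned 0 for Sunday, while the new expression keeps day numbers in the 1..7 range with Sunday mapped onto 7, as the added comment states. A standalone check of the arithmetic:

#include <stdio.h>

/* Day-of-week from days since the epoch, as in the hunk above:
 * 1970-01-01 (dse = 0) was a Thursday (4), and the -1/+1 pair turns
 * what would otherwise be Sunday = 0 into 7. */
static unsigned int weekday(unsigned int dse)
{
	return (4 + dse - 1) % 7 + 1;
}

int main(void)
{
	printf("%u\n", weekday(0));	/* 4: Thursday, 1970-01-01 */
	printf("%u\n", weekday(3));	/* 7: Sunday,   1970-01-04 */
	printf("%u\n", weekday(4));	/* 1: Monday,   1970-01-05 */
	return 0;
}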
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index f19121d4795b..a39bf97f8830 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -143,7 +143,8 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
		/* copy the peer address and timestamp */
		if (!continue_call) {
			if (msg->msg_name && msg->msg_namelen > 0)
-				memcpy(&msg->msg_name, &call->conn->trans->peer->srx,
+				memcpy(msg->msg_name,
+				       &call->conn->trans->peer->srx,
				       sizeof(call->conn->trans->peer->srx));
			sock_recv_timestamp(msg, &rx->sk, skb);
		}
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index a27511ebc4cb..ceefda025e2d 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -209,6 +209,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
 int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
 {
	struct sctp_sockaddr_entry *addr, *temp;
+	int found = 0;
 
	/* We hold the socket lock when calling this function,
	 * and that acts as a writer synchronizing lock.
@@ -216,13 +217,14 @@ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
	list_for_each_entry_safe(addr, temp, &bp->address_list, list) {
		if (sctp_cmp_addr_exact(&addr->a, del_addr)) {
			/* Found the exact match. */
+			found = 1;
			addr->valid = 0;
			list_del_rcu(&addr->list);
			break;
		}
	}
 
-	if (addr && !addr->valid) {
+	if (found) {
		call_rcu(&addr->rcu, sctp_local_addr_free);
		SCTP_DBG_OBJCNT_DEC(addr);
		return 0;
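For illustration only (not part of the patch): this hunk and the matching ones in ipv6.c and protocol.c below replace a check on the loop cursor with an explicit flag, because after list_for_each_entry_safe() runs to completion the cursor is not NULL but points at the list head cast to the entry type, so testing addr there can act on memory that is not a real entry. A generic sketch of the safe pattern, with hypothetical names and plain list_del() standing in for the RCU variant:

#include <linux/list.h>

struct entry {
	struct list_head list;
	int valid;
};

/* Illustrative only: remove one matching entry and report whether it
 * was found. The flag, not the loop cursor, drives the follow-up
 * action, since the cursor is never NULL after a full traversal. */
static int remove_matching(struct list_head *head, int (*match)(struct entry *))
{
	struct entry *e, *tmp;
	int found = 0;

	list_for_each_entry_safe(e, tmp, head, list) {
		if (match(e)) {
			found = 1;
			e->valid = 0;
			list_del(&e->list);
			break;
		}
	}
	return found;
}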
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 87f940587d5f..9aa0733aee87 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -89,6 +89,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct sctp_sockaddr_entry *addr = NULL;
	struct sctp_sockaddr_entry *temp;
+	int found = 0;
 
	switch (ev) {
	case NETDEV_UP:
@@ -111,13 +112,14 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
				&sctp_local_addr_list, list) {
			if (ipv6_addr_equal(&addr->a.v6.sin6_addr,
					    &ifa->addr)) {
+				found = 1;
				addr->valid = 0;
				list_del_rcu(&addr->list);
				break;
			}
		}
		spin_unlock_bh(&sctp_local_addr_lock);
-		if (addr && !addr->valid)
+		if (found)
			call_rcu(&addr->rcu, sctp_local_addr_free);
		break;
	}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 688546dccd82..ad0a4069b95b 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -628,6 +628,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct sctp_sockaddr_entry *addr = NULL;
	struct sctp_sockaddr_entry *temp;
+	int found = 0;
 
	switch (ev) {
	case NETDEV_UP:
@@ -647,13 +648,14 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
		list_for_each_entry_safe(addr, temp,
					&sctp_local_addr_list, list) {
			if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) {
+				found = 1;
				addr->valid = 0;
				list_del_rcu(&addr->list);
				break;
			}
		}
		spin_unlock_bh(&sctp_local_addr_lock);
-		if (addr && !addr->valid)
+		if (found)
			call_rcu(&addr->rcu, sctp_local_addr_free);
		break;
	}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index e45be4e3f80d..578630e8e00d 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2375,6 +2375,14 @@ static int sctp_process_param(struct sctp_association *asoc,
		asoc->peer.ipv4_address = 0;
		asoc->peer.ipv6_address = 0;
 
+		/* Assume that peer supports the address family
+		 * by which it sends a packet.
+		 */
+		if (peer_addr->sa.sa_family == AF_INET6)
+			asoc->peer.ipv6_address = 1;
+		else if (peer_addr->sa.sa_family == AF_INET)
+			asoc->peer.ipv4_address = 1;
+
		/* Cycle through address types; avoid divide by 0. */
		sat = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
		if (sat)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 939892691a26..d994d822900d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2933,17 +2933,39 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
				    char __user *optval,
				    int optlen)
 {
+	struct sctp_assoc_value params;
+	struct sctp_sock *sp;
+	struct sctp_association *asoc;
	int val;
+	int assoc_id = 0;
 
-	if (optlen != sizeof(int))
+	if (optlen < sizeof(int))
		return -EINVAL;
-	if (get_user(val, (int __user *)optval))
-		return -EFAULT;
 
-	if (val < 0)
+	if (optlen == sizeof(int)) {
+		printk(KERN_WARNING
+		       "SCTP: Use of int in max_burst socket option deprecated\n");
+		printk(KERN_WARNING
+		       "SCTP: Use struct sctp_assoc_value instead\n");
+		if (copy_from_user(&val, optval, optlen))
+			return -EFAULT;
+	} else if (optlen == sizeof(struct sctp_assoc_value)) {
+		if (copy_from_user(&params, optval, optlen))
+			return -EFAULT;
+		val = params.assoc_value;
+		assoc_id = params.assoc_id;
+	} else
		return -EINVAL;
 
-	sctp_sk(sk)->max_burst = val;
+	sp = sctp_sk(sk);
+
+	if (assoc_id != 0) {
+		asoc = sctp_id2assoc(sk, assoc_id);
+		if (!asoc)
+			return -EINVAL;
+		asoc->max_burst = val;
+	} else
+		sp->max_burst = val;
 
	return 0;
 }
@@ -5005,20 +5027,45 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
					   char __user *optval,
					   int __user *optlen)
 {
-	int val;
+	struct sctp_assoc_value params;
+	struct sctp_sock *sp;
+	struct sctp_association *asoc;
 
	if (len < sizeof(int))
		return -EINVAL;
 
-	len = sizeof(int);
+	if (len == sizeof(int)) {
+		printk(KERN_WARNING
+		       "SCTP: Use of int in max_burst socket option deprecated\n");
+		printk(KERN_WARNING
+		       "SCTP: Use struct sctp_assoc_value instead\n");
+		params.assoc_id = 0;
+	} else if (len == sizeof (struct sctp_assoc_value)) {
+		if (copy_from_user(&params, optval, len))
+			return -EFAULT;
+	} else
+		return -EINVAL;
 
-	val = sctp_sk(sk)->max_burst;
-	if (put_user(len, optlen))
-		return -EFAULT;
-	if (copy_to_user(optval, &val, len))
-		return -EFAULT;
+	sp = sctp_sk(sk);
+
+	if (params.assoc_id != 0) {
+		asoc = sctp_id2assoc(sk, params.assoc_id);
+		if (!asoc)
+			return -EINVAL;
+		params.assoc_value = asoc->max_burst;
+	} else
+		params.assoc_value = sp->max_burst;
+
+	if (len == sizeof(int)) {
+		if (copy_to_user(optval, &params.assoc_value, len))
+			return -EFAULT;
+	} else {
+		if (copy_to_user(optval, &params, len))
+			return -EFAULT;
+	}
+
+	return 0;
 
-	return -ENOTSUPP;
 }
 
 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
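For illustration only (not part of the patch): with this change the max_burst option accepts, and prefers, struct sctp_assoc_value, keeping the bare int only as a deprecated fallback. A user-space sketch of the new form; the option name SCTP_MAX_BURST, sctp_assoc_t, and struct sctp_assoc_value are assumed to come from the lksctp <netinet/sctp.h> header:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>	/* sctp_assoc_value, SCTP_MAX_BURST (assumed, lksctp) */

/* Illustrative only: set max_burst for one association, or for the
 * socket default when assoc_id is 0, using the struct form. */
static int set_max_burst(int fd, sctp_assoc_t assoc_id, unsigned int burst)
{
	struct sctp_assoc_value av;

	memset(&av, 0, sizeof(av));
	av.assoc_id = assoc_id;
	av.assoc_value = burst;

	return setsockopt(fd, IPPROTO_SCTP, SCTP_MAX_BURST, &av, sizeof(av));
}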
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 0598b229c11d..981f190c1b39 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -156,7 +156,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
	struct svc_rdma_op_ctxt *ctxt;
	int ret = 0;
 
-	BUG_ON(sge_count >= 32);
+	BUG_ON(sge_count > RPCSVC_MAXPAGES);
	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, xdr_sge=%p, sge_count=%d\n",
		rmr, (unsigned long long)to, xdr_off,
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index f09444c451bc..16fd3f6718ff 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -54,7 +54,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					int flags);
 static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
 static void svc_rdma_release_rqst(struct svc_rqst *);
-static void rdma_destroy_xprt(struct svcxprt_rdma *xprt);
 static void dto_tasklet_func(unsigned long data);
 static void svc_rdma_detach(struct svc_xprt *xprt);
 static void svc_rdma_free(struct svc_xprt *xprt);
@@ -247,6 +246,7 @@ static void dto_tasklet_func(unsigned long data)
			sq_cq_reap(xprt);
		}
 
+		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
@@ -275,8 +275,10 @@ static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
-	if (list_empty(&xprt->sc_dto_q))
+	if (list_empty(&xprt->sc_dto_q)) {
+		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
+	}
	spin_unlock_irqrestore(&dto_lock, flags);
 
	/* Tasklet does all the work to avoid irqsave locks. */
@@ -386,8 +388,10 @@ static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
-	if (list_empty(&xprt->sc_dto_q))
+	if (list_empty(&xprt->sc_dto_q)) {
+		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
+	}
	spin_unlock_irqrestore(&dto_lock, flags);
 
	/* Tasklet does all the work to avoid irqsave locks. */
@@ -611,6 +615,7 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id,
	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
+		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
@@ -661,15 +666,15 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
 
	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
	if (IS_ERR(listen_id)) {
-		rdma_destroy_xprt(cma_xprt);
+		svc_xprt_put(&cma_xprt->sc_xprt);
		dprintk("svcrdma: rdma_create_id failed = %ld\n",
			PTR_ERR(listen_id));
		return (void *)listen_id;
	}
	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
-		rdma_destroy_xprt(cma_xprt);
		rdma_destroy_id(listen_id);
+		svc_xprt_put(&cma_xprt->sc_xprt);
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		return ERR_PTR(ret);
	}
@@ -678,8 +683,9 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		rdma_destroy_id(listen_id);
-		rdma_destroy_xprt(cma_xprt);
+		svc_xprt_put(&cma_xprt->sc_xprt);
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
+		return ERR_PTR(ret);
	}
 
	/*
@@ -820,6 +826,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
		newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
		newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
	}
+	svc_xprt_get(&newxprt->sc_xprt);
	newxprt->sc_qp = newxprt->sc_cm_id->qp;
 
	/* Register all of physical memory */
@@ -891,8 +898,15 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 
 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
+	/* Take a reference in case the DTO handler runs */
+	svc_xprt_get(&newxprt->sc_xprt);
+	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp)) {
+		ib_destroy_qp(newxprt->sc_qp);
+		svc_xprt_put(&newxprt->sc_xprt);
+	}
	rdma_destroy_id(newxprt->sc_cm_id);
-	rdma_destroy_xprt(newxprt);
+	/* This call to put will destroy the transport */
+	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
 }
 
@@ -919,54 +933,60 @@ static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
	rqstp->rq_xprt_ctxt = NULL;
 }
 
-/* Disable data ready events for this connection */
+/*
+ * When connected, an svc_xprt has at least three references:
+ *
+ * - A reference held by the QP. We still hold that here because this
+ *   code deletes the QP and puts the reference.
+ *
+ * - A reference held by the cm_id between the ESTABLISHED and
+ *   DISCONNECTED events. If the remote peer disconnected first, this
+ *   reference could be gone.
+ *
+ * - A reference held by the svc_recv code that called this function
+ *   as part of close processing.
+ *
+ * At a minimum two references should still be held.
+ */
 static void svc_rdma_detach(struct svc_xprt *xprt)
 {
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
-	unsigned long flags;
-
	dprintk("svc: svc_rdma_detach(%p)\n", xprt);
-	/*
-	 * Shutdown the connection. This will ensure we don't get any
-	 * more events from the provider.
-	 */
+
+	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
-	rdma_destroy_id(rdma->sc_cm_id);
 
-	/* We may already be on the DTO list */
-	spin_lock_irqsave(&dto_lock, flags);
-	if (!list_empty(&rdma->sc_dto_q))
-		list_del_init(&rdma->sc_dto_q);
-	spin_unlock_irqrestore(&dto_lock, flags);
+	/* Destroy the QP if present (not a listener) */
+	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) {
+		ib_destroy_qp(rdma->sc_qp);
+		svc_xprt_put(xprt);
+	}
+
+	/* Destroy the CM ID */
+	rdma_destroy_id(rdma->sc_cm_id);
 }
 
 static void svc_rdma_free(struct svc_xprt *xprt)
 {
	struct svcxprt_rdma *rdma = (struct svcxprt_rdma *)xprt;
	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
-	rdma_destroy_xprt(rdma);
-	kfree(rdma);
-}
-
-static void rdma_destroy_xprt(struct svcxprt_rdma *xprt)
-{
-	if (xprt->sc_qp && !IS_ERR(xprt->sc_qp))
-		ib_destroy_qp(xprt->sc_qp);
-
-	if (xprt->sc_sq_cq && !IS_ERR(xprt->sc_sq_cq))
-		ib_destroy_cq(xprt->sc_sq_cq);
+	/* We should only be called from kref_put */
+	BUG_ON(atomic_read(&xprt->xpt_ref.refcount) != 0);
+	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
+		ib_destroy_cq(rdma->sc_sq_cq);
 
-	if (xprt->sc_rq_cq && !IS_ERR(xprt->sc_rq_cq))
-		ib_destroy_cq(xprt->sc_rq_cq);
+	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);
 
-	if (xprt->sc_phys_mr && !IS_ERR(xprt->sc_phys_mr))
-		ib_dereg_mr(xprt->sc_phys_mr);
+	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
+		ib_dereg_mr(rdma->sc_phys_mr);
 
-	if (xprt->sc_pd && !IS_ERR(xprt->sc_pd))
-		ib_dealloc_pd(xprt->sc_pd);
+	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
+		ib_dealloc_pd(rdma->sc_pd);
 
-	destroy_context_cache(xprt->sc_ctxt_head);
+	destroy_context_cache(rdma->sc_ctxt_head);
+	kfree(rdma);
 }
 
 static int svc_rdma_has_wspace(struct svc_xprt *xprt)