author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-06-12 02:53:38 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-06-12 02:53:38 -0400
commit     bc47ab0241c7c86da4f5e5f82fbca7d45387c18d (patch)
tree       b9c33ae8b6de43e44cc5fcbaa3e4a15f18a5ed42 /net
parent     37f9ef553bed630957e025504cdcbc76f5de49d5 (diff)
parent     8ebf975608aaebd7feb33d77f07ba21a6380e086 (diff)
Merge commit 'origin/master' into next
Manual merge of:
arch/powerpc/kernel/asm-offsets.c
Diffstat (limited to 'net')
 net/bluetooth/hci_sysfs.c                |  6
 net/core/drop_monitor.c                  |  2
 net/core/net-traces.c                    |  4
 net/core/skbuff.c                        |  2
 net/netfilter/nf_conntrack_proto_dccp.c  |  4
 net/netfilter/nf_conntrack_proto_tcp.c   | 18
 net/netfilter/nfnetlink_log.c            |  6
 net/netfilter/xt_hashlimit.c             |  2
 net/sched/cls_api.c                      | 23
 net/sched/cls_cgroup.c                   | 25
 net/sunrpc/svcsock.c                     | 35
 net/sunrpc/xprtrdma/svc_rdma_sendto.c    | 12
 net/sunrpc/xprtrdma/svc_rdma_transport.c | 10
13 files changed, 103 insertions, 46 deletions
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 4cc3624bd22d..95f7a7a544b4 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -90,9 +90,6 @@ static void add_conn(struct work_struct *work)
         struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
         struct hci_dev *hdev = conn->hdev;
 
-        /* ensure previous del is complete */
-        flush_work(&conn->work_del);
-
         dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
 
         if (device_add(&conn->dev) < 0) {
@@ -118,9 +115,6 @@ static void del_conn(struct work_struct *work)
         struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
         struct hci_dev *hdev = conn->hdev;
 
-        /* ensure previous add is complete */
-        flush_work(&conn->work_add);
-
         if (!device_is_registered(&conn->dev))
                 return;
 
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 9fd0dc3cca99..b75b6cea49da 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -23,7 +23,7 @@
 #include <linux/bitops.h>
 #include <net/genetlink.h>
 
-#include <trace/skb.h>
+#include <trace/events/skb.h>
 
 #include <asm/unaligned.h>
 
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index c8fb45665e4f..499a67eaf3ae 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -19,11 +19,11 @@
 #include <linux/workqueue.h>
 #include <linux/netlink.h>
 #include <linux/net_dropmon.h>
-#include <trace/skb.h>
 
 #include <asm/unaligned.h>
 #include <asm/bitops.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/skb.h>
 
-DEFINE_TRACE(kfree_skb);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e505b5392e1e..c2e4fb8f3546 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -65,7 +65,7 @@
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
-#include <trace/skb.h>
+#include <trace/events/skb.h>
 
 #include "kmap_skb.h"
 
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 8e757dd53396..aee0d6bea309 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -22,6 +22,7 @@
 #include <linux/netfilter/nfnetlink_conntrack.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_log.h>
 
 static DEFINE_RWLOCK(dccp_lock);
@@ -553,6 +554,9 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
         ct->proto.dccp.state = new_state;
         write_unlock_bh(&dccp_lock);
 
+        if (new_state != old_state)
+                nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
+
         dn = dccp_pernet(net);
         nf_ct_refresh_acct(ct, ctinfo, skb, dn->dccp_timeout[new_state]);
 
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index b5ccf2b4b2e7..97a6e93d742e 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -634,6 +634,14 @@ static bool tcp_in_window(const struct nf_conn *ct,
                 sender->td_end = end;
                 sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
         }
+        if (tcph->ack) {
+                if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
+                        sender->td_maxack = ack;
+                        sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
+                } else if (after(ack, sender->td_maxack))
+                        sender->td_maxack = ack;
+        }
+
         /*
          * Update receiver data.
          */
@@ -919,6 +927,16 @@ static int tcp_packet(struct nf_conn *ct,
                 return -NF_ACCEPT;
         case TCP_CONNTRACK_CLOSE:
                 if (index == TCP_RST_SET
+                    && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
+                    && before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack)) {
+                        /* Invalid RST */
+                        write_unlock_bh(&tcp_lock);
+                        if (LOG_INVALID(net, IPPROTO_TCP))
+                                nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+                                        "nf_ct_tcp: invalid RST ");
+                        return -NF_ACCEPT;
+                }
+                if (index == TCP_RST_SET
                     && ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
                          && ct->proto.tcp.last_index == TCP_SYN_SET)
                         || (!test_bit(IPS_ASSURED_BIT, &ct->status)
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index fd326ac27ec8..66a6dd5c519a 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -581,6 +581,12 @@ nfulnl_log_packet(u_int8_t pf,
                 + nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
                 + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp));
 
+        if (in && skb_mac_header_was_set(skb)) {
+                size += nla_total_size(skb->dev->hard_header_len)
+                        + nla_total_size(sizeof(u_int16_t)) /* hwtype */
+                        + nla_total_size(sizeof(u_int16_t)); /* hwlen */
+        }
+
         spin_lock_bh(&inst->lock);
 
         if (inst->flags & NFULNL_CFG_F_SEQ)
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index a5b5369c30f9..219dcdbe388c 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -926,7 +926,7 @@ static int dl_seq_show(struct seq_file *s, void *v)
         if (!hlist_empty(&htable->hash[*bucket])) {
                 hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node)
                         if (dl_seq_real_show(ent, htable->family, s))
-                                return 1;
+                                return -1;
         }
         return 0;
 }
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 0759f32e9dca..09cdcdfe7e91 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -135,6 +135,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
         unsigned long cl;
         unsigned long fh;
         int err;
+        int tp_created = 0;
 
         if (net != &init_net)
                 return -EINVAL;
@@ -266,10 +267,7 @@ replay:
                         goto errout;
                 }
 
-                spin_lock_bh(root_lock);
-                tp->next = *back;
-                *back = tp;
-                spin_unlock_bh(root_lock);
+                tp_created = 1;
 
         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind))
                 goto errout;
@@ -296,8 +294,11 @@ replay:
         switch (n->nlmsg_type) {
         case RTM_NEWTFILTER:
                 err = -EEXIST;
-                if (n->nlmsg_flags & NLM_F_EXCL)
+                if (n->nlmsg_flags & NLM_F_EXCL) {
+                        if (tp_created)
+                                tcf_destroy(tp);
                         goto errout;
+                }
                 break;
         case RTM_DELTFILTER:
                 err = tp->ops->delete(tp, fh);
@@ -314,8 +315,18 @@ replay:
         }
 
         err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh);
-        if (err == 0)
+        if (err == 0) {
+                if (tp_created) {
+                        spin_lock_bh(root_lock);
+                        tp->next = *back;
+                        *back = tp;
+                        spin_unlock_bh(root_lock);
+                }
                 tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER);
+        } else {
+                if (tp_created)
+                        tcf_destroy(tp);
+        }
 
 errout:
         if (cl)
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 91a3db4a76f8..e5becb92b3e7 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -104,8 +104,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
                                struct tcf_result *res)
 {
         struct cls_cgroup_head *head = tp->root;
-        struct cgroup_cls_state *cs;
-        int ret = 0;
+        u32 classid;
 
         /*
          * Due to the nature of the classifier it is required to ignore all
@@ -121,17 +120,18 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
                 return -1;
 
         rcu_read_lock();
-        cs = task_cls_state(current);
-        if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) {
-                res->classid = cs->classid;
-                res->class = 0;
-                ret = tcf_exts_exec(skb, &head->exts, res);
-        } else
-                ret = -1;
-
+        classid = task_cls_state(current)->classid;
         rcu_read_unlock();
 
-        return ret;
+        if (!classid)
+                return -1;
+
+        if (!tcf_em_tree_match(skb, &head->ematches, NULL))
+                return -1;
+
+        res->classid = classid;
+        res->class = 0;
+        return tcf_exts_exec(skb, &head->exts, res);
 }
 
 static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
@@ -167,6 +167,9 @@ static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
         struct tcf_exts e;
         int err;
 
+        if (!tca[TCA_OPTIONS])
+                return -EINVAL;
+
         if (head == NULL) {
                 if (!handle)
                         return -EINVAL;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index af3198814c15..9d504234af4a 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -345,6 +345,7 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
         lock_sock(sock->sk);
         sock->sk->sk_sndbuf = snd * 2;
         sock->sk->sk_rcvbuf = rcv * 2;
+        sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
         release_sock(sock->sk);
 #endif
 }
@@ -796,6 +797,23 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
                 test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
                 test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
 
+        if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
+                /* sndbuf needs to have room for one request
+                 * per thread, otherwise we can stall even when the
+                 * network isn't a bottleneck.
+                 *
+                 * We count all threads rather than threads in a
+                 * particular pool, which provides an upper bound
+                 * on the number of threads which will access the socket.
+                 *
+                 * rcvbuf just needs to be able to hold a few requests.
+                 * Normally they will be removed from the queue
+                 * as soon a a complete request arrives.
+                 */
+                svc_sock_setbufsize(svsk->sk_sock,
+                                    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
+                                    3 * serv->sv_max_mesg);
+
         clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 
         /* Receive data. If we haven't got the record length yet, get
@@ -1043,6 +1061,15 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 
                 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
 
+                /* initialise setting must have enough space to
+                 * receive and respond to one request.
+                 * svc_tcp_recvfrom will re-adjust if necessary
+                 */
+                svc_sock_setbufsize(svsk->sk_sock,
+                                    3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
+                                    3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
+
+                set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
                 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
                 if (sk->sk_state != TCP_ESTABLISHED)
                         set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1112,14 +1139,8 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
         /* Initialize the socket */
         if (sock->type == SOCK_DGRAM)
                 svc_udp_init(svsk, serv);
-        else {
-                /* initialise setting must have enough space to
-                 * receive and respond to one request.
-                 */
-                svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg,
-                                        4 * serv->sv_max_mesg);
+        else
                 svc_tcp_init(svsk, serv);
-        }
 
         dprintk("svc: svc_setup_socket created %p (inet %p)\n",
                 svsk, svsk->sk_sk);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 8b510c5e8777..f11be72a1a80 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -128,7 +128,8 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
                 page_bytes -= sge_bytes;
 
                 frmr->page_list->page_list[page_no] =
-                        ib_dma_map_page(xprt->sc_cm_id->device, page, 0,
+                        ib_dma_map_single(xprt->sc_cm_id->device,
+                                          page_address(page),
                                           PAGE_SIZE, DMA_TO_DEVICE);
                 if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                          frmr->page_list->page_list[page_no]))
@@ -532,18 +533,17 @@ static int send_reply(struct svcxprt_rdma *rdma,
         clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
 
         /* Prepare the SGE for the RPCRDMA Header */
+        ctxt->sge[0].lkey = rdma->sc_dma_lkey;
+        ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
         ctxt->sge[0].addr =
-                ib_dma_map_page(rdma->sc_cm_id->device,
-                                page, 0, PAGE_SIZE, DMA_TO_DEVICE);
+                ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
+                                  ctxt->sge[0].length, DMA_TO_DEVICE);
         if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
                 goto err;
         atomic_inc(&rdma->sc_dma_used);
 
         ctxt->direction = DMA_TO_DEVICE;
 
-        ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
-        ctxt->sge[0].lkey = rdma->sc_dma_lkey;
-
         /* Determine how many of our SGE are to be transmitted */
         for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
                 sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 4b0c2fa15e0b..5151f9f6c573 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -500,8 +500,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
                 BUG_ON(sge_no >= xprt->sc_max_sge);
                 page = svc_rdma_get_page();
                 ctxt->pages[sge_no] = page;
-                pa = ib_dma_map_page(xprt->sc_cm_id->device,
-                                     page, 0, PAGE_SIZE,
+                pa = ib_dma_map_single(xprt->sc_cm_id->device,
+                                       page_address(page), PAGE_SIZE,
                                      DMA_FROM_DEVICE);
                 if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
                         goto err_put_ctxt;
@@ -1315,8 +1315,8 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
         length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
         /* Prepare SGE for local address */
-        sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
-                                   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+        sge.addr = ib_dma_map_single(xprt->sc_cm_id->device,
+                                     page_address(p), PAGE_SIZE, DMA_FROM_DEVICE);
         if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
                 put_page(p);
                 return;
@@ -1343,7 +1343,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
         if (ret) {
                 dprintk("svcrdma: Error %d posting send for protocol error\n",
                         ret);
-                ib_dma_unmap_page(xprt->sc_cm_id->device,
+                ib_dma_unmap_single(xprt->sc_cm_id->device,
                                   sge.addr, PAGE_SIZE,
                                   DMA_FROM_DEVICE);
                 svc_rdma_put_context(ctxt, 1);