aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-09-09 11:33:31 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-09-09 11:33:31 -0400
commit26d2177e977c912863ac04f6c1a967e793ca3a56 (patch)
tree48da04fb0b947cfa404747690d7081b657e33221 /net
parenta794b4f3292160bb3fd0f1f90ec8df454e3b17b3 (diff)
parentd1178cbcdcf91900ccf10a177350d7945703c151 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull infiniband/rdma updates from Doug Ledford: "This is a fairly sizeable set of changes. I've put them through a decent amount of testing prior to sending the pull request due to that. There are still a few fixups that I know are coming, but I wanted to go ahead and get the big, sizable chunk into your hands sooner rather than waiting for those last few fixups. Of note is the fact that this creates what is intended to be a temporary area in the drivers/staging tree specifically for some cleanups and additions that are coming for the RDMA stack. We deprecated two drivers (ipath and amso1100) and are waiting to hear back if we can deprecate another one (ehca). We also put Intel's new hfi1 driver into this area because it needs to be refactored and a transfer library created out of the factored out code, and then it and the qib driver and the soft-roce driver should all be modified to use that library. I expect drivers/staging/rdma to be around for three or four kernel releases and then to go away as all of the work is completed and final deletions of deprecated drivers are done. 
Summary of changes for 4.3: - Create drivers/staging/rdma - Move amso1100 driver to staging/rdma and schedule for deletion - Move ipath driver to staging/rdma and schedule for deletion - Add hfi1 driver to staging/rdma and set TODO for move to regular tree - Initial support for namespaces to be used on RDMA devices - Add RoCE GID table handling to the RDMA core caching code - Infrastructure to support handling of devices with differing read and write scatter gather capabilities - Various iSER updates - Kill off unsafe usage of global mr registrations - Update SRP driver - Misc mlx4 driver updates - Support for the mr_alloc verb - Support for a netlink interface between kernel and user space cache daemon to speed path record queries and route resolution - Initial support for safe hot removal of verbs devices" * tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (136 commits) IB/ipoib: Suppress warning for send only join failures IB/ipoib: Clean up send-only multicast joins IB/srp: Fix possible protection fault IB/core: Move SM class defines from ib_mad.h to ib_smi.h IB/core: Remove unnecessary defines from ib_mad.h IB/hfi1: Add PSM2 user space header to header_install IB/hfi1: Add CSRs for CONFIG_SDMA_VERBOSITY mlx5: Fix incorrect wc pkey_index assignment for GSI messages IB/mlx5: avoid destroying a NULL mr in reg_user_mr error flow IB/uverbs: reject invalid or unknown opcodes IB/cxgb4: Fix if statement in pick_local_ip6adddrs IB/sa: Fix rdma netlink message flags IB/ucma: HW Device hot-removal support IB/mlx4_ib: Disassociate support IB/uverbs: Enable device removal when there are active user space applications IB/uverbs: Explicitly pass ib_dev to uverbs commands IB/uverbs: Fix race between ib_uverbs_open and remove_one IB/uverbs: Fix reference counting usage of event files IB/core: Make ib_dealloc_pd return void IB/srp: Create an insecure all physical rkey only if needed ...
Diffstat (limited to 'net')
-rw-r--r--net/9p/trans_rdma.c26
-rw-r--r--net/ipv6/addrconf.c31
-rw-r--r--net/rds/ib.c13
-rw-r--r--net/rds/ib.h2
-rw-r--r--net/rds/ib_cm.c4
-rw-r--r--net/rds/ib_recv.c6
-rw-r--r--net/rds/ib_send.c8
-rw-r--r--net/rds/iw.c10
-rw-r--r--net/rds/iw_rdma.c5
-rw-r--r--net/rds/iw_send.c5
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c6
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c12
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c10
-rw-r--r--net/sunrpc/xprtrdma/verbs.c2
14 files changed, 34 insertions, 106 deletions
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 37a78d20c0f6..ba1210253f5e 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -94,8 +94,6 @@ struct p9_trans_rdma {
94 struct ib_pd *pd; 94 struct ib_pd *pd;
95 struct ib_qp *qp; 95 struct ib_qp *qp;
96 struct ib_cq *cq; 96 struct ib_cq *cq;
97 struct ib_mr *dma_mr;
98 u32 lkey;
99 long timeout; 97 long timeout;
100 int sq_depth; 98 int sq_depth;
101 struct semaphore sq_sem; 99 struct semaphore sq_sem;
@@ -382,9 +380,6 @@ static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
382 if (!rdma) 380 if (!rdma)
383 return; 381 return;
384 382
385 if (rdma->dma_mr && !IS_ERR(rdma->dma_mr))
386 ib_dereg_mr(rdma->dma_mr);
387
388 if (rdma->qp && !IS_ERR(rdma->qp)) 383 if (rdma->qp && !IS_ERR(rdma->qp))
389 ib_destroy_qp(rdma->qp); 384 ib_destroy_qp(rdma->qp);
390 385
@@ -415,7 +410,7 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
415 410
416 sge.addr = c->busa; 411 sge.addr = c->busa;
417 sge.length = client->msize; 412 sge.length = client->msize;
418 sge.lkey = rdma->lkey; 413 sge.lkey = rdma->pd->local_dma_lkey;
419 414
420 wr.next = NULL; 415 wr.next = NULL;
421 c->wc_op = IB_WC_RECV; 416 c->wc_op = IB_WC_RECV;
@@ -506,7 +501,7 @@ dont_need_post_recv:
506 501
507 sge.addr = c->busa; 502 sge.addr = c->busa;
508 sge.length = c->req->tc->size; 503 sge.length = c->req->tc->size;
509 sge.lkey = rdma->lkey; 504 sge.lkey = rdma->pd->local_dma_lkey;
510 505
511 wr.next = NULL; 506 wr.next = NULL;
512 c->wc_op = IB_WC_SEND; 507 c->wc_op = IB_WC_SEND;
@@ -647,7 +642,6 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
647 struct p9_trans_rdma *rdma; 642 struct p9_trans_rdma *rdma;
648 struct rdma_conn_param conn_param; 643 struct rdma_conn_param conn_param;
649 struct ib_qp_init_attr qp_attr; 644 struct ib_qp_init_attr qp_attr;
650 struct ib_device_attr devattr;
651 struct ib_cq_init_attr cq_attr = {}; 645 struct ib_cq_init_attr cq_attr = {};
652 646
653 /* Parse the transport specific mount options */ 647 /* Parse the transport specific mount options */
@@ -700,11 +694,6 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
700 if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED)) 694 if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
701 goto error; 695 goto error;
702 696
703 /* Query the device attributes */
704 err = ib_query_device(rdma->cm_id->device, &devattr);
705 if (err)
706 goto error;
707
708 /* Create the Completion Queue */ 697 /* Create the Completion Queue */
709 cq_attr.cqe = opts.sq_depth + opts.rq_depth + 1; 698 cq_attr.cqe = opts.sq_depth + opts.rq_depth + 1;
710 rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler, 699 rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
@@ -719,17 +708,6 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
719 if (IS_ERR(rdma->pd)) 708 if (IS_ERR(rdma->pd))
720 goto error; 709 goto error;
721 710
722 /* Cache the DMA lkey in the transport */
723 rdma->dma_mr = NULL;
724 if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
725 rdma->lkey = rdma->cm_id->device->local_dma_lkey;
726 else {
727 rdma->dma_mr = ib_get_dma_mr(rdma->pd, IB_ACCESS_LOCAL_WRITE);
728 if (IS_ERR(rdma->dma_mr))
729 goto error;
730 rdma->lkey = rdma->dma_mr->lkey;
731 }
732
733 /* Create the Queue Pair */ 711 /* Create the Queue Pair */
734 memset(&qp_attr, 0, sizeof qp_attr); 712 memset(&qp_attr, 0, sizeof qp_attr);
735 qp_attr.event_handler = qp_event_handler; 713 qp_attr.event_handler = qp_event_handler;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 99c0f2b843f0..030fefdc9aed 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1943,37 +1943,6 @@ static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
1943 __ipv6_dev_ac_dec(ifp->idev, &addr); 1943 __ipv6_dev_ac_dec(ifp->idev, &addr);
1944} 1944}
1945 1945
1946static int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
1947{
1948 if (dev->addr_len != ETH_ALEN)
1949 return -1;
1950 memcpy(eui, dev->dev_addr, 3);
1951 memcpy(eui + 5, dev->dev_addr + 3, 3);
1952
1953 /*
1954 * The zSeries OSA network cards can be shared among various
1955 * OS instances, but the OSA cards have only one MAC address.
1956 * This leads to duplicate address conflicts in conjunction
1957 * with IPv6 if more than one instance uses the same card.
1958 *
1959 * The driver for these cards can deliver a unique 16-bit
1960 * identifier for each instance sharing the same card. It is
1961 * placed instead of 0xFFFE in the interface identifier. The
1962 * "u" bit of the interface identifier is not inverted in this
1963 * case. Hence the resulting interface identifier has local
1964 * scope according to RFC2373.
1965 */
1966 if (dev->dev_id) {
1967 eui[3] = (dev->dev_id >> 8) & 0xFF;
1968 eui[4] = dev->dev_id & 0xFF;
1969 } else {
1970 eui[3] = 0xFF;
1971 eui[4] = 0xFE;
1972 eui[0] ^= 2;
1973 }
1974 return 0;
1975}
1976
1977static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev) 1946static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
1978{ 1947{
1979 if (dev->addr_len != IEEE802154_ADDR_LEN) 1948 if (dev->addr_len != IEEE802154_ADDR_LEN)
diff --git a/net/rds/ib.c b/net/rds/ib.c
index d020fade312c..2d3f2ab475df 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -99,8 +99,6 @@ static void rds_ib_dev_free(struct work_struct *work)
99 99
100 if (rds_ibdev->mr_pool) 100 if (rds_ibdev->mr_pool)
101 rds_ib_destroy_mr_pool(rds_ibdev->mr_pool); 101 rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
102 if (rds_ibdev->mr)
103 ib_dereg_mr(rds_ibdev->mr);
104 if (rds_ibdev->pd) 102 if (rds_ibdev->pd)
105 ib_dealloc_pd(rds_ibdev->pd); 103 ib_dealloc_pd(rds_ibdev->pd);
106 104
@@ -164,12 +162,6 @@ static void rds_ib_add_one(struct ib_device *device)
164 goto put_dev; 162 goto put_dev;
165 } 163 }
166 164
167 rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE);
168 if (IS_ERR(rds_ibdev->mr)) {
169 rds_ibdev->mr = NULL;
170 goto put_dev;
171 }
172
173 rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev); 165 rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
174 if (IS_ERR(rds_ibdev->mr_pool)) { 166 if (IS_ERR(rds_ibdev->mr_pool)) {
175 rds_ibdev->mr_pool = NULL; 167 rds_ibdev->mr_pool = NULL;
@@ -230,11 +222,10 @@ struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
230 * 222 *
231 * This can be called at any time and can be racing with any other RDS path. 223 * This can be called at any time and can be racing with any other RDS path.
232 */ 224 */
233static void rds_ib_remove_one(struct ib_device *device) 225static void rds_ib_remove_one(struct ib_device *device, void *client_data)
234{ 226{
235 struct rds_ib_device *rds_ibdev; 227 struct rds_ib_device *rds_ibdev = client_data;
236 228
237 rds_ibdev = ib_get_client_data(device, &rds_ib_client);
238 if (!rds_ibdev) 229 if (!rds_ibdev)
239 return; 230 return;
240 231
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 9fc95e38659a..aae60fda77f6 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -100,7 +100,6 @@ struct rds_ib_connection {
100 /* alphabet soup, IBTA style */ 100 /* alphabet soup, IBTA style */
101 struct rdma_cm_id *i_cm_id; 101 struct rdma_cm_id *i_cm_id;
102 struct ib_pd *i_pd; 102 struct ib_pd *i_pd;
103 struct ib_mr *i_mr;
104 struct ib_cq *i_send_cq; 103 struct ib_cq *i_send_cq;
105 struct ib_cq *i_recv_cq; 104 struct ib_cq *i_recv_cq;
106 105
@@ -173,7 +172,6 @@ struct rds_ib_device {
173 struct list_head conn_list; 172 struct list_head conn_list;
174 struct ib_device *dev; 173 struct ib_device *dev;
175 struct ib_pd *pd; 174 struct ib_pd *pd;
176 struct ib_mr *mr;
177 struct rds_ib_mr_pool *mr_pool; 175 struct rds_ib_mr_pool *mr_pool;
178 unsigned int fmr_max_remaps; 176 unsigned int fmr_max_remaps;
179 unsigned int max_fmrs; 177 unsigned int max_fmrs;
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index d150bb4aa3cb..9043f5c04787 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -269,7 +269,6 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
269 269
270 /* Protection domain and memory range */ 270 /* Protection domain and memory range */
271 ic->i_pd = rds_ibdev->pd; 271 ic->i_pd = rds_ibdev->pd;
272 ic->i_mr = rds_ibdev->mr;
273 272
274 cq_attr.cqe = ic->i_send_ring.w_nr + 1; 273 cq_attr.cqe = ic->i_send_ring.w_nr + 1;
275 ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler, 274 ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler,
@@ -375,7 +374,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
375 374
376 rds_ib_recv_init_ack(ic); 375 rds_ib_recv_init_ack(ic);
377 376
378 rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr, 377 rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
379 ic->i_send_cq, ic->i_recv_cq); 378 ic->i_send_cq, ic->i_recv_cq);
380 379
381out: 380out:
@@ -682,7 +681,6 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
682 681
683 ic->i_cm_id = NULL; 682 ic->i_cm_id = NULL;
684 ic->i_pd = NULL; 683 ic->i_pd = NULL;
685 ic->i_mr = NULL;
686 ic->i_send_cq = NULL; 684 ic->i_send_cq = NULL;
687 ic->i_recv_cq = NULL; 685 ic->i_recv_cq = NULL;
688 ic->i_send_hdrs = NULL; 686 ic->i_send_hdrs = NULL;
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 6bbe62060060..f43831e4186a 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -62,12 +62,12 @@ void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
62 sge = &recv->r_sge[0]; 62 sge = &recv->r_sge[0];
63 sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header)); 63 sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
64 sge->length = sizeof(struct rds_header); 64 sge->length = sizeof(struct rds_header);
65 sge->lkey = ic->i_mr->lkey; 65 sge->lkey = ic->i_pd->local_dma_lkey;
66 66
67 sge = &recv->r_sge[1]; 67 sge = &recv->r_sge[1];
68 sge->addr = 0; 68 sge->addr = 0;
69 sge->length = RDS_FRAG_SIZE; 69 sge->length = RDS_FRAG_SIZE;
70 sge->lkey = ic->i_mr->lkey; 70 sge->lkey = ic->i_pd->local_dma_lkey;
71 } 71 }
72} 72}
73 73
@@ -564,7 +564,7 @@ void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
564 564
565 sge->addr = ic->i_ack_dma; 565 sge->addr = ic->i_ack_dma;
566 sge->length = sizeof(struct rds_header); 566 sge->length = sizeof(struct rds_header);
567 sge->lkey = ic->i_mr->lkey; 567 sge->lkey = ic->i_pd->local_dma_lkey;
568 568
569 wr->sg_list = sge; 569 wr->sg_list = sge;
570 wr->num_sge = 1; 570 wr->num_sge = 1;
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index c576ebeb4115..4e88047086b6 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -202,9 +202,9 @@ void rds_ib_send_init_ring(struct rds_ib_connection *ic)
202 sge = &send->s_sge[0]; 202 sge = &send->s_sge[0];
203 sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header)); 203 sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
204 sge->length = sizeof(struct rds_header); 204 sge->length = sizeof(struct rds_header);
205 sge->lkey = ic->i_mr->lkey; 205 sge->lkey = ic->i_pd->local_dma_lkey;
206 206
207 send->s_sge[1].lkey = ic->i_mr->lkey; 207 send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
208 } 208 }
209} 209}
210 210
@@ -818,7 +818,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
818 /* Convert our struct scatterlist to struct ib_sge */ 818 /* Convert our struct scatterlist to struct ib_sge */
819 send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg); 819 send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
820 send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg); 820 send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
821 send->s_sge[0].lkey = ic->i_mr->lkey; 821 send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
822 822
823 rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr, 823 rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
824 send->s_sge[0].addr, send->s_sge[0].length); 824 send->s_sge[0].addr, send->s_sge[0].length);
@@ -932,7 +932,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
932 send->s_sge[j].addr = 932 send->s_sge[j].addr =
933 ib_sg_dma_address(ic->i_cm_id->device, scat); 933 ib_sg_dma_address(ic->i_cm_id->device, scat);
934 send->s_sge[j].length = len; 934 send->s_sge[j].length = len;
935 send->s_sge[j].lkey = ic->i_mr->lkey; 935 send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
936 936
937 sent += len; 937 sent += len;
938 rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr); 938 rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
diff --git a/net/rds/iw.c b/net/rds/iw.c
index 5d5a9d258658..3df0295c6659 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -125,12 +125,11 @@ free_attr:
125 kfree(dev_attr); 125 kfree(dev_attr);
126} 126}
127 127
128static void rds_iw_remove_one(struct ib_device *device) 128static void rds_iw_remove_one(struct ib_device *device, void *client_data)
129{ 129{
130 struct rds_iw_device *rds_iwdev; 130 struct rds_iw_device *rds_iwdev = client_data;
131 struct rds_iw_cm_id *i_cm_id, *next; 131 struct rds_iw_cm_id *i_cm_id, *next;
132 132
133 rds_iwdev = ib_get_client_data(device, &rds_iw_client);
134 if (!rds_iwdev) 133 if (!rds_iwdev)
135 return; 134 return;
136 135
@@ -149,10 +148,7 @@ static void rds_iw_remove_one(struct ib_device *device)
149 if (rds_iwdev->mr) 148 if (rds_iwdev->mr)
150 ib_dereg_mr(rds_iwdev->mr); 149 ib_dereg_mr(rds_iwdev->mr);
151 150
152 while (ib_dealloc_pd(rds_iwdev->pd)) { 151 ib_dealloc_pd(rds_iwdev->pd);
153 rdsdebug("Failed to dealloc pd %p\n", rds_iwdev->pd);
154 msleep(1);
155 }
156 152
157 list_del(&rds_iwdev->list); 153 list_del(&rds_iwdev->list);
158 kfree(rds_iwdev); 154 kfree(rds_iwdev);
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index dba8d0864f18..6a8fbd6e69e7 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -667,11 +667,12 @@ static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
667 struct ib_mr *mr; 667 struct ib_mr *mr;
668 int err; 668 int err;
669 669
670 mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size); 670 mr = ib_alloc_mr(rds_iwdev->pd, IB_MR_TYPE_MEM_REG,
671 pool->max_message_size);
671 if (IS_ERR(mr)) { 672 if (IS_ERR(mr)) {
672 err = PTR_ERR(mr); 673 err = PTR_ERR(mr);
673 674
674 printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed (err=%d)\n", err); 675 printk(KERN_WARNING "RDS/IW: ib_alloc_mr failed (err=%d)\n", err);
675 return err; 676 return err;
676 } 677 }
677 678
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 334fe98c5084..86152ec3b887 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -153,9 +153,10 @@ void rds_iw_send_init_ring(struct rds_iw_connection *ic)
153 sge->length = sizeof(struct rds_header); 153 sge->length = sizeof(struct rds_header);
154 sge->lkey = 0; 154 sge->lkey = 0;
155 155
156 send->s_mr = ib_alloc_fast_reg_mr(ic->i_pd, fastreg_message_size); 156 send->s_mr = ib_alloc_mr(ic->i_pd, IB_MR_TYPE_MEM_REG,
157 fastreg_message_size);
157 if (IS_ERR(send->s_mr)) { 158 if (IS_ERR(send->s_mr)) {
158 printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed\n"); 159 printk(KERN_WARNING "RDS/IW: ib_alloc_mr failed\n");
159 break; 160 break;
160 } 161 }
161 162
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 63f282e770b8..d6653f5d0830 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -117,7 +117,7 @@ __frwr_recovery_worker(struct work_struct *work)
117 if (ib_dereg_mr(r->r.frmr.fr_mr)) 117 if (ib_dereg_mr(r->r.frmr.fr_mr))
118 goto out_fail; 118 goto out_fail;
119 119
120 r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(pd, depth); 120 r->r.frmr.fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
121 if (IS_ERR(r->r.frmr.fr_mr)) 121 if (IS_ERR(r->r.frmr.fr_mr))
122 goto out_fail; 122 goto out_fail;
123 123
@@ -148,7 +148,7 @@ __frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
148 struct rpcrdma_frmr *f = &r->r.frmr; 148 struct rpcrdma_frmr *f = &r->r.frmr;
149 int rc; 149 int rc;
150 150
151 f->fr_mr = ib_alloc_fast_reg_mr(pd, depth); 151 f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
152 if (IS_ERR(f->fr_mr)) 152 if (IS_ERR(f->fr_mr))
153 goto out_mr_err; 153 goto out_mr_err;
154 f->fr_pgl = ib_alloc_fast_reg_page_list(device, depth); 154 f->fr_pgl = ib_alloc_fast_reg_page_list(device, depth);
@@ -158,7 +158,7 @@ __frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
158 158
159out_mr_err: 159out_mr_err:
160 rc = PTR_ERR(f->fr_mr); 160 rc = PTR_ERR(f->fr_mr);
161 dprintk("RPC: %s: ib_alloc_fast_reg_mr status %i\n", 161 dprintk("RPC: %s: ib_alloc_mr status %i\n",
162 __func__, rc); 162 __func__, rc);
163 return rc; 163 return rc;
164 164
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 2e1348bde325..cb5174284074 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -115,15 +115,6 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
115 rqstp->rq_arg.tail[0].iov_len = 0; 115 rqstp->rq_arg.tail[0].iov_len = 0;
116} 116}
117 117
118static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
119{
120 if (!rdma_cap_read_multi_sge(xprt->sc_cm_id->device,
121 xprt->sc_cm_id->port_num))
122 return 1;
123 else
124 return min_t(int, sge_count, xprt->sc_max_sge);
125}
126
127/* Issue an RDMA_READ using the local lkey to map the data sink */ 118/* Issue an RDMA_READ using the local lkey to map the data sink */
128int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt, 119int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
129 struct svc_rqst *rqstp, 120 struct svc_rqst *rqstp,
@@ -144,8 +135,7 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
144 135
145 ctxt->direction = DMA_FROM_DEVICE; 136 ctxt->direction = DMA_FROM_DEVICE;
146 ctxt->read_hdr = head; 137 ctxt->read_hdr = head;
147 pages_needed = 138 pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd);
148 min_t(int, pages_needed, rdma_read_max_sge(xprt, pages_needed));
149 read = min_t(int, pages_needed << PAGE_SHIFT, rs_length); 139 read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
150 140
151 for (pno = 0; pno < pages_needed; pno++) { 141 for (pno = 0; pno < pages_needed; pno++) {
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 21e40365042c..fcc3eb80c265 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -734,17 +734,19 @@ static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
734 struct ib_mr *mr; 734 struct ib_mr *mr;
735 struct ib_fast_reg_page_list *pl; 735 struct ib_fast_reg_page_list *pl;
736 struct svc_rdma_fastreg_mr *frmr; 736 struct svc_rdma_fastreg_mr *frmr;
737 u32 num_sg;
737 738
738 frmr = kmalloc(sizeof(*frmr), GFP_KERNEL); 739 frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
739 if (!frmr) 740 if (!frmr)
740 goto err; 741 goto err;
741 742
742 mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES); 743 num_sg = min_t(u32, RPCSVC_MAXPAGES, xprt->sc_frmr_pg_list_len);
744 mr = ib_alloc_mr(xprt->sc_pd, IB_MR_TYPE_MEM_REG, num_sg);
743 if (IS_ERR(mr)) 745 if (IS_ERR(mr))
744 goto err_free_frmr; 746 goto err_free_frmr;
745 747
746 pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device, 748 pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
747 RPCSVC_MAXPAGES); 749 num_sg);
748 if (IS_ERR(pl)) 750 if (IS_ERR(pl))
749 goto err_free_mr; 751 goto err_free_mr;
750 752
@@ -873,6 +875,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
873 * capabilities of this particular device */ 875 * capabilities of this particular device */
874 newxprt->sc_max_sge = min((size_t)devattr.max_sge, 876 newxprt->sc_max_sge = min((size_t)devattr.max_sge,
875 (size_t)RPCSVC_MAXPAGES); 877 (size_t)RPCSVC_MAXPAGES);
878 newxprt->sc_max_sge_rd = min_t(size_t, devattr.max_sge_rd,
879 RPCSVC_MAXPAGES);
876 newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr, 880 newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
877 (size_t)svcrdma_max_requests); 881 (size_t)svcrdma_max_requests);
878 newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests; 882 newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;
@@ -1047,6 +1051,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
1047 " remote_ip : %pI4\n" 1051 " remote_ip : %pI4\n"
1048 " remote_port : %d\n" 1052 " remote_port : %d\n"
1049 " max_sge : %d\n" 1053 " max_sge : %d\n"
1054 " max_sge_rd : %d\n"
1050 " sq_depth : %d\n" 1055 " sq_depth : %d\n"
1051 " max_requests : %d\n" 1056 " max_requests : %d\n"
1052 " ord : %d\n", 1057 " ord : %d\n",
@@ -1060,6 +1065,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
1060 ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id-> 1065 ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
1061 route.addr.dst_addr)->sin_port), 1066 route.addr.dst_addr)->sin_port),
1062 newxprt->sc_max_sge, 1067 newxprt->sc_max_sge,
1068 newxprt->sc_max_sge_rd,
1063 newxprt->sc_sq_depth, 1069 newxprt->sc_sq_depth,
1064 newxprt->sc_max_requests, 1070 newxprt->sc_max_requests,
1065 newxprt->sc_ord); 1071 newxprt->sc_ord);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index f73d7a71035c..682996779970 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -611,7 +611,7 @@ rpcrdma_ia_close(struct rpcrdma_ia *ia)
611 611
612 /* If the pd is still busy, xprtrdma missed freeing a resource */ 612 /* If the pd is still busy, xprtrdma missed freeing a resource */
613 if (ia->ri_pd && !IS_ERR(ia->ri_pd)) 613 if (ia->ri_pd && !IS_ERR(ia->ri_pd))
614 WARN_ON(ib_dealloc_pd(ia->ri_pd)); 614 ib_dealloc_pd(ia->ri_pd);
615} 615}
616 616
617/* 617/*