aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAvinash Repaka <avinash.repaka@oracle.com>2016-03-01 18:20:54 -0500
committerDavid S. Miller <davem@davemloft.net>2016-03-02 14:13:19 -0500
commit1659185fb4d0025835eb2058a141f0746c5cab00 (patch)
treeb4cf4a644ba1fc2ba50cd478b1fa896bc61b1b2d
parentad6832f950d35df8c70b577993a24b31b34d88e4 (diff)
RDS: IB: Support Fastreg MR (FRMR) memory registration mode
Fastreg MR (FRMR) is another method with which one can register memory with the HCA. Some of the newer HCAs support only the fastreg MR mode, so we need to add support for it to keep RDS functional on them. Signed-off-by: Santosh Shilimkar <ssantosh@kernel.org> Signed-off-by: Avinash Repaka <avinash.repaka@oracle.com> Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--net/rds/Makefile2
-rw-r--r--net/rds/ib.h1
-rw-r--r--net/rds/ib_cm.c7
-rw-r--r--net/rds/ib_frmr.c376
-rw-r--r--net/rds/ib_mr.h24
-rw-r--r--net/rds/ib_rdma.c17
6 files changed, 422 insertions, 5 deletions
diff --git a/net/rds/Makefile b/net/rds/Makefile
index bcf5591b493a..0e72bec1529f 100644
--- a/net/rds/Makefile
+++ b/net/rds/Makefile
@@ -6,7 +6,7 @@ rds-y := af_rds.o bind.o cong.o connection.o info.o message.o \
6obj-$(CONFIG_RDS_RDMA) += rds_rdma.o 6obj-$(CONFIG_RDS_RDMA) += rds_rdma.o
7rds_rdma-y := rdma_transport.o \ 7rds_rdma-y := rdma_transport.o \
8 ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \ 8 ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \
9 ib_sysctl.o ib_rdma.o ib_fmr.o 9 ib_sysctl.o ib_rdma.o ib_fmr.o ib_frmr.o
10 10
11 11
12obj-$(CONFIG_RDS_TCP) += rds_tcp.o 12obj-$(CONFIG_RDS_TCP) += rds_tcp.o
diff --git a/net/rds/ib.h b/net/rds/ib.h
index eeb0d6c85cb3..627fb79aee65 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -349,6 +349,7 @@ int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
349void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn); 349void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
350void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn); 350void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
351void rds_ib_destroy_nodev_conns(void); 351void rds_ib_destroy_nodev_conns(void);
352void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
352 353
353/* ib_recv.c */ 354/* ib_recv.c */
354int rds_ib_recv_init(void); 355int rds_ib_recv_init(void);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 83f4673970e7..8764970f0c24 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -249,7 +249,12 @@ static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
249 (unsigned long long)wc->wr_id, wc->status, 249 (unsigned long long)wc->wr_id, wc->status,
250 wc->byte_len, be32_to_cpu(wc->ex.imm_data)); 250 wc->byte_len, be32_to_cpu(wc->ex.imm_data));
251 251
252 rds_ib_send_cqe_handler(ic, wc); 252 if (wc->wr_id <= ic->i_send_ring.w_nr ||
253 wc->wr_id == RDS_IB_ACK_WR_ID)
254 rds_ib_send_cqe_handler(ic, wc);
255 else
256 rds_ib_mr_cqe_handler(ic, wc);
257
253 } 258 }
254 } 259 }
255} 260}
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
new file mode 100644
index 000000000000..93ff038ea9d1
--- /dev/null
+++ b/net/rds/ib_frmr.c
@@ -0,0 +1,376 @@
1/*
2 * Copyright (c) 2016 Oracle. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "ib_mr.h"
34
35static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
36 int npages)
37{
38 struct rds_ib_mr_pool *pool;
39 struct rds_ib_mr *ibmr = NULL;
40 struct rds_ib_frmr *frmr;
41 int err = 0;
42
43 if (npages <= RDS_MR_8K_MSG_SIZE)
44 pool = rds_ibdev->mr_8k_pool;
45 else
46 pool = rds_ibdev->mr_1m_pool;
47
48 ibmr = rds_ib_try_reuse_ibmr(pool);
49 if (ibmr)
50 return ibmr;
51
52 ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
53 rdsibdev_to_node(rds_ibdev));
54 if (!ibmr) {
55 err = -ENOMEM;
56 goto out_no_cigar;
57 }
58
59 frmr = &ibmr->u.frmr;
60 frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,
61 pool->fmr_attr.max_pages);
62 if (IS_ERR(frmr->mr)) {
63 pr_warn("RDS/IB: %s failed to allocate MR", __func__);
64 goto out_no_cigar;
65 }
66
67 ibmr->pool = pool;
68 if (pool->pool_type == RDS_IB_MR_8K_POOL)
69 rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
70 else
71 rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);
72
73 if (atomic_read(&pool->item_count) > pool->max_items_soft)
74 pool->max_items_soft = pool->max_items;
75
76 frmr->fr_state = FRMR_IS_FREE;
77 return ibmr;
78
79out_no_cigar:
80 kfree(ibmr);
81 atomic_dec(&pool->item_count);
82 return ERR_PTR(err);
83}
84
85static void rds_ib_free_frmr(struct rds_ib_mr *ibmr, bool drop)
86{
87 struct rds_ib_mr_pool *pool = ibmr->pool;
88
89 if (drop)
90 llist_add(&ibmr->llnode, &pool->drop_list);
91 else
92 llist_add(&ibmr->llnode, &pool->free_list);
93 atomic_add(ibmr->sg_len, &pool->free_pinned);
94 atomic_inc(&pool->dirty_count);
95
96 /* If we've pinned too many pages, request a flush */
97 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
98 atomic_read(&pool->dirty_count) >= pool->max_items / 5)
99 queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
100}
101
102static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
103{
104 struct rds_ib_frmr *frmr = &ibmr->u.frmr;
105 struct ib_send_wr *failed_wr;
106 struct ib_reg_wr reg_wr;
107 int ret;
108
109 while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
110 atomic_inc(&ibmr->ic->i_fastreg_wrs);
111 cpu_relax();
112 }
113
114 ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len, PAGE_SIZE);
115 if (unlikely(ret != ibmr->sg_len))
116 return ret < 0 ? ret : -EINVAL;
117
118 /* Perform a WR for the fast_reg_mr. Each individual page
119 * in the sg list is added to the fast reg page list and placed
120 * inside the fast_reg_mr WR. The key used is a rolling 8bit
121 * counter, which should guarantee uniqueness.
122 */
123 ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
124 frmr->fr_state = FRMR_IS_INUSE;
125
126 memset(&reg_wr, 0, sizeof(reg_wr));
127 reg_wr.wr.wr_id = (unsigned long)(void *)ibmr;
128 reg_wr.wr.opcode = IB_WR_REG_MR;
129 reg_wr.wr.num_sge = 0;
130 reg_wr.mr = frmr->mr;
131 reg_wr.key = frmr->mr->rkey;
132 reg_wr.access = IB_ACCESS_LOCAL_WRITE |
133 IB_ACCESS_REMOTE_READ |
134 IB_ACCESS_REMOTE_WRITE;
135 reg_wr.wr.send_flags = IB_SEND_SIGNALED;
136
137 failed_wr = &reg_wr.wr;
138 ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, &failed_wr);
139 WARN_ON(failed_wr != &reg_wr.wr);
140 if (unlikely(ret)) {
141 /* Failure here can be because of -ENOMEM as well */
142 frmr->fr_state = FRMR_IS_STALE;
143 atomic_inc(&ibmr->ic->i_fastreg_wrs);
144 if (printk_ratelimit())
145 pr_warn("RDS/IB: %s returned error(%d)\n",
146 __func__, ret);
147 }
148 return ret;
149}
150
151static int rds_ib_map_frmr(struct rds_ib_device *rds_ibdev,
152 struct rds_ib_mr_pool *pool,
153 struct rds_ib_mr *ibmr,
154 struct scatterlist *sg, unsigned int sg_len)
155{
156 struct ib_device *dev = rds_ibdev->dev;
157 struct rds_ib_frmr *frmr = &ibmr->u.frmr;
158 int i;
159 u32 len;
160 int ret = 0;
161
162 /* We want to teardown old ibmr values here and fill it up with
163 * new sg values
164 */
165 rds_ib_teardown_mr(ibmr);
166
167 ibmr->sg = sg;
168 ibmr->sg_len = sg_len;
169 ibmr->sg_dma_len = 0;
170 frmr->sg_byte_len = 0;
171 WARN_ON(ibmr->sg_dma_len);
172 ibmr->sg_dma_len = ib_dma_map_sg(dev, ibmr->sg, ibmr->sg_len,
173 DMA_BIDIRECTIONAL);
174 if (unlikely(!ibmr->sg_dma_len)) {
175 pr_warn("RDS/IB: %s failed!\n", __func__);
176 return -EBUSY;
177 }
178
179 frmr->sg_byte_len = 0;
180 frmr->dma_npages = 0;
181 len = 0;
182
183 ret = -EINVAL;
184 for (i = 0; i < ibmr->sg_dma_len; ++i) {
185 unsigned int dma_len = ib_sg_dma_len(dev, &ibmr->sg[i]);
186 u64 dma_addr = ib_sg_dma_address(dev, &ibmr->sg[i]);
187
188 frmr->sg_byte_len += dma_len;
189 if (dma_addr & ~PAGE_MASK) {
190 if (i > 0)
191 goto out_unmap;
192 else
193 ++frmr->dma_npages;
194 }
195
196 if ((dma_addr + dma_len) & ~PAGE_MASK) {
197 if (i < ibmr->sg_dma_len - 1)
198 goto out_unmap;
199 else
200 ++frmr->dma_npages;
201 }
202
203 len += dma_len;
204 }
205 frmr->dma_npages += len >> PAGE_SHIFT;
206
207 if (frmr->dma_npages > ibmr->pool->fmr_attr.max_pages) {
208 ret = -EMSGSIZE;
209 goto out_unmap;
210 }
211
212 ret = rds_ib_post_reg_frmr(ibmr);
213 if (ret)
214 goto out_unmap;
215
216 if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
217 rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
218 else
219 rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
220
221 return ret;
222
223out_unmap:
224 ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len,
225 DMA_BIDIRECTIONAL);
226 ibmr->sg_dma_len = 0;
227 return ret;
228}
229
230static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
231{
232 struct ib_send_wr *s_wr, *failed_wr;
233 struct rds_ib_frmr *frmr = &ibmr->u.frmr;
234 struct rdma_cm_id *i_cm_id = ibmr->ic->i_cm_id;
235 int ret = -EINVAL;
236
237 if (!i_cm_id || !i_cm_id->qp || !frmr->mr)
238 goto out;
239
240 if (frmr->fr_state != FRMR_IS_INUSE)
241 goto out;
242
243 while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
244 atomic_inc(&ibmr->ic->i_fastreg_wrs);
245 cpu_relax();
246 }
247
248 frmr->fr_inv = true;
249 s_wr = &frmr->fr_wr;
250
251 memset(s_wr, 0, sizeof(*s_wr));
252 s_wr->wr_id = (unsigned long)(void *)ibmr;
253 s_wr->opcode = IB_WR_LOCAL_INV;
254 s_wr->ex.invalidate_rkey = frmr->mr->rkey;
255 s_wr->send_flags = IB_SEND_SIGNALED;
256
257 failed_wr = s_wr;
258 ret = ib_post_send(i_cm_id->qp, s_wr, &failed_wr);
259 WARN_ON(failed_wr != s_wr);
260 if (unlikely(ret)) {
261 frmr->fr_state = FRMR_IS_STALE;
262 frmr->fr_inv = false;
263 atomic_inc(&ibmr->ic->i_fastreg_wrs);
264 pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
265 goto out;
266 }
267out:
268 return ret;
269}
270
271void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
272{
273 struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id;
274 struct rds_ib_frmr *frmr = &ibmr->u.frmr;
275
276 if (wc->status != IB_WC_SUCCESS) {
277 frmr->fr_state = FRMR_IS_STALE;
278 if (rds_conn_up(ic->conn))
279 rds_ib_conn_error(ic->conn,
280 "frmr completion <%pI4,%pI4> status %u(%s), vendor_err 0x%x, disconnecting and reconnecting\n",
281 &ic->conn->c_laddr,
282 &ic->conn->c_faddr,
283 wc->status,
284 ib_wc_status_msg(wc->status),
285 wc->vendor_err);
286 }
287
288 if (frmr->fr_inv) {
289 frmr->fr_state = FRMR_IS_FREE;
290 frmr->fr_inv = false;
291 }
292
293 atomic_inc(&ic->i_fastreg_wrs);
294}
295
296void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
297 unsigned long *unpinned, unsigned int goal)
298{
299 struct rds_ib_mr *ibmr, *next;
300 struct rds_ib_frmr *frmr;
301 int ret = 0;
302 unsigned int freed = *nfreed;
303
304 /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
305 list_for_each_entry(ibmr, list, unmap_list) {
306 if (ibmr->sg_dma_len)
307 ret |= rds_ib_post_inv(ibmr);
308 }
309 if (ret)
310 pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, ret);
311
312 /* Now we can destroy the DMA mapping and unpin any pages */
313 list_for_each_entry_safe(ibmr, next, list, unmap_list) {
314 *unpinned += ibmr->sg_len;
315 frmr = &ibmr->u.frmr;
316 __rds_ib_teardown_mr(ibmr);
317 if (freed < goal || frmr->fr_state == FRMR_IS_STALE) {
318 /* Don't de-allocate if the MR is not free yet */
319 if (frmr->fr_state == FRMR_IS_INUSE)
320 continue;
321
322 if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
323 rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
324 else
325 rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
326 list_del(&ibmr->unmap_list);
327 if (frmr->mr)
328 ib_dereg_mr(frmr->mr);
329 kfree(ibmr);
330 freed++;
331 }
332 }
333 *nfreed = freed;
334}
335
336struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
337 struct rds_ib_connection *ic,
338 struct scatterlist *sg,
339 unsigned long nents, u32 *key)
340{
341 struct rds_ib_mr *ibmr = NULL;
342 struct rds_ib_frmr *frmr;
343 int ret;
344
345 do {
346 if (ibmr)
347 rds_ib_free_frmr(ibmr, true);
348 ibmr = rds_ib_alloc_frmr(rds_ibdev, nents);
349 if (IS_ERR(ibmr))
350 return ibmr;
351 frmr = &ibmr->u.frmr;
352 } while (frmr->fr_state != FRMR_IS_FREE);
353
354 ibmr->ic = ic;
355 ibmr->device = rds_ibdev;
356 ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents);
357 if (ret == 0) {
358 *key = frmr->mr->rkey;
359 } else {
360 rds_ib_free_frmr(ibmr, false);
361 ibmr = ERR_PTR(ret);
362 }
363
364 return ibmr;
365}
366
367void rds_ib_free_frmr_list(struct rds_ib_mr *ibmr)
368{
369 struct rds_ib_mr_pool *pool = ibmr->pool;
370 struct rds_ib_frmr *frmr = &ibmr->u.frmr;
371
372 if (frmr->fr_state == FRMR_IS_STALE)
373 llist_add(&ibmr->llnode, &pool->drop_list);
374 else
375 llist_add(&ibmr->llnode, &pool->free_list);
376}
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h
index 2f9b9c3afc59..1c754f4acbe5 100644
--- a/net/rds/ib_mr.h
+++ b/net/rds/ib_mr.h
@@ -48,6 +48,21 @@ struct rds_ib_fmr {
48 u64 *dma; 48 u64 *dma;
49}; 49};
50 50
51enum rds_ib_fr_state {
52 FRMR_IS_FREE, /* mr invalidated & ready for use */
53 FRMR_IS_INUSE, /* mr is in use or used & can be invalidated */
54 FRMR_IS_STALE, /* Stale MR and needs to be dropped */
55};
56
57struct rds_ib_frmr {
58 struct ib_mr *mr;
59 enum rds_ib_fr_state fr_state;
60 bool fr_inv;
61 struct ib_send_wr fr_wr;
62 unsigned int dma_npages;
63 unsigned int sg_byte_len;
64};
65
51/* This is stored as mr->r_trans_private. */ 66/* This is stored as mr->r_trans_private. */
52struct rds_ib_mr { 67struct rds_ib_mr {
53 struct rds_ib_device *device; 68 struct rds_ib_device *device;
@@ -66,6 +81,7 @@ struct rds_ib_mr {
66 81
67 union { 82 union {
68 struct rds_ib_fmr fmr; 83 struct rds_ib_fmr fmr;
84 struct rds_ib_frmr frmr;
69 } u; 85 } u;
70}; 86};
71 87
@@ -88,6 +104,7 @@ struct rds_ib_mr_pool {
88 unsigned long max_items_soft; 104 unsigned long max_items_soft;
89 unsigned long max_free_pinned; 105 unsigned long max_free_pinned;
90 struct ib_fmr_attr fmr_attr; 106 struct ib_fmr_attr fmr_attr;
107 bool use_fastreg;
91}; 108};
92 109
93extern struct workqueue_struct *rds_ib_mr_wq; 110extern struct workqueue_struct *rds_ib_mr_wq;
@@ -121,4 +138,11 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *);
121void rds_ib_unreg_fmr(struct list_head *, unsigned int *, 138void rds_ib_unreg_fmr(struct list_head *, unsigned int *,
122 unsigned long *, unsigned int); 139 unsigned long *, unsigned int);
123void rds_ib_free_fmr_list(struct rds_ib_mr *); 140void rds_ib_free_fmr_list(struct rds_ib_mr *);
141struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
142 struct rds_ib_connection *ic,
143 struct scatterlist *sg,
144 unsigned long nents, u32 *key);
145void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
146 unsigned long *unpinned, unsigned int goal);
147void rds_ib_free_frmr_list(struct rds_ib_mr *);
124#endif 148#endif
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index ec7ea32265c0..f7164ac1ffc1 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -397,7 +397,10 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
397 if (list_empty(&unmap_list)) 397 if (list_empty(&unmap_list))
398 goto out; 398 goto out;
399 399
400 rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal); 400 if (pool->use_fastreg)
401 rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);
402 else
403 rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);
401 404
402 if (!list_empty(&unmap_list)) { 405 if (!list_empty(&unmap_list)) {
403 /* we have to make sure that none of the things we're about 406 /* we have to make sure that none of the things we're about
@@ -489,7 +492,10 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
489 rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len); 492 rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);
490 493
491 /* Return it to the pool's free list */ 494 /* Return it to the pool's free list */
492 rds_ib_free_fmr_list(ibmr); 495 if (rds_ibdev->use_fastreg)
496 rds_ib_free_frmr_list(ibmr);
497 else
498 rds_ib_free_fmr_list(ibmr);
493 499
494 atomic_add(ibmr->sg_len, &pool->free_pinned); 500 atomic_add(ibmr->sg_len, &pool->free_pinned);
495 atomic_inc(&pool->dirty_count); 501 atomic_inc(&pool->dirty_count);
@@ -534,6 +540,7 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
534{ 540{
535 struct rds_ib_device *rds_ibdev; 541 struct rds_ib_device *rds_ibdev;
536 struct rds_ib_mr *ibmr = NULL; 542 struct rds_ib_mr *ibmr = NULL;
543 struct rds_ib_connection *ic = rs->rs_conn->c_transport_data;
537 int ret; 544 int ret;
538 545
539 rds_ibdev = rds_ib_get_device(rs->rs_bound_addr); 546 rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
@@ -547,7 +554,10 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
547 goto out; 554 goto out;
548 } 555 }
549 556
550 ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret); 557 if (rds_ibdev->use_fastreg)
558 ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
559 else
560 ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
551 if (ibmr) 561 if (ibmr)
552 rds_ibdev = NULL; 562 rds_ibdev = NULL;
553 563
@@ -601,6 +611,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
601 pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps; 611 pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
602 pool->fmr_attr.page_shift = PAGE_SHIFT; 612 pool->fmr_attr.page_shift = PAGE_SHIFT;
603 pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4; 613 pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
614 pool->use_fastreg = rds_ibdev->use_fastreg;
604 615
605 return pool; 616 return pool;
606} 617}