 net/sunrpc/xprtrdma/verbs.c | 365 ++++++++++++++++++++++++++-----------------
 1 file changed, 207 insertions(+), 158 deletions(-)
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 8ea283ecc522..d04208a02f67 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -863,6 +863,7 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
         char *p;
         size_t len;
         int i, rc;
+        struct rpcrdma_mw *r;
 
         buf->rb_max_requests = cdata->max_requests;
         spin_lock_init(&buf->rb_lock);
@@ -873,7 +874,7 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
          * 2. arrays of struct rpcrdma_req to fill in pointers
          * 3. array of struct rpcrdma_rep for replies
          * 4. padding, if any
-         * 5. mw's, if any
+         * 5. mw's or fmr's, if any
          * Send/recv buffers in req/rep need to be registered
          */
 
@@ -927,15 +928,13 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
          * and also reduce unbind-to-bind collision.
          */
         INIT_LIST_HEAD(&buf->rb_mws);
+        r = (struct rpcrdma_mw *)p;
         switch (ia->ri_memreg_strategy) {
         case RPCRDMA_MTHCAFMR:
-                {
-                struct rpcrdma_mw *r = (struct rpcrdma_mw *)p;
-                struct ib_fmr_attr fa = {
-                        RPCRDMA_MAX_DATA_SEGS, 1, PAGE_SHIFT
-                };
                 /* TBD we are perhaps overallocating here */
                 for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) {
+                        static struct ib_fmr_attr fa =
+                                { RPCRDMA_MAX_DATA_SEGS, 1, PAGE_SHIFT };
                         r->r.fmr = ib_alloc_fmr(ia->ri_pd,
                                 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ,
                                 &fa);
@@ -948,12 +947,9 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
                         list_add(&r->mw_list, &buf->rb_mws);
                         ++r;
                 }
-                }
                 break;
         case RPCRDMA_MEMWINDOWS_ASYNC:
         case RPCRDMA_MEMWINDOWS:
-                {
-                struct rpcrdma_mw *r = (struct rpcrdma_mw *)p;
                 /* Allocate one extra request's worth, for full cycling */
                 for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) {
                         r->r.mw = ib_alloc_mw(ia->ri_pd);
@@ -966,7 +962,6 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
                         list_add(&r->mw_list, &buf->rb_mws);
                         ++r;
                 }
-                }
                 break;
         default:
                 break;
@@ -1046,6 +1041,7 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 {
         int rc, i;
         struct rpcrdma_ia *ia = rdmab_to_ia(buf);
+        struct rpcrdma_mw *r;
 
         /* clean up in reverse order from create
          * 1. recv mr memory (mr free, then kfree)
@@ -1065,7 +1061,6 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
                 }
                 if (buf->rb_send_bufs && buf->rb_send_bufs[i]) {
                         while (!list_empty(&buf->rb_mws)) {
-                                struct rpcrdma_mw *r;
                                 r = list_entry(buf->rb_mws.next,
                                         struct rpcrdma_mw, mw_list);
                                 list_del(&r->mw_list);
@@ -1115,6 +1110,8 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
 {
         struct rpcrdma_req *req;
         unsigned long flags;
+        int i;
+        struct rpcrdma_mw *r;
 
         spin_lock_irqsave(&buffers->rb_lock, flags);
         if (buffers->rb_send_index == buffers->rb_max_requests) {
@@ -1135,9 +1132,8 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
         }
         buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;
         if (!list_empty(&buffers->rb_mws)) {
-                int i = RPCRDMA_MAX_SEGS - 1;
+                i = RPCRDMA_MAX_SEGS - 1;
                 do {
-                        struct rpcrdma_mw *r;
                         r = list_entry(buffers->rb_mws.next,
                                         struct rpcrdma_mw, mw_list);
                         list_del(&r->mw_list);
@@ -1329,15 +1325,202 @@ rpcrdma_unmap_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg)
                 seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
 }
 
+static int
+rpcrdma_register_fmr_external(struct rpcrdma_mr_seg *seg,
+                        int *nsegs, int writing, struct rpcrdma_ia *ia)
+{
+        struct rpcrdma_mr_seg *seg1 = seg;
+        u64 physaddrs[RPCRDMA_MAX_DATA_SEGS];
+        int len, pageoff, i, rc;
+
+        pageoff = offset_in_page(seg1->mr_offset);
+        seg1->mr_offset -= pageoff;     /* start of page */
+        seg1->mr_len += pageoff;
+        len = -pageoff;
+        if (*nsegs > RPCRDMA_MAX_DATA_SEGS)
+                *nsegs = RPCRDMA_MAX_DATA_SEGS;
+        for (i = 0; i < *nsegs;) {
+                rpcrdma_map_one(ia, seg, writing);
+                physaddrs[i] = seg->mr_dma;
+                len += seg->mr_len;
+                ++seg;
+                ++i;
+                /* Check for holes */
+                if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
+                    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
+                        break;
+        }
+        rc = ib_map_phys_fmr(seg1->mr_chunk.rl_mw->r.fmr,
+                                physaddrs, i, seg1->mr_dma);
+        if (rc) {
+                dprintk("RPC: %s: failed ib_map_phys_fmr "
+                        "%u@0x%llx+%i (%d)... status %i\n", __func__,
+                        len, (unsigned long long)seg1->mr_dma,
+                        pageoff, i, rc);
+                while (i--)
+                        rpcrdma_unmap_one(ia, --seg);
+        } else {
+                seg1->mr_rkey = seg1->mr_chunk.rl_mw->r.fmr->rkey;
+                seg1->mr_base = seg1->mr_dma + pageoff;
+                seg1->mr_nsegs = i;
+                seg1->mr_len = len;
+        }
+        *nsegs = i;
+        return rc;
+}
+
+static int
+rpcrdma_deregister_fmr_external(struct rpcrdma_mr_seg *seg,
+                        struct rpcrdma_ia *ia)
+{
+        struct rpcrdma_mr_seg *seg1 = seg;
+        LIST_HEAD(l);
+        int rc;
+
+        list_add(&seg1->mr_chunk.rl_mw->r.fmr->list, &l);
+        rc = ib_unmap_fmr(&l);
+        while (seg1->mr_nsegs--)
+                rpcrdma_unmap_one(ia, seg++);
+        if (rc)
+                dprintk("RPC: %s: failed ib_unmap_fmr,"
+                        " status %i\n", __func__, rc);
+        return rc;
+}
+
+static int
+rpcrdma_register_memwin_external(struct rpcrdma_mr_seg *seg,
+                        int *nsegs, int writing, struct rpcrdma_ia *ia,
+                        struct rpcrdma_xprt *r_xprt)
+{
+        int mem_priv = (writing ? IB_ACCESS_REMOTE_WRITE :
+                                  IB_ACCESS_REMOTE_READ);
+        struct ib_mw_bind param;
+        int rc;
+
+        *nsegs = 1;
+        rpcrdma_map_one(ia, seg, writing);
+        param.mr = ia->ri_bind_mem;
+        param.wr_id = 0ULL;     /* no send cookie */
+        param.addr = seg->mr_dma;
+        param.length = seg->mr_len;
+        param.send_flags = 0;
+        param.mw_access_flags = mem_priv;
+
+        DECR_CQCOUNT(&r_xprt->rx_ep);
+        rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, &param);
+        if (rc) {
+                dprintk("RPC: %s: failed ib_bind_mw "
+                        "%u@0x%llx status %i\n",
+                        __func__, seg->mr_len,
+                        (unsigned long long)seg->mr_dma, rc);
+                rpcrdma_unmap_one(ia, seg);
+        } else {
+                seg->mr_rkey = seg->mr_chunk.rl_mw->r.mw->rkey;
+                seg->mr_base = param.addr;
+                seg->mr_nsegs = 1;
+        }
+        return rc;
+}
+
+static int
+rpcrdma_deregister_memwin_external(struct rpcrdma_mr_seg *seg,
+                        struct rpcrdma_ia *ia,
+                        struct rpcrdma_xprt *r_xprt, void **r)
+{
+        struct ib_mw_bind param;
+        LIST_HEAD(l);
+        int rc;
+
+        BUG_ON(seg->mr_nsegs != 1);
+        param.mr = ia->ri_bind_mem;
+        param.addr = 0ULL;      /* unbind */
+        param.length = 0;
+        param.mw_access_flags = 0;
+        if (*r) {
+                param.wr_id = (u64) (unsigned long) *r;
+                param.send_flags = IB_SEND_SIGNALED;
+                INIT_CQCOUNT(&r_xprt->rx_ep);
+        } else {
+                param.wr_id = 0ULL;
+                param.send_flags = 0;
+                DECR_CQCOUNT(&r_xprt->rx_ep);
+        }
+        rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, &param);
+        rpcrdma_unmap_one(ia, seg);
+        if (rc)
+                dprintk("RPC: %s: failed ib_(un)bind_mw,"
+                        " status %i\n", __func__, rc);
+        else
+                *r = NULL;      /* will upcall on completion */
+        return rc;
+}
+
+static int
+rpcrdma_register_default_external(struct rpcrdma_mr_seg *seg,
+                        int *nsegs, int writing, struct rpcrdma_ia *ia)
+{
+        int mem_priv = (writing ? IB_ACCESS_REMOTE_WRITE :
+                                  IB_ACCESS_REMOTE_READ);
+        struct rpcrdma_mr_seg *seg1 = seg;
+        struct ib_phys_buf ipb[RPCRDMA_MAX_DATA_SEGS];
+        int len, i, rc = 0;
+
+        if (*nsegs > RPCRDMA_MAX_DATA_SEGS)
+                *nsegs = RPCRDMA_MAX_DATA_SEGS;
+        for (len = 0, i = 0; i < *nsegs;) {
+                rpcrdma_map_one(ia, seg, writing);
+                ipb[i].addr = seg->mr_dma;
+                ipb[i].size = seg->mr_len;
+                len += seg->mr_len;
+                ++seg;
+                ++i;
+                /* Check for holes */
+                if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
+                    offset_in_page((seg-1)->mr_offset+(seg-1)->mr_len))
+                        break;
+        }
+        seg1->mr_base = seg1->mr_dma;
+        seg1->mr_chunk.rl_mr = ib_reg_phys_mr(ia->ri_pd,
+                                ipb, i, mem_priv, &seg1->mr_base);
+        if (IS_ERR(seg1->mr_chunk.rl_mr)) {
+                rc = PTR_ERR(seg1->mr_chunk.rl_mr);
+                dprintk("RPC: %s: failed ib_reg_phys_mr "
+                        "%u@0x%llx (%d)... status %i\n",
+                        __func__, len,
+                        (unsigned long long)seg1->mr_dma, i, rc);
+                while (i--)
+                        rpcrdma_unmap_one(ia, --seg);
+        } else {
+                seg1->mr_rkey = seg1->mr_chunk.rl_mr->rkey;
+                seg1->mr_nsegs = i;
+                seg1->mr_len = len;
+        }
+        *nsegs = i;
+        return rc;
+}
+
+static int
+rpcrdma_deregister_default_external(struct rpcrdma_mr_seg *seg,
+                        struct rpcrdma_ia *ia)
+{
+        struct rpcrdma_mr_seg *seg1 = seg;
+        int rc;
+
+        rc = ib_dereg_mr(seg1->mr_chunk.rl_mr);
+        seg1->mr_chunk.rl_mr = NULL;
+        while (seg1->mr_nsegs--)
+                rpcrdma_unmap_one(ia, seg++);
+        if (rc)
+                dprintk("RPC: %s: failed ib_dereg_mr,"
+                        " status %i\n", __func__, rc);
+        return rc;
+}
+
 int
 rpcrdma_register_external(struct rpcrdma_mr_seg *seg,
                 int nsegs, int writing, struct rpcrdma_xprt *r_xprt)
 {
         struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-        int mem_priv = (writing ? IB_ACCESS_REMOTE_WRITE :
-                                  IB_ACCESS_REMOTE_READ);
-        struct rpcrdma_mr_seg *seg1 = seg;
-        int i;
         int rc = 0;
 
         switch (ia->ri_memreg_strategy) {
@@ -1352,114 +1535,20 @@ rpcrdma_register_external(struct rpcrdma_mr_seg *seg,
                 break;
 #endif
 
-        /* Registration using fast memory registration */
+        /* Registration using fmr memory registration */
         case RPCRDMA_MTHCAFMR:
-                {
-                u64 physaddrs[RPCRDMA_MAX_DATA_SEGS];
-                int len, pageoff = offset_in_page(seg->mr_offset);
-                seg1->mr_offset -= pageoff;     /* start of page */
-                seg1->mr_len += pageoff;
-                len = -pageoff;
-                if (nsegs > RPCRDMA_MAX_DATA_SEGS)
-                        nsegs = RPCRDMA_MAX_DATA_SEGS;
-                for (i = 0; i < nsegs;) {
-                        rpcrdma_map_one(ia, seg, writing);
-                        physaddrs[i] = seg->mr_dma;
-                        len += seg->mr_len;
-                        ++seg;
-                        ++i;
-                        /* Check for holes */
-                        if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
-                            offset_in_page((seg-1)->mr_offset+(seg-1)->mr_len))
-                                break;
-                }
-                nsegs = i;
-                rc = ib_map_phys_fmr(seg1->mr_chunk.rl_mw->r.fmr,
-                                        physaddrs, nsegs, seg1->mr_dma);
-                if (rc) {
-                        dprintk("RPC: %s: failed ib_map_phys_fmr "
-                                "%u@0x%llx+%i (%d)... status %i\n", __func__,
-                                len, (unsigned long long)seg1->mr_dma,
-                                pageoff, nsegs, rc);
-                        while (nsegs--)
-                                rpcrdma_unmap_one(ia, --seg);
-                } else {
-                        seg1->mr_rkey = seg1->mr_chunk.rl_mw->r.fmr->rkey;
-                        seg1->mr_base = seg1->mr_dma + pageoff;
-                        seg1->mr_nsegs = nsegs;
-                        seg1->mr_len = len;
-                }
-                }
+                rc = rpcrdma_register_fmr_external(seg, &nsegs, writing, ia);
                 break;
 
         /* Registration using memory windows */
         case RPCRDMA_MEMWINDOWS_ASYNC:
         case RPCRDMA_MEMWINDOWS:
-                {
-                struct ib_mw_bind param;
-                rpcrdma_map_one(ia, seg, writing);
-                param.mr = ia->ri_bind_mem;
-                param.wr_id = 0ULL;     /* no send cookie */
-                param.addr = seg->mr_dma;
-                param.length = seg->mr_len;
-                param.send_flags = 0;
-                param.mw_access_flags = mem_priv;
-
-                DECR_CQCOUNT(&r_xprt->rx_ep);
-                rc = ib_bind_mw(ia->ri_id->qp,
-                                        seg->mr_chunk.rl_mw->r.mw, &param);
-                if (rc) {
-                        dprintk("RPC: %s: failed ib_bind_mw "
-                                "%u@0x%llx status %i\n",
-                                __func__, seg->mr_len,
-                                (unsigned long long)seg->mr_dma, rc);
-                        rpcrdma_unmap_one(ia, seg);
-                } else {
-                        seg->mr_rkey = seg->mr_chunk.rl_mw->r.mw->rkey;
-                        seg->mr_base = param.addr;
-                        seg->mr_nsegs = 1;
-                        nsegs = 1;
-                }
-                }
+                rc = rpcrdma_register_memwin_external(seg, &nsegs, writing, ia, r_xprt);
                 break;
 
         /* Default registration each time */
         default:
-                {
-                struct ib_phys_buf ipb[RPCRDMA_MAX_DATA_SEGS];
-                int len = 0;
-                if (nsegs > RPCRDMA_MAX_DATA_SEGS)
-                        nsegs = RPCRDMA_MAX_DATA_SEGS;
-                for (i = 0; i < nsegs;) {
-                        rpcrdma_map_one(ia, seg, writing);
-                        ipb[i].addr = seg->mr_dma;
-                        ipb[i].size = seg->mr_len;
-                        len += seg->mr_len;
-                        ++seg;
-                        ++i;
-                        /* Check for holes */
-                        if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
-                            offset_in_page((seg-1)->mr_offset+(seg-1)->mr_len))
-                                break;
-                }
-                nsegs = i;
-                seg1->mr_base = seg1->mr_dma;
-                seg1->mr_chunk.rl_mr = ib_reg_phys_mr(ia->ri_pd,
-                                ipb, nsegs, mem_priv, &seg1->mr_base);
-                if (IS_ERR(seg1->mr_chunk.rl_mr)) {
-                        rc = PTR_ERR(seg1->mr_chunk.rl_mr);
-                        dprintk("RPC: %s: failed ib_reg_phys_mr "
-                                "%u@0x%llx (%d)... status %i\n",
-                                __func__, len,
-                                (unsigned long long)seg1->mr_dma, nsegs, rc);
-                        while (nsegs--)
-                                rpcrdma_unmap_one(ia, --seg);
-                } else {
-                        seg1->mr_rkey = seg1->mr_chunk.rl_mr->rkey;
-                        seg1->mr_nsegs = nsegs;
-                        seg1->mr_len = len;
-                }
-                }
+                rc = rpcrdma_register_default_external(seg, &nsegs, writing, ia);
                 break;
         }
         if (rc)
@@ -1473,7 +1562,6 @@ rpcrdma_deregister_external(struct rpcrdma_mr_seg *seg,
                 struct rpcrdma_xprt *r_xprt, void *r)
 {
         struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-        struct rpcrdma_mr_seg *seg1 = seg;
         int nsegs = seg->mr_nsegs, rc;
 
         switch (ia->ri_memreg_strategy) {
@@ -1487,55 +1575,16 @@ rpcrdma_deregister_external(struct rpcrdma_mr_seg *seg,
 #endif
 
         case RPCRDMA_MTHCAFMR:
-                {
-                LIST_HEAD(l);
-                list_add(&seg->mr_chunk.rl_mw->r.fmr->list, &l);
-                rc = ib_unmap_fmr(&l);
-                while (seg1->mr_nsegs--)
-                        rpcrdma_unmap_one(ia, seg++);
-                }
-                if (rc)
-                        dprintk("RPC: %s: failed ib_unmap_fmr,"
-                                " status %i\n", __func__, rc);
+                rc = rpcrdma_deregister_fmr_external(seg, ia);
                 break;
 
         case RPCRDMA_MEMWINDOWS_ASYNC:
         case RPCRDMA_MEMWINDOWS:
-                {
-                struct ib_mw_bind param;
-                BUG_ON(nsegs != 1);
-                param.mr = ia->ri_bind_mem;
-                param.addr = 0ULL;      /* unbind */
-                param.length = 0;
-                param.mw_access_flags = 0;
-                if (r) {
-                        param.wr_id = (u64) (unsigned long) r;
-                        param.send_flags = IB_SEND_SIGNALED;
-                        INIT_CQCOUNT(&r_xprt->rx_ep);
-                } else {
-                        param.wr_id = 0ULL;
-                        param.send_flags = 0;
-                        DECR_CQCOUNT(&r_xprt->rx_ep);
-                }
-                rc = ib_bind_mw(ia->ri_id->qp,
-                                seg->mr_chunk.rl_mw->r.mw, &param);
-                rpcrdma_unmap_one(ia, seg);
-                }
-                if (rc)
-                        dprintk("RPC: %s: failed ib_(un)bind_mw,"
-                                " status %i\n", __func__, rc);
-                else
-                        r = NULL;       /* will upcall on completion */
+                rc = rpcrdma_deregister_memwin_external(seg, ia, r_xprt, &r);
                 break;
 
         default:
-                rc = ib_dereg_mr(seg1->mr_chunk.rl_mr);
-                seg1->mr_chunk.rl_mr = NULL;
-                while (seg1->mr_nsegs--)
-                        rpcrdma_unmap_one(ia, seg++);
-                if (rc)
-                        dprintk("RPC: %s: failed ib_dereg_mr,"
-                                " status %i\n", __func__, rc);
+                rc = rpcrdma_deregister_default_external(seg, ia);
                 break;
         }
         if (r) {