Diffstat (limited to 'drivers/infiniband/ulp/srp/ib_srp.c')
 -rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 262
 1 file changed, 155 insertions(+), 107 deletions(-)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index b481490ad257..32f79624dd28 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -340,8 +340,6 @@ static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
 		return;
 
 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
-		if (d->frpl)
-			ib_free_fast_reg_page_list(d->frpl);
 		if (d->mr)
 			ib_dereg_mr(d->mr);
 	}
@@ -362,7 +360,6 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
 	struct srp_fr_pool *pool;
 	struct srp_fr_desc *d;
 	struct ib_mr *mr;
-	struct ib_fast_reg_page_list *frpl;
 	int i, ret = -EINVAL;
 
 	if (pool_size <= 0)
@@ -385,12 +382,6 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
 			goto destroy_pool;
 		}
 		d->mr = mr;
-		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
-		if (IS_ERR(frpl)) {
-			ret = PTR_ERR(frpl);
-			goto destroy_pool;
-		}
-		d->frpl = frpl;
 		list_add_tail(&d->entry, &pool->free_list);
 	}
 
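Note: the separate page-list allocation disappears here because, with the new registration API, the page list lives inside the struct ib_mr itself; the pool's MR (allocated just above this hunk, presumably via ib_alloc_mr()) already carries it. A minimal sketch of what such an allocation looks like -- the helper name is hypothetical, only ib_alloc_mr() is the real API:

    /*
     * Sketch: one ib_alloc_mr() call replaces the old
     * ib_alloc_fast_reg_mr() + ib_alloc_fast_reg_page_list() pair;
     * the driver sizes the MR's internal page list from max_num_sg.
     */
    static struct ib_mr *srp_fr_mr_alloc_sketch(struct ib_pd *pd,
                                                int max_page_list_len)
    {
            return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_page_list_len);
    }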
@@ -849,11 +840,12 @@ static void srp_free_req_data(struct srp_target_port *target,
 
 	for (i = 0; i < target->req_ring_size; ++i) {
 		req = &ch->req_ring[i];
-		if (dev->use_fast_reg)
+		if (dev->use_fast_reg) {
 			kfree(req->fr_list);
-		else
+		} else {
 			kfree(req->fmr_list);
-		kfree(req->map_page);
+			kfree(req->map_page);
+		}
 		if (req->indirect_dma_addr) {
 			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
 					    target->indirect_size,
@@ -887,14 +879,15 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
 						  GFP_KERNEL);
 		if (!mr_list)
 			goto out;
-		if (srp_dev->use_fast_reg)
+		if (srp_dev->use_fast_reg) {
 			req->fr_list = mr_list;
-		else
+		} else {
 			req->fmr_list = mr_list;
-		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
-					sizeof(void *), GFP_KERNEL);
-		if (!req->map_page)
-			goto out;
+			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
+						sizeof(void *), GFP_KERNEL);
+			if (!req->map_page)
+				goto out;
+		}
 		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
 		if (!req->indirect_desc)
 			goto out;
@@ -1286,6 +1279,17 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
 	if (state->fmr.next >= state->fmr.end)
 		return -ENOMEM;
 
+	WARN_ON_ONCE(!dev->use_fmr);
+
+	if (state->npages == 0)
+		return 0;
+
+	if (state->npages == 1 && target->global_mr) {
+		srp_map_desc(state, state->base_dma_addr, state->dma_len,
+			     target->global_mr->rkey);
+		goto reset_state;
+	}
+
 	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
 				   state->npages, io_addr);
 	if (IS_ERR(fmr))
@@ -1297,6 +1301,10 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
 	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
 		     state->dma_len, fmr->fmr->rkey);
 
+reset_state:
+	state->npages = 0;
+	state->dma_len = 0;
+
 	return 0;
 }
 
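Note: taken together, the two hunks above fold what srp_finish_mapping() used to do (see its removal further down) into the FMR path itself: bail out early when nothing has been accumulated, use the global rkey for a single-page mapping, and reset the accumulation state in one place. Condensed control flow, for orientation only:

    /* Shape of srp_map_finish_fmr() after this patch (illustrative): */
    if (state->npages == 0)
            return 0;                       /* nothing to register */
    if (state->npages == 1 && target->global_mr) {
            /* describe it with the global rkey, no FMR needed */
            goto reset_state;
    }
    /* ...ib_fmr_pool_map_phys() + srp_map_desc() as before... */
    reset_state:
            state->npages = 0;
            state->dma_len = 0;
            return 0;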
@@ -1306,13 +1314,26 @@ static int srp_map_finish_fr(struct srp_map_state *state,
 	struct srp_target_port *target = ch->target;
 	struct srp_device *dev = target->srp_host->srp_dev;
 	struct ib_send_wr *bad_wr;
-	struct ib_send_wr wr;
+	struct ib_reg_wr wr;
 	struct srp_fr_desc *desc;
 	u32 rkey;
+	int n, err;
 
 	if (state->fr.next >= state->fr.end)
 		return -ENOMEM;
 
+	WARN_ON_ONCE(!dev->use_fast_reg);
+
+	if (state->sg_nents == 0)
+		return 0;
+
+	if (state->sg_nents == 1 && target->global_mr) {
+		srp_map_desc(state, sg_dma_address(state->sg),
+			     sg_dma_len(state->sg),
+			     target->global_mr->rkey);
+		return 1;
+	}
+
 	desc = srp_fr_pool_get(ch->fr_pool);
 	if (!desc)
 		return -ENOMEM;
@@ -1320,56 +1341,33 @@ static int srp_map_finish_fr(struct srp_map_state *state,
 	rkey = ib_inc_rkey(desc->mr->rkey);
 	ib_update_fast_reg_key(desc->mr, rkey);
 
-	memcpy(desc->frpl->page_list, state->pages,
-	       sizeof(state->pages[0]) * state->npages);
-
-	memset(&wr, 0, sizeof(wr));
-	wr.opcode = IB_WR_FAST_REG_MR;
-	wr.wr_id = FAST_REG_WR_ID_MASK;
-	wr.wr.fast_reg.iova_start = state->base_dma_addr;
-	wr.wr.fast_reg.page_list = desc->frpl;
-	wr.wr.fast_reg.page_list_len = state->npages;
-	wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
-	wr.wr.fast_reg.length = state->dma_len;
-	wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
-				       IB_ACCESS_REMOTE_READ |
-				       IB_ACCESS_REMOTE_WRITE);
-	wr.wr.fast_reg.rkey = desc->mr->lkey;
+	n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents,
+			 dev->mr_page_size);
+	if (unlikely(n < 0))
+		return n;
+
+	wr.wr.next = NULL;
+	wr.wr.opcode = IB_WR_REG_MR;
+	wr.wr.wr_id = FAST_REG_WR_ID_MASK;
+	wr.wr.num_sge = 0;
+	wr.wr.send_flags = 0;
+	wr.mr = desc->mr;
+	wr.key = desc->mr->rkey;
+	wr.access = (IB_ACCESS_LOCAL_WRITE |
+		     IB_ACCESS_REMOTE_READ |
+		     IB_ACCESS_REMOTE_WRITE);
 
 	*state->fr.next++ = desc;
 	state->nmdesc++;
 
-	srp_map_desc(state, state->base_dma_addr, state->dma_len,
-		     desc->mr->rkey);
+	srp_map_desc(state, desc->mr->iova,
+		     desc->mr->length, desc->mr->rkey);
 
-	return ib_post_send(ch->qp, &wr, &bad_wr);
-}
-
-static int srp_finish_mapping(struct srp_map_state *state,
-			      struct srp_rdma_ch *ch)
-{
-	struct srp_target_port *target = ch->target;
-	struct srp_device *dev = target->srp_host->srp_dev;
-	int ret = 0;
-
-	WARN_ON_ONCE(!dev->use_fast_reg && !dev->use_fmr);
-
-	if (state->npages == 0)
-		return 0;
-
-	if (state->npages == 1 && target->global_mr)
-		srp_map_desc(state, state->base_dma_addr, state->dma_len,
-			     target->global_mr->rkey);
-	else
-		ret = dev->use_fast_reg ? srp_map_finish_fr(state, ch) :
-			srp_map_finish_fmr(state, ch);
-
-	if (ret == 0) {
-		state->npages = 0;
-		state->dma_len = 0;
-	}
-
-	return ret;
+	err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
+	if (unlikely(err))
+		return err;
+
+	return n;
 }
 
 static int srp_map_sg_entry(struct srp_map_state *state,
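Note: this hunk is the heart of the conversion. Instead of copying a page array into a driver-owned ib_fast_reg_page_list and posting IB_WR_FAST_REG_MR, the function hands the scatterlist to ib_map_mr_sg() and posts an IB_WR_REG_MR work request described by struct ib_reg_wr. A stripped-down sketch of that flow (the helper name and trimmed error handling are illustrative; the API calls match the hunk above):

    static int example_fr_register(struct ib_qp *qp, struct ib_mr *mr,
                                   struct scatterlist *sg, int sg_nents,
                                   unsigned int page_size)
    {
            struct ib_send_wr *bad_wr;
            struct ib_reg_wr wr;
            int n, err;

            /* Build the MR's internal page list from the SG list. */
            n = ib_map_mr_sg(mr, sg, sg_nents, page_size);
            if (n < 0)
                    return n;

            memset(&wr, 0, sizeof(wr));
            wr.wr.opcode = IB_WR_REG_MR;
            wr.mr = mr;
            wr.key = mr->rkey;
            wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
                        IB_ACCESS_REMOTE_WRITE;

            /* Once this executes, mr->iova/mr->length describe the region. */
            err = ib_post_send(qp, &wr.wr, &bad_wr);
            if (err)
                    return err;

            return n;       /* number of SG entries consumed */
    }

Note the changed contract: a positive return value now means "n scatterlist entries were covered", which srp_map_sg_fr() below uses to advance through the list; the old code could only succeed or fail.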
@@ -1389,7 +1387,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 	while (dma_len) {
 		unsigned offset = dma_addr & ~dev->mr_page_mask;
 		if (state->npages == dev->max_pages_per_mr || offset != 0) {
-			ret = srp_finish_mapping(state, ch);
+			ret = srp_map_finish_fmr(state, ch);
 			if (ret)
 				return ret;
 		}
@@ -1411,51 +1409,83 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 	 */
 	ret = 0;
 	if (len != dev->mr_page_size)
-		ret = srp_finish_mapping(state, ch);
+		ret = srp_map_finish_fmr(state, ch);
 	return ret;
 }
 
-static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
-		      struct srp_request *req, struct scatterlist *scat,
-		      int count)
+static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
+			  struct srp_request *req, struct scatterlist *scat,
+			  int count)
 {
-	struct srp_target_port *target = ch->target;
-	struct srp_device *dev = target->srp_host->srp_dev;
 	struct scatterlist *sg;
 	int i, ret;
 
 	state->desc = req->indirect_desc;
 	state->pages = req->map_page;
-	if (dev->use_fast_reg) {
-		state->fr.next = req->fr_list;
-		state->fr.end = req->fr_list + target->cmd_sg_cnt;
-	} else if (dev->use_fmr) {
-		state->fmr.next = req->fmr_list;
-		state->fmr.end = req->fmr_list + target->cmd_sg_cnt;
-	}
-
-	if (dev->use_fast_reg || dev->use_fmr) {
-		for_each_sg(scat, sg, count, i) {
-			ret = srp_map_sg_entry(state, ch, sg, i);
-			if (ret)
-				goto out;
-		}
-		ret = srp_finish_mapping(state, ch);
+	state->fmr.next = req->fmr_list;
+	state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
+
+	for_each_sg(scat, sg, count, i) {
+		ret = srp_map_sg_entry(state, ch, sg, i);
 		if (ret)
-			goto out;
-	} else {
-		for_each_sg(scat, sg, count, i) {
-			srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
-				     ib_sg_dma_len(dev->dev, sg),
-				     target->global_mr->rkey);
-		}
+			return ret;
 	}
 
+	ret = srp_map_finish_fmr(state, ch);
+	if (ret)
+		return ret;
+
 	req->nmdesc = state->nmdesc;
-	ret = 0;
 
-out:
-	return ret;
+	return 0;
+}
+
+static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
+			 struct srp_request *req, struct scatterlist *scat,
+			 int count)
+{
+	state->desc = req->indirect_desc;
+	state->fr.next = req->fr_list;
+	state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
+	state->sg = scat;
+	state->sg_nents = scsi_sg_count(req->scmnd);
+
+	while (state->sg_nents) {
+		int i, n;
+
+		n = srp_map_finish_fr(state, ch);
+		if (unlikely(n < 0))
+			return n;
+
+		state->sg_nents -= n;
+		for (i = 0; i < n; i++)
+			state->sg = sg_next(state->sg);
+	}
+
+	req->nmdesc = state->nmdesc;
+
+	return 0;
+}
+
+static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
+			  struct srp_request *req, struct scatterlist *scat,
+			  int count)
+{
+	struct srp_target_port *target = ch->target;
+	struct srp_device *dev = target->srp_host->srp_dev;
+	struct scatterlist *sg;
+	int i;
+
+	state->desc = req->indirect_desc;
+	for_each_sg(scat, sg, count, i) {
+		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
+			     ib_sg_dma_len(dev->dev, sg),
+			     target->global_mr->rkey);
+	}
+
+	req->nmdesc = state->nmdesc;
+
+	return 0;
 }
 
 /*
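Note: because srp_map_finish_fr() may cover fewer entries than asked (one MR maps at most max_pages_per_mr pages), srp_map_sg_fr() loops, subtracting the consumed count and stepping the scatterlist forward with sg_next(). The pattern, reduced to its essentials (register_chunk() is a hypothetical stand-in for srp_map_finish_fr()):

    while (nents) {
            int i, n;

            n = register_chunk(sg, nents);  /* hypothetical helper */
            if (n < 0)
                    return n;               /* registration failed */

            nents -= n;                     /* n entries now covered */
            for (i = 0; i < n; i++)
                    sg = sg_next(sg);       /* resume after them */
    }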
@@ -1474,6 +1504,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
 	struct srp_map_state state;
 	struct srp_direct_buf idb_desc;
 	u64 idb_pages[1];
+	struct scatterlist idb_sg[1];
 	int ret;
 
 	memset(&state, 0, sizeof(state));
@@ -1481,20 +1512,32 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
 	state.gen.next = next_mr;
 	state.gen.end = end_mr;
 	state.desc = &idb_desc;
-	state.pages = idb_pages;
-	state.pages[0] = (req->indirect_dma_addr &
-			  dev->mr_page_mask);
-	state.npages = 1;
 	state.base_dma_addr = req->indirect_dma_addr;
 	state.dma_len = idb_len;
-	ret = srp_finish_mapping(&state, ch);
-	if (ret < 0)
-		goto out;
+
+	if (dev->use_fast_reg) {
+		state.sg = idb_sg;
+		state.sg_nents = 1;
+		sg_set_buf(idb_sg, req->indirect_desc, idb_len);
+		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
+		ret = srp_map_finish_fr(&state, ch);
+		if (ret < 0)
+			return ret;
+	} else if (dev->use_fmr) {
+		state.pages = idb_pages;
+		state.pages[0] = (req->indirect_dma_addr &
+				  dev->mr_page_mask);
+		state.npages = 1;
+		ret = srp_map_finish_fmr(&state, ch);
+		if (ret < 0)
+			return ret;
+	} else {
+		return -EINVAL;
+	}
 
 	*idb_rkey = idb_desc.key;
 
-out:
-	return ret;
+	return 0;
 }
 
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
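Note: the "/* hack! */" is deliberate. The indirect descriptor buffer was already DMA-mapped with ib_dma_map_single(), so there is no dma_map_sg()-mapped scatterlist for ib_map_mr_sg() to read. sg_set_buf() fills in the CPU-side page/offset/length, and the DMA address is then patched in by hand so that sg_dma_address() returns the right value. Reduced to a pattern (assuming buf, len and dma_addr were mapped elsewhere):

    struct scatterlist sg[1];

    sg_set_buf(sg, buf, len);       /* CPU-side view of the buffer */
    sg->dma_address = dma_addr;     /* bypass dma_map_sg(): already mapped */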
@@ -1563,7 +1606,12 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 				       target->indirect_size, DMA_TO_DEVICE);
 
 	memset(&state, 0, sizeof(state));
-	srp_map_sg(&state, ch, req, scat, count);
+	if (dev->use_fast_reg)
+		srp_map_sg_fr(&state, ch, req, scat, count);
+	else if (dev->use_fmr)
+		srp_map_sg_fmr(&state, ch, req, scat, count);
+	else
+		srp_map_sg_dma(&state, ch, req, scat, count);
 
 	/* We've mapped the request, now pull as much of the indirect
 	 * descriptor table as we can into the command buffer. If this
@@ -3213,7 +3261,7 @@ static ssize_t srp_create_target(struct device *dev,
 	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
 	INIT_WORK(&target->remove_work, srp_remove_work);
 	spin_lock_init(&target->lock);
-	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
+	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
 	if (ret)
 		goto out;
 
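Note: the extra NULL is unrelated to the registration-API work. ib_query_gid() grew a fifth parameter, a struct ib_gid_attr *, with the RoCE GID table management changes in the same kernel cycle; passing NULL keeps the old behaviour. For reference, the prototype this call now matches (as of the 4.4-era verbs headers):

    int ib_query_gid(struct ib_device *device, u8 port_num, int index,
                     union ib_gid *gid, struct ib_gid_attr *attr);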
