author		David Dillow <dillowda@ornl.gov>	2011-01-14 17:32:07 -0500
committer	David Dillow <dillowda@ornl.gov>	2011-03-15 19:34:48 -0400
commit		961e0be89a5120a1409ebc525cca6f603615a8a8 (patch)
tree		4a56b84ef3a6aee8b0c23bdd4c8bf92f3e1b7110 /drivers/infiniband/ulp/srp
parent		8c4037b501acd2ec3abc7925e66af8af40a2da9d (diff)
IB/srp: move IB CM setup completion into its own function
This is to clean up prior to further changes.
Signed-off-by: David Dillow <dillowda@ornl.gov>
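
In outline, the cleanup moves the IB_CM_REP_RECEIVED handling out of srp_cm_handler() into a new srp_cm_rep_handler(), replacing the per-step "record the failure in target->status and break out of the switch" sequences with a local return code and goto-based unwinding that sets target->status once at the end. A minimal before/after sketch of one such step, abridged from the diff below (not the complete code):

/* Before: inline in srp_cm_handler()'s IB_CM_REP_RECEIVED case */
	target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (target->status)
		break;

/* After: in srp_cm_rep_handler(), with a single cleanup path */
	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;	/* kfree(qp_attr), then target->status = ret */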
Diffstat (limited to 'drivers/infiniband/ulp/srp')
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.c	144
1 files changed, 73 insertions, 71 deletions
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 197e26cc2b0b..060e6a84f18f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1213,6 +1213,78 @@ err:
 	return -ENOMEM;
 }
 
+static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
+			       struct srp_login_rsp *lrsp,
+			       struct srp_target_port *target)
+{
+	struct ib_qp_attr *qp_attr = NULL;
+	int attr_mask = 0;
+	int ret;
+	int i;
+
+	if (lrsp->opcode == SRP_LOGIN_RSP) {
+		target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
+		target->req_lim = be32_to_cpu(lrsp->req_lim_delta);
+
+		/*
+		 * Reserve credits for task management so we don't
+		 * bounce requests back to the SCSI mid-layer.
+		 */
+		target->scsi_host->can_queue
+			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
+			      target->scsi_host->can_queue);
+	} else {
+		shost_printk(KERN_WARNING, target->scsi_host,
+			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
+		ret = -ECONNRESET;
+		goto error;
+	}
+
+	if (!target->rx_ring[0]) {
+		ret = srp_alloc_iu_bufs(target);
+		if (ret)
+			goto error;
+	}
+
+	ret = -ENOMEM;
+	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
+	if (!qp_attr)
+		goto error;
+
+	qp_attr->qp_state = IB_QPS_RTR;
+	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+	if (ret)
+		goto error_free;
+
+	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
+	if (ret)
+		goto error_free;
+
+	for (i = 0; i < SRP_RQ_SIZE; i++) {
+		struct srp_iu *iu = target->rx_ring[i];
+		ret = srp_post_recv(target, iu);
+		if (ret)
+			goto error_free;
+	}
+
+	qp_attr->qp_state = IB_QPS_RTS;
+	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+	if (ret)
+		goto error_free;
+
+	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
+	if (ret)
+		goto error_free;
+
+	ret = ib_send_cm_rtu(cm_id, NULL, 0);
+
+error_free:
+	kfree(qp_attr);
+
+error:
+	target->status = ret;
+}
+
 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
 			       struct ib_cm_event *event,
 			       struct srp_target_port *target)
@@ -1296,11 +1368,7 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 {
 	struct srp_target_port *target = cm_id->context;
-	struct ib_qp_attr *qp_attr = NULL;
-	int attr_mask = 0;
 	int comp = 0;
-	int opcode = 0;
-	int i;
 
 	switch (event->event) {
 	case IB_CM_REQ_ERROR:
@@ -1312,71 +1380,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 
 	case IB_CM_REP_RECEIVED:
 		comp = 1;
-		opcode = *(u8 *) event->private_data;
-
-		if (opcode == SRP_LOGIN_RSP) {
-			struct srp_login_rsp *rsp = event->private_data;
-
-			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
-			target->req_lim = be32_to_cpu(rsp->req_lim_delta);
-
-			/*
-			 * Reserve credits for task management so we don't
-			 * bounce requests back to the SCSI mid-layer.
-			 */
-			target->scsi_host->can_queue
-				= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
-				      target->scsi_host->can_queue);
-		} else {
-			shost_printk(KERN_WARNING, target->scsi_host,
-				     PFX "Unhandled RSP opcode %#x\n", opcode);
-			target->status = -ECONNRESET;
-			break;
-		}
-
-		if (!target->rx_ring[0]) {
-			target->status = srp_alloc_iu_bufs(target);
-			if (target->status)
-				break;
-		}
-
-		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
-		if (!qp_attr) {
-			target->status = -ENOMEM;
-			break;
-		}
-
-		qp_attr->qp_state = IB_QPS_RTR;
-		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
-		if (target->status)
-			break;
-
-		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
-		if (target->status)
-			break;
-
-		for (i = 0; i < SRP_RQ_SIZE; i++) {
-			struct srp_iu *iu = target->rx_ring[i];
-			target->status = srp_post_recv(target, iu);
-			if (target->status)
-				break;
-		}
-		if (target->status)
-			break;
-
-		qp_attr->qp_state = IB_QPS_RTS;
-		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
-		if (target->status)
-			break;
-
-		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
-		if (target->status)
-			break;
-
-		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
-		if (target->status)
-			break;
-
+		srp_cm_rep_handler(cm_id, event->private_data, target);
 		break;
 
 	case IB_CM_REJ_RECEIVED:
@@ -1416,8 +1420,6 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 	if (comp)
 		complete(&target->done);
 
-	kfree(qp_attr);
-
 	return 0;
 }
 