aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/bnx2fc
diff options
context:
space:
mode:
authorBhanu Prakash Gollapudi <bprakash@broadcom.com>2011-07-27 14:32:05 -0400
committerJames Bottomley <JBottomley@Parallels.com>2011-07-28 03:42:57 -0400
commit6c5a7ce4f176b641fd11e59be4df31ee3e6202dd (patch)
tree676e052ad6c2ac3868e934a9f22e2f5528d6a8f3 /drivers/scsi/bnx2fc
parentd6857595394f1fa5c5752eae9bb6045c067fa41e (diff)
[SCSI] bnx2fc: Support 'sequence cleanup' task
For devices that support sequence-level error recovery, the firmware has to be informed, based on the REC response, of the offset from which retransmission should happen. The driver initiates a sequence cleanup task to the firmware so that the firmware can program the task. Upon sequence cleanup completion, an SRR is issued to retransmit the sequence.

Signed-off-by: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/bnx2fc')
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h14
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c20
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c97
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c106
4 files changed, 237 insertions, 0 deletions
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index d38dcc7f1047..45d5391229e2 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -357,6 +357,8 @@ struct bnx2fc_els_cb_arg {
357 struct bnx2fc_cmd *aborted_io_req; 357 struct bnx2fc_cmd *aborted_io_req;
358 struct bnx2fc_cmd *io_req; 358 struct bnx2fc_cmd *io_req;
359 u16 l2_oxid; 359 u16 l2_oxid;
360 u32 offset;
361 enum fc_rctl r_ctl;
360}; 362};
361 363
362/* bnx2fc command structure */ 364/* bnx2fc command structure */
@@ -370,6 +372,7 @@ struct bnx2fc_cmd {
370#define BNX2FC_ABTS 3 372#define BNX2FC_ABTS 3
371#define BNX2FC_ELS 4 373#define BNX2FC_ELS 4
372#define BNX2FC_CLEANUP 5 374#define BNX2FC_CLEANUP 5
375#define BNX2FC_SEQ_CLEANUP 6
373 u8 io_req_flags; 376 u8 io_req_flags;
374 struct kref refcount; 377 struct kref refcount;
375 struct fcoe_port *port; 378 struct fcoe_port *port;
@@ -466,6 +469,10 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
466void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, 469void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
467 struct fcoe_task_ctx_entry *task, 470 struct fcoe_task_ctx_entry *task,
468 u16 orig_xid); 471 u16 orig_xid);
472void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnup_req,
473 struct fcoe_task_ctx_entry *task,
474 struct bnx2fc_cmd *orig_io_req,
475 u32 offset);
469void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, 476void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
470 struct fcoe_task_ctx_entry *task); 477 struct fcoe_task_ctx_entry *task);
471void bnx2fc_init_task(struct bnx2fc_cmd *io_req, 478void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
@@ -515,5 +522,12 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
515 unsigned char *buf, 522 unsigned char *buf,
516 u32 frame_len, u16 l2_oxid); 523 u32 frame_len, u16 l2_oxid);
517int bnx2fc_send_stat_req(struct bnx2fc_hba *hba); 524int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
525int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req);
526int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl);
527void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req,
528 struct fcoe_task_ctx_entry *task,
529 u8 rx_state);
530int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
531 enum fc_rctl r_ctl);
518 532
519#endif 533#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index 5d7baa2371f8..75d0b6ac6c2b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -253,6 +253,26 @@ int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
253 return rc; 253 return rc;
254} 254}
255 255
/*
 * bnx2fc_send_rec - send a REC (Read Exchange Concise) ELS for @orig_io_req.
 *
 * Stub only: always reports success. The real implementation arrives in a
 * follow-up patch; this exists so each patch in the series builds on its own.
 */
int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
{
	/*
	 * Dummy function to enable compiling individual patches. Real function
	 * is in the next patch.
	 */
	return 0;
}
264
/*
 * bnx2fc_send_srr - send an SRR (Sequence Retransmission Request) for
 * @orig_io_req, asking the target to retransmit from @offset with the
 * given @r_ctl.
 *
 * Stub only: always reports success. The real implementation arrives in a
 * follow-up patch; this exists so each patch in the series builds on its own.
 */
int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
{
	/*
	 * Dummy function to enable compiling individual patches. Real function
	 * is in the next patch.
	 */
	return 0;
}
273
274
275
256static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, 276static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
257 void *data, u32 data_len, 277 void *data, u32 data_len,
258 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg), 278 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index fa263b5902e1..03ae003d3b85 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -880,6 +880,13 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
880 kref_put(&io_req->refcount, bnx2fc_cmd_release); 880 kref_put(&io_req->refcount, bnx2fc_cmd_release);
881 break; 881 break;
882 882
883 case BNX2FC_SEQ_CLEANUP:
884 BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
885 io_req->xid);
886 bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
887 kref_put(&io_req->refcount, bnx2fc_cmd_release);
888 break;
889
883 default: 890 default:
884 printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type); 891 printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
885 break; 892 break;
@@ -1369,6 +1376,96 @@ void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1369 tgt->conn_db->rq_prod = tgt->rq_prod_idx; 1376 tgt->conn_db->rq_prod = tgt->rq_prod_idx;
1370} 1377}
1371 1378
1379void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
1380 struct fcoe_task_ctx_entry *task,
1381 struct bnx2fc_cmd *orig_io_req,
1382 u32 offset)
1383{
1384 struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
1385 struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
1386 struct bnx2fc_interface *interface = tgt->port->priv;
1387 struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
1388 struct fcoe_task_ctx_entry *orig_task;
1389 struct fcoe_task_ctx_entry *task_page;
1390 struct fcoe_ext_mul_sges_ctx *sgl;
1391 u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
1392 u8 orig_task_type;
1393 u16 orig_xid = orig_io_req->xid;
1394 u32 context_id = tgt->context_id;
1395 u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
1396 u32 orig_offset = offset;
1397 int bd_count;
1398 int orig_task_idx, index;
1399 int i;
1400
1401 memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1402
1403 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1404 orig_task_type = FCOE_TASK_TYPE_WRITE;
1405 else
1406 orig_task_type = FCOE_TASK_TYPE_READ;
1407
1408 /* Tx flags */
1409 task->txwr_rxrd.const_ctx.tx_flags =
1410 FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
1411 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1412 /* init flags */
1413 task->txwr_rxrd.const_ctx.init_flags = task_type <<
1414 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1415 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1416 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1417 task->rxwr_txrd.const_ctx.init_flags = context_id <<
1418 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1419 task->rxwr_txrd.const_ctx.init_flags = context_id <<
1420 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1421
1422 task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1423
1424 task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
1425 task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
1426
1427 bd_count = orig_io_req->bd_tbl->bd_valid;
1428
1429 /* obtain the appropriate bd entry from relative offset */
1430 for (i = 0; i < bd_count; i++) {
1431 if (offset < bd[i].buf_len)
1432 break;
1433 offset -= bd[i].buf_len;
1434 }
1435 phys_addr += (i * sizeof(struct fcoe_bd_ctx));
1436
1437 if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
1438 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1439 (u32)phys_addr;
1440 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1441 (u32)((u64)phys_addr >> 32);
1442 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1443 bd_count;
1444 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
1445 offset; /* adjusted offset */
1446 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
1447 } else {
1448 orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
1449 index = orig_xid % BNX2FC_TASKS_PER_PAGE;
1450
1451 task_page = (struct fcoe_task_ctx_entry *)
1452 interface->hba->task_ctx[orig_task_idx];
1453 orig_task = &(task_page[index]);
1454
1455 /* Multiple SGEs were used for this IO */
1456 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1457 sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
1458 sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
1459 sgl->mul_sgl.sgl_size = bd_count;
1460 sgl->mul_sgl.cur_sge_off = offset; /*adjusted offset */
1461 sgl->mul_sgl.cur_sge_idx = i;
1462
1463 memset(&task->rxwr_only.rx_seq_ctx, 0,
1464 sizeof(struct fcoe_rx_seq_ctx));
1465 task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
1466 task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
1467 }
1468}
1372void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, 1469void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1373 struct fcoe_task_ctx_entry *task, 1470 struct fcoe_task_ctx_entry *task,
1374 u16 orig_xid) 1471 u16 orig_xid)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 72940b8625bd..9820d3060cd8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -930,6 +930,76 @@ abts_err:
930 return rc; 930 return rc;
931} 931}
932 932
933int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
934 enum fc_rctl r_ctl)
935{
936 struct fc_lport *lport;
937 struct bnx2fc_rport *tgt = orig_io_req->tgt;
938 struct bnx2fc_interface *interface;
939 struct fcoe_port *port;
940 struct bnx2fc_cmd *seq_clnp_req;
941 struct fcoe_task_ctx_entry *task;
942 struct fcoe_task_ctx_entry *task_page;
943 struct bnx2fc_els_cb_arg *cb_arg = NULL;
944 int task_idx, index;
945 u16 xid;
946 int rc = 0;
947
948 BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
949 orig_io_req->xid);
950 kref_get(&orig_io_req->refcount);
951
952 port = orig_io_req->port;
953 interface = port->priv;
954 lport = port->lport;
955
956 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
957 if (!cb_arg) {
958 printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
959 rc = -ENOMEM;
960 goto cleanup_err;
961 }
962
963 seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
964 if (!seq_clnp_req) {
965 printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
966 rc = -ENOMEM;
967 kfree(cb_arg);
968 goto cleanup_err;
969 }
970 /* Initialize rest of io_req fields */
971 seq_clnp_req->sc_cmd = NULL;
972 seq_clnp_req->port = port;
973 seq_clnp_req->tgt = tgt;
974 seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */
975
976 xid = seq_clnp_req->xid;
977
978 task_idx = xid/BNX2FC_TASKS_PER_PAGE;
979 index = xid % BNX2FC_TASKS_PER_PAGE;
980
981 /* Initialize task context for this IO request */
982 task_page = (struct fcoe_task_ctx_entry *)
983 interface->hba->task_ctx[task_idx];
984 task = &(task_page[index]);
985 cb_arg->aborted_io_req = orig_io_req;
986 cb_arg->io_req = seq_clnp_req;
987 cb_arg->r_ctl = r_ctl;
988 cb_arg->offset = offset;
989 seq_clnp_req->cb_arg = cb_arg;
990
991 printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
992 bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);
993
994 /* Obtain free SQ entry */
995 bnx2fc_add_2_sq(tgt, xid);
996
997 /* Ring doorbell */
998 bnx2fc_ring_doorbell(tgt);
999cleanup_err:
1000 return rc;
1001}
1002
933int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req) 1003int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
934{ 1004{
935 struct fc_lport *lport; 1005 struct fc_lport *lport;
@@ -1156,6 +1226,42 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1156 return rc; 1226 return rc;
1157} 1227}
1158 1228
1229void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
1230 struct fcoe_task_ctx_entry *task,
1231 u8 rx_state)
1232{
1233 struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
1234 struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
1235 u32 offset = cb_arg->offset;
1236 enum fc_rctl r_ctl = cb_arg->r_ctl;
1237 int rc = 0;
1238 struct bnx2fc_rport *tgt = orig_io_req->tgt;
1239
1240 BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x"
1241 "cmd_type = %d\n",
1242 seq_clnp_req->xid, seq_clnp_req->cmd_type);
1243
1244 if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
1245 printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
1246 seq_clnp_req->xid);
1247 goto free_cb_arg;
1248 }
1249 kref_get(&orig_io_req->refcount);
1250
1251 spin_unlock_bh(&tgt->tgt_lock);
1252 rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
1253 spin_lock_bh(&tgt->tgt_lock);
1254
1255 if (rc)
1256 printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
1257 " IO will abort\n");
1258 seq_clnp_req->cb_arg = NULL;
1259 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
1260free_cb_arg:
1261 kfree(cb_arg);
1262 return;
1263}
1264
1159void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req, 1265void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
1160 struct fcoe_task_ctx_entry *task, 1266 struct fcoe_task_ctx_entry *task,
1161 u8 num_rq) 1267 u8 num_rq)