author    Bhanu Prakash Gollapudi <bprakash@broadcom.com>  2011-07-27 14:32:05 -0400
committer James Bottomley <JBottomley@Parallels.com>       2011-07-28 03:42:57 -0400
commit    6c5a7ce4f176b641fd11e59be4df31ee3e6202dd (patch)
tree      676e052ad6c2ac3868e934a9f22e2f5528d6a8f3 /drivers/scsi/bnx2fc/bnx2fc_hwi.c
parent    d6857595394f1fa5c5752eae9bb6045c067fa41e (diff)
[SCSI] bnx2fc: Support 'sequence cleanup' task
For devices that support sequence-level error recovery, the firmware must be told, based on the REC response, the offset from which retransmission should happen. The driver initiates a sequence cleanup task so that the firmware can program the task accordingly. Upon sequence cleanup completion, an SRR is issued to retransmit the sequence.

Signed-off-by: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/bnx2fc/bnx2fc_hwi.c')
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 97
1 file changed, 97 insertions(+), 0 deletions(-)
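Before the diff itself, a note on the core step of the new bnx2fc_init_seq_cleanup_task() added below: it translates the REC-reported retransmission offset into a (BD index, intra-BD offset) pair within the original I/O's buffer-descriptor table, which is then programmed into the task's cur_sge_idx/cur_sge_off fields. A minimal userspace sketch of that walk, with struct bd_entry as an illustrative stand-in for the driver's fcoe_bd_ctx (not the real layout):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the driver's fcoe_bd_ctx entry. */
struct bd_entry {
	uint32_t buf_len;
};

/*
 * Walk the BD table until the remaining offset falls inside one
 * entry, as the loop in bnx2fc_init_seq_cleanup_task() does. On
 * return, *idx is the BD index (cur_sge_idx) and *rem the offset
 * within that BD (cur_sge_off).
 */
static void offset_to_bd(const struct bd_entry *bd, int bd_count,
			 uint32_t offset, int *idx, uint32_t *rem)
{
	int i;

	for (i = 0; i < bd_count; i++) {
		if (offset < bd[i].buf_len)
			break;
		offset -= bd[i].buf_len;
	}
	*idx = i;
	*rem = offset;
}

int main(void)
{
	struct bd_entry bds[] = { { 4096 }, { 4096 }, { 2048 } };
	int idx;
	uint32_t rem;

	/* 5000 = 4096 + 904, so the offset lands 904 bytes into BD 1. */
	offset_to_bd(bds, 3, 5000, &idx, &rem);
	printf("bd index %d, offset within bd %u\n", idx, rem);
	return 0;
}

With BDs of 4096, 4096 and 2048 bytes, an offset of 5000 resolves to index 1 with 904 bytes remaining, which is exactly what the driver stores as the current SGE index and offset.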
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index fa263b5902e1..03ae003d3b85 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -880,6 +880,13 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
 		kref_put(&io_req->refcount, bnx2fc_cmd_release);
 		break;
 
+	case BNX2FC_SEQ_CLEANUP:
+		BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
+			      io_req->xid);
+		bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+		break;
+
 	default:
 		printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
 		break;
@@ -1369,6 +1376,96 @@ void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
 	tgt->conn_db->rq_prod = tgt->rq_prod_idx;
 }
 
+void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
+				  struct fcoe_task_ctx_entry *task,
+				  struct bnx2fc_cmd *orig_io_req,
+				  u32 offset)
+{
+	struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
+	struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
+	struct bnx2fc_interface *interface = tgt->port->priv;
+	struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
+	struct fcoe_task_ctx_entry *orig_task;
+	struct fcoe_task_ctx_entry *task_page;
+	struct fcoe_ext_mul_sges_ctx *sgl;
+	u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
+	u8 orig_task_type;
+	u16 orig_xid = orig_io_req->xid;
+	u32 context_id = tgt->context_id;
+	u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
+	u32 orig_offset = offset;
+	int bd_count;
+	int orig_task_idx, index;
+	int i;
+
+	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+		orig_task_type = FCOE_TASK_TYPE_WRITE;
+	else
+		orig_task_type = FCOE_TASK_TYPE_READ;
+
+	/* Tx flags */
+	task->txwr_rxrd.const_ctx.tx_flags =
+		FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
+		FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
+	/* init flags */
+	task->txwr_rxrd.const_ctx.init_flags = task_type <<
+		FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+		FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+	task->rxwr_txrd.const_ctx.init_flags = context_id <<
+		FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+	task->rxwr_txrd.const_ctx.init_flags = context_id <<
+		FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+
+	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
+
+	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
+	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
+
+	bd_count = orig_io_req->bd_tbl->bd_valid;
+
+	/* obtain the appropriate bd entry from relative offset */
+	for (i = 0; i < bd_count; i++) {
+		if (offset < bd[i].buf_len)
+			break;
+		offset -= bd[i].buf_len;
+	}
+	phys_addr += (i * sizeof(struct fcoe_bd_ctx));
+
+	if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
+				(u32)phys_addr;
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
+				(u32)((u64)phys_addr >> 32);
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
+				bd_count;
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
+				offset; /* adjusted offset */
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
+	} else {
+		orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
+		index = orig_xid % BNX2FC_TASKS_PER_PAGE;
+
+		task_page = (struct fcoe_task_ctx_entry *)
+			     interface->hba->task_ctx[orig_task_idx];
+		orig_task = &(task_page[index]);
+
+		/* Multiple SGEs were used for this IO */
+		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
+		sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
+		sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
+		sgl->mul_sgl.sgl_size = bd_count;
+		sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
+		sgl->mul_sgl.cur_sge_idx = i;
+
+		memset(&task->rxwr_only.rx_seq_ctx, 0,
+		       sizeof(struct fcoe_rx_seq_ctx));
+		task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
+		task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
+	}
+}
 void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
 			      struct fcoe_task_ctx_entry *task,
 			      u16 orig_xid)
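For context, the expected issuing path (in bnx2fc_io.c, outside this diff) is: allocate a BNX2FC_SEQ_CLEANUP command, locate its task context entry by xid, program it with the routine added above, and ring the connection doorbell; the CQ-completion case added in the first hunk then hands the response to bnx2fc_process_seq_cleanup_compl() so the SRR can go out. A hedged outline of that flow, where alloc_seq_cleanup_cmd() and ring_doorbell() are hypothetical stand-ins rather than the driver's actual helpers:

/*
 * Hypothetical outline of a sequence-cleanup issuer. The helpers
 * alloc_seq_cleanup_cmd() and ring_doorbell() are illustrative
 * stand-ins, not the driver's actual API.
 */
static int issue_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset)
{
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct bnx2fc_interface *interface = tgt->port->priv;
	struct fcoe_task_ctx_entry *task_page, *task;
	struct bnx2fc_cmd *seq_clnp_req;
	int task_idx, index;

	seq_clnp_req = alloc_seq_cleanup_cmd(tgt);	/* hypothetical */
	if (!seq_clnp_req)
		return -ENOMEM;

	/* Locate the task context entry that belongs to the new xid. */
	task_idx = seq_clnp_req->xid / BNX2FC_TASKS_PER_PAGE;
	index = seq_clnp_req->xid % BNX2FC_TASKS_PER_PAGE;
	task_page = (struct fcoe_task_ctx_entry *)
		     interface->hba->task_ctx[task_idx];
	task = &task_page[index];

	/* Program the task so firmware resumes the sequence at 'offset'. */
	bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);

	ring_doorbell(tgt);				/* hypothetical */
	return 0;
}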