author:    Bhanu Prakash Gollapudi <bprakash@broadcom.com>  2013-04-22 15:22:30 -0400
committer: David S. Miller <davem@davemloft.net>            2013-04-25 04:06:46 -0400
commit:    0eb43b4bb081a1a22574daab9c05286a600dd7fe
tree:      85933a915bbe9ebd7c3872feefc1d88972063835
parent:    4c09eed9dc422e980fabdb25434ef68e599b704c
bnx2x, bnx2fc: Use per port max exchange resources
The firmware supports a maximum of 4K FCoE exchanges. On 4-port devices, or when working in multi-function mode, this resource has to be distributed among the possible FCoE functions. bnx2x calculates the per-port share and propagates it to bnx2fc via cnic; bnx2fc then uses that value to size its xid resources instead of relying on global constants.

Signed-off-by: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Yuval Mintz <yuvalmin@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
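As a rough, standalone illustration of the new sizing (a sketch, not driver code: the input value 2048 is only an example; the real count arrives from the bnx2x firmware as cnic->max_fcoe_exchanges):

    /* Sketch: how bnx2fc carves the per-port exchange count into xid
     * resources after this patch (compare bnx2fc_hba_create() and
     * bnx2fc_em_config() in the diff below).
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned int max_fcoe_exchanges = 2048;  /* example value only */

            unsigned int max_tasks = max_fcoe_exchanges;
            unsigned int elstm_xids = max_tasks / 2;          /* ELS/TM xids */
            unsigned int max_outstanding_cmds = elstm_xids;   /* SCSI can_queue */
            unsigned int max_xid = max_tasks - 1;             /* end of bnx2fc xid range */
            unsigned int fcoe_min_xid = max_xid + 1;          /* libfc EM starts here */

            printf("max_tasks=%u elstm_xids=%u can_queue=%u\n",
                   max_tasks, elstm_xids, max_outstanding_cmds);
            printf("bnx2fc xids 0..%u, libfc exchange manager from %u\n",
                   max_xid, fcoe_min_xid);
            return 0;
    }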
Diffstat (limited to 'drivers/scsi/bnx2fc')
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h      | 23
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 35
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c  | 22
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c   |  9
4 files changed, 48 insertions(+), 41 deletions(-)
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 50fcd018d14b..11596b2c4702 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -88,9 +88,6 @@
 
 #define BNX2FC_MAX_NPIV			256
 
-#define BNX2FC_MAX_OUTSTANDING_CMNDS	2048
-#define BNX2FC_CAN_QUEUE		BNX2FC_MAX_OUTSTANDING_CMNDS
-#define BNX2FC_ELSTM_XIDS		BNX2FC_CAN_QUEUE
 #define BNX2FC_MIN_PAYLOAD		256
 #define BNX2FC_MAX_PAYLOAD		2048
 #define BNX2FC_MFS			\
@@ -108,11 +105,8 @@
 #define BNX2FC_CONFQ_WQE_SIZE		(sizeof(struct fcoe_confqe))
 #define BNX2FC_5771X_DB_PAGE_SIZE	128
 
-#define BNX2FC_MAX_TASKS		\
-			(BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS)
 #define BNX2FC_TASK_SIZE		128
 #define BNX2FC_TASKS_PER_PAGE		(PAGE_SIZE/BNX2FC_TASK_SIZE)
-#define BNX2FC_TASK_CTX_ARR_SZ		(BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE)
 
 #define BNX2FC_MAX_ROWS_IN_HASH_TBL	8
 #define BNX2FC_HASH_TBL_CHUNK_SIZE	(16 * 1024)
@@ -125,12 +119,9 @@
 #define BNX2FC_WRITE			(1 << 0)
 
 #define BNX2FC_MIN_XID			0
-#define BNX2FC_MAX_XID			\
-			(BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1)
 #define FCOE_MAX_NUM_XIDS		0x2000
-#define FCOE_MIN_XID			(BNX2FC_MAX_XID + 1)
-#define FCOE_MAX_XID			(FCOE_MIN_XID + FCOE_MAX_NUM_XIDS - 1)
-#define FCOE_XIDS_PER_CPU		(FCOE_MIN_XID + (512 * nr_cpu_ids) - 1)
+#define FCOE_MAX_XID_OFFSET		(FCOE_MAX_NUM_XIDS - 1)
+#define FCOE_XIDS_PER_CPU_OFFSET	((512 * nr_cpu_ids) - 1)
 #define BNX2FC_MAX_LUN			0xFFFF
 #define BNX2FC_MAX_FCP_TGT		256
 #define BNX2FC_MAX_CMD_LEN		16
@@ -206,6 +197,13 @@ struct bnx2fc_hba {
 	#define BNX2FC_FLAG_FW_INIT_DONE	0
 	#define BNX2FC_FLAG_DESTROY_CMPL	1
 	u32 next_conn_id;
+
+	/* xid resources */
+	u16 max_xid;
+	u32 max_tasks;
+	u32 max_outstanding_cmds;
+	u32 elstm_xids;
+
 	struct fcoe_task_ctx_entry **task_ctx;
 	dma_addr_t *task_ctx_dma;
 	struct regpair *task_ctx_bd_tbl;
@@ -504,8 +502,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba);
504void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba); 502void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba);
505int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba); 503int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba);
506void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba); 504void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba);
507struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, 505struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba);
508 u16 min_xid, u16 max_xid);
509void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr); 506void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr);
510void bnx2fc_get_link_state(struct bnx2fc_hba *hba); 507void bnx2fc_get_link_state(struct bnx2fc_hba *hba);
511char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items); 508char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 90bc7bd00966..7dffec1e5715 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -71,7 +71,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb);
 static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
 static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
 static int bnx2fc_lport_config(struct fc_lport *lport);
-static int bnx2fc_em_config(struct fc_lport *lport);
+static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba);
 static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
 static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
 static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
@@ -944,16 +944,17 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
 	return 0;
 }
 
-static int bnx2fc_em_config(struct fc_lport *lport)
+static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba)
 {
-	int max_xid;
+	int fcoe_min_xid, fcoe_max_xid;
 
+	fcoe_min_xid = hba->max_xid + 1;
 	if (nr_cpu_ids <= 2)
-		max_xid = FCOE_XIDS_PER_CPU;
+		fcoe_max_xid = hba->max_xid + FCOE_XIDS_PER_CPU_OFFSET;
 	else
-		max_xid = FCOE_MAX_XID;
-	if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
-				max_xid, NULL)) {
+		fcoe_max_xid = hba->max_xid + FCOE_MAX_XID_OFFSET;
+	if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, fcoe_min_xid,
+			       fcoe_max_xid, NULL)) {
 		printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
 		return -ENOMEM;
 	}
@@ -1300,6 +1301,12 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
 	mutex_init(&hba->hba_mutex);
 
 	hba->cnic = cnic;
+
+	hba->max_tasks = cnic->max_fcoe_exchanges;
+	hba->elstm_xids = (hba->max_tasks / 2);
+	hba->max_outstanding_cmds = hba->elstm_xids;
+	hba->max_xid = (hba->max_tasks - 1);
+
 	rc = bnx2fc_bind_pcidev(hba);
 	if (rc) {
 		printk(KERN_ERR PFX "create_adapter: bind error\n");
@@ -1318,8 +1325,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
 
 	hba->num_ofld_sess = 0;
 
-	hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
-						BNX2FC_MAX_XID);
+	hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba);
 	if (!hba->cmd_mgr) {
 		printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
 		goto cmgr_err;
@@ -1330,13 +1336,13 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
 					FCOE_IOS_PER_CONNECTION_SHIFT;
 	fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS <<
 					FCOE_LOGINS_PER_PORT_SHIFT;
-	fcoe_cap->capability2 = BNX2FC_MAX_OUTSTANDING_CMNDS <<
+	fcoe_cap->capability2 = hba->max_outstanding_cmds <<
 					FCOE_NUMBER_OF_EXCHANGES_SHIFT;
 	fcoe_cap->capability2 |= BNX2FC_MAX_NPIV <<
 					FCOE_NPIV_WWN_PER_PORT_SHIFT;
 	fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS <<
 					FCOE_TARGETS_SUPPORTED_SHIFT;
-	fcoe_cap->capability3 |= BNX2FC_MAX_OUTSTANDING_CMNDS <<
+	fcoe_cap->capability3 |= hba->max_outstanding_cmds <<
 					FCOE_OUTSTANDING_COMMANDS_SHIFT;
 	fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL;
 
@@ -1416,7 +1422,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
 	struct Scsi_Host *shost;
 	struct fc_vport *vport = dev_to_vport(parent);
 	struct bnx2fc_lport *blport;
-	struct bnx2fc_hba *hba;
+	struct bnx2fc_hba *hba = interface->hba;
 	int rc = 0;
 
 	blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
@@ -1426,6 +1432,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
 	}
 
 	/* Allocate Scsi_Host structure */
+	bnx2fc_shost_template.can_queue = hba->max_outstanding_cmds;
 	if (!npiv)
 		lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port));
 	else
@@ -1477,7 +1484,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
 
 	/* Allocate exchange manager */
 	if (!npiv)
-		rc = bnx2fc_em_config(lport);
+		rc = bnx2fc_em_config(lport, hba);
 	else {
 		shost = vport_to_shost(vport);
 		n_port = shost_priv(shost);
@@ -1491,7 +1498,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
 
 	bnx2fc_interface_get(interface);
 
-	hba = interface->hba;
 	spin_lock_bh(&hba->hba_lock);
 	blport->lport = lport;
 	list_add_tail(&blport->list, &hba->vports);
@@ -2706,7 +2712,6 @@ static struct scsi_host_template bnx2fc_shost_template = {
 	.change_queue_type	= fc_change_queue_type,
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
-	.can_queue		= BNX2FC_CAN_QUEUE,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.sg_tablesize		= BNX2FC_MAX_BDS_PER_CMD,
 	.max_sectors		= 1024,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 85ea98a80f40..50510ffe1bf5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -77,7 +77,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
 	fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
 					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 
-	fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
+	fcoe_init1.num_tasks = hba->max_tasks;
 	fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
 	fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
 	fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
@@ -697,7 +697,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
 			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
 
 
-		if (xid > BNX2FC_MAX_XID) {
+		if (xid > hba->max_xid) {
 			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
 				   xid);
 			goto ret_err_rqe;
@@ -815,7 +815,7 @@ ret_err_rqe:
 		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
 			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
 
-		if (xid > BNX2FC_MAX_XID) {
+		if (xid > hba->max_xid) {
 			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
 			goto ret_warn_rqe;
 		}
@@ -880,7 +880,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
 
 	spin_lock_bh(&tgt->tgt_lock);
 	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
-	if (xid >= BNX2FC_MAX_TASKS) {
+	if (xid >= hba->max_tasks) {
 		printk(KERN_ERR PFX "ERROR:xid out of range\n");
 		spin_unlock_bh(&tgt->tgt_lock);
 		return;
@@ -1842,6 +1842,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
 	int rc = 0;
 	struct regpair *task_ctx_bdt;
 	dma_addr_t addr;
+	int task_ctx_arr_sz;
 	int i;
 
 	/*
@@ -1865,7 +1866,8 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
 	 * Allocate task_ctx which is an array of pointers pointing to
 	 * a page containing 32 task contexts
 	 */
-	hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
+	task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
+	hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
				 GFP_KERNEL);
 	if (!hba->task_ctx) {
 		printk(KERN_ERR PFX "unable to allocate task context array\n");
@@ -1876,7 +1878,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
 	/*
 	 * Allocate task_ctx_dma which is an array of dma addresses
 	 */
-	hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
+	hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
				     sizeof(dma_addr_t)), GFP_KERNEL);
 	if (!hba->task_ctx_dma) {
 		printk(KERN_ERR PFX "unable to alloc context mapping array\n");
@@ -1885,7 +1887,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
 	}
 
 	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
-	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+	for (i = 0; i < task_ctx_arr_sz; i++) {
 
 		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
 						      PAGE_SIZE,
@@ -1905,7 +1907,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
 	return 0;
 
 out3:
-	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+	for (i = 0; i < task_ctx_arr_sz; i++) {
 		if (hba->task_ctx[i]) {
 
 			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
@@ -1929,6 +1931,7 @@ out:
 
 void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
 {
+	int task_ctx_arr_sz;
 	int i;
 
 	if (hba->task_ctx_bd_tbl) {
@@ -1938,8 +1941,9 @@ void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
 		hba->task_ctx_bd_tbl = NULL;
 	}
 
+	task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
 	if (hba->task_ctx) {
-		for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+		for (i = 0; i < task_ctx_arr_sz; i++) {
 			if (hba->task_ctx[i]) {
 				dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
 						  hba->task_ctx[i],
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 60798e829de6..723a9a8ba5ee 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -239,8 +239,7 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
 	sc_cmd->scsi_done(sc_cmd);
 }
 
-struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
-						u16 min_xid, u16 max_xid)
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
 {
 	struct bnx2fc_cmd_mgr *cmgr;
 	struct io_bdt *bdt_info;
@@ -252,6 +251,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
 	int num_ios, num_pri_ios;
 	size_t bd_tbl_sz;
 	int arr_sz = num_possible_cpus() + 1;
+	u16 min_xid = BNX2FC_MIN_XID;
+	u16 max_xid = hba->max_xid;
 
 	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
 		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \
@@ -298,7 +299,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
 	 * of slow path requests.
 	 */
 	xid = BNX2FC_MIN_XID;
-	num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS;
+	num_pri_ios = num_ios - hba->elstm_xids;
 	for (i = 0; i < num_ios; i++) {
 		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
 
@@ -367,7 +368,7 @@ void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
 	struct bnx2fc_hba *hba = cmgr->hba;
 	size_t bd_tbl_sz;
 	u16 min_xid = BNX2FC_MIN_XID;
-	u16 max_xid = BNX2FC_MAX_XID;
+	u16 max_xid = hba->max_xid;
 	int num_ios;
 	int i;
 