-rw-r--r--  drivers/scsi/cxlflash/common.h     |  2
-rw-r--r--  drivers/scsi/cxlflash/main.c       | 62
-rw-r--r--  drivers/scsi/cxlflash/sislite.h    |  6
-rw-r--r--  drivers/scsi/cxlflash/superpipe.c  |  2
-rw-r--r--  drivers/scsi/cxlflash/vlun.c       | 14
5 files changed, 38 insertions, 48 deletions
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index a81058555d97..bbfe711826c3 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -105,8 +105,6 @@ struct cxlflash_cfg {
 	atomic_t scan_host_needed;
 
 	struct cxl_afu *cxl_afu;
-
-	struct pci_pool *cxlflash_cmd_pool;
 	struct pci_dev *parent_dev;
 
 	atomic_t recovery_threads;
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 14fb9b4c970d..eeb1c4798207 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -34,7 +34,6 @@ MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
 MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
 MODULE_LICENSE("GPL");
 
-
 /**
  * cmd_checkout() - checks out an AFU command
  * @afu:	AFU to checkout from.
@@ -730,7 +729,7 @@ static void cxlflash_remove(struct pci_dev *pdev)
 	case INIT_STATE_SCSI:
 		cxlflash_term_local_luns(cfg);
 		scsi_remove_host(cfg->host);
-		/* Fall through */
+		/* fall through */
 	case INIT_STATE_AFU:
 		term_afu(cfg);
 		cancel_work_sync(&cfg->work_q);
@@ -763,9 +762,7 @@ static int alloc_mem(struct cxlflash_cfg *cfg)
 	char *buf = NULL;
 	struct device *dev = &cfg->dev->dev;
 
-	/* This allocation is about 12K, i.e. only 1 64k page
-	 * and upto 4 4k pages
-	 */
+	/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
 	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 					    get_order(sizeof(struct afu)));
 	if (unlikely(!cfg->afu)) {
@@ -1295,10 +1292,10 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
 		goto out;
 	}
 
-	/* it is OK to clear AFU status before FC_ERROR */
+	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
 	writeq_be(reg_unmasked, &global->regs.aintr_clear);
 
-	/* check each bit that is on */
+	/* Check each bit that is on */
 	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
 		info = find_ainfo(1ULL << i);
 		if (((reg_unmasked & 0x1) == 0) || !info)
@@ -1311,7 +1308,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
 			 readq_be(&global->fc_regs[port][FC_STATUS / 8]));
 
 		/*
-		 * do link reset first, some OTHER errors will set FC_ERROR
+		 * Do link reset first, some OTHER errors will set FC_ERROR
 		 * again if cleared before or w/o a reset
 		 */
 		if (info->action & LINK_RESET) {
@@ -1326,7 +1323,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
 		reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
 
 		/*
-		 * since all errors are unmasked, FC_ERROR and FC_ERRCAP
+		 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
 		 * should be the same and tracing one is sufficient.
 		 */
 
@@ -1472,23 +1469,22 @@ static void init_pcr(struct cxlflash_cfg *cfg)
 
 	for (i = 0; i < MAX_CONTEXT; i++) {
 		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
-		/* disrupt any clients that could be running */
-		/* e. g. clients that survived a master restart */
+		/* Disrupt any clients that could be running */
+		/* e.g. clients that survived a master restart */
 		writeq_be(0, &ctrl_map->rht_start);
 		writeq_be(0, &ctrl_map->rht_cnt_id);
 		writeq_be(0, &ctrl_map->ctx_cap);
 	}
 
-	/* copy frequently used fields into afu */
+	/* Copy frequently used fields into afu */
 	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
-	/* ctx_hndl is 16 bits in CAIA */
 	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
 	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
 
 	/* Program the Endian Control for the master context */
 	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
 
-	/* initialize cmd fields that never change */
+	/* Initialize cmd fields that never change */
 	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
 		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
 		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
@@ -1517,7 +1513,7 @@ static int init_global(struct cxlflash_cfg *cfg)
 
 	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
 
-	/* set up RRQ in AFU for master issued cmds */
+	/* Set up RRQ in AFU for master issued cmds */
 	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
 	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
 
@@ -1530,9 +1526,9 @@ static int init_global(struct cxlflash_cfg *cfg)
 	/* checker on if dual afu */
 	writeq_be(reg, &afu->afu_map->global.regs.afu_config);
 
-	/* global port select: select either port */
+	/* Global port select: select either port */
 	if (afu->internal_lun) {
-		/* only use port 0 */
+		/* Only use port 0 */
 		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
 		num_ports = NUM_FC_PORTS - 1;
 	} else {
@@ -1541,15 +1537,15 @@ static int init_global(struct cxlflash_cfg *cfg)
 	}
 
 	for (i = 0; i < num_ports; i++) {
-		/* unmask all errors (but they are still masked at AFU) */
+		/* Unmask all errors (but they are still masked at AFU) */
 		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
-		/* clear CRC error cnt & set a threshold */
+		/* Clear CRC error cnt & set a threshold */
 		(void)readq_be(&afu->afu_map->global.
 			       fc_regs[i][FC_CNT_CRCERR / 8]);
 		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
 			  [FC_CRC_THRESH / 8]);
 
-		/* set WWPNs. If already programmed, wwpn[i] is 0 */
+		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
 		if (wwpn[i] != 0 &&
 		    afu_set_wwpn(afu, i,
 				 &afu->afu_map->global.fc_regs[i][0],
@@ -1563,18 +1559,17 @@ static int init_global(struct cxlflash_cfg *cfg)
 		 * offline/online transitions and a PLOGI
 		 */
 		msleep(100);
-
 	}
 
-	/* set up master's own CTX_CAP to allow real mode, host translation */
-	/* tbls, afu cmds and read/write GSCSI cmds. */
+	/* Set up master's own CTX_CAP to allow real mode, host translation */
+	/* tables, afu cmds and read/write GSCSI cmds. */
 	/* First, unlock ctx_cap write by reading mbox */
 	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
 	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
 		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
 		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
 		  &afu->ctrl_map->ctx_cap);
-	/* init heartbeat */
+	/* Initialize heartbeat */
 	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
 
 out:
@@ -1603,7 +1598,7 @@ static int start_afu(struct cxlflash_cfg *cfg)
 
 	init_pcr(cfg);
 
-	/* initialize RRQ pointers */
+	/* Initialize RRQ pointers */
 	afu->hrrq_start = &afu->rrq_entry[0];
 	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
 	afu->hrrq_curr = afu->hrrq_start;
@@ -1726,8 +1721,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
 		goto err1;
 	}
 
-	/* Map the entire MMIO space of the AFU.
-	 */
+	/* Map the entire MMIO space of the AFU */
 	afu->afu_map = cxl_psa_map(cfg->mcctx);
 	if (!afu->afu_map) {
 		rc = -ENOMEM;
@@ -1779,7 +1773,7 @@ err1:
  * @mode:	Type of sync to issue (lightweight, heavyweight, global).
  *
  * The AFU can only take 1 sync command at a time. This routine enforces this
- * limitation by using a mutex to provide exlusive access to the AFU during
+ * limitation by using a mutex to provide exclusive access to the AFU during
  * the sync. This design point requires calling threads to not be on interrupt
  * context due to the possibility of sleeping during concurrent sync operations.
  *
@@ -1845,7 +1839,7 @@ retry:
 
 	wait_resp(afu, cmd);
 
-	/* set on timeout */
+	/* Set on timeout */
 	if (unlikely((cmd->sa.ioasc != 0) ||
 		     (cmd->sa.host_use_b[0] & B_ERROR)))
 		rc = -1;
@@ -2262,7 +2256,7 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun = 16,
 	.can_queue = CXLFLASH_MAX_CMDS,
 	.this_id = -1,
-	.sg_tablesize = SG_NONE,	/* No scatter gather support. */
+	.sg_tablesize = SG_NONE,	/* No scatter gather support */
 	.max_sectors = CXLFLASH_MAX_SECTORS,
 	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = cxlflash_host_attrs,
@@ -2322,8 +2316,7 @@ static void cxlflash_worker_thread(struct work_struct *work)
 
 			/* The reset can block... */
 			afu_link_reset(afu, port,
-				       &afu->afu_map->
-				       global.fc_regs[port][0]);
+				       &afu->afu_map->global.fc_regs[port][0]);
 			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
 		}
 
@@ -2402,7 +2395,6 @@ static int cxlflash_probe(struct pci_dev *pdev,
 	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
 
 	cfg->dev_id = (struct pci_device_id *)dev_id;
-	cfg->mcctx = NULL;
 
 	init_waitqueue_head(&cfg->tmf_waitq);
 	init_waitqueue_head(&cfg->reset_waitq);
@@ -2418,7 +2410,8 @@ static int cxlflash_probe(struct pci_dev *pdev,
 
 	pci_set_drvdata(pdev, cfg);
 
-	/* Use the special service provided to look up the physical
+	/*
+	 * Use the special service provided to look up the physical
 	 * PCI device, since we are called on the probe of the virtual
 	 * PCI host bus (vphb)
 	 */
@@ -2448,7 +2441,6 @@ static int cxlflash_probe(struct pci_dev *pdev,
 	}
 	cfg->init_state = INIT_STATE_AFU;
 
-
 	rc = init_scsi(cfg);
 	if (rc) {
 		dev_err(&pdev->dev, "%s: call to init_scsi "
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index 8425d1ab65ef..0b3366f5e6f6 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -146,7 +146,7 @@ struct sisl_rc {
 #define SISL_FC_RC_ABORTFAIL	0x59	/* pending abort completed w/fail */
 #define SISL_FC_RC_RESID	0x5A	/* ioasa underrun/overrun flags set */
 #define SISL_FC_RC_RESIDERR	0x5B	/* actual data len does not match SCSI
-					   reported len, possbly due to dropped
+					   reported len, possibly due to dropped
 					   frames */
 #define SISL_FC_RC_TGTABORT	0x5C	/* command aborted by target */
 };
@@ -258,7 +258,7 @@ struct sisl_host_map {
 	__be64 rrq_start;	/* start & end are both inclusive */
 	__be64 rrq_end;		/* write sequence: start followed by end */
 	__be64 cmd_room;
-	__be64 ctx_ctrl;	/* least signiifcant byte or b56:63 is LISN# */
+	__be64 ctx_ctrl;	/* least significant byte or b56:63 is LISN# */
 	__be64 mbox_w;		/* restricted use */
 };
 
@@ -290,7 +290,7 @@ struct sisl_global_regs {
 #define SISL_ASTATUS_FC0_LOGO	 0x4000ULL /* b49, target sent FLOGI/PLOGI/LOGO
 					      while logged in */
 #define SISL_ASTATUS_FC0_CRC_T	 0x2000ULL /* b50, CRC threshold exceeded */
-#define SISL_ASTATUS_FC0_LOGI_R	 0x1000ULL /* b51, login state mechine timed out
+#define SISL_ASTATUS_FC0_LOGI_R	 0x1000ULL /* b51, login state machine timed out
 					      and retrying */
 #define SISL_ASTATUS_FC0_LOGI_F	 0x0800ULL /* b52, login failed,
 					      FC_ERROR[19:0] */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index d2309afe8c49..b5eeeff0fd0c 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -76,7 +76,7 @@ void cxlflash_free_errpage(void)
  *
  * When the host needs to go down, all users must be quiesced and their
  * memory freed. This is accomplished by putting the contexts in error
- * state which will notify the user and let them 'drive' the tear-down.
+ * state which will notify the user and let them 'drive' the tear down.
  * Meanwhile, this routine camps until all user contexts have been removed.
  */
 void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index f91b5b3558aa..b0eaf557cc2f 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -132,7 +132,7 @@ static int ba_init(struct ba_lun *ba_lun)
 		return -ENOMEM;
 	}
 
-	/* Pass the allocated lun info as a handle to the user */
+	/* Pass the allocated LUN info as a handle to the user */
 	ba_lun->ba_lun_handle = bali;
 
 	pr_debug("%s: Successfully initialized the LUN: "
@@ -165,7 +165,7 @@ static int find_free_range(u32 low,
 			num_bits = (sizeof(*lam) * BITS_PER_BYTE);
 			bit_pos = find_first_bit(lam, num_bits);
 
-			pr_devel("%s: Found free bit %llX in lun "
+			pr_devel("%s: Found free bit %llX in LUN "
 				 "map entry %llX at bitmap index = %X\n",
 				 __func__, bit_pos, bali->lun_alloc_map[i],
 				 i);
@@ -682,14 +682,14 @@ out:
 }
 
 /**
- * _cxlflash_vlun_resize() - changes the size of a virtual lun
+ * _cxlflash_vlun_resize() - changes the size of a virtual LUN
  * @sdev:	SCSI device associated with LUN owning virtual LUN.
  * @ctxi:	Context owning resources.
  * @resize:	Resize ioctl data structure.
  *
  * On successful return, the user is informed of the new size (in blocks)
- * of the virtual lun in last LBA format. When the size of the virtual
- * lun is zero, the last LBA is reflected as -1. See comment in the
+ * of the virtual LUN in last LBA format. When the size of the virtual
+ * LUN is zero, the last LBA is reflected as -1. See comment in the
  * prologue for _cxlflash_disk_release() regarding AFU syncs and contexts
  * on the error recovery list.
  *
@@ -886,8 +886,8 @@ out:
  * @arg:	UVirtual ioctl data structure.
  *
  * On successful return, the user is informed of the resource handle
- * to be used to identify the virtual lun and the size (in blocks) of
- * the virtual lun in last LBA format. When the size of the virtual lun
+ * to be used to identify the virtual LUN and the size (in blocks) of
+ * the virtual LUN in last LBA format. When the size of the virtual LUN
  * is zero, the last LBA is reflected as -1.
  *
  * Return: 0 on success, -errno on failure