author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>   2007-05-14 07:13:44 -0400
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>   2007-05-31 18:38:36 -0400
commit     32fbac2256cedee62de5602b1703c15c150a6e14
tree       511e33ad4061f2f80ed1bf084cc22e16545e7923 /drivers/scsi
parent     155d98f072bbb4ffb5cefc7cecbba06df37699ab
[SCSI] megaraid: convert to use the data buffer accessors
- remove the unnecessary map_single path.

- convert to use the new accessors for the sg lists and the parameters.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Sumant Patro <sumant.patro@lsi.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
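For reference, the conversion replaces direct use of scp->request_buffer, scp->use_sg and
scp->request_bufflen with the midlayer data buffer accessors (scsi_dma_map(), scsi_for_each_sg(),
scsi_sglist(), scsi_sg_count(), scsi_bufflen(), scsi_dma_unmap()). The sketch below shows that
pattern in isolation; build_sgl(), teardown_sgl() and struct my_sge are hypothetical stand-ins
for the driver's own megaraid_mbox_mksgl() and sgl64 entries, not code from this patch.

/*
 * Minimal sketch of the accessor-based mapping pattern, assuming a
 * hardware SG entry type (struct my_sge) and helper names invented
 * for illustration.  Only the scsi_*() and sg_dma_*() calls come
 * from the SCSI midlayer / DMA API.
 */
#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

struct my_sge {
	u64	address;
	u32	length;
};

static int build_sgl(struct scsi_cmnd *scp, struct my_sge *sge, int max_sge)
{
	struct scatterlist *sg;
	int sgcnt, i;

	/* Map the command's sg list; 0 means no data, negative means error. */
	sgcnt = scsi_dma_map(scp);
	if (sgcnt <= 0 || sgcnt > max_sge)
		return sgcnt;

	/* Walk the mapped entries with the midlayer iterator. */
	scsi_for_each_sg(scp, sg, sgcnt, i) {
		sge[i].address = sg_dma_address(sg);
		sge[i].length  = sg_dma_len(sg);
	}

	return sgcnt;	/* number of hardware SG entries filled in */
}

/*
 * At completion time a single call undoes the mapping for both the
 * sg-list case and what used to be the map_single path.
 */
static void teardown_sgl(struct scsi_cmnd *scp)
{
	scsi_dma_unmap(scp);
}

Because scsi_dma_map() never hands back a separately mapped single buffer, the MRAID_DMA_WBUF
bookkeeping and the pci_dma_sync_single_*()/pci_unmap_page() branches become dead code, which is
why the patch below deletes them.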
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 165
1 file changed, 44 insertions(+), 121 deletions(-)
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 04d0b6918c61..7d5a4d651b03 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1378,8 +1378,6 @@ megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
 {
 	struct scatterlist	*sgl;
 	mbox_ccb_t		*ccb;
-	struct page		*page;
-	unsigned long		offset;
 	struct scsi_cmnd	*scp;
 	int			sgcnt;
 	int			i;
@@ -1388,48 +1386,16 @@ megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
 	scp = scb->scp;
 	ccb = (mbox_ccb_t *)scb->ccb;
 
+	sgcnt = scsi_dma_map(scp);
+	BUG_ON(sgcnt < 0 || sgcnt > adapter->sglen);
+
 	// no mapping required if no data to be transferred
-	if (!scp->request_buffer || !scp->request_bufflen)
+	if (!sgcnt)
 		return 0;
 
-	if (!scp->use_sg) {	/* scatter-gather list not used */
-
-		page = virt_to_page(scp->request_buffer);
-
-		offset = ((unsigned long)scp->request_buffer & ~PAGE_MASK);
-
-		ccb->buf_dma_h = pci_map_page(adapter->pdev, page, offset,
-						  scp->request_bufflen,
-						  scb->dma_direction);
-		scb->dma_type = MRAID_DMA_WBUF;
-
-		/*
-		 * We need to handle special 64-bit commands that need a
-		 * minimum of 1 SG
-		 */
-		sgcnt = 1;
-		ccb->sgl64[0].address	= ccb->buf_dma_h;
-		ccb->sgl64[0].length	= scp->request_bufflen;
-
-		return sgcnt;
-	}
-
-	sgl = (struct scatterlist *)scp->request_buffer;
-
-	// The number of sg elements returned must not exceed our limit
-	sgcnt = pci_map_sg(adapter->pdev, sgl, scp->use_sg,
-			scb->dma_direction);
-
-	if (sgcnt > adapter->sglen) {
-		con_log(CL_ANN, (KERN_CRIT
-			"megaraid critical: too many sg elements:%d\n",
-			sgcnt));
-		BUG();
-	}
-
 	scb->dma_type = MRAID_DMA_WSG;
 
-	for (i = 0; i < sgcnt; i++, sgl++) {
+	scsi_for_each_sg(scp, sgl, sgcnt, i) {
 		ccb->sgl64[i].address	= sg_dma_address(sgl);
 		ccb->sgl64[i].length	= sg_dma_len(sgl);
 	}
@@ -1489,19 +1455,11 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
 
 	adapter->outstanding_cmds++;
 
-	if (scb->dma_direction == PCI_DMA_TODEVICE) {
-		if (!scb->scp->use_sg) {	// sg list not used
-			pci_dma_sync_single_for_device(adapter->pdev,
-					ccb->buf_dma_h,
-					scb->scp->request_bufflen,
-					PCI_DMA_TODEVICE);
-		}
-		else {
-			pci_dma_sync_sg_for_device(adapter->pdev,
-				scb->scp->request_buffer,
-				scb->scp->use_sg, PCI_DMA_TODEVICE);
-		}
-	}
+	if (scb->dma_direction == PCI_DMA_TODEVICE)
+		pci_dma_sync_sg_for_device(adapter->pdev,
+					   scsi_sglist(scb->scp),
+					   scsi_sg_count(scb->scp),
+					   PCI_DMA_TODEVICE);
 
 	mbox->busy	= 1;	// Set busy
 	mbox->poll	= 0;
@@ -1624,29 +1582,26 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
 		return scb;
 
 	case MODE_SENSE:
-		if (scp->use_sg) {
-			struct scatterlist	*sgl;
-			caddr_t			vaddr;
+	{
+		struct scatterlist	*sgl;
+		caddr_t			vaddr;
 
-			sgl = (struct scatterlist *)scp->request_buffer;
-			if (sgl->page) {
-				vaddr = (caddr_t)
-					(page_address((&sgl[0])->page)
-					+ (&sgl[0])->offset);
+		sgl = scsi_sglist(scp);
+		if (sgl->page) {
+			vaddr = (caddr_t)
+				(page_address((&sgl[0])->page)
+				+ (&sgl[0])->offset);
 
-				memset(vaddr, 0, scp->cmnd[4]);
-			}
-			else {
-				con_log(CL_ANN, (KERN_WARNING
-					"megaraid mailbox: invalid sg:%d\n",
-					__LINE__));
-			}
+			memset(vaddr, 0, scp->cmnd[4]);
 		}
 		else {
-			memset(scp->request_buffer, 0, scp->cmnd[4]);
+			con_log(CL_ANN, (KERN_WARNING
+				"megaraid mailbox: invalid sg:%d\n",
+				__LINE__));
 		}
-		scp->result = (DID_OK << 16);
-		return NULL;
+	}
+	scp->result = (DID_OK << 16);
+	return NULL;
 
 	case INQUIRY:
 		/*
@@ -1716,7 +1671,7 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
 			mbox->cmd		= MBOXCMD_PASSTHRU64;
 			scb->dma_direction	= scp->sc_data_direction;
 
-			pthru->dataxferlen	= scp->request_bufflen;
+			pthru->dataxferlen	= scsi_bufflen(scp);
 			pthru->dataxferaddr	= ccb->sgl_dma_h;
 			pthru->numsge		= megaraid_mbox_mksgl(adapter,
 							scb);
@@ -2050,8 +2005,8 @@ megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
 
 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
 
-	if (scp->request_bufflen) {
-		pthru->dataxferlen	= scp->request_bufflen;
+	if (scsi_bufflen(scp)) {
+		pthru->dataxferlen	= scsi_bufflen(scp);
 		pthru->dataxferaddr	= ccb->sgl_dma_h;
 		pthru->numsge		= megaraid_mbox_mksgl(adapter, scb);
 	}
@@ -2099,8 +2054,8 @@ megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
 
 	memcpy(epthru->cdb, scp->cmnd, scp->cmd_len);
 
-	if (scp->request_bufflen) {
-		epthru->dataxferlen	= scp->request_bufflen;
+	if (scsi_bufflen(scp)) {
+		epthru->dataxferlen	= scsi_bufflen(scp);
 		epthru->dataxferaddr	= ccb->sgl_dma_h;
 		epthru->numsge		= megaraid_mbox_mksgl(adapter, scb);
 	}
@@ -2266,37 +2221,13 @@ megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
 
 	ccb	= (mbox_ccb_t *)scb->ccb;
 
-	switch (scb->dma_type) {
-
-	case MRAID_DMA_WBUF:
-		if (scb->dma_direction == PCI_DMA_FROMDEVICE) {
-			pci_dma_sync_single_for_cpu(adapter->pdev,
-					ccb->buf_dma_h,
-					scb->scp->request_bufflen,
-					PCI_DMA_FROMDEVICE);
-		}
-
-		pci_unmap_page(adapter->pdev, ccb->buf_dma_h,
-				scb->scp->request_bufflen, scb->dma_direction);
-
-		break;
-
-	case MRAID_DMA_WSG:
-		if (scb->dma_direction == PCI_DMA_FROMDEVICE) {
-			pci_dma_sync_sg_for_cpu(adapter->pdev,
-				scb->scp->request_buffer,
-				scb->scp->use_sg, PCI_DMA_FROMDEVICE);
-		}
-
-		pci_unmap_sg(adapter->pdev, scb->scp->request_buffer,
-			scb->scp->use_sg, scb->dma_direction);
-
-		break;
-
-	default:
-		break;
-	}
+	if (scb->dma_direction == PCI_DMA_FROMDEVICE)
+		pci_dma_sync_sg_for_cpu(adapter->pdev,
+					scsi_sglist(scb->scp),
+					scsi_sg_count(scb->scp),
+					PCI_DMA_FROMDEVICE);
 
+	scsi_dma_unmap(scb->scp);
 	return;
 }
 
@@ -2399,24 +2330,16 @@ megaraid_mbox_dpc(unsigned long devp)
 		if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0
 				&& IS_RAID_CH(raid_dev, scb->dev_channel)) {
 
-			if (scp->use_sg) {
-				sgl = (struct scatterlist *)
-					scp->request_buffer;
-
-				if (sgl->page) {
-					c = *(unsigned char *)
-					(page_address((&sgl[0])->page) +
-						(&sgl[0])->offset);
-				}
-				else {
-					con_log(CL_ANN, (KERN_WARNING
-						"megaraid mailbox: invalid sg:%d\n",
-						__LINE__));
-					c = 0;
-				}
-			}
-			else {
-				c = *(uint8_t *)scp->request_buffer;
-			}
+			sgl = scsi_sglist(scp);
+			if (sgl->page) {
+				c = *(unsigned char *)
+				(page_address((&sgl[0])->page) +
+					(&sgl[0])->offset);
+			} else {
+				con_log(CL_ANN, (KERN_WARNING
+					"megaraid mailbox: invalid sg:%d\n",
+					__LINE__));
+				c = 0;
+			}
 
 			if ((c & 0x1F ) == TYPE_DISK) {