author     Stefan Richter <stefanr@s5r6.in-berlin.de>   2008-08-09 14:16:24 -0400
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>   2008-10-15 16:21:07 -0400
commit     cd8c79f17a878b01f3e83a8efd89da18ccdc7ef3
tree       206c8c3e96fdc1876929e668d38d518f2b6a63dd /drivers
parent     0a77b17c855c4ec1c87ed80e0f280095a4ee1f4f
ieee1394: sbp2: check for DMA mapping failures
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ieee1394/sbp2.c | 94
1 file changed, 59 insertions(+), 35 deletions(-)
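Background note (not part of the original commit message): streaming DMA mappings created with dma_map_single() or dma_map_page() can fail, for example when an IOMMU runs out of mapping space, so the DMA API requires that every returned handle be checked with dma_mapping_error() before use, and that any mappings already established be undone on the error path. The sketch below shows that map-then-check pattern in isolation; the function and variable names (map_two_buffers, buf_a, buf_b) are illustrative only and do not appear in sbp2.c.

/*
 * Minimal sketch of the map-then-check pattern this patch applies.
 * Kernel context assumed; names are illustrative, not from sbp2.c.
 */
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

static int map_two_buffers(struct device *dev,
                           void *buf_a, size_t len_a, dma_addr_t *dma_a,
                           void *buf_b, size_t len_b, dma_addr_t *dma_b)
{
        /* Map the first buffer and verify the handle before using it. */
        *dma_a = dma_map_single(dev, buf_a, len_a, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *dma_a))
                return -ENOMEM;

        /* Map the second buffer; on failure, undo the first mapping. */
        *dma_b = dma_map_single(dev, buf_b, len_b, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *dma_b)) {
                dma_unmap_single(dev, *dma_a, len_a, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        return 0;
}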
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 7a8119e0c910..0037305f599e 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -526,26 +526,41 @@ static void sbp2util_write_doorbell(struct work_struct *work)
 
 static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
 {
-        struct sbp2_fwhost_info *hi = lu->hi;
         struct sbp2_command_info *cmd;
+        struct device *dmadev = lu->hi->host->device.parent;
         int i, orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS;
 
         for (i = 0; i < orbs; i++) {
                 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
                 if (!cmd)
-                        return -ENOMEM;
-                cmd->command_orb_dma = dma_map_single(hi->host->device.parent,
-                                                      &cmd->command_orb,
-                                                      sizeof(struct sbp2_command_orb),
-                                                      DMA_TO_DEVICE);
-                cmd->sge_dma = dma_map_single(hi->host->device.parent,
-                                              &cmd->scatter_gather_element,
-                                              sizeof(cmd->scatter_gather_element),
-                                              DMA_TO_DEVICE);
+                        goto failed_alloc;
+
+                cmd->command_orb_dma =
+                        dma_map_single(dmadev, &cmd->command_orb,
+                                       sizeof(struct sbp2_command_orb),
+                                       DMA_TO_DEVICE);
+                if (dma_mapping_error(dmadev, cmd->command_orb_dma))
+                        goto failed_orb;
+
+                cmd->sge_dma =
+                        dma_map_single(dmadev, &cmd->scatter_gather_element,
+                                       sizeof(cmd->scatter_gather_element),
+                                       DMA_TO_DEVICE);
+                if (dma_mapping_error(dmadev, cmd->sge_dma))
+                        goto failed_sge;
+
                 INIT_LIST_HEAD(&cmd->list);
                 list_add_tail(&cmd->list, &lu->cmd_orb_completed);
         }
         return 0;
+
+failed_sge:
+        dma_unmap_single(dmadev, cmd->command_orb_dma,
+                         sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
+failed_orb:
+        kfree(cmd);
+failed_alloc:
+        return -ENOMEM;
 }
 
 static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu,
@@ -1494,14 +1509,16 @@ static int sbp2_agent_reset(struct sbp2_lu *lu, int wait)
         return 0;
 }
 
-static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
-                                     struct sbp2_fwhost_info *hi,
-                                     struct sbp2_command_info *cmd,
-                                     unsigned int scsi_use_sg,
-                                     struct scatterlist *sg,
-                                     u32 orb_direction,
-                                     enum dma_data_direction dma_dir)
+static int sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
+                                    struct sbp2_fwhost_info *hi,
+                                    struct sbp2_command_info *cmd,
+                                    unsigned int scsi_use_sg,
+                                    struct scatterlist *sg,
+                                    u32 orb_direction,
+                                    enum dma_data_direction dma_dir)
 {
+        struct device *dmadev = hi->host->device.parent;
+
         cmd->dma_dir = dma_dir;
         orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
         orb->misc |= ORB_SET_DIRECTION(orb_direction);
@@ -1511,9 +1528,12 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
 
                 cmd->dma_size = sg->length;
                 cmd->dma_type = CMD_DMA_PAGE;
-                cmd->cmd_dma = dma_map_page(hi->host->device.parent,
-                                            sg_page(sg), sg->offset,
-                                            cmd->dma_size, cmd->dma_dir);
+                cmd->cmd_dma = dma_map_page(dmadev, sg_page(sg), sg->offset,
+                                            cmd->dma_size, cmd->dma_dir);
+                if (dma_mapping_error(dmadev, cmd->cmd_dma)) {
+                        cmd->cmd_dma = 0;
+                        return -ENOMEM;
+                }
 
                 orb->data_descriptor_lo = cmd->cmd_dma;
                 orb->misc |= ORB_SET_DATA_SIZE(cmd->dma_size);
@@ -1523,8 +1543,7 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
                                 &cmd->scatter_gather_element[0];
                 u32 sg_count, sg_len;
                 dma_addr_t sg_addr;
-                int i, count = dma_map_sg(hi->host->device.parent, sg,
-                                          scsi_use_sg, dma_dir);
+                int i, count = dma_map_sg(dmadev, sg, scsi_use_sg, dma_dir);
 
                 cmd->dma_size = scsi_use_sg;
                 cmd->sge_buffer = sg;
@@ -1533,7 +1552,7 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
                 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
                 orb->data_descriptor_lo = cmd->sge_dma;
 
-                dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
+                dma_sync_single_for_cpu(dmadev, cmd->sge_dma,
                                         sizeof(cmd->scatter_gather_element),
                                         DMA_TO_DEVICE);
 
@@ -1564,22 +1583,23 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
                                 (sizeof(struct sbp2_unrestricted_page_table)) *
                                 sg_count);
 
-                dma_sync_single_for_device(hi->host->device.parent,
-                                           cmd->sge_dma,
+                dma_sync_single_for_device(dmadev, cmd->sge_dma,
                                            sizeof(cmd->scatter_gather_element),
                                            DMA_TO_DEVICE);
         }
+        return 0;
 }
 
-static void sbp2_create_command_orb(struct sbp2_lu *lu,
-                                    struct sbp2_command_info *cmd,
-                                    struct scsi_cmnd *SCpnt)
+static int sbp2_create_command_orb(struct sbp2_lu *lu,
+                                   struct sbp2_command_info *cmd,
+                                   struct scsi_cmnd *SCpnt)
 {
         struct device *dmadev = lu->hi->host->device.parent;
         struct sbp2_command_orb *orb = &cmd->command_orb;
-        u32 orb_direction;
         unsigned int scsi_request_bufflen = scsi_bufflen(SCpnt);
         enum dma_data_direction dma_dir = SCpnt->sc_data_direction;
+        u32 orb_direction;
+        int ret;
 
         dma_sync_single_for_cpu(dmadev, cmd->command_orb_dma,
                         sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
@@ -1613,11 +1633,13 @@ static void sbp2_create_command_orb(struct sbp2_lu *lu,
                 orb->data_descriptor_hi = 0x0;
                 orb->data_descriptor_lo = 0x0;
                 orb->misc |= ORB_SET_DIRECTION(1);
-        } else
-                sbp2_prep_command_orb_sg(orb, lu->hi, cmd, scsi_sg_count(SCpnt),
-                                         scsi_sglist(SCpnt),
-                                         orb_direction, dma_dir);
-
+                ret = 0;
+        } else {
+                ret = sbp2_prep_command_orb_sg(orb, lu->hi, cmd,
+                                               scsi_sg_count(SCpnt),
+                                               scsi_sglist(SCpnt),
+                                               orb_direction, dma_dir);
+        }
         sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb));
 
         memset(orb->cdb, 0, sizeof(orb->cdb));
@@ -1625,6 +1647,7 @@ static void sbp2_create_command_orb(struct sbp2_lu *lu,
 
         dma_sync_single_for_device(dmadev, cmd->command_orb_dma,
                         sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
+        return ret;
 }
 
 static void sbp2_link_orb_command(struct sbp2_lu *lu,
@@ -1705,9 +1728,10 @@ static int sbp2_send_command(struct sbp2_lu *lu, struct scsi_cmnd *SCpnt,
         if (!cmd)
                 return -EIO;
 
-        sbp2_create_command_orb(lu, cmd, SCpnt);
-        sbp2_link_orb_command(lu, cmd);
+        if (sbp2_create_command_orb(lu, cmd, SCpnt))
+                return -ENOMEM;
 
+        sbp2_link_orb_command(lu, cmd);
         return 0;
 }
 