author	Stephen M. Cameron <scameron@beardog.cce.hp.com>	2010-08-26 14:56:30 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2010-09-10 06:12:39 -0400
commit	0c9f5ba7cb7435ea4b99599de4af0729f0740647 (patch)
tree	1c80511d7a57c5ce2998d6c26eaeb5d6814a8f27 /drivers
parent	f32f125b1c14dcde49ec415ec941af750433251e (diff)
cciss: factor out cciss_big_passthru
Signed-off-by: Stephen M. Cameron <scameron@beardog.cce.hp.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/block/cciss.c	307
1 file changed, 151 insertions, 156 deletions
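The change is a straight code motion: the body of the CCISS_BIG_PASSTHRU case in cciss_ioctl() becomes the new helper cciss_bigpassthru(), with no intended functional change. For context, below is a minimal userspace sketch of how this ioctl path is typically driven. It is an illustration, not part of the patch: the device node is hypothetical, and the field names assume the BIG_IOCTL_Command_struct layout from linux/cciss_ioctl.h.

	/*
	 * Illustrative userspace sketch of the path cciss_bigpassthru()
	 * services.  Field names follow BIG_IOCTL_Command_struct from
	 * linux/cciss_ioctl.h; the device node and the INQUIRY CDB are
	 * examples only.  Needs CAP_SYS_RAWIO (e.g. run as root).
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/cciss_ioctl.h>

	int main(void)
	{
		BIG_IOCTL_Command_struct ioc;
		unsigned char inq[96];
		int fd = open("/dev/cciss/c0d0", O_RDWR); /* hypothetical node */

		if (fd < 0)
			return 1;

		memset(&ioc, 0, sizeof(ioc));
		memset(inq, 0, sizeof(inq));
		/* LUN_info left zeroed here for illustration */
		ioc.Request.CDBLen = 6;
		ioc.Request.Type.Type = TYPE_CMD;
		ioc.Request.Type.Attribute = ATTR_SIMPLE;
		ioc.Request.Type.Direction = XFER_READ;
		ioc.Request.Timeout = 0;
		ioc.Request.CDB[0] = 0x12;	/* SCSI INQUIRY */
		ioc.Request.CDB[4] = sizeof(inq);
		ioc.malloc_size = sizeof(inq);	/* per-chunk kmalloc size */
		ioc.buf_size = sizeof(inq);	/* total transfer length */
		ioc.buf = inq;

		if (ioctl(fd, CCISS_BIG_PASSTHRU, &ioc) < 0)
			perror("CCISS_BIG_PASSTHRU");
		else
			printf("vendor: %.8s\n", inq + 8);

		close(fd);
		return 0;
	}

Because buf_size here does not exceed malloc_size, the handler builds a single scatter-gather entry; larger transfers are split into chunks of malloc_size bytes, up to MAXSGENTRIES of them, which is exactly the bound the factored-out helper checks.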
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 076dbcfa9471..cff2fa1972cb 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1498,6 +1498,155 @@ static int cciss_passthru(ctlr_info_t *h, void __user *argp)
 	return 0;
 }
 
+static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp)
+{
+	BIG_IOCTL_Command_struct *ioc;
+	CommandList_struct *c;
+	unsigned char **buff = NULL;
+	int *buff_size = NULL;
+	u64bit temp64;
+	BYTE sg_used = 0;
+	int status = 0;
+	int i;
+	DECLARE_COMPLETION_ONSTACK(wait);
+	__u32 left;
+	__u32 sz;
+	BYTE __user *data_ptr;
+
+	if (!argp)
+		return -EINVAL;
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+	ioc = (BIG_IOCTL_Command_struct *)
+	    kmalloc(sizeof(*ioc), GFP_KERNEL);
+	if (!ioc) {
+		status = -ENOMEM;
+		goto cleanup1;
+	}
+	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
+		status = -EFAULT;
+		goto cleanup1;
+	}
+	if ((ioc->buf_size < 1) &&
+	    (ioc->Request.Type.Direction != XFER_NONE)) {
+		status = -EINVAL;
+		goto cleanup1;
+	}
+	/* Check kmalloc limits using all SGs */
+	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
+		status = -EINVAL;
+		goto cleanup1;
+	}
+	if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
+		status = -EINVAL;
+		goto cleanup1;
+	}
+	buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
+	if (!buff) {
+		status = -ENOMEM;
+		goto cleanup1;
+	}
+	buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
+	if (!buff_size) {
+		status = -ENOMEM;
+		goto cleanup1;
+	}
+	left = ioc->buf_size;
+	data_ptr = ioc->buf;
+	while (left) {
+		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
+		buff_size[sg_used] = sz;
+		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
+		if (buff[sg_used] == NULL) {
+			status = -ENOMEM;
+			goto cleanup1;
+		}
+		if (ioc->Request.Type.Direction == XFER_WRITE) {
+			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
+				status = -EFAULT;
+				goto cleanup1;
+			}
+		} else {
+			memset(buff[sg_used], 0, sz);
+		}
+		left -= sz;
+		data_ptr += sz;
+		sg_used++;
+	}
+	c = cmd_special_alloc(h);
+	if (!c) {
+		status = -ENOMEM;
+		goto cleanup1;
+	}
+	c->cmd_type = CMD_IOCTL_PEND;
+	c->Header.ReplyQueue = 0;
+
+	if (ioc->buf_size > 0) {
+		c->Header.SGList = sg_used;
+		c->Header.SGTotal = sg_used;
+	} else {
+		c->Header.SGList = 0;
+		c->Header.SGTotal = 0;
+	}
+	c->Header.LUN = ioc->LUN_info;
+	c->Header.Tag.lower = c->busaddr;
+
+	c->Request = ioc->Request;
+	if (ioc->buf_size > 0) {
+		for (i = 0; i < sg_used; i++) {
+			temp64.val =
+			    pci_map_single(h->pdev, buff[i], buff_size[i],
+				PCI_DMA_BIDIRECTIONAL);
+			c->SG[i].Addr.lower = temp64.val32.lower;
+			c->SG[i].Addr.upper = temp64.val32.upper;
+			c->SG[i].Len = buff_size[i];
+			c->SG[i].Ext = 0; /* we are not chaining */
+		}
+	}
+	c->waiting = &wait;
+	enqueue_cmd_and_start_io(h, c);
+	wait_for_completion(&wait);
+	/* unlock the buffers from DMA */
+	for (i = 0; i < sg_used; i++) {
+		temp64.val32.lower = c->SG[i].Addr.lower;
+		temp64.val32.upper = c->SG[i].Addr.upper;
+		pci_unmap_single(h->pdev,
+			(dma_addr_t) temp64.val, buff_size[i],
+			PCI_DMA_BIDIRECTIONAL);
+	}
+	check_ioctl_unit_attention(h, c);
+	/* Copy the error information out */
+	ioc->error_info = *(c->err_info);
+	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
+		cmd_special_free(h, c);
+		status = -EFAULT;
+		goto cleanup1;
+	}
+	if (ioc->Request.Type.Direction == XFER_READ) {
+		/* Copy the data out of the buffer we created */
+		BYTE __user *ptr = ioc->buf;
+		for (i = 0; i < sg_used; i++) {
+			if (copy_to_user(ptr, buff[i], buff_size[i])) {
+				cmd_special_free(h, c);
+				status = -EFAULT;
+				goto cleanup1;
+			}
+			ptr += buff_size[i];
+		}
+	}
+	cmd_special_free(h, c);
+	status = 0;
+cleanup1:
+	if (buff) {
+		for (i = 0; i < sg_used; i++)
+			kfree(buff[i]);
+		kfree(buff);
+	}
+	kfree(buff_size);
+	kfree(ioc);
+	return status;
+}
+
 static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 	unsigned int cmd, unsigned long arg)
 {
@@ -1534,162 +1683,8 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 		return cciss_getluninfo(h, disk, argp);
 	case CCISS_PASSTHRU:
 		return cciss_passthru(h, argp);
-	case CCISS_BIG_PASSTHRU:{
-			BIG_IOCTL_Command_struct *ioc;
-			CommandList_struct *c;
-			unsigned char **buff = NULL;
-			int *buff_size = NULL;
-			u64bit temp64;
-			BYTE sg_used = 0;
-			int status = 0;
-			int i;
-			DECLARE_COMPLETION_ONSTACK(wait);
-			__u32 left;
-			__u32 sz;
-			BYTE __user *data_ptr;
-
-			if (!arg)
-				return -EINVAL;
-			if (!capable(CAP_SYS_RAWIO))
-				return -EPERM;
-			ioc = (BIG_IOCTL_Command_struct *)
-			    kmalloc(sizeof(*ioc), GFP_KERNEL);
-			if (!ioc) {
-				status = -ENOMEM;
-				goto cleanup1;
-			}
-			if (copy_from_user(ioc, argp, sizeof(*ioc))) {
-				status = -EFAULT;
-				goto cleanup1;
-			}
-			if ((ioc->buf_size < 1) &&
-			    (ioc->Request.Type.Direction != XFER_NONE)) {
-				status = -EINVAL;
-				goto cleanup1;
-			}
-			/* Check kmalloc limits using all SGs */
-			if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
-				status = -EINVAL;
-				goto cleanup1;
-			}
-			if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
-				status = -EINVAL;
-				goto cleanup1;
-			}
-			buff =
-			    kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
-			if (!buff) {
-				status = -ENOMEM;
-				goto cleanup1;
-			}
-			buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
-					    GFP_KERNEL);
-			if (!buff_size) {
-				status = -ENOMEM;
-				goto cleanup1;
-			}
-			left = ioc->buf_size;
-			data_ptr = ioc->buf;
-			while (left) {
-				sz = (left >
-				      ioc->malloc_size) ? ioc->
-				    malloc_size : left;
-				buff_size[sg_used] = sz;
-				buff[sg_used] = kmalloc(sz, GFP_KERNEL);
-				if (buff[sg_used] == NULL) {
-					status = -ENOMEM;
-					goto cleanup1;
-				}
-				if (ioc->Request.Type.Direction == XFER_WRITE) {
-					if (copy_from_user
-					    (buff[sg_used], data_ptr, sz)) {
-						status = -EFAULT;
-						goto cleanup1;
-					}
-				} else {
-					memset(buff[sg_used], 0, sz);
-				}
-				left -= sz;
-				data_ptr += sz;
-				sg_used++;
-			}
-			c = cmd_special_alloc(h);
-			if (!c) {
-				status = -ENOMEM;
-				goto cleanup1;
-			}
-			c->cmd_type = CMD_IOCTL_PEND;
-			c->Header.ReplyQueue = 0;
-
-			if (ioc->buf_size > 0) {
-				c->Header.SGList = sg_used;
-				c->Header.SGTotal = sg_used;
-			} else {
-				c->Header.SGList = 0;
-				c->Header.SGTotal = 0;
-			}
-			c->Header.LUN = ioc->LUN_info;
-			c->Header.Tag.lower = c->busaddr;
-
-			c->Request = ioc->Request;
-			if (ioc->buf_size > 0) {
-				for (i = 0; i < sg_used; i++) {
-					temp64.val =
-					    pci_map_single(h->pdev, buff[i],
-						    buff_size[i],
-						    PCI_DMA_BIDIRECTIONAL);
-					c->SG[i].Addr.lower =
-					    temp64.val32.lower;
-					c->SG[i].Addr.upper =
-					    temp64.val32.upper;
-					c->SG[i].Len = buff_size[i];
-					c->SG[i].Ext = 0; /* we are not chaining */
-				}
-			}
-			c->waiting = &wait;
-			enqueue_cmd_and_start_io(h, c);
-			wait_for_completion(&wait);
-			/* unlock the buffers from DMA */
-			for (i = 0; i < sg_used; i++) {
-				temp64.val32.lower = c->SG[i].Addr.lower;
-				temp64.val32.upper = c->SG[i].Addr.upper;
-				pci_unmap_single(h->pdev,
-					(dma_addr_t) temp64.val, buff_size[i],
-					PCI_DMA_BIDIRECTIONAL);
-			}
-			check_ioctl_unit_attention(h, c);
-			/* Copy the error information out */
-			ioc->error_info = *(c->err_info);
-			if (copy_to_user(argp, ioc, sizeof(*ioc))) {
-				cmd_special_free(h, c);
-				status = -EFAULT;
-				goto cleanup1;
-			}
-			if (ioc->Request.Type.Direction == XFER_READ) {
-				/* Copy the data out of the buffer we created */
-				BYTE __user *ptr = ioc->buf;
-				for (i = 0; i < sg_used; i++) {
-					if (copy_to_user
-					    (ptr, buff[i], buff_size[i])) {
-						cmd_special_free(h, c);
-						status = -EFAULT;
-						goto cleanup1;
-					}
-					ptr += buff_size[i];
-				}
-			}
-			cmd_special_free(h, c);
-			status = 0;
-		      cleanup1:
-			if (buff) {
-				for (i = 0; i < sg_used; i++)
-					kfree(buff[i]);
-				kfree(buff);
-			}
-			kfree(buff_size);
-			kfree(ioc);
-			return status;
-		}
+	case CCISS_BIG_PASSTHRU:
+		return cciss_bigpassthru(h, argp);
 
 	/* scsi_cmd_ioctl handles these, below, though some are not */
 	/* very meaningful for cciss.  SG_IO is the main one people want. */