author    FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>  2008-08-28 03:17:08 -0400
committer Jens Axboe <jens.axboe@oracle.com>  2008-10-09 02:56:10 -0400
commit    6e5a30cba5e7c03b2cd564e968f1dd667a0f7c42
tree      4a48fa4edc2a285a860b6f6771ae732c036143a5  /drivers/scsi/sg.c
parent    10865dfa34e7552c4c64606edcdf1e21a110c985
sg: convert the direct IO path to use the block layer
This patch converts the direct IO path (SG_FLAG_DIRECT_IO) to use the
block layer functions (blk_get_request, blk_execute_rq_nowait,
blk_rq_map_user, etc) instead of scsi_execute_async().
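For orientation, the pattern being adopted looks roughly like the sketch below. This is hypothetical illustration, not code from this patch: the function sketch_direct_io, its parameters, and the error handling are invented, while the block-layer calls (blk_get_request, blk_rq_map_user, blk_execute_rq_nowait) follow the signatures of this kernel era as used in the diff that follows.

#include <linux/blkdev.h>
#include <linux/string.h>

/* Hypothetical sketch of the converted direct IO pattern; not part of
 * this patch.  The block layer pins the user pages and builds the bio,
 * replacing the hand-rolled get_user_pages() code removed below.
 */
static int sketch_direct_io(struct request_queue *q, unsigned char *cdb,
			    unsigned short cdb_len, void __user *ubuf,
			    unsigned int len, rq_end_io_fn *done)
{
	struct request *rq;
	int res;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* SCSI passthrough request */
	rq->cmd_len = cdb_len;
	memcpy(rq->cmd, cdb, cdb_len);

	/* Map (pin) the user buffer and attach it to the request as a bio. */
	res = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (res) {
		blk_put_request(rq);
		return res;
	}

	/* Asynchronous dispatch; the completion handler 'done' must call
	 * blk_rq_unmap_user(rq->bio) and blk_put_request(rq), which is
	 * what sg_finish_rem_req() does after this patch.
	 */
	blk_execute_rq_nowait(q, NULL, rq, 0, done);
	return 0;
}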
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Douglas Gilbert <dougg@torque.net>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'drivers/scsi/sg.c')
 -rw-r--r--  drivers/scsi/sg.c  173
 1 file changed, 27 insertions, 146 deletions
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 487c7776cc4e..cb6de0752ee1 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -138,6 +138,7 @@ typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
 	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
 	volatile char done;	/* 0->before bh, 1->before read, 2->read */
 	struct request *rq;
+	struct bio *bio;
 } Sg_request;
 
 typedef struct sg_fd {	/* holds the state of a file descriptor */
@@ -1679,21 +1680,29 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
 	sg_io_hdr_t *hp = &srp->header;
 	int dxfer_len = (int) hp->dxfer_len;
 	int dxfer_dir = hp->dxfer_direction;
+	unsigned long uaddr = (unsigned long)hp->dxferp;
 	Sg_scatter_hold *req_schp = &srp->data;
 	Sg_scatter_hold *rsv_schp = &sfp->reserve;
+	struct request_queue *q = sfp->parentdp->device->request_queue;
+	unsigned long alignment = queue_dma_alignment(q) | q->dma_pad_mask;
 
 	SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
 
 	if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
 		return __sg_start_req(srp, hp, cmd);
 
+#ifdef SG_ALLOW_DIO_CODE
 	if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
 	    (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
-	    (!sfp->parentdp->device->host->unchecked_isa_dma)) {
-		res = sg_build_direct(srp, sfp, dxfer_len);
-		if (res <= 0)	/* -ve -> error, 0 -> done, 1 -> try indirect */
-			return res;
+	    (!sfp->parentdp->device->host->unchecked_isa_dma) &&
+	    !(uaddr & alignment) && !(dxfer_len & alignment)) {
+		res = __sg_start_req(srp, hp, cmd);
+		if (!res)
+			res = sg_build_direct(srp, sfp, dxfer_len);
+
+		return res;
 	}
+#endif
 	if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
 		sg_link_reserve(sfp, srp, dxfer_len);
 	else {
@@ -1718,8 +1727,11 @@ sg_finish_rem_req(Sg_request * srp)
 	else
 		sg_remove_scat(req_schp);
 
-	if (srp->rq)
+	if (srp->rq) {
+		if (srp->bio)
+			blk_rq_unmap_user(srp->bio);
 		blk_put_request(srp->rq);
+	}
 
 	sg_remove_request(sfp, srp);
 }
@@ -1746,151 +1758,23 @@ sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
 	return tablesize;	/* number of scat_gath elements allocated */
 }
 
-#ifdef SG_ALLOW_DIO_CODE
-/* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
-	/* TODO: hopefully we can use the generic block layer code */
-
-/* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
- - mapping of all pages not successful
- (i.e., either completely successful or fails)
-*/
-static int
-st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
-		  unsigned long uaddr, size_t count, int rw)
-{
-	unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	unsigned long start = uaddr >> PAGE_SHIFT;
-	const int nr_pages = end - start;
-	int res, i, j;
-	struct page **pages;
-
-	/* User attempted Overflow! */
-	if ((uaddr + count) < uaddr)
-		return -EINVAL;
-
-	/* Too big */
-	if (nr_pages > max_pages)
-		return -ENOMEM;
-
-	/* Hmm? */
-	if (count == 0)
-		return 0;
-
-	if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
-		return -ENOMEM;
-
-	/* Try to fault in all of the necessary pages */
-	down_read(&current->mm->mmap_sem);
-	/* rw==READ means read from drive, write into memory area */
-	res = get_user_pages(
-		current,
-		current->mm,
-		uaddr,
-		nr_pages,
-		rw == READ,
-		0, /* don't force */
-		pages,
-		NULL);
-	up_read(&current->mm->mmap_sem);
-
-	/* Errors and no page mapped should return here */
-	if (res < nr_pages)
-		goto out_unmap;
-
-	for (i=0; i < nr_pages; i++) {
-		/* FIXME: flush superflous for rw==READ,
-		 * probably wrong function for rw==WRITE
-		 */
-		flush_dcache_page(pages[i]);
-		/* ?? Is locking needed? I don't think so */
-		/* if (!trylock_page(pages[i]))
-		   goto out_unlock; */
-	}
-
-	sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK);
-	if (nr_pages > 1) {
-		sgl[0].length = PAGE_SIZE - sgl[0].offset;
-		count -= sgl[0].length;
-		for (i=1; i < nr_pages ; i++)
-			sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0);
-	}
-	else {
-		sgl[0].length = count;
-	}
-
-	kfree(pages);
-	return nr_pages;
-
- out_unmap:
-	if (res > 0) {
-		for (j=0; j < res; j++)
-			page_cache_release(pages[j]);
-		res = 0;
-	}
-	kfree(pages);
-	return res;
-}
-
-
-/* And unmap them... */
-static int
-st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
-		    int dirtied)
-{
-	int i;
-
-	for (i=0; i < nr_pages; i++) {
-		struct page *page = sg_page(&sgl[i]);
-
-		if (dirtied)
-			SetPageDirty(page);
-		/* unlock_page(page); */
-		/* FIXME: cache flush missing for rw==READ
-		 * FIXME: call the correct reference counting function
-		 */
-		page_cache_release(page);
-	}
-
-	return 0;
-}
-
-/* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
-#endif
-
-
 /* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
 static int
 sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
 {
-#ifdef SG_ALLOW_DIO_CODE
 	sg_io_hdr_t *hp = &srp->header;
 	Sg_scatter_hold *schp = &srp->data;
-	int sg_tablesize = sfp->parentdp->sg_tablesize;
-	int mx_sc_elems, res;
-	struct scsi_device *sdev = sfp->parentdp->device;
-
-	if (((unsigned long)hp->dxferp &
-	    queue_dma_alignment(sdev->request_queue)) != 0)
-		return 1;
+	int res;
+	struct request *rq = srp->rq;
+	struct request_queue *q = sfp->parentdp->device->request_queue;
 
-	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
-	if (mx_sc_elems <= 0) {
-		return 1;
-	}
-	res = st_map_user_pages(schp->buffer, mx_sc_elems,
-				(unsigned long)hp->dxferp, dxfer_len,
-				(SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
-	if (res <= 0) {
-		sg_remove_scat(schp);
-		return 1;
-	}
-	schp->k_use_sg = res;
+	res = blk_rq_map_user(q, rq, NULL, hp->dxferp, dxfer_len, GFP_ATOMIC);
+	if (res)
+		return res;
+	srp->bio = rq->bio;
 	schp->dio_in_use = 1;
 	hp->info |= SG_INFO_DIRECT_IO;
 	return 0;
-#else
-	return 1;
-#endif
 }
 
 static int
@@ -2069,11 +1953,7 @@ sg_remove_scat(Sg_scatter_hold * schp)
 	if (schp->buffer && (schp->sglist_len > 0)) {
 		struct scatterlist *sg = schp->buffer;
 
-		if (schp->dio_in_use) {
-#ifdef SG_ALLOW_DIO_CODE
-			st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
-#endif
-		} else {
+		if (!schp->dio_in_use) {
 			int k;
 
 			for (k = 0; (k < schp->k_use_sg) && sg_page(sg);
@@ -2083,8 +1963,9 @@ sg_remove_scat(Sg_scatter_hold * schp)
 			    k, sg_page(sg), sg->length));
 			sg_page_free(sg_page(sg), sg->length);
 			}
+
+			kfree(schp->buffer);
 		}
-		kfree(schp->buffer);
 	}
 	memset(schp, 0, sizeof (*schp));
 }