author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>   2008-08-29 06:32:18 -0400
committer  Jens Axboe <jens.axboe@oracle.com>                2008-10-09 02:56:10 -0400
commit     10db10d144c0248f285242f79daf6b9de6b00a62 (patch)
tree       315806c8e09f6007bdc0c1dfeea1313f15815bfd /drivers/scsi/sg.c
parent     6e5a30cba5e7c03b2cd564e968f1dd667a0f7c42 (diff)
sg: convert the indirect IO path to use the block layer
This patch converts the indirect IO path (including mmap IO and old
struct sg_header) to use the block layer functions (blk_get_request,
blk_execute_rq_nowait, blk_rq_map_user, etc) instead of
scsi_execute_async().
[Jens: fixed compile error with SCSI logging enabled]
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Douglas Gilbert <dougg@torque.net>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
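
For readers unfamiliar with the block-layer path this patch moves to, below is a minimal, hypothetical sketch of the submission sequence the commit message describes (allocate a request, copy in the CDB, map the user buffer, submit asynchronously). It is not code from this patch: the helper name sg_submit_sketch, the write-direction assumption, and the omitted sg bookkeeping and error paths are illustrative only, using the blk_* interfaces of this kernel era.

#include <linux/blkdev.h>
#include <linux/string.h>

/*
 * Hypothetical sketch (not from this patch) of the block-layer
 * submission pattern: allocate a request, copy in the CDB, map the
 * user buffer, and fire the request off asynchronously.  Direction is
 * assumed to be a write to the device; sg-specific bookkeeping is
 * omitted.
 */
static int sg_submit_sketch(struct request_queue *q, struct gendisk *disk,
			    unsigned char *cdb, int cdb_len,
			    void __user *ubuf, unsigned int len,
			    unsigned int timeout, rq_end_io_fn *done)
{
	struct request *rq;
	int res;

	rq = blk_get_request(q, WRITE, GFP_ATOMIC);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* SCSI passthrough request */
	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;
	rq->timeout = timeout;

	/* map_data == NULL: let the block layer allocate the pages itself */
	res = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_ATOMIC);
	if (res) {
		blk_put_request(rq);
		return res;
	}

	/*
	 * The completion callback is expected to call blk_rq_unmap_user()
	 * on rq->bio and then blk_put_request(rq).
	 */
	blk_execute_rq_nowait(q, disk, rq, 1, done);
	return 0;
}

In the patch itself, sg keeps its own page array and hands it to blk_rq_map_user()/blk_rq_map_user_iov() through struct rq_map_data, so the driver's reserve-buffer and mmap semantics are preserved.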
Diffstat (limited to 'drivers/scsi/sg.c')
-rw-r--r--	drivers/scsi/sg.c	397
1 file changed, 105 insertions, 292 deletions
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cb6de0752ee1..d6391666502c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -47,7 +47,6 @@ static int sg_version_num = 30534;	/* 2 digits for each component */
 #include <linux/seq_file.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
-#include <linux/scatterlist.h>
 #include <linux/blktrace_api.h>
 #include <linux/smp_lock.h>
 
@@ -119,7 +118,8 @@ typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
 	unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
 	unsigned bufflen;	/* Size of (aggregate) data buffer */
 	unsigned b_malloc_len;	/* actual len malloc'ed in buffer */
-	struct scatterlist *buffer;/* scatter list */
+	struct page **pages;
+	int page_order;
 	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
 	unsigned char cmd_opcode; /* first byte of command */
 } Sg_scatter_hold;
@@ -190,8 +190,6 @@ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
 			   int read_only, Sg_request **o_srp);
 static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
 			   unsigned char *cmnd, int timeout, int blocking);
-static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
-		      int wr_xf, int *countp, unsigned char __user **up);
 static int sg_write_xfer(Sg_request * srp);
 static int sg_read_xfer(Sg_request * srp);
 static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
@@ -199,8 +197,6 @@ static void sg_remove_scat(Sg_scatter_hold * schp);
 static void sg_build_reserve(Sg_fd * sfp, int req_size);
 static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
 static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
-static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
-static void sg_page_free(struct page *page, int size);
 static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
 static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
 static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
@@ -771,26 +767,11 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
 		break;
 	}
 	hp->duration = jiffies_to_msecs(jiffies);
-/* Now send everything of to mid-level. The next time we hear about this
-   packet is when sg_cmd_done() is called (i.e. a callback). */
-	if (srp->rq) {
-		srp->rq->timeout = timeout;
-		blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
-				      srp->rq, 1, sg_rq_end_io);
-		return 0;
-	}
-	if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer,
-				hp->dxfer_len, srp->data.k_use_sg, timeout,
-				SG_DEFAULT_RETRIES, srp, sg_cmd_done,
-				GFP_ATOMIC)) {
-		SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n"));
-		/*
-		 * most likely out of mem, but could also be a bad map
-		 */
-		sg_finish_rem_req(srp);
-		return -ENOMEM;
-	} else
-		return 0;
+
+	srp->rq->timeout = timeout;
+	blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
+			      srp->rq, 1, sg_rq_end_io);
+	return 0;
 }
 
 static int
@@ -1206,8 +1187,7 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	Sg_fd *sfp;
 	unsigned long offset, len, sa;
 	Sg_scatter_hold *rsv_schp;
-	struct scatterlist *sg;
-	int k;
+	int k, length;
 
 	if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
 		return VM_FAULT_SIGBUS;
@@ -1217,15 +1197,14 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 	SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
 				   offset, rsv_schp->k_use_sg));
-	sg = rsv_schp->buffer;
 	sa = vma->vm_start;
-	for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
-	     ++k, sg = sg_next(sg)) {
+	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
+	for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
 		len = vma->vm_end - sa;
-		len = (len < sg->length) ? len : sg->length;
+		len = (len < length) ? len : length;
 		if (offset < len) {
-			struct page *page;
-			page = virt_to_page(page_address(sg_page(sg)) + offset);
+			struct page *page = nth_page(rsv_schp->pages[k],
+						     offset >> PAGE_SHIFT);
 			get_page(page);	/* increment page count */
 			vmf->page = page;
 			return 0; /* success */
@@ -1247,8 +1226,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 	Sg_fd *sfp;
 	unsigned long req_sz, len, sa;
 	Sg_scatter_hold *rsv_schp;
-	int k;
-	struct scatterlist *sg;
+	int k, length;
 
 	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
 		return -ENXIO;
@@ -1262,11 +1240,10 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 		return -ENOMEM;	/* cannot map more than reserved buffer */
 
 	sa = vma->vm_start;
-	sg = rsv_schp->buffer;
-	for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
-	     ++k, sg = sg_next(sg)) {
+	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
+	for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
 		len = vma->vm_end - sa;
-		len = (len < sg->length) ? len : sg->length;
+		len = (len < length) ? len : length;
 		sa += len;
 	}
 
@@ -1310,7 +1287,6 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
 	if (0 != result) {
 		struct scsi_sense_hdr sshdr;
 
-		memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
 		srp->header.status = 0xff & result;
 		srp->header.masked_status = status_byte(result);
 		srp->header.msg_status = msg_byte(result);
@@ -1685,34 +1661,51 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
 	Sg_scatter_hold *rsv_schp = &sfp->reserve;
 	struct request_queue *q = sfp->parentdp->device->request_queue;
 	unsigned long alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	struct rq_map_data map_data;
 
 	SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
 
+	res = __sg_start_req(srp, hp, cmd);
+	if (res)
+		return res;
+
 	if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
-		return __sg_start_req(srp, hp, cmd);
+		return 0;
 
 #ifdef SG_ALLOW_DIO_CODE
 	if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
 	    (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
 	    (!sfp->parentdp->device->host->unchecked_isa_dma) &&
-	    !(uaddr & alignment) && !(dxfer_len & alignment)) {
-		res = __sg_start_req(srp, hp, cmd);
-		if (!res)
-			res = sg_build_direct(srp, sfp, dxfer_len);
-
-		return res;
-	}
+	    !(uaddr & alignment) && !(dxfer_len & alignment))
+		return sg_build_direct(srp, sfp, dxfer_len);
 #endif
 	if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
 		sg_link_reserve(sfp, srp, dxfer_len);
-	else {
+	else
 		res = sg_build_indirect(req_schp, sfp, dxfer_len);
-		if (res) {
-			sg_remove_scat(req_schp);
-			return res;
-		}
+
+	if (!res) {
+		struct request *rq = srp->rq;
+		Sg_scatter_hold *schp = &srp->data;
+		int iovec_count = (int) hp->iovec_count;
+
+		map_data.pages = schp->pages;
+		map_data.page_order = schp->page_order;
+		map_data.nr_entries = schp->k_use_sg;
+
+		if (iovec_count)
+			res = blk_rq_map_user_iov(q, rq, &map_data, hp->dxferp,
+						  iovec_count,
+						  hp->dxfer_len, GFP_ATOMIC);
+		else
+			res = blk_rq_map_user(q, rq, &map_data, hp->dxferp,
+					      hp->dxfer_len, GFP_ATOMIC);
+
+		if (!res)
+			srp->bio = rq->bio;
 	}
-	return 0;
+
+	return res;
 }
 
 static void
@@ -1730,6 +1723,7 @@ sg_finish_rem_req(Sg_request * srp)
 	if (srp->rq) {
 		if (srp->bio)
 			blk_rq_unmap_user(srp->bio);
+
 		blk_put_request(srp->rq);
 	}
 
@@ -1739,21 +1733,12 @@ sg_finish_rem_req(Sg_request * srp)
 static int
 sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
 {
-	int sg_bufflen = tablesize * sizeof(struct scatterlist);
+	int sg_bufflen = tablesize * sizeof(struct page *);
 	gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
 
-	/*
-	 * TODO: test without low_dma, we should not need it since
-	 * the block layer will bounce the buffer for us
-	 *
-	 * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
-	 */
-	if (sfp->low_dma)
-		gfp_flags |= GFP_DMA;
-	schp->buffer = kzalloc(sg_bufflen, gfp_flags);
-	if (!schp->buffer)
+	schp->pages = kzalloc(sg_bufflen, gfp_flags);
+	if (!schp->pages)
 		return -ENOMEM;
-	sg_init_table(schp->buffer, tablesize);
 	schp->sglist_len = sg_bufflen;
 	return tablesize;	/* number of scat_gath elements allocated */
 }
@@ -1780,11 +1765,10 @@ sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
 static int
 sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
 {
-	struct scatterlist *sg;
-	int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
+	int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
 	int sg_tablesize = sfp->parentdp->sg_tablesize;
-	int blk_size = buff_size;
-	struct page *p = NULL;
+	int blk_size = buff_size, order;
+	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
 
 	if (blk_size < 0)
 		return -EFAULT;
@@ -1808,15 +1792,26 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
 		} else
 			scatter_elem_sz_prev = num;
 	}
-	for (k = 0, sg = schp->buffer, rem_sz = blk_size;
-	     (rem_sz > 0) && (k < mx_sc_elems);
-	     ++k, rem_sz -= ret_sz, sg = sg_next(sg)) {
+
+	if (sfp->low_dma)
+		gfp_mask |= GFP_DMA;
 
+	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+		gfp_mask |= __GFP_ZERO;
+
+	order = get_order(num);
+retry:
+	ret_sz = 1 << (PAGE_SHIFT + order);
+
+	for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
+	     k++, rem_sz -= ret_sz) {
+
 		num = (rem_sz > scatter_elem_sz_prev) ?
 			scatter_elem_sz_prev : rem_sz;
-		p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
-		if (!p)
-			return -ENOMEM;
+
+		schp->pages[k] = alloc_pages(gfp_mask, order);
+		if (!schp->pages[k])
+			goto out;
 
 		if (num == scatter_elem_sz_prev) {
 			if (unlikely(ret_sz > scatter_elem_sz_prev)) {
@@ -1824,12 +1819,12 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
 				scatter_elem_sz_prev = ret_sz;
 			}
 		}
-		sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0);
 
 		SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
 				 "ret_sz=%d\n", k, num, ret_sz));
 	}		/* end of for loop */
 
+	schp->page_order = order;
 	schp->k_use_sg = k;
 	SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
 			 "rem_sz=%d\n", k, rem_sz));
@@ -1837,8 +1832,15 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
 	schp->bufflen = blk_size;
 	if (rem_sz > 0)	/* must have failed */
 		return -ENOMEM;
-
 	return 0;
+out:
+	for (i = 0; i < k; i++)
+		__free_pages(schp->pages[k], order);
+
+	if (--order >= 0)
+		goto retry;
+
+	return -ENOMEM;
 }
 
 static int
@@ -1846,13 +1848,8 @@ sg_write_xfer(Sg_request * srp)
 {
 	sg_io_hdr_t *hp = &srp->header;
 	Sg_scatter_hold *schp = &srp->data;
-	struct scatterlist *sg = schp->buffer;
 	int num_xfer = 0;
-	int j, k, onum, usglen, ksglen, res;
-	int iovec_count = (int) hp->iovec_count;
 	int dxfer_dir = hp->dxfer_direction;
-	unsigned char *p;
-	unsigned char __user *up;
 	int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
 
 	if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
@@ -1866,83 +1863,9 @@ sg_write_xfer(Sg_request * srp)
 	    && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
 		return 0;
 
-	SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
-			  num_xfer, iovec_count, schp->k_use_sg));
-	if (iovec_count) {
-		onum = iovec_count;
-		if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
-			return -EFAULT;
-	} else
-		onum = 1;
-
-	ksglen = sg->length;
-	p = page_address(sg_page(sg));
-	for (j = 0, k = 0; j < onum; ++j) {
-		res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
-		if (res)
-			return res;
-
-		for (; p; sg = sg_next(sg), ksglen = sg->length,
-		     p = page_address(sg_page(sg))) {
-			if (usglen <= 0)
-				break;
-			if (ksglen > usglen) {
-				if (usglen >= num_xfer) {
-					if (__copy_from_user(p, up, num_xfer))
-						return -EFAULT;
-					return 0;
-				}
-				if (__copy_from_user(p, up, usglen))
-					return -EFAULT;
-				p += usglen;
-				ksglen -= usglen;
-				break;
-			} else {
-				if (ksglen >= num_xfer) {
-					if (__copy_from_user(p, up, num_xfer))
-						return -EFAULT;
-					return 0;
-				}
-				if (__copy_from_user(p, up, ksglen))
-					return -EFAULT;
-				up += ksglen;
-				usglen -= ksglen;
-			}
-			++k;
-			if (k >= schp->k_use_sg)
-				return 0;
-		}
-	}
-
-	return 0;
-}
+	SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, k_use_sg=%d\n",
+			  num_xfer, schp->k_use_sg));
 
-static int
-sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
-	   int wr_xf, int *countp, unsigned char __user **up)
-{
-	int num_xfer = (int) hp->dxfer_len;
-	unsigned char __user *p = hp->dxferp;
-	int count;
-
-	if (0 == sg_num) {
-		if (wr_xf && ('\0' == hp->interface_id))
-			count = (int) hp->flags;	/* holds "old" input_size */
-		else
-			count = num_xfer;
-	} else {
-		sg_iovec_t iovec;
-		if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
-			return -EFAULT;
-		p = iovec.iov_base;
-		count = (int) iovec.iov_len;
-	}
-	if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
-		return -EFAULT;
-	if (up)
-		*up = p;
-	if (countp)
-		*countp = count;
 	return 0;
 }
 
@@ -1950,21 +1873,18 @@ static void
 sg_remove_scat(Sg_scatter_hold * schp)
 {
 	SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
-	if (schp->buffer && (schp->sglist_len > 0)) {
-		struct scatterlist *sg = schp->buffer;
-
+	if (schp->pages && schp->sglist_len > 0) {
 		if (!schp->dio_in_use) {
 			int k;
 
-			for (k = 0; (k < schp->k_use_sg) && sg_page(sg);
-			     ++k, sg = sg_next(sg)) {
+			for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
 				SCSI_LOG_TIMEOUT(5, printk(
-					"sg_remove_scat: k=%d, pg=0x%p, len=%d\n",
-					k, sg_page(sg), sg->length));
-				sg_page_free(sg_page(sg), sg->length);
+					"sg_remove_scat: k=%d, pg=0x%p\n",
+					k, schp->pages[k]));
+				__free_pages(schp->pages[k], schp->page_order);
 			}
 
-			kfree(schp->buffer);
+			kfree(schp->pages);
 		}
 	}
 	memset(schp, 0, sizeof (*schp));
@@ -1975,13 +1895,8 @@ sg_read_xfer(Sg_request * srp)
 {
 	sg_io_hdr_t *hp = &srp->header;
 	Sg_scatter_hold *schp = &srp->data;
-	struct scatterlist *sg = schp->buffer;
 	int num_xfer = 0;
-	int j, k, onum, usglen, ksglen, res;
-	int iovec_count = (int) hp->iovec_count;
 	int dxfer_dir = hp->dxfer_direction;
-	unsigned char *p;
-	unsigned char __user *up;
 	int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
 
 	if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
@@ -1996,53 +1911,7 @@ sg_read_xfer(Sg_request * srp)
 		return 0;
 
 	SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
-		  num_xfer, iovec_count, schp->k_use_sg));
-	if (iovec_count) {
-		onum = iovec_count;
-		if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
-			return -EFAULT;
-	} else
-		onum = 1;
-
-	p = page_address(sg_page(sg));
-	ksglen = sg->length;
-	for (j = 0, k = 0; j < onum; ++j) {
-		res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
-		if (res)
-			return res;
-
-		for (; p; sg = sg_next(sg), ksglen = sg->length,
-		     p = page_address(sg_page(sg))) {
-			if (usglen <= 0)
-				break;
-			if (ksglen > usglen) {
-				if (usglen >= num_xfer) {
-					if (__copy_to_user(up, p, num_xfer))
-						return -EFAULT;
-					return 0;
-				}
-				if (__copy_to_user(up, p, usglen))
-					return -EFAULT;
-				p += usglen;
-				ksglen -= usglen;
-				break;
-			} else {
-				if (ksglen >= num_xfer) {
-					if (__copy_to_user(up, p, num_xfer))
-						return -EFAULT;
-					return 0;
-				}
-				if (__copy_to_user(up, p, ksglen))
-					return -EFAULT;
-				up += ksglen;
-				usglen -= ksglen;
-			}
-			++k;
-			if (k >= schp->k_use_sg)
-				return 0;
-		}
-	}
-
+		  num_xfer, (int)hp->iovec_count, schp->k_use_sg));
 	return 0;
 }
 
@@ -2050,7 +1919,6 @@ static int
 sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
 {
 	Sg_scatter_hold *schp = &srp->data;
-	struct scatterlist *sg = schp->buffer;
 	int k, num;
 
 	SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
@@ -2058,15 +1926,18 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
 	if ((!outp) || (num_read_xfer <= 0))
 		return 0;
 
-	for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) {
-		num = sg->length;
+	blk_rq_unmap_user(srp->bio);
+	srp->bio = NULL;
+
+	num = 1 << (PAGE_SHIFT + schp->page_order);
+	for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
 		if (num > num_read_xfer) {
-			if (__copy_to_user(outp, page_address(sg_page(sg)),
+			if (__copy_to_user(outp, page_address(schp->pages[k]),
 					   num_read_xfer))
 				return -EFAULT;
 			break;
 		} else {
-			if (__copy_to_user(outp, page_address(sg_page(sg)),
+			if (__copy_to_user(outp, page_address(schp->pages[k]),
 					   num))
 				return -EFAULT;
 			num_read_xfer -= num;
@@ -2101,24 +1972,22 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
 {
 	Sg_scatter_hold *req_schp = &srp->data;
 	Sg_scatter_hold *rsv_schp = &sfp->reserve;
-	struct scatterlist *sg = rsv_schp->buffer;
 	int k, num, rem;
 
 	srp->res_used = 1;
 	SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
 	rem = size;
 
-	for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) {
-		num = sg->length;
+	num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
+	for (k = 0; k < rsv_schp->k_use_sg; k++) {
 		if (rem <= num) {
-			sfp->save_scat_len = num;
-			sg->length = rem;
 			req_schp->k_use_sg = k + 1;
 			req_schp->sglist_len = rsv_schp->sglist_len;
-			req_schp->buffer = rsv_schp->buffer;
+			req_schp->pages = rsv_schp->pages;
 
 			req_schp->bufflen = size;
 			req_schp->b_malloc_len = rsv_schp->b_malloc_len;
+			req_schp->page_order = rsv_schp->page_order;
 			break;
 		} else
 			rem -= num;
@@ -2132,22 +2001,13 @@ static void
 sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
 {
 	Sg_scatter_hold *req_schp = &srp->data;
-	Sg_scatter_hold *rsv_schp = &sfp->reserve;
 
 	SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
 				   (int) req_schp->k_use_sg));
-	if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
-		struct scatterlist *sg = rsv_schp->buffer;
-
-		if (sfp->save_scat_len > 0)
-			(sg + (req_schp->k_use_sg - 1))->length =
-				(unsigned) sfp->save_scat_len;
-		else
-			SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
-	}
 	req_schp->k_use_sg = 0;
 	req_schp->bufflen = 0;
-	req_schp->buffer = NULL;
+	req_schp->pages = NULL;
+	req_schp->page_order = 0;
 	req_schp->sglist_len = 0;
 	sfp->save_scat_len = 0;
 	srp->res_used = 0;
@@ -2405,53 +2265,6 @@ sg_res_in_use(Sg_fd * sfp)
 	return srp ? 1 : 0;
 }
 
-/* The size fetched (value output via retSzp) set when non-NULL return */
-static struct page *
-sg_page_malloc(int rqSz, int lowDma, int *retSzp)
-{
-	struct page *resp = NULL;
-	gfp_t page_mask;
-	int order, a_size;
-	int resSz;
-
-	if ((rqSz <= 0) || (NULL == retSzp))
-		return resp;
-
-	if (lowDma)
-		page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
-	else
-		page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
-
-	for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
-	     order++, a_size <<= 1) ;
-	resSz = a_size;	/* rounded up if necessary */
-	resp = alloc_pages(page_mask, order);
-	while ((!resp) && order) {
-		--order;
-		a_size >>= 1;	/* divide by 2, until PAGE_SIZE */
-		resp = alloc_pages(page_mask, order);	/* try half */
-		resSz = a_size;
-	}
-	if (resp) {
-		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
-			memset(page_address(resp), 0, resSz);
-		*retSzp = resSz;
-	}
-	return resp;
-}
-
-static void
-sg_page_free(struct page *page, int size)
-{
-	int order, a_size;
-
-	if (!page)
-		return;
-	for (order = 0, a_size = PAGE_SIZE; a_size < size;
-	     order++, a_size <<= 1) ;
-	__free_pages(page, order);
-}
-
 #ifdef CONFIG_SCSI_PROC_FS
 static int
 sg_idr_max_id(int id, void *p, void *data)