Diffstat (limited to 'drivers/scsi/sg.c')
-rw-r--r--	drivers/scsi/sg.c	40
1 file changed, 33 insertions, 7 deletions
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0cbc1fb45f10..2270bd51f9c2 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -546,7 +546,7 @@ static ssize_t
 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
 {
 	sg_io_hdr_t *hp = &srp->header;
-	int err = 0;
+	int err = 0, err2;
 	int len;
 
 	if (count < SZ_SG_IO_HDR) {
@@ -575,8 +575,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
 		goto err_out;
 	}
 err_out:
-	err = sg_finish_rem_req(srp);
-	return (0 == err) ? count : err;
+	err2 = sg_finish_rem_req(srp);
+	return err ? : err2 ? : count;
 }
 
 static ssize_t
@@ -1335,6 +1335,17 @@ sg_rq_end_io(struct request *rq, int uptodate)
 	}
 	/* Rely on write phase to clean out srp status values, so no "else" */
 
+	/*
+	 * Free the request as soon as it is complete so that its resources
+	 * can be reused without waiting for userspace to read() the
+	 * result. But keep the associated bio (if any) around until
+	 * blk_rq_unmap_user() can be called from user context.
+	 */
+	srp->rq = NULL;
+	if (rq->cmd != rq->__cmd)
+		kfree(rq->cmd);
+	__blk_put_request(rq->q, rq);
+
 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
 	if (unlikely(srp->orphan)) {
 		if (sfp->keep_orphan)
@@ -1669,7 +1680,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
 			return -ENOMEM;
 	}
 
-	rq = blk_get_request(q, rw, GFP_ATOMIC);
+	/*
+	 * NOTE
+	 *
+	 * With scsi-mq enabled, there are a fixed number of preallocated
+	 * requests equal in number to shost->can_queue. If all of the
+	 * preallocated requests are already in use, then using GFP_ATOMIC with
+	 * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
+	 * will cause blk_get_request() to sleep until an active command
+	 * completes, freeing up a request. Neither option is ideal, but
+	 * GFP_KERNEL is the better choice to prevent userspace from getting an
+	 * unexpected EWOULDBLOCK.
+	 *
+	 * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
+	 * does not sleep except under memory pressure.
+	 */
+	rq = blk_get_request(q, rw, GFP_KERNEL);
 	if (IS_ERR(rq)) {
 		kfree(long_cmdp);
 		return PTR_ERR(rq);
@@ -1759,10 +1785,10 @@ sg_finish_rem_req(Sg_request *srp)
 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
 				      "sg_finish_rem_req: res_used=%d\n",
 				      (int) srp->res_used));
-	if (srp->rq) {
-		if (srp->bio)
-			ret = blk_rq_unmap_user(srp->bio);
+	if (srp->bio)
+		ret = blk_rq_unmap_user(srp->bio);
 
+	if (srp->rq) {
 		if (srp->rq->cmd != srp->rq->__cmd)
 			kfree(srp->rq->cmd);
 		blk_put_request(srp->rq);
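
A note on the changed return in sg_new_read(): "return err ? : err2 ? : count;" relies on the GNU C conditional with an omitted middle operand, where "a ? : b" means "a ? a : b" with "a" evaluated once. The statement therefore returns the first nonzero value among err, err2 and count, so an error from the read/copy path takes precedence over an error from sg_finish_rem_req(), and count is returned only when both are zero. A minimal userspace sketch of the idiom (the helper names are illustrative, not taken from sg.c):

/* elvis_return.c - build with gcc or clang; "a ? : b" is a GNU extension. */
#include <stdio.h>

/* Hypothetical stand-ins for the two error sources in sg_new_read(). */
static int copy_result(void)    { return 0; }	/* 0 = copy to user succeeded */
static int cleanup_result(void) { return -22; }	/* e.g. an error from cleanup */

static int new_read_return(int count)
{
	int err = copy_result();
	int err2 = cleanup_result();

	/* First nonzero of err, err2, count wins, mirroring the patch. */
	return err ? : err2 ? : count;
}

int main(void)
{
	/* Prints -22: the cleanup error is reported instead of the byte count. */
	printf("%d\n", new_read_return(64));
	return 0;
}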
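
The sg_rq_end_io() and sg_finish_rem_req() hunks work as a pair: the block-layer request (and any separately allocated CDB) is put as soon as the command completes, while the bio stays around until user context can call blk_rq_unmap_user(); sg_finish_rem_req() therefore unmaps the bio unconditionally and only touches srp->rq for commands torn down before they ever complete. Below is a rough userspace model of that ownership split, with hypothetical names (sg_cmd, complete_cmd, finish_cmd) standing in for the kernel structures, not the actual sg.c code:

/* teardown_model.c - hypothetical two-phase teardown modelled on the patch:
 * the "request" is released in the completion path, the "bio" mapping only
 * in the user-context path, which must not assume the request still exists. */
#include <stdio.h>
#include <stdlib.h>

struct bio     { char *mapping; };
struct request { int tag; };

struct sg_cmd {
	struct request *rq;	/* released early, in completion context */
	struct bio *bio;	/* released late, from user context */
};

/* Completion path (interrupt context in the kernel): drop the request
 * right away so its slot can be reused, but keep the bio mapped. */
static void complete_cmd(struct sg_cmd *cmd)
{
	free(cmd->rq);
	cmd->rq = NULL;		/* mirrors "srp->rq = NULL" in the patch */
}

/* User-context path (read()/release in the kernel): unmap the bio whether
 * or not the request is already gone, then drop the request only if the
 * command never reached completion. */
static int finish_cmd(struct sg_cmd *cmd)
{
	if (cmd->bio) {
		free(cmd->bio->mapping);
		free(cmd->bio);
		cmd->bio = NULL;
	}
	if (cmd->rq) {
		free(cmd->rq);
		cmd->rq = NULL;
	}
	return 0;
}

int main(void)
{
	struct sg_cmd cmd = {
		.rq  = calloc(1, sizeof(struct request)),
		.bio = calloc(1, sizeof(struct bio)),
	};

	cmd.bio->mapping = malloc(16);

	complete_cmd(&cmd);	/* request gone, bio still mapped */
	finish_cmd(&cmd);	/* bio unmapped even though cmd.rq == NULL */
	printf("teardown ok\n");
	return 0;
}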