author     Christoph Hellwig <hch@infradead.org>    2009-09-30 07:52:12 -0400
committer  Jens Axboe <jens.axboe@oracle.com>       2009-10-01 15:19:30 -0400
commit     c15227de132f1295f3db6b7df9079956b1020fd8 (patch)
tree       ad06f119f283cf8a6313681055e8132ba2851ddb /drivers/mtd/mtd_blkdevs.c
parent     3bd0f0c763e497c8674b28e3df2732f48683dabd (diff)
block: use normal I/O path for discard requests
prepare_discard_fn() was being called in a place where memory allocation
was effectively impossible. This makes it inappropriate for all but
the most trivial translations of Linux's DISCARD operation to the block
command set. Additionally, adding a payload there makes ownership of the
backing bio unclear, as it is now allocated by the device driver and not
by the submitter as usual.
It is replaced with QUEUE_FLAG_DISCARD, which indicates whether the queue
supports discard operations or not. blkdev_issue_discard() now allocates
a one-page, sector-length payload, which is the right thing for the
common ATA and SCSI implementations.
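As a rough sketch of the interface described above (not part of this patch;
the example_* names are hypothetical, and the four-argument
blkdev_issue_discard() signature is assumed to match this era): a driver only
flags its queue as discard-capable, while submitters call
blkdev_issue_discard(), which now builds the payload itself.

/*
 * Sketch only, not from this patch.  The example_* names are made up;
 * the calls assume the interfaces named in the message above.
 */
#include <linux/blkdev.h>
#include <linux/gfp.h>

static void example_enable_discard(struct request_queue *q)
{
	/* Advertise that this queue can handle discard requests. */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}

static int example_trim_range(struct block_device *bdev,
			      sector_t sector, sector_t nr_sects)
{
	/*
	 * blkdev_issue_discard() allocates the sector-length payload
	 * itself, so the submitter only names the range to discard.
	 */
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL);
}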
The mtd implementation of prepare_discard_fn() is replaced by simply
checking whether the request is a discard.
Largely based on a previous patch from Matthew Wilcox <matthew@wil.cx>,
which covered the prepare_discard_fn() change but not yet the different
payload allocation.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'drivers/mtd/mtd_blkdevs.c')
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 19
1 file changed, 5 insertions(+), 14 deletions(-)
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 0acbf4f5be50..8ca17a3e96ea 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -32,14 +32,6 @@ struct mtd_blkcore_priv {
 	spinlock_t queue_lock;
 };
 
-static int blktrans_discard_request(struct request_queue *q,
-				    struct request *req)
-{
-	req->cmd_type = REQ_TYPE_LINUX_BLOCK;
-	req->cmd[0] = REQ_LB_OP_DISCARD;
-	return 0;
-}
-
 static int do_blktrans_request(struct mtd_blktrans_ops *tr,
 			       struct mtd_blktrans_dev *dev,
 			       struct request *req)
@@ -52,10 +44,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
 
 	buf = req->buffer;
 
-	if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
-	    req->cmd[0] == REQ_LB_OP_DISCARD)
-		return tr->discard(dev, block, nsect);
-
 	if (!blk_fs_request(req))
 		return -EIO;
 
@@ -63,6 +51,9 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
 	    get_capacity(req->rq_disk))
 		return -EIO;
 
+	if (blk_discard_rq(req))
+		return tr->discard(dev, block, nsect);
+
 	switch(rq_data_dir(req)) {
 	case READ:
 		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
@@ -380,8 +371,8 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
 	tr->blkcore_priv->rq->queuedata = tr;
 	blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
 	if (tr->discard)
-		blk_queue_set_discard(tr->blkcore_priv->rq,
-				      blktrans_discard_request);
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+					tr->blkcore_priv->rq);
 
 	tr->blkshift = ffs(tr->blksize) - 1;
 
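For context, the blk_discard_rq() branch added above dispatches to the
translation layer's discard hook. Below is a hypothetical sketch of such a
hook; it is not taken from any in-tree driver, and the signature is assumed
from the tr->discard(dev, block, nsect) call site in do_blktrans_request().

/*
 * Hypothetical sketch of the hook that tr->discard points at.  Not from
 * any in-tree driver; the body only shows the shape of the callback.
 */
#include <linux/kernel.h>
#include <linux/mtd/blktrans.h>

static int example_blktrans_discard(struct mtd_blktrans_dev *dev,
				    unsigned long block, unsigned nr_blocks)
{
	unsigned long i;

	/*
	 * A real translation layer would mark these blocks unused in its
	 * own mapping so they can be reclaimed or erased later.
	 */
	for (i = 0; i < nr_blocks; i++)
		pr_debug("discard block %lu\n", block + i);

	return 0;
}

A driver would wire this up as the .discard member of its struct
mtd_blktrans_ops; register_mtd_blktrans() then sets QUEUE_FLAG_DISCARD on the
queue automatically when that member is non-NULL, as the last hunk shows.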