author		Christoph Hellwig <hch@infradead.org>	2011-10-14 07:29:58 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2011-10-23 23:21:29 -0400
commit		df5fa691ce61aedd3e4dbcf960ee44f05b797d8b (patch)
tree		9acd9f28f37eb697247f6f7256650b03932ede8a
parent		6b20fa9aaf0c2f69ee6f9648e20ab2be0206705e (diff)
target: make iblock_emulate_sync_cache asynchronous
Do not block the submitting thread when handling a SYNCHRONIZE CACHE command; instead, implement it asynchronously by sending the FLUSH command ourselves and calling transport_complete_sync_cache from the completion handler.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
-rw-r--r--	drivers/target/target_core_iblock.c	37
1 file changed, 21 insertions(+), 16 deletions(-)
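For readers skimming the diff below, the pattern being adopted is roughly the following, sketched as standalone code against the block-layer API of this kernel generation (bio_alloc(gfp, nr_iovecs), a bi_end_io callback taking (struct bio *, int), and submit_bio(WRITE_FLUSH, bio)). The names my_flush_ctx, my_flush_end_io, and my_issue_flush are hypothetical illustrations, not identifiers from the patch:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical per-request context; the patch uses the se_cmd itself. */
struct my_flush_ctx {
	void (*done)(void *data, int error);
	void *data;
};

/* Completion handler: invoked by the block layer when the flush finishes. */
static void my_flush_end_io(struct bio *bio, int err)
{
	struct my_flush_ctx *ctx = bio->bi_private;

	if (ctx)
		ctx->done(ctx->data, err);	/* report status upward */
	bio_put(bio);				/* drop the submitter's reference */
}

/*
 * Asynchronous cache flush: allocate an empty bio (no data pages), point
 * it at the backing device, and submit it with the FLUSH flag set.  The
 * caller returns immediately; completion arrives via my_flush_end_io().
 */
static void my_issue_flush(struct block_device *bdev, struct my_flush_ctx *ctx)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);

	bio->bi_end_io = my_flush_end_io;
	bio->bi_bdev = bdev;
	bio->bi_private = ctx;
	submit_bio(WRITE_FLUSH, bio);
}

In the patch itself, the se_cmd plays the role of the private context, and bi_private is only set when the Immediate bit is clear: an immediate SYNCHRONIZE CACHE has already been completed via transport_complete_sync_cache(cmd, 1) before the flush is submitted, so the completion handler must not complete it a second time.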
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index bf074c4b7a36..d9ad2a216eaa 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -313,37 +313,42 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 	return blocks_long;
 }
 
+static void iblock_end_io_flush(struct bio *bio, int err)
+{
+	struct se_cmd *cmd = bio->bi_private;
+
+	if (err)
+		pr_err("IBLOCK: cache flush failed: %d\n", err);
+
+	if (cmd)
+		transport_complete_sync_cache(cmd, err == 0);
+	bio_put(bio);
+}
+
 /*
- * Emulate SYCHRONIZE_CACHE_*
+ * Implement SYCHRONIZE CACHE.  Note that we can't handle lba ranges and must
+ * always flush the whole cache.
  */
 static void iblock_emulate_sync_cache(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
 	int immed = (cmd->t_task_cdb[1] & 0x2);
-	sector_t error_sector;
-	int ret;
+	struct bio *bio;
 
 	/*
 	 * If the Immediate bit is set, queue up the GOOD response
-	 * for this SYNCHRONIZE_CACHE op
+	 * for this SYNCHRONIZE_CACHE op.
 	 */
 	if (immed)
 		transport_complete_sync_cache(cmd, 1);
 
-	/*
-	 * blkdev_issue_flush() does not support a specifying a range, so
-	 * we have to flush the entire cache.
-	 */
-	ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
-	if (ret != 0) {
-		pr_err("IBLOCK: block_issue_flush() failed: %d "
-			" error_sector: %llu\n", ret,
-			(unsigned long long)error_sector);
-	}
-
+	bio = bio_alloc(GFP_KERNEL, 0);
+	bio->bi_end_io = iblock_end_io_flush;
+	bio->bi_bdev = ib_dev->ibd_bd;
 	if (!immed)
-		transport_complete_sync_cache(cmd, ret == 0);
+		bio->bi_private = cmd;
+	submit_bio(WRITE_FLUSH, bio);
 }
 
 /*