author	Kiyoshi Ueda <k-ueda@ct.jp.nec.com>	2009-12-10 18:52:15 -0500
committer	Alasdair G Kergon <agk@redhat.com>	2009-12-10 18:52:15 -0500
commit	6facdaff229f2b25d0de82be9be99b9f562e72ba (patch)
tree	4faa8e98b33e804525c7873d160cb0d7014a2a0d
parent	0888564393a1277ce2d0564d819e1bcff1120343 (diff)
dm: abstract clone_rq
This patch factors out the request cloning code in dm_prep_fn() as clone_rq().
No functional change.

This patch is a preparation for a later patch in this series which needs to
make clones from an original barrier request.

Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
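With this change, the open-coded tio allocation and clone setup in dm_prep_fn() collapses to a single call into the new helper. A condensed view of the resulting prep path, taken from the second hunk of the diff below (comments added here for clarity):

	clone = clone_rq(rq, md, GFP_ATOMIC);	/* allocate a tio and set up the clone */
	if (!clone)
		return BLKPREP_DEFER;		/* -ENOMEM: have the block layer retry prep later */

	rq->special = clone;
	rq->cmd_flags |= REQ_DONTPREP;		/* prep done; don't call dm_prep_fn() again for this request */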
-rw-r--r--	drivers/md/dm.c	45
1 file changed, 28 insertions, 17 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a42dfb7a718e..30f5dc8e52bc 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1445,6 +1445,32 @@ static int setup_clone(struct request *clone, struct request *rq,
 	return 0;
 }
 
+static struct request *clone_rq(struct request *rq, struct mapped_device *md,
+				gfp_t gfp_mask)
+{
+	struct request *clone;
+	struct dm_rq_target_io *tio;
+
+	tio = alloc_rq_tio(md, gfp_mask);
+	if (!tio)
+		return NULL;
+
+	tio->md = md;
+	tio->ti = NULL;
+	tio->orig = rq;
+	tio->error = 0;
+	memset(&tio->info, 0, sizeof(tio->info));
+
+	clone = &tio->clone;
+	if (setup_clone(clone, rq, tio)) {
+		/* -ENOMEM */
+		free_rq_tio(tio);
+		return NULL;
+	}
+
+	return clone;
+}
+
 static int dm_rq_flush_suspending(struct mapped_device *md)
 {
 	return !md->suspend_rq.special;
@@ -1456,7 +1482,6 @@ static int dm_rq_flush_suspending(struct mapped_device *md)
 static int dm_prep_fn(struct request_queue *q, struct request *rq)
 {
 	struct mapped_device *md = q->queuedata;
-	struct dm_rq_target_io *tio;
 	struct request *clone;
 
 	if (unlikely(rq == &md->suspend_rq)) {
@@ -1472,24 +1497,10 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
 		return BLKPREP_KILL;
 	}
 
-	tio = alloc_rq_tio(md, GFP_ATOMIC);
-	if (!tio)
-		/* -ENOMEM */
+	clone = clone_rq(rq, md, GFP_ATOMIC);
+	if (!clone)
 		return BLKPREP_DEFER;
 
-	tio->md = md;
-	tio->ti = NULL;
-	tio->orig = rq;
-	tio->error = 0;
-	memset(&tio->info, 0, sizeof(tio->info));
-
-	clone = &tio->clone;
-	if (setup_clone(clone, rq, tio)) {
-		/* -ENOMEM */
-		free_rq_tio(tio);
-		return BLKPREP_DEFER;
-	}
-
 	rq->special = clone;
 	rq->cmd_flags |= REQ_DONTPREP;
 