author     Linus Torvalds <torvalds@linux-foundation.org>  2011-11-04 20:06:58 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-11-04 20:06:58 -0400
commit     b4fdcb02f1e39c27058a885905bd0277370ba441 (patch)
tree       fd4cfd1994f21f44afe5e7904681fb5ac09f81b8 /drivers/md/dm.c
parent     044595d4e448305fbaec472eb7d22636d24e7d8c (diff)
parent     6dd9ad7df2019b1e33a372a501907db293ebcd0d (diff)
Merge branch 'for-3.2/core' of git://git.kernel.dk/linux-block
* 'for-3.2/core' of git://git.kernel.dk/linux-block: (29 commits)
  block: don't call blk_drain_queue() if elevator is not up
  blk-throttle: use queue_is_locked() instead of lockdep_is_held()
  blk-throttle: Take blkcg->lock while traversing blkcg->policy_list
  blk-throttle: Free up policy node associated with deleted rule
  block: warn if tag is greater than real_max_depth.
  block: make gendisk hold a reference to its queue
  blk-flush: move the queue kick into
  blk-flush: fix invalid BUG_ON in blk_insert_flush
  block: Remove the control of complete cpu from bio.
  block: fix a typo in the blk-cgroup.h file
  block: initialize the bounce pool if high memory may be added later
  block: fix request_queue lifetime handling by making blk_queue_cleanup() properly shutdown
  block: drop @tsk from attempt_plug_merge() and explain sync rules
  block: make get_request[_wait]() fail if queue is dead
  block: reorganize throtl_get_tg() and blk_throtl_bio()
  block: reorganize queue draining
  block: drop unnecessary blk_get/put_queue() in scsi_cmd_ioctl() and blk_get_tg()
  block: pass around REQ_* flags instead of broken down booleans during request alloc/free
  block: move blk_throtl prototypes to block/blk.h
  block: fix genhd refcounting in blkio_policy_parse_and_set()
  ...

Fix up trivial conflicts due to "mddev_t" -> "struct mddev" conversion and
making the request functions be of type "void" instead of "int" in
  - drivers/md/{faulty.c,linear.c,md.c,md.h,multipath.c,raid0.c,raid1.c,raid10.c,raid5.c}
  - drivers/staging/zram/zram_drv.c
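Context for the dm.c hunks below (annotation, not part of the commit): the merged series changes the ->make_request_fn contract so request functions return void instead of int, and it makes the old __make_request() callable by stacking drivers under the name blk_queue_bio(). That is why dm no longer needs to stash a saved_make_request_fn pointer. A minimal sketch of the resulting dispatch, paraphrasing the diff that follows (surrounding kernel types are assumed from include/linux/blkdev.h):

/*
 * Sketch only.  Pre-3.2, a make_request_fn returned int; a non-zero value
 * asked generic_make_request() to resubmit the (possibly remapped) bio:
 *
 *	typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 *
 * After this merge the remap-and-retry convention is gone and the
 * prototype is simply:
 */
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);

/*
 * dm's entry point picks a path per device: request-based targets hand
 * the bio to blk_queue_bio() (formerly __make_request()), bio-based
 * targets go through dm's own splitting/mapping code.
 */
static void dm_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

	if (dm_request_based(md))
		blk_queue_bio(q, bio);	/* queue through the block elevator */
	else
		_dm_request(q, bio);	/* split and map the bio in dm */
}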
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--  drivers/md/dm.c  25
1 file changed, 7 insertions, 18 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6b6616a41baa..4720f68f817e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -192,9 +192,6 @@ struct mapped_device {
 	/* forced geometry settings */
 	struct hd_geometry geometry;
 
-	/* For saving the address of __make_request for request based dm */
-	make_request_fn *saved_make_request_fn;
-
 	/* sysfs handle */
 	struct kobject kobj;
 
@@ -1403,7 +1400,7 @@ out:
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
  */
-static int _dm_request(struct request_queue *q, struct bio *bio)
+static void _dm_request(struct request_queue *q, struct bio *bio)
 {
 	int rw = bio_data_dir(bio);
 	struct mapped_device *md = q->queuedata;
@@ -1424,19 +1421,12 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 			queue_io(md, bio);
 		else
 			bio_io_error(bio);
-		return 0;
+		return;
 	}
 
 	__split_and_process_bio(md, bio);
 	up_read(&md->io_lock);
-	return 0;
-}
-
-static int dm_make_request(struct request_queue *q, struct bio *bio)
-{
-	struct mapped_device *md = q->queuedata;
-
-	return md->saved_make_request_fn(q, bio);	/* call __make_request() */
+	return;
 }
 
 static int dm_request_based(struct mapped_device *md)
@@ -1444,14 +1434,14 @@ static int dm_request_based(struct mapped_device *md)
 	return blk_queue_stackable(md->queue);
 }
 
-static int dm_request(struct request_queue *q, struct bio *bio)
+static void dm_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;
 
 	if (dm_request_based(md))
-		return dm_make_request(q, bio);
-
-	return _dm_request(q, bio);
+		blk_queue_bio(q, bio);
+	else
+		_dm_request(q, bio);
 }
 
 void dm_dispatch_request(struct request *rq)
@@ -2191,7 +2181,6 @@ static int dm_init_request_based_queue(struct mapped_device *md)
 		return 0;
 
 	md->queue = q;
-	md->saved_make_request_fn = md->queue->make_request_fn;
 	dm_init_md_queue(md);
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
 	blk_queue_prep_rq(md->queue, dm_prep_fn);