author    | Jens Axboe <jens.axboe@oracle.com> | 2007-10-16 06:29:34 -0400
committer | Jens Axboe <jens.axboe@oracle.com> | 2007-10-16 06:29:34 -0400
commit    | 3eed13fd933dbb81db12f7cdec6de9268c4443b5 (patch)
tree      | c16913706acffd4a0b29ec12cd68906b708c9a8a /block
parent    | a39d113936370ba524fa9e34d6954c3625c8aa64 (diff)
parent    | 2c941a204070ab32d92d40318a3196a7fb994c00 (diff)
Merge branch 'sglist-arch' into for-linus
Diffstat (limited to 'block')

-rw-r--r-- | block/ll_rw_blk.c | 41
1 file changed, 34 insertions(+), 7 deletions(-)
```diff
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 4df7d027eb06..527bd8d4db50 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -30,6 +30,7 @@
 #include <linux/cpu.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <linux/scatterlist.h>
 
 /*
  * for max sense size
@@ -1318,9 +1319,10 @@ static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
  * must make sure sg can hold rq->nr_phys_segments entries
  */
 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-		  struct scatterlist *sg)
+		  struct scatterlist *sglist)
 {
 	struct bio_vec *bvec, *bvprv;
+	struct scatterlist *next_sg, *sg;
 	struct req_iterator iter;
 	int nsegs, cluster;
 
@@ -1331,11 +1333,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	 * for each bio in rq
 	 */
 	bvprv = NULL;
+	sg = next_sg = &sglist[0];
 	rq_for_each_segment(bvec, rq, iter) {
 		int nbytes = bvec->bv_len;
 
 		if (bvprv && cluster) {
-			if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
+			if (sg->length + nbytes > q->max_segment_size)
 				goto new_segment;
 
 			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -1343,14 +1346,15 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
 				goto new_segment;
 
-			sg[nsegs - 1].length += nbytes;
+			sg->length += nbytes;
 		} else {
 new_segment:
-			memset(&sg[nsegs],0,sizeof(struct scatterlist));
-			sg[nsegs].page = bvec->bv_page;
-			sg[nsegs].length = nbytes;
-			sg[nsegs].offset = bvec->bv_offset;
+			sg = next_sg;
+			next_sg = sg_next(sg);
 
+			sg->page = bvec->bv_page;
+			sg->length = nbytes;
+			sg->offset = bvec->bv_offset;
 			nsegs++;
 		}
 		bvprv = bvec;
```
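The substantive change in blk_rq_map_sg() is that the mapping loop no longer indexes a flat `sg[]` array; it advances through sg_next(), which lets one logical scatterlist be chained across several smaller arrays. Below is a minimal userspace sketch of that traversal idea only; the struct layout, the explicit `chain` pointer, and all names are simplifying assumptions (the kernel hides the chain link in the entry's page field), not the kernel's actual encoding.

```c
#include <stdio.h>

/* Illustrative, simplified scatterlist entry -- not the kernel's layout. */
struct sg_entry {
	void *page;		/* data page; NULL marks chain link or end */
	unsigned int length;
	unsigned int offset;
	struct sg_entry *chain;	/* set only on a chain entry */
};

/* The sg_next() idea: step forward, hopping across a chain entry. */
static struct sg_entry *sg_entry_next(struct sg_entry *sg)
{
	sg++;
	if (sg->chain)
		sg = sg->chain;
	return sg;
}

int main(void)
{
	char a, b, c;	/* stand-ins for data pages */
	struct sg_entry second[2] = {
		{ &c, 512, 0, NULL },
		{ NULL, 0, 0, NULL },		/* end of list */
	};
	struct sg_entry first[3] = {
		{ &a, 512, 0, NULL },
		{ &b, 512, 0, NULL },
		{ NULL, 0, 0, second },		/* chain to the next array */
	};
	struct sg_entry *sg;
	int nsegs = 0;

	/* Mirrors the new loop shape: sg = next_sg; next_sg = sg_next(sg); */
	for (sg = first; sg->page; sg = sg_entry_next(sg))
		nsegs++;

	printf("mapped %d segments across two arrays\n", nsegs);
	return 0;
}
```

The payoff of chaining is that blk_rq_map_sg() can keep appending segments past the end of one array as long as sg_next() has somewhere to jump, so large requests no longer require one big contiguous scatterlist allocation.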
```diff
@@ -4068,7 +4072,23 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
 
+static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(q->max_phys_segments, page);
+}
+
+static ssize_t queue_max_segments_store(struct request_queue *q,
+					const char *page, size_t count)
+{
+	unsigned long segments;
+	ssize_t ret = queue_var_store(&segments, page, count);
 
+	spin_lock_irq(q->queue_lock);
+	q->max_phys_segments = segments;
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
```
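The two new handlers follow the existing queue sysfs pattern: show formats the current value into the page buffer and returns its length, store parses the user's write and applies it. A small userspace model of that round trip, with illustrative names only (the real helpers are queue_var_show()/queue_var_store() in this same file):

```c
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

static unsigned int max_phys_segments = 128;	/* value behind the attribute */

/* show side: format into the page buffer, return the byte count */
static ssize_t max_segments_show(char *page)
{
	return sprintf(page, "%u\n", max_phys_segments);
}

/* store side: parse the written text, report bytes consumed
 * (the kernel version takes q->queue_lock around the update) */
static ssize_t max_segments_store(const char *page, size_t count)
{
	max_phys_segments = (unsigned int)strtoul(page, NULL, 10);
	return count;
}

int main(void)
{
	char page[64];

	max_segments_show(page);
	printf("read:  %s", page);

	max_segments_store("256\n", 4);
	max_segments_show(page);
	printf("after: %s", page);
	return 0;
}
```

The spin_lock_irq() in the real store handler is the part this model elides: the assignment to q->max_phys_segments must not race with request merging that reads the limit.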
```diff
@@ -4092,6 +4112,12 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
 	.show = queue_max_hw_sectors_show,
 };
 
+static struct queue_sysfs_entry queue_max_segments_entry = {
+	.attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_max_segments_show,
+	.store = queue_max_segments_store,
+};
+
 static struct queue_sysfs_entry queue_iosched_entry = {
 	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
 	.show = elv_iosched_show,
@@ -4103,6 +4129,7 @@ static struct attribute *default_attrs[] = {
 	&queue_ra_entry.attr,
 	&queue_max_hw_sectors_entry.attr,
 	&queue_max_sectors_entry.attr,
+	&queue_max_segments_entry.attr,
 	&queue_iosched_entry.attr,
 	NULL,
 };
```
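With queue_max_segments_entry wired into default_attrs[], the value becomes visible under each queue's sysfs directory as max_segments. A minimal reader, assuming a disk named sda (the device name is just an example):

```c
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/sda/queue/max_segments", "r");
	unsigned int segs;

	if (!f) {
		perror("max_segments");
		return 1;
	}
	if (fscanf(f, "%u", &segs) == 1)
		printf("max_segments: %u\n", segs);
	fclose(f);
	return 0;
}
```

Since the attribute mode is S_IRUGO | S_IWUSR, root can also tune it with a plain write, e.g. `echo 64 > /sys/block/sda/queue/max_segments`.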