author     Tejun Heo <htejun@gmail.com>             2005-08-05 16:28:11 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2005-08-05 16:43:16 -0400
commit     ba02508248e90a9d696aebd18b48a3290235b53c
tree       4167aecae57a5ceab6392e54189a271f3dfdebf8
parent     c7546f8f03f5a4fa612605b6be930234d6026860
[PATCH] blk: fix tag shrinking (revive real_max_depth)
My patch in commit fa72b903f75e4f0f0b2c2feed093005167da4023 incorrectly
removed blk_queue_tag->real_max_depth.
The original resize implementation was incorrect in the following
respects (both issues are sketched in the model after the list):
* the actual allocation size of tag_index was shorter than
  real_max_depth, but was assumed to be of the same size, possibly
  causing memory accesses beyond the allocated area.
* bits in tag_map between max_depth and real_max_depth were
  initialized to 1's, making those tags permanently reserved.
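To illustrate, here is a minimal userspace model of the two bugs; the
rounding rule, the names, and the sizes are assumptions of this sketch,
not the actual kernel code.

/* illustrative model only, not the kernel code */
#include <limits.h>
#include <stdlib.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))

struct request;				/* opaque in this model */

struct model_tag {
	struct request **tag_index;	/* tag -> request */
	unsigned long *tag_map;		/* busy-tag bitmap */
	int max_depth;
	int real_max_depth;
};

static int model_buggy_init(struct model_tag *tags, int depth)
{
	/* assumed round-up; the real optimization differed in detail */
	int real = (depth + 3) & ~3;
	size_t nr_ulongs = (real + BITS_PER_LONG - 1) / BITS_PER_LONG;
	int i;

	/* bug 1: tag_index holds only `depth` slots, although lookups
	 * were bounded by real_max_depth, i.e. went up to real - 1 */
	tags->tag_index = calloc(depth, sizeof(*tags->tag_index));
	tags->tag_map = calloc(nr_ulongs, sizeof(*tags->tag_map));
	if (!tags->tag_index || !tags->tag_map)
		return -1;

	/* bug 2: bits depth..real-1 set to 1, so those tags always
	 * look busy and are permanently reserved */
	for (i = depth; i < real; i++)
		tags->tag_map[i / BITS_PER_LONG] |=
			1UL << (i % BITS_PER_LONG);

	tags->max_depth = depth;
	tags->real_max_depth = real;
	return 0;
}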
In an attempt to fix the above two bugs, I removed the allocation
optimization in init_tag_map along with real_max_depth itself; the tag
map/index were then allocated and freed immediately on each resize.
Unfortunately, I did not consider that the tag map/index can be resized
dynamically while tags beyond new_depth are still active. This led to
accesses to the freed area after shrinking the tags, and to the
following bug report thread on linux-scsi (a userspace model of the
broken shrink path follows the link).
http://marc.theaimsgroup.com/?l=linux-scsi&m=112319898111885&w=2
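The failure mode can be modeled in userspace as follows;
model_broken_resize() and its lock-free surroundings are inventions of
this sketch and mirror only the alloc/copy/free order of the old code.

/* model of the pre-fix shrink path; illustrative only */
#include <stdlib.h>
#include <string.h>

struct request;

struct model_tag {
	struct request **tag_index;
	int max_depth;
};

static int model_broken_resize(struct model_tag *bqt, int new_depth)
{
	struct request **old_index = bqt->tag_index;
	int old_depth = bqt->max_depth;

	bqt->tag_index = calloc(new_depth, sizeof(*bqt->tag_index));
	if (!bqt->tag_index) {
		bqt->tag_index = old_index;
		return -1;
	}
	bqt->max_depth = new_depth;

	/* copies the old number of entries: when new_depth <
	 * old_depth this writes past the new allocation ... */
	memcpy(bqt->tag_index, old_index,
	       old_depth * sizeof(struct request *));

	/* ... and tags >= new_depth reference freed memory once
	 * their completions come in */
	free(old_index);
	return 0;
}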
To fix the problem, I've revived real_max_depth without the allocation
optimization in init_tag_map, and Andrew Vasquez confirmed that the
problem was fixed. As Jens is not going to be available for a week, he
asked me to make sure that this patch reaches you.
http://marc.theaimsgroup.com/?l=linux-scsi&m=112325778530886&w=2
Also, a comment was added to document why real_max_depth is needed for
dynamic shrinking; a small model of the revived scheme follows.
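For comparison with the hunks below, a compilable model of the revived
scheme; the names belong to this sketch, and only the bookkeeping path
is spelled out.

/* model of the fixed resize logic; illustrative only */
struct model_tag2 {
	int max_depth;		/* what we will send to device */
	int real_max_depth;	/* what the array can hold */
};

static int model_fixed_resize(struct model_tag2 *bqt, int new_depth)
{
	if (new_depth <= bqt->real_max_depth) {
		/* shrink (or modest grow): bookkeeping only; nothing
		 * is freed, so in-flight tags between new_depth and
		 * real_max_depth stay valid until they complete */
		bqt->max_depth = new_depth;
		return 0;
	}
	/* a genuine grow still reallocates, copying real_max_depth
	 * entries back, as in the blk_queue_resize_tags hunk below */
	return -1;	/* reallocation elided in this model */
}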
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 drivers/block/ll_rw_blk.c | 18 +++++++++++++++---
 include/linux/blkdev.h    |  1 +
 2 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 692a5fced76e..3c818544475e 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -719,7 +719,7 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
 
-	if (unlikely(bqt == NULL || tag >= bqt->max_depth))
+	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
 		return NULL;
 
 	return bqt->tag_index[tag];
@@ -798,6 +798,7 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 
 	memset(tag_index, 0, depth * sizeof(struct request *));
 	memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
+	tags->real_max_depth = depth;
 	tags->max_depth = depth;
 	tags->tag_index = tag_index;
 	tags->tag_map = tag_map;
@@ -872,11 +873,22 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
 		return -ENXIO;
 
 	/*
+	 * if we already have large enough real_max_depth.  just
+	 * adjust max_depth.  *NOTE* as requests with tag value
+	 * between new_depth and real_max_depth can be in-flight, tag
+	 * map can not be shrunk blindly here.
+	 */
+	if (new_depth <= bqt->real_max_depth) {
+		bqt->max_depth = new_depth;
+		return 0;
+	}
+
+	/*
 	 * save the old state info, so we can copy it back
 	 */
 	tag_index = bqt->tag_index;
 	tag_map = bqt->tag_map;
-	max_depth = bqt->max_depth;
+	max_depth = bqt->real_max_depth;
 
 	if (init_tag_map(q, bqt, new_depth))
 		return -ENOMEM;
@@ -913,7 +925,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 
 	BUG_ON(tag == -1);
 
-	if (unlikely(tag >= bqt->max_depth))
+	if (unlikely(tag >= bqt->real_max_depth))
 		/*
 		 * This can happen after tag depth has been reduced.
 		 * FIXME: how about a warning or info message here?
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0881b5cdee3d..19bd8e7e11bf 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -301,6 +301,7 @@ struct blk_queue_tag {
 	struct list_head busy_list;	/* fifo list of busy tags */
 	int busy;			/* current depth */
 	int max_depth;			/* what we will send to device */
+	int real_max_depth;		/* what the array can hold */
 	atomic_t refcnt;		/* map can be shared */
 };
 