author    Tejun Heo <htejun@gmail.com>    2005-06-23 03:08:49 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2005-06-23 12:45:15 -0400
commit    fa72b903f75e4f0f0b2c2feed093005167da4023 (patch)
tree      12087e87fb8d41d10013946e5b2c91e57265c29e /drivers
parent    2bf0fdad51c6710bf15d0bf4b9b30b8498fe4ddd (diff)
[PATCH] blk: remove blk_queue_tag->real_max_depth optimization
blk_queue_tag->real_max_depth was used to optimize out unnecessary allocations/frees on tag resize. However, the whole thing was very broken: tag_map was never allocated to real_max_depth, resulting in access beyond the end of the map, and bits in [max_depth..real_max_depth] were set when initializing a map and copied when resizing, resulting in pre-occupied tags. As the gain of the optimization is very small, well, almost nil, remove the whole thing.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Acked-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
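For illustration only (not part of the commit): a minimal userspace sketch comparing the old and new tag_map sizing formulas. ALIGN, BITS_PER_LONG and BLK_TAGS_PER_LONG are redefined locally here as assumptions that mirror the kernel definitions of the time.

/* Standalone comparison of the old and new tag_map sizing (illustration only). */
#include <stdio.h>

#define BITS_PER_LONG      (sizeof(unsigned long) * 8)
#define BLK_TAGS_PER_LONG  BITS_PER_LONG
/* Round x up to the next multiple of a (a must be a power of two). */
#define ALIGN(x, a)        (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long depths[] = { 1, 64, 70, 128 };

	for (int i = 0; i < 4; i++) {
		unsigned long depth = depths[i];
		/* Old formula: always adds one extra word, even when depth
		 * is already an exact multiple of the word size. */
		unsigned long bits = (depth / BLK_TAGS_PER_LONG) + 1;
		/* New formula: exactly enough words to cover depth bits. */
		unsigned long nr_ulongs = ALIGN(depth, BLK_TAGS_PER_LONG) / BLK_TAGS_PER_LONG;

		printf("depth %3lu: old %lu word(s), new %lu word(s)\n",
		       depth, bits, nr_ulongs);
	}
	return 0;
}

The over-allocation itself was harmless; the problem the patch describes is that the removed loop in init_tag_map() pre-set every bit in [depth..bits * BLK_TAGS_PER_LONG), and blk_queue_resize_tags() then memcpy()'d the old map, set bits included, into the new one, leaving those tags pre-occupied.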
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/ll_rw_blk.c | 35
1 file changed, 10 insertions, 25 deletions
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 808390c74200..896d17c28f42 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -717,7 +717,7 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
 
-	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
+	if (unlikely(bqt == NULL || tag >= bqt->max_depth))
 		return NULL;
 
 	return bqt->tag_index[tag];
@@ -775,9 +775,9 @@ EXPORT_SYMBOL(blk_queue_free_tags);
 static int
 init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 {
-	int bits, i;
 	struct request **tag_index;
 	unsigned long *tag_map;
+	int nr_ulongs;
 
 	if (depth > q->nr_requests * 2) {
 		depth = q->nr_requests * 2;
@@ -789,24 +789,17 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 	if (!tag_index)
 		goto fail;
 
-	bits = (depth / BLK_TAGS_PER_LONG) + 1;
-	tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
+	nr_ulongs = ALIGN(depth, BLK_TAGS_PER_LONG) / BLK_TAGS_PER_LONG;
+	tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
 	if (!tag_map)
 		goto fail;
 
 	memset(tag_index, 0, depth * sizeof(struct request *));
-	memset(tag_map, 0, bits * sizeof(unsigned long));
+	memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
 	tags->max_depth = depth;
-	tags->real_max_depth = bits * BITS_PER_LONG;
 	tags->tag_index = tag_index;
 	tags->tag_map = tag_map;
 
-	/*
-	 * set the upper bits if the depth isn't a multiple of the word size
-	 */
-	for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
-		__set_bit(i, tag_map);
-
 	return 0;
 fail:
 	kfree(tag_index);
@@ -871,32 +864,24 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
 	struct blk_queue_tag *bqt = q->queue_tags;
 	struct request **tag_index;
 	unsigned long *tag_map;
-	int bits, max_depth;
+	int max_depth, nr_ulongs;
 
 	if (!bqt)
 		return -ENXIO;
 
 	/*
-	 * don't bother sizing down
-	 */
-	if (new_depth <= bqt->real_max_depth) {
-		bqt->max_depth = new_depth;
-		return 0;
-	}
-
-	/*
 	 * save the old state info, so we can copy it back
 	 */
 	tag_index = bqt->tag_index;
 	tag_map = bqt->tag_map;
-	max_depth = bqt->real_max_depth;
+	max_depth = bqt->max_depth;
 
 	if (init_tag_map(q, bqt, new_depth))
 		return -ENOMEM;
 
 	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
-	bits = max_depth / BLK_TAGS_PER_LONG;
-	memcpy(bqt->tag_map, tag_map, bits * sizeof(unsigned long));
+	nr_ulongs = ALIGN(max_depth, BLK_TAGS_PER_LONG) / BLK_TAGS_PER_LONG;
+	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
 
 	kfree(tag_index);
 	kfree(tag_map);
@@ -926,7 +911,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 
 	BUG_ON(tag == -1);
 
-	if (unlikely(tag >= bqt->real_max_depth))
+	if (unlikely(tag >= bqt->max_depth))
 		return;
 
 	if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {