author	James Bottomley <James.Bottomley@steeleye.com>	2006-08-30 15:48:45 -0400
committer	James Bottomley <jejb@mulgrave.il.steeleye.com>	2006-08-31 11:17:18 -0400
commit	492dfb489658dfe4a755fa29dd0e34e9c8bd8fb8 (patch)
tree	1ad52b1ce98743729c7cacce0c602e22d3a5d076 /block
parent	f19eaa7f53736449a6eac89c3863eca2c64d5913 (diff)
[SCSI] block: add support for shared tag maps
The current block queue implementation already contains most of the
machinery for shared tag maps. The only remaining piece is a way to
allocate and destroy a tag map independently of the queues, so that
the maps can be managed over the life cycle of the overseeing entity.
Acked-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
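
As a usage sketch only (not part of this commit; the driver structure,
function names, and depth constant below are invented), an overseeing
entity such as a host adapter driver would allocate one map with
blk_init_tags(), attach it to each of its queues with
blk_queue_init_tags(), and drop its own reference with blk_free_tags()
once every queue has been torn down:

	#include <linux/blkdev.h>

	#define MY_SHARED_DEPTH	64	/* invented example depth */

	struct my_host {		/* hypothetical driver state */
		struct blk_queue_tag *shared_tags;
	};

	static int my_host_init_shared_tags(struct my_host *host,
					    request_queue_t **queues, int nr)
	{
		int i;

		/* one tag map, allocated independently of any queue */
		host->shared_tags = blk_init_tags(MY_SHARED_DEPTH);
		if (!host->shared_tags)
			return -ENOMEM;

		/* every queue draws its tags from the same map;
		 * error unwinding omitted for brevity */
		for (i = 0; i < nr; i++)
			if (blk_queue_init_tags(queues[i], MY_SHARED_DEPTH,
						host->shared_tags))
				return -ENOMEM;
		return 0;
	}

	static void my_host_exit(struct my_host *host)
	{
		/* all queues must already be released at this point */
		blk_free_tags(host->shared_tags);
	}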
Diffstat (limited to 'block')
-rw-r--r--	block/ll_rw_blk.c	109
1 file changed, 88 insertions, 21 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index ddd9253f9d55..556a3d354eab 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -848,21 +848,18 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
- * __blk_queue_free_tags - release tag maintenance info
- * @q: the request queue for the device
+ * __blk_free_tags - release a given set of tag maintenance info
+ * @bqt: the tag map to free
  *
- * Notes:
- *	blk_cleanup_queue() will take care of calling this function, if tagging
- *	has been used. So there's no need to call this directly.
- **/
-static void __blk_queue_free_tags(request_queue_t *q)
+ * Tries to free the specified @bqt@. Returns true if it was
+ * actually freed and false if there are still references using it
+ */
+static int __blk_free_tags(struct blk_queue_tag *bqt)
 {
-	struct blk_queue_tag *bqt = q->queue_tags;
-
-	if (!bqt)
-		return;
+	int retval;
 
-	if (atomic_dec_and_test(&bqt->refcnt)) {
+	retval = atomic_dec_and_test(&bqt->refcnt);
+	if (retval) {
 		BUG_ON(bqt->busy);
 		BUG_ON(!list_empty(&bqt->busy_list));
 
@@ -873,12 +870,49 @@ static void __blk_queue_free_tags(request_queue_t *q)
 		bqt->tag_map = NULL;
 
 		kfree(bqt);
+
 	}
 
+	return retval;
+}
+
+/**
+ * __blk_queue_free_tags - release tag maintenance info
+ * @q: the request queue for the device
+ *
+ * Notes:
+ *	blk_cleanup_queue() will take care of calling this function, if tagging
+ *	has been used. So there's no need to call this directly.
+ **/
+static void __blk_queue_free_tags(request_queue_t *q)
+{
+	struct blk_queue_tag *bqt = q->queue_tags;
+
+	if (!bqt)
+		return;
+
+	__blk_free_tags(bqt);
+
 	q->queue_tags = NULL;
 	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
 }
 
+
+/**
+ * blk_free_tags - release a given set of tag maintenance info
+ * @bqt: the tag map to free
+ *
+ * For externally managed @bqt@ frees the map. Callers of this
+ * function must guarantee to have released all the queues that
+ * might have been using this tag map.
+ */
+void blk_free_tags(struct blk_queue_tag *bqt)
+{
+	if (unlikely(!__blk_free_tags(bqt)))
+		BUG();
+}
+EXPORT_SYMBOL(blk_free_tags);
+
 /**
  * blk_queue_free_tags - release tag maintenance info
  * @q: the request queue for the device
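
Reading the two helpers together: __blk_free_tags() only drops one
reference and reports whether that was the last, while blk_free_tags()
BUG()s if the caller's reference was not the last one. A teardown
sketch under that assumption (queue and host names invented):

	/* each blk_cleanup_queue() drops that queue's reference on
	 * the shared map via __blk_queue_free_tags() */
	blk_cleanup_queue(q1);
	blk_cleanup_queue(q2);

	/* only now is it safe to drop the allocation reference;
	 * calling this with queues still attached would hit BUG() */
	blk_free_tags(host->shared_tags);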
@@ -901,7 +935,7 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 	unsigned long *tag_map;
 	int nr_ulongs;
 
-	if (depth > q->nr_requests * 2) {
+	if (q && depth > q->nr_requests * 2) {
 		depth = q->nr_requests * 2;
 		printk(KERN_ERR "%s: adjusted depth to %d\n",
 		       __FUNCTION__, depth);
@@ -927,6 +961,38 @@ fail:
 	return -ENOMEM;
 }
 
+static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
+						   int depth)
+{
+	struct blk_queue_tag *tags;
+
+	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+	if (!tags)
+		goto fail;
+
+	if (init_tag_map(q, tags, depth))
+		goto fail;
+
+	INIT_LIST_HEAD(&tags->busy_list);
+	tags->busy = 0;
+	atomic_set(&tags->refcnt, 1);
+	return tags;
+fail:
+	kfree(tags);
+	return NULL;
+}
+
+/**
+ * blk_init_tags - initialize the tag info for an external tag map
+ * @depth: the maximum queue depth supported
+ * @tags: the tag to use
+ **/
+struct blk_queue_tag *blk_init_tags(int depth)
+{
+	return __blk_queue_init_tags(NULL, depth);
+}
+EXPORT_SYMBOL(blk_init_tags);
+
 /**
  * blk_queue_init_tags - initialize the queue tag info
  * @q: the request queue for the device
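
Note that blk_init_tags() reaches init_tag_map() with q == NULL, which
is what the new `q &&` guard in the previous hunk is for: an externally
allocated map has no queue whose nr_requests could clamp the depth. A
minimal sketch (the depth value is invented):

	/* depth is taken as-is; there is no q->nr_requests to clamp
	 * it against for an externally managed map */
	struct blk_queue_tag *tags = blk_init_tags(256);

	if (!tags)
		return -ENOMEM;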
@@ -941,16 +1007,10 @@ int blk_queue_init_tags(request_queue_t *q, int depth,
 	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
 
 	if (!tags && !q->queue_tags) {
-		tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
-		if (!tags)
-			goto fail;
+		tags = __blk_queue_init_tags(q, depth);
 
-		if (init_tag_map(q, tags, depth))
+		if (!tags)
 			goto fail;
-
-		INIT_LIST_HEAD(&tags->busy_list);
-		tags->busy = 0;
-		atomic_set(&tags->refcnt, 1);
 	} else if (q->queue_tags) {
 		if ((rc = blk_queue_resize_tags(q, depth)))
 			return rc;
@@ -1002,6 +1062,13 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
 	}
 
 	/*
+	 * Currently cannot replace a shared tag map with a new
+	 * one, so error out if this is the case
+	 */
+	if (atomic_read(&bqt->refcnt) != 1)
+		return -EBUSY;
+
+	/*
 	 * save the old state info, so we can copy it back
 	 */
 	tag_index = bqt->tag_index;
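
The refcount check above means a map that is still shared cannot be
resized in place. A caller-side sketch of handling that (variable
names invented):

	int error = blk_queue_resize_tags(q, new_depth);

	/* -EBUSY: another queue or an external owner still holds
	 * a reference on this tag map */
	if (error == -EBUSY)
		printk(KERN_WARNING "cannot resize a shared tag map\n");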