author		Jens Axboe <jens.axboe@oracle.com>	2007-01-23 10:24:41 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2007-07-16 02:52:45 -0400
commit		5309cb38de65eddd5f7e125da750accf949f29e8 (patch)
tree		911ee4e9d2c47ccd682505c8eb7d9e39ec889f8a
parent		3862153b673516b2efa0447b9b3778f47ac8f8c8 (diff)
Add queue resizing support
Just get rid of the preallocated command map and use the slab cache
to get/free commands instead.
Original patch from FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>,
changed by me to not use a mempool.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--	block/bsg.c	96
1 file changed, 32 insertions(+), 64 deletions(-)
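For context, a minimal sketch of the allocation pattern the patch switches to: one kmem_cache created at init time, with per-command kmem_cache_alloc()/kmem_cache_free() replacing index arithmetic on a preallocated map. The calls mirror the diff below; the abridged struct and the example_* wrappers are illustrative stand-ins, not code from bsg.c, and the six-argument kmem_cache_create() matches the 2007-era API used in the patch.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Abridged stand-in for the real struct bsg_command in block/bsg.c. */
struct bsg_command {
	struct bsg_device *bd;		/* owning device */
	struct list_head list;
	/* request/bio/sg_io_hdr fields omitted for brevity */
};

static struct kmem_cache *bsg_cmd_cachep;

/* One cache for all commands, created once at module init. */
static int example_create_cache(void)
{
	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL, NULL);
	return bsg_cmd_cachep ? 0 : -ENOMEM;
}

static struct bsg_command *example_alloc_command(void)
{
	/* GFP_USER, as in the patch: allocation is on behalf of a user */
	struct bsg_command *bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);

	if (bc)
		memset(bc, 0, sizeof(*bc));
	return bc;
}

static void example_free_command(struct bsg_command *bc)
{
	kmem_cache_free(bsg_cmd_cachep, bc);
}

Because commands now come straight from the slab allocator, the allocation can fail at any depth, which is why the real __bsg_alloc_command() below grows an alloc_fail path that undoes the queued_cmds accounting under the device lock.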
diff --git a/block/bsg.c b/block/bsg.c
index 9d77a0c72457..c56618ae54c3 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -33,8 +33,6 @@
 
 static char bsg_version[] = "block layer sg (bsg) 0.4";
 
-struct bsg_command;
-
 struct bsg_device {
 	struct gendisk *disk;
 	request_queue_t *queue;
@@ -46,8 +44,6 @@ struct bsg_device {
 	int minor;
 	int queued_cmds;
 	int done_cmds;
-	unsigned long *cmd_bitmap;
-	struct bsg_command *cmd_map;
 	wait_queue_head_t wq_done;
 	wait_queue_head_t wq_free;
 	char name[BDEVNAME_SIZE];
@@ -60,14 +56,7 @@ enum {
 	BSG_F_WRITE_PERM	= 2,
 };
 
-/*
- * command allocation bitmap defines
- */
-#define BSG_CMDS_PAGE_ORDER	(1)
-#define BSG_CMDS_PER_LONG	(sizeof(unsigned long) * 8)
-#define BSG_CMDS_MASK		(BSG_CMDS_PER_LONG - 1)
-#define BSG_CMDS_BYTES		(PAGE_SIZE * (1 << BSG_CMDS_PAGE_ORDER))
-#define BSG_CMDS		(BSG_CMDS_BYTES / sizeof(struct bsg_command))
+#define BSG_DEFAULT_CMDS	64
 
 #undef BSG_DEBUG
 
@@ -94,6 +83,8 @@ static struct hlist_head bsg_device_list[BSG_LIST_SIZE];
 static struct class *bsg_class;
 static LIST_HEAD(bsg_class_list);
 
+static struct kmem_cache *bsg_cmd_cachep;
+
 /*
  * our internal command type
  */
@@ -111,14 +102,12 @@ struct bsg_command {
 static void bsg_free_command(struct bsg_command *bc)
 {
 	struct bsg_device *bd = bc->bd;
-	unsigned long bitnr = bc - bd->cmd_map;
 	unsigned long flags;
 
-	dprintk("%s: command bit offset %lu\n", bd->name, bitnr);
+	kmem_cache_free(bsg_cmd_cachep, bc);
 
 	spin_lock_irqsave(&bd->lock, flags);
 	bd->queued_cmds--;
-	__clear_bit(bitnr, bd->cmd_bitmap);
 	spin_unlock_irqrestore(&bd->lock, flags);
 
 	wake_up(&bd->wq_free);
@@ -127,32 +116,29 @@ static void bsg_free_command(struct bsg_command *bc)
 static struct bsg_command *__bsg_alloc_command(struct bsg_device *bd)
 {
 	struct bsg_command *bc = NULL;
-	unsigned long *map;
-	int free_nr;
 
 	spin_lock_irq(&bd->lock);
 
 	if (bd->queued_cmds >= bd->max_queue)
 		goto out;
 
-	for (free_nr = 0, map = bd->cmd_bitmap; *map == ~0UL; map++)
-		free_nr += BSG_CMDS_PER_LONG;
-
-	BUG_ON(*map == ~0UL);
-
 	bd->queued_cmds++;
-	free_nr += ffz(*map);
-	__set_bit(free_nr, bd->cmd_bitmap);
 	spin_unlock_irq(&bd->lock);
 
-	bc = bd->cmd_map + free_nr;
+	bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
+	if (unlikely(!bc)) {
+		spin_lock_irq(&bd->lock);
+		goto alloc_fail;
+	}
+
 	memset(bc, 0, sizeof(*bc));
 	bc->bd = bd;
 	INIT_LIST_HEAD(&bc->list);
-	dprintk("%s: returning free cmd %p (bit %d)\n", bd->name, bc, free_nr);
+	dprintk("%s: returning free cmd %p\n", bd->name, bc);
 	return bc;
+alloc_fail:
+	bd->queued_cmds--;
 out:
-	dprintk("%s: failed (depth %d)\n", bd->name, bd->queued_cmds);
 	spin_unlock_irq(&bd->lock);
 	return bc;
 }
@@ -356,8 +342,8 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
 	struct bsg_device *bd = bc->bd;
 	unsigned long flags;
 
-	dprintk("%s: finished rq %p bc %p, bio %p offset %Zd stat %d\n",
-		bd->name, rq, bc, bc->bio, bc - bd->cmd_map, uptodate);
+	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
+		bd->name, rq, bc, bc->bio, uptodate);
 
 	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
 
@@ -703,21 +689,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 	return bytes_read;
 }
 
-static void bsg_free_device(struct bsg_device *bd)
-{
-	if (bd->cmd_map)
-		free_pages((unsigned long) bd->cmd_map, BSG_CMDS_PAGE_ORDER);
-
-	kfree(bd->cmd_bitmap);
-	kfree(bd);
-}
-
 static struct bsg_device *bsg_alloc_device(void)
 {
-	struct bsg_command *cmd_map;
-	unsigned long *cmd_bitmap;
 	struct bsg_device *bd;
-	int bits;
 
 	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
 	if (unlikely(!bd))
@@ -725,19 +699,7 @@ static struct bsg_device *bsg_alloc_device(void)
 
 	spin_lock_init(&bd->lock);
 
-	bd->max_queue = BSG_CMDS;
-
-	bits = (BSG_CMDS / BSG_CMDS_PER_LONG) + 1;
-	cmd_bitmap = kzalloc(bits * sizeof(unsigned long), GFP_KERNEL);
-	if (!cmd_bitmap)
-		goto out_free_bd;
-	bd->cmd_bitmap = cmd_bitmap;
-
-	cmd_map = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					BSG_CMDS_PAGE_ORDER);
-	if (!cmd_map)
-		goto out_free_bitmap;
-	bd->cmd_map = cmd_map;
+	bd->max_queue = BSG_DEFAULT_CMDS;
 
 	INIT_LIST_HEAD(&bd->busy_list);
 	INIT_LIST_HEAD(&bd->done_list);
@@ -746,12 +708,6 @@ static struct bsg_device *bsg_alloc_device(void)
 	init_waitqueue_head(&bd->wq_free);
 	init_waitqueue_head(&bd->wq_done);
 	return bd;
-
-out_free_bitmap:
-	kfree(cmd_bitmap);
-out_free_bd:
-	kfree(bd);
-	return NULL;
 }
 
 static int bsg_put_device(struct bsg_device *bd)
@@ -779,7 +735,7 @@ static int bsg_put_device(struct bsg_device *bd)
 
 	blk_put_queue(bd->queue);
 	hlist_del(&bd->dev_list);
-	bsg_free_device(bd);
+	kfree(bd);
 out:
 	mutex_unlock(&bsg_mutex);
 	return ret;
@@ -918,15 +874,17 @@ bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
 	 */
 	case SG_GET_COMMAND_Q:
 		return put_user(bd->max_queue, uarg);
 	case SG_SET_COMMAND_Q: {
 		int queue;
 
 		if (get_user(queue, uarg))
 			return -EFAULT;
-		if (queue > BSG_CMDS || queue < 1)
+		if (queue < 1)
 			return -EINVAL;
 
+		spin_lock_irq(&bd->lock);
 		bd->max_queue = queue;
+		spin_unlock_irq(&bd->lock);
 		return 0;
 	}
 
@@ -1035,15 +993,25 @@ static int __init bsg_init(void)
 {
 	int ret, i;
 
+	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
+				sizeof(struct bsg_command), 0, 0, NULL, NULL);
+	if (!bsg_cmd_cachep) {
+		printk(KERN_ERR "bsg: failed creating slab cache\n");
+		return -ENOMEM;
+	}
+
 	for (i = 0; i < BSG_LIST_SIZE; i++)
 		INIT_HLIST_HEAD(&bsg_device_list[i]);
 
 	bsg_class = class_create(THIS_MODULE, "bsg");
-	if (IS_ERR(bsg_class))
+	if (IS_ERR(bsg_class)) {
+		kmem_cache_destroy(bsg_cmd_cachep);
 		return PTR_ERR(bsg_class);
+	}
 
 	ret = register_chrdev(BSG_MAJOR, "bsg", &bsg_fops);
 	if (ret) {
+		kmem_cache_destroy(bsg_cmd_cachep);
 		class_destroy(bsg_class);
 		return ret;
 	}
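With the fixed BSG_CMDS ceiling removed, SG_SET_COMMAND_Q only rejects values below 1, so userspace can now raise a device's command queue depth past the old preallocated limit (the new default is BSG_DEFAULT_CMDS, i.e. 64). A minimal userspace sketch of the resulting resize interface, assuming a bsg character node exists for the device; the device path used here is hypothetical.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>
#include <unistd.h>

int main(void)
{
	int depth = 128;	/* above the BSG_DEFAULT_CMDS default of 64 */
	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);	/* hypothetical node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, SG_SET_COMMAND_Q, &depth) < 0)	/* resize the command queue */
		perror("SG_SET_COMMAND_Q");
	if (ioctl(fd, SG_GET_COMMAND_Q, &depth) == 0)	/* read back bd->max_queue */
		printf("command queue depth: %d\n", depth);
	close(fd);
	return 0;
}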