Diffstat (limited to 'block')
-rw-r--r--	block/bsg.c	102
1 file changed, 44 insertions(+), 58 deletions(-)
diff --git a/block/bsg.c b/block/bsg.c
index 576933fe1860..26a9372962ce 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -33,7 +33,7 @@
 #include <scsi/scsi_driver.h>
 #include <scsi/sg.h>
 
-static char bsg_version[] = "block layer sg (bsg) 0.4";
+const static char bsg_version[] = "block layer sg (bsg) 0.4";
 
 struct bsg_device {
 	request_queue_t *queue;
@@ -68,8 +68,6 @@ enum {
 #define dprintk(fmt, args...)
 #endif
 
-#define list_entry_bc(entry)	list_entry((entry), struct bsg_command, list)
-
 /*
  * just for testing
  */
@@ -78,9 +76,9 @@ enum {
 static DEFINE_MUTEX(bsg_mutex);
 static int bsg_device_nr, bsg_minor_idx;
 
-#define BSG_LIST_SIZE		(8)
-#define bsg_list_idx(minor)	((minor) & (BSG_LIST_SIZE - 1))
-static struct hlist_head bsg_device_list[BSG_LIST_SIZE];
+#define BSG_LIST_ARRAY_SIZE	8
+#define bsg_list_idx(minor)	((minor) & (BSG_LIST_ARRAY_SIZE - 1))
+static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
 
 static struct class *bsg_class;
 static LIST_HEAD(bsg_class_list);
@@ -128,7 +126,7 @@ static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
 	bd->queued_cmds++;
 	spin_unlock_irq(&bd->lock);
 
-	bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
+	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
 	if (unlikely(!bc)) {
 		spin_lock_irq(&bd->lock);
 		bd->queued_cmds--;
@@ -136,7 +134,6 @@ static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
 		goto out;
 	}
 
-	memset(bc, 0, sizeof(*bc));
 	bc->bd = bd;
 	INIT_LIST_HEAD(&bc->list);
 	dprintk("%s: returning free cmd %p\n", bd->name, bc);
@@ -147,21 +144,11 @@ out:
 }
 
 static inline void
-bsg_del_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
-{
-	bd->done_cmds--;
-	list_del(&bc->list);
-}
-
-static inline void
 bsg_add_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
 {
-	bd->done_cmds++;
-	list_add_tail(&bc->list, &bd->done_list);
-	wake_up(&bd->wq_done);
 }
 
-static inline int bsg_io_schedule(struct bsg_device *bd, int state)
+static int bsg_io_schedule(struct bsg_device *bd)
 {
 	DEFINE_WAIT(wait);
 	int ret = 0;
@@ -186,14 +173,11 @@ static inline int bsg_io_schedule(struct bsg_device *bd, int state)
 		goto unlock;
 	}
 
-	prepare_to_wait(&bd->wq_done, &wait, state);
+	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
 	spin_unlock_irq(&bd->lock);
 	io_schedule();
 	finish_wait(&bd->wq_done, &wait);
 
-	if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
-		ret = -ERESTARTSYS;
-
 	return ret;
 unlock:
 	spin_unlock_irq(&bd->lock);
@@ -272,7 +256,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
 {
 	request_queue_t *q = bd->queue;
 	struct request *rq, *next_rq = NULL;
-	int ret, rw = 0; /* shut up gcc */
+	int ret, rw;
 	unsigned int dxfer_len;
 	void *dxferp = NULL;
 
@@ -354,9 +338,11 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
 	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
 
 	spin_lock_irqsave(&bd->lock, flags);
-	list_del(&bc->list);
-	bsg_add_done_cmd(bd, bc);
+	list_move_tail(&bc->list, &bd->done_list);
+	bd->done_cmds++;
 	spin_unlock_irqrestore(&bd->lock, flags);
+
+	wake_up(&bd->wq_done);
 }
 
 /*
@@ -387,14 +373,15 @@ static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
 	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
 }
 
-static inline struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
+static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
 {
 	struct bsg_command *bc = NULL;
 
 	spin_lock_irq(&bd->lock);
 	if (bd->done_cmds) {
-		bc = list_entry_bc(bd->done_list.next);
-		bsg_del_done_cmd(bd, bc);
+		bc = list_entry(bd->done_list.next, struct bsg_command, list);
+		list_del(&bc->list);
+		bd->done_cmds--;
 	}
 	spin_unlock_irq(&bd->lock);
 
@@ -450,8 +437,8 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 	hdr->response_len = 0;
 
 	if (rq->sense_len && hdr->response) {
-		int len = min((unsigned int) hdr->max_response_len,
-			      rq->sense_len);
+		int len = min_t(unsigned int, hdr->max_response_len,
+				rq->sense_len);
 
 		ret = copy_to_user((void*)(unsigned long)hdr->response,
 				   rq->sense, len);
@@ -486,7 +473,7 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
 	 */
 	ret = 0;
 	do {
-		ret = bsg_io_schedule(bd, TASK_UNINTERRUPTIBLE);
+		ret = bsg_io_schedule(bd);
 		/*
 		 * look for -ENODATA specifically -- we'll sometimes get
 		 * -ERESTARTSYS when we've taken a signal, but we can't
@@ -523,7 +510,7 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
 	return ret;
 }
 
-static ssize_t
+static int
 __bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
 	   const struct iovec *iov, ssize_t *bytes_read)
 {
@@ -550,7 +537,7 @@ __bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
 		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
 						bc->bidi_bio);
 
-		if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
+		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
 			ret = -EFAULT;
 
 		bsg_free_command(bc);
@@ -582,6 +569,9 @@ static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
 		clear_bit(BSG_F_WRITE_PERM, &bd->flags);
 }
 
+/*
+ * Check if the error is a "real" error that we should return.
+ */
 static inline int err_block_err(int ret)
 {
 	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
@@ -610,8 +600,8 @@ bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 	return bytes_read;
 }
 
-static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
-			   size_t count, ssize_t *bytes_read)
+static int __bsg_write(struct bsg_device *bd, const char __user *buf,
+		       size_t count, ssize_t *bytes_written)
 {
 	struct bsg_command *bc;
 	struct request *rq;
@@ -655,7 +645,7 @@ static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
 		rq = NULL;
 		nr_commands--;
 		buf += sizeof(struct sg_io_v4);
-		*bytes_read += sizeof(struct sg_io_v4);
+		*bytes_written += sizeof(struct sg_io_v4);
 	}
 
 	if (bc)
@@ -668,7 +658,7 @@ static ssize_t
 bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 {
 	struct bsg_device *bd = file->private_data;
-	ssize_t bytes_read;
+	ssize_t bytes_written;
 	int ret;
 
 	dprintk("%s: write %Zd bytes\n", bd->name, count);
@@ -676,18 +666,18 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 	bsg_set_block(bd, file);
 	bsg_set_write_perm(bd, file);
 
-	bytes_read = 0;
-	ret = __bsg_write(bd, buf, count, &bytes_read);
-	*ppos = bytes_read;
+	bytes_written = 0;
+	ret = __bsg_write(bd, buf, count, &bytes_written);
+	*ppos = bytes_written;
 
 	/*
 	 * return bytes written on non-fatal errors
 	 */
-	if (!bytes_read || (bytes_read && err_block_err(ret)))
-		bytes_read = ret;
+	if (!bytes_written || (bytes_written && err_block_err(ret)))
+		bytes_written = ret;
 
-	dprintk("%s: returning %Zd\n", bd->name, bytes_read);
-	return bytes_read;
+	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
+	return bytes_written;
 }
 
 static struct bsg_device *bsg_alloc_device(void)
@@ -746,7 +736,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 					 struct request_queue *rq,
 					 struct file *file)
 {
-	struct bsg_device *bd = NULL;
+	struct bsg_device *bd;
 #ifdef BSG_DEBUG
 	unsigned char buf[32];
 #endif
@@ -762,7 +752,8 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 	atomic_set(&bd->ref_count, 1);
 	bd->minor = iminor(inode);
 	mutex_lock(&bsg_mutex);
-	hlist_add_head(&bd->dev_list, &bsg_device_list[bsg_list_idx(bd->minor)]);
+	hlist_add_head(&bd->dev_list,
+			&bsg_device_list[bd->minor & (BSG_LIST_ARRAY_SIZE - 1)]);
 
 	strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
 	dprintk("bound to <%s>, max queue %d\n",
@@ -774,12 +765,13 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 
 static struct bsg_device *__bsg_get_device(int minor)
 {
-	struct hlist_head *list = &bsg_device_list[bsg_list_idx(minor)];
+	struct hlist_head *list;
 	struct bsg_device *bd = NULL;
 	struct hlist_node *entry;
 
 	mutex_lock(&bsg_mutex);
 
+	list = &bsg_device_list[minor & (BSG_LIST_ARRAY_SIZE - 1)];
 	hlist_for_each(entry, list) {
 		bd = hlist_entry(entry, struct bsg_device, dev_list);
 		if (bd->minor == minor) {
@@ -858,16 +850,11 @@ static unsigned int bsg_poll(struct file *file, poll_table *wait)
 	return mask;
 }
 
-static int
-bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
-	  unsigned long arg)
+static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct bsg_device *bd = file->private_data;
 	int __user *uarg = (int __user *) arg;
 
-	if (!bd)
-		return -ENXIO;
-
 	switch (cmd) {
 	/*
 	 * our own ioctls
@@ -944,7 +931,7 @@ static struct file_operations bsg_fops = {
 	.poll		=	bsg_poll,
 	.open		=	bsg_open,
 	.release	=	bsg_release,
-	.ioctl		=	bsg_ioctl,
+	.unlocked_ioctl	=	bsg_ioctl,
 	.owner		=	THIS_MODULE,
 };
 
@@ -952,8 +939,7 @@ void bsg_unregister_queue(struct request_queue *q)
 {
 	struct bsg_class_device *bcd = &q->bsg_dev;
 
-	if (!bcd->class_dev)
-		return;
+	WARN_ON(!bcd->class_dev);
 
 	mutex_lock(&bsg_mutex);
 	sysfs_remove_link(&q->kobj, "bsg");
@@ -1069,7 +1055,7 @@ static int __init bsg_init(void)
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < BSG_LIST_SIZE; i++)
+	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
 		INIT_HLIST_HEAD(&bsg_device_list[i]);
 
 	bsg_class = class_create(THIS_MODULE, "bsg");