| author    | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-17 18:26:31 -0400 |
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-17 18:26:31 -0400 |
| commit    | f3d9071667752e5d419e59f76912ed3fb4a6bb9c (patch) |
| tree      | e8787124dbe9bdd7e12d7c427c9ee6036cbe7783 /block |
| parent    | 6dfce901a450534d046b7950682243d5fb665783 (diff) |
| parent    | 5d3a8cd34beb1521a2697c6ed7b647ef9bafdbf1 (diff) |
Merge branch 'bsg' of git://git.kernel.dk/data/git/linux-2.6-block
* 'bsg' of git://git.kernel.dk/data/git/linux-2.6-block:
bsg: fix missing space in version print
Don't define empty struct bsg_class_device if !CONFIG_BLK_DEV_BSG
bsg: Kconfig updates
bsg: minor cleanup
bsg: device hash table cleanup
bsg: fix initialization error handling bugs
bsg: mark FUJITA Tomonori as bsg maintainer
bsg: convert to dynamic major
bsg: address various review comments
Diffstat (limited to 'block')
| -rw-r--r-- | block/Kconfig | 13 |
| -rw-r--r-- | block/bsg.c | 167 |
2 files changed, 83 insertions, 97 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 6597b60e8e69..0768741d6813 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -52,11 +52,16 @@ config LSF
 endif # BLOCK

 config BLK_DEV_BSG
-	bool "Block layer SG support"
+	bool "Block layer SG support v4 (EXPERIMENTAL)"
 	depends on (SCSI=y) && EXPERIMENTAL
-	default y
 	---help---
-	Saying Y here will enable generic SG (SCSI generic) v4
-	support for any block device.
+	Saying Y here will enable generic SG (SCSI generic) v4 support
+	for any block device.
+
+	Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4
+	can handle complicated SCSI commands: tagged variable length cdbs
+	with bidirectional data transfers and generic request/response
+	protocols (e.g. Task Management Functions and SMP in Serial
+	Attached SCSI).

 source block/Kconfig.iosched
diff --git a/block/bsg.c b/block/bsg.c
index 576933fe1860..baa04e7adf19 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -33,7 +33,8 @@
 #include <scsi/scsi_driver.h>
 #include <scsi/sg.h>

-static char bsg_version[] = "block layer sg (bsg) 0.4";
+#define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver"
+#define BSG_VERSION "0.4"

 struct bsg_device {
 	request_queue_t *queue;
@@ -68,22 +69,15 @@ enum {
 #define dprintk(fmt, args...)
 #endif

-#define list_entry_bc(entry) list_entry((entry), struct bsg_command, list)
-
-/*
- * just for testing
- */
-#define BSG_MAJOR (240)
-
 static DEFINE_MUTEX(bsg_mutex);
 static int bsg_device_nr, bsg_minor_idx;

-#define BSG_LIST_SIZE (8)
-#define bsg_list_idx(minor) ((minor) & (BSG_LIST_SIZE - 1))
-static struct hlist_head bsg_device_list[BSG_LIST_SIZE];
+#define BSG_LIST_ARRAY_SIZE 8
+static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

 static struct class *bsg_class;
 static LIST_HEAD(bsg_class_list);
+static int bsg_major;

 static struct kmem_cache *bsg_cmd_cachep;

@@ -128,7 +122,7 @@ static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
 	bd->queued_cmds++;
 	spin_unlock_irq(&bd->lock);

-	bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
+	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
 	if (unlikely(!bc)) {
 		spin_lock_irq(&bd->lock);
 		bd->queued_cmds--;
@@ -136,7 +130,6 @@ static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
 		goto out;
 	}

-	memset(bc, 0, sizeof(*bc));
 	bc->bd = bd;
 	INIT_LIST_HEAD(&bc->list);
 	dprintk("%s: returning free cmd %p\n", bd->name, bc);
@@ -146,22 +139,12 @@ out:
 	return bc;
 }

-static inline void
-bsg_del_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
-{
-	bd->done_cmds--;
-	list_del(&bc->list);
-}
-
-static inline void
-bsg_add_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
+static inline struct hlist_head *bsg_dev_idx_hash(int index)
 {
-	bd->done_cmds++;
-	list_add_tail(&bc->list, &bd->done_list);
-	wake_up(&bd->wq_done);
+	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
 }

-static inline int bsg_io_schedule(struct bsg_device *bd, int state)
+static int bsg_io_schedule(struct bsg_device *bd)
 {
 	DEFINE_WAIT(wait);
 	int ret = 0;
@@ -186,14 +169,11 @@ static inline int bsg_io_schedule(struct bsg_device *bd, int state)
 		goto unlock;
 	}

-	prepare_to_wait(&bd->wq_done, &wait, state);
+	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
 	spin_unlock_irq(&bd->lock);
 	io_schedule();
 	finish_wait(&bd->wq_done, &wait);

-	if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
-		ret = -ERESTARTSYS;
-
 	return ret;
 unlock:
 	spin_unlock_irq(&bd->lock);
@@ -272,7 +252,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
 {
 	request_queue_t *q = bd->queue;
 	struct request *rq, *next_rq = NULL;
-	int ret, rw = 0; /* shut up gcc */
+	int ret, rw;
 	unsigned int dxfer_len;
 	void *dxferp = NULL;

@@ -354,9 +334,11 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
 	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

 	spin_lock_irqsave(&bd->lock, flags);
-	list_del(&bc->list);
-	bsg_add_done_cmd(bd, bc);
+	list_move_tail(&bc->list, &bd->done_list);
+	bd->done_cmds++;
 	spin_unlock_irqrestore(&bd->lock, flags);
+
+	wake_up(&bd->wq_done);
 }

 /*
@@ -387,14 +369,15 @@ static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
 	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
 }

-static inline struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
+static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
 {
 	struct bsg_command *bc = NULL;

 	spin_lock_irq(&bd->lock);
 	if (bd->done_cmds) {
-		bc = list_entry_bc(bd->done_list.next);
-		bsg_del_done_cmd(bd, bc);
+		bc = list_entry(bd->done_list.next, struct bsg_command, list);
+		list_del(&bc->list);
+		bd->done_cmds--;
 	}
 	spin_unlock_irq(&bd->lock);

@@ -450,8 +433,8 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 	hdr->response_len = 0;

 	if (rq->sense_len && hdr->response) {
-		int len = min((unsigned int) hdr->max_response_len,
-			      rq->sense_len);
+		int len = min_t(unsigned int, hdr->max_response_len,
+				rq->sense_len);

 		ret = copy_to_user((void*)(unsigned long)hdr->response,
 				   rq->sense, len);
@@ -486,7 +469,7 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
 	 */
 	ret = 0;
 	do {
-		ret = bsg_io_schedule(bd, TASK_UNINTERRUPTIBLE);
+		ret = bsg_io_schedule(bd);
 		/*
 		 * look for -ENODATA specifically -- we'll sometimes get
 		 * -ERESTARTSYS when we've taken a signal, but we can't
@@ -523,7 +506,7 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
 	return ret;
 }

-static ssize_t
+static int
 __bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
 	   const struct iovec *iov, ssize_t *bytes_read)
 {
@@ -550,7 +533,7 @@ __bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
 		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
 						bc->bidi_bio);

-		if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
+		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
 			ret = -EFAULT;

 		bsg_free_command(bc);
@@ -582,6 +565,9 @@ static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
 		clear_bit(BSG_F_WRITE_PERM, &bd->flags);
 }

+/*
+ * Check if the error is a "real" error that we should return.
+ */
 static inline int err_block_err(int ret)
 {
 	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
@@ -610,8 +596,8 @@ bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 	return bytes_read;
 }

-static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
-			   size_t count, ssize_t *bytes_read)
+static int __bsg_write(struct bsg_device *bd, const char __user *buf,
+		       size_t count, ssize_t *bytes_written)
 {
 	struct bsg_command *bc;
 	struct request *rq;
@@ -655,7 +641,7 @@ static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
 		rq = NULL;
 		nr_commands--;
 		buf += sizeof(struct sg_io_v4);
-		*bytes_read += sizeof(struct sg_io_v4);
+		*bytes_written += sizeof(struct sg_io_v4);
 	}

 	if (bc)
@@ -668,7 +654,7 @@ static ssize_t
 bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 {
 	struct bsg_device *bd = file->private_data;
-	ssize_t bytes_read;
+	ssize_t bytes_written;
 	int ret;

 	dprintk("%s: write %Zd bytes\n", bd->name, count);
@@ -676,18 +662,18 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 	bsg_set_block(bd, file);
 	bsg_set_write_perm(bd, file);

-	bytes_read = 0;
-	ret = __bsg_write(bd, buf, count, &bytes_read);
-	*ppos = bytes_read;
+	bytes_written = 0;
+	ret = __bsg_write(bd, buf, count, &bytes_written);
+	*ppos = bytes_written;

 	/*
 	 * return bytes written on non-fatal errors
 	 */
-	if (!bytes_read || (bytes_read && err_block_err(ret)))
-		bytes_read = ret;
+	if (!bytes_written || (bytes_written && err_block_err(ret)))
+		bytes_written = ret;

-	dprintk("%s: returning %Zd\n", bd->name, bytes_read);
-	return bytes_read;
+	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
+	return bytes_written;
 }

 static struct bsg_device *bsg_alloc_device(void)
@@ -746,7 +732,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 					 struct request_queue *rq,
 					 struct file *file)
 {
-	struct bsg_device *bd = NULL;
+	struct bsg_device *bd;
 #ifdef BSG_DEBUG
 	unsigned char buf[32];
 #endif
@@ -762,7 +748,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 	atomic_set(&bd->ref_count, 1);
 	bd->minor = iminor(inode);
 	mutex_lock(&bsg_mutex);
-	hlist_add_head(&bd->dev_list, &bsg_device_list[bsg_list_idx(bd->minor)]);
+	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(bd->minor));

 	strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
 	dprintk("bound to <%s>, max queue %d\n",
@@ -774,13 +760,12 @@ static struct bsg_device *bsg_add_device(struct inode *inode,

 static struct bsg_device *__bsg_get_device(int minor)
 {
-	struct hlist_head *list = &bsg_device_list[bsg_list_idx(minor)];
 	struct bsg_device *bd = NULL;
 	struct hlist_node *entry;

 	mutex_lock(&bsg_mutex);

-	hlist_for_each(entry, list) {
+	hlist_for_each(entry, bsg_dev_idx_hash(minor)) {
 		bd = hlist_entry(entry, struct bsg_device, dev_list);
 		if (bd->minor == minor) {
 			atomic_inc(&bd->ref_count);
@@ -858,16 +843,11 @@ static unsigned int bsg_poll(struct file *file, poll_table *wait)
 	return mask;
 }

-static int
-bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
-	  unsigned long arg)
+static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct bsg_device *bd = file->private_data;
 	int __user *uarg = (int __user *) arg;

-	if (!bd)
-		return -ENXIO;
-
 	switch (cmd) {
 	/*
 	 * our own ioctls
@@ -944,7 +924,7 @@ static struct file_operations bsg_fops = {
 	.poll		=	bsg_poll,
 	.open		=	bsg_open,
 	.release	=	bsg_release,
-	.ioctl		=	bsg_ioctl,
+	.unlocked_ioctl	=	bsg_ioctl,
 	.owner		=	THIS_MODULE,
 };

@@ -952,12 +932,11 @@ void bsg_unregister_queue(struct request_queue *q)
 {
 	struct bsg_class_device *bcd = &q->bsg_dev;

-	if (!bcd->class_dev)
-		return;
+	WARN_ON(!bcd->class_dev);

 	mutex_lock(&bsg_mutex);
 	sysfs_remove_link(&q->kobj, "bsg");
-	class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
+	class_device_destroy(bsg_class, MKDEV(bsg_major, bcd->minor));
 	bcd->class_dev = NULL;
 	list_del_init(&bcd->list);
 	bsg_device_nr--;
@@ -1003,7 +982,7 @@ retry:
 		bsg_minor_idx = 0;

 	bcd->queue = q;
-	dev = MKDEV(BSG_MAJOR, bcd->minor);
+	dev = MKDEV(bsg_major, bcd->minor);
 	class_dev = class_device_create(bsg_class, NULL, dev, bcd->dev, "%s", name);
 	if (IS_ERR(class_dev)) {
 		ret = PTR_ERR(class_dev);
@@ -1024,7 +1003,7 @@ retry:
 	return 0;
 err:
 	if (class_dev)
-		class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
+		class_device_destroy(bsg_class, MKDEV(bsg_major, bcd->minor));
 	mutex_unlock(&bsg_mutex);
 	return ret;
 }
@@ -1061,6 +1040,7 @@ static struct cdev bsg_cdev = {
 static int __init bsg_init(void)
 {
 	int ret, i;
+	dev_t devid;

 	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
 				sizeof(struct bsg_command), 0, 0, NULL, NULL);
@@ -1069,46 +1049,47 @@ static int __init bsg_init(void)
 		return -ENOMEM;
 	}

-	for (i = 0; i < BSG_LIST_SIZE; i++)
+	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
 		INIT_HLIST_HEAD(&bsg_device_list[i]);

 	bsg_class = class_create(THIS_MODULE, "bsg");
 	if (IS_ERR(bsg_class)) {
-		kmem_cache_destroy(bsg_cmd_cachep);
-		return PTR_ERR(bsg_class);
+		ret = PTR_ERR(bsg_class);
+		goto destroy_kmemcache;
 	}

-	ret = register_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS, "bsg");
-	if (ret) {
-		kmem_cache_destroy(bsg_cmd_cachep);
-		class_destroy(bsg_class);
-		return ret;
-	}
+	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
+	if (ret)
+		goto destroy_bsg_class;
+
+	bsg_major = MAJOR(devid);

 	cdev_init(&bsg_cdev, &bsg_fops);
-	ret = cdev_add(&bsg_cdev, MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
-	if (ret) {
-		kmem_cache_destroy(bsg_cmd_cachep);
-		class_destroy(bsg_class);
-		unregister_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
-		return ret;
-	}
+	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
+	if (ret)
+		goto unregister_chrdev;

 	ret = scsi_register_interface(&bsg_intf);
-	if (ret) {
-		printk(KERN_ERR "bsg: failed register scsi interface %d\n", ret);
-		kmem_cache_destroy(bsg_cmd_cachep);
-		class_destroy(bsg_class);
-		unregister_chrdev(BSG_MAJOR, "bsg");
-		return ret;
-	}
+	if (ret)
+		goto remove_cdev;

-	printk(KERN_INFO "%s loaded\n", bsg_version);
+	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
+	       " loaded (major %d)\n", bsg_major);
 	return 0;
+remove_cdev:
+	printk(KERN_ERR "bsg: failed register scsi interface %d\n", ret);
+	cdev_del(&bsg_cdev);
+unregister_chrdev:
+	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
+destroy_bsg_class:
+	class_destroy(bsg_class);
+destroy_kmemcache:
+	kmem_cache_destroy(bsg_cmd_cachep);
+	return ret;
 }

 MODULE_AUTHOR("Jens Axboe");
-MODULE_DESCRIPTION("Block layer SGSI generic (sg) driver");
+MODULE_DESCRIPTION(BSG_DESCRIPTION);
 MODULE_LICENSE("GPL");

 device_initcall(bsg_init);
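For context on the SG v4 interface described by the new Kconfig help text above, here is a rough userspace sketch (not part of this merge) of issuing a SCSI INQUIRY through a bsg node via the write()/read() pair served by bsg_write()/bsg_read(). The device path and the exact sg_io_v4 field names are assumptions based on include/linux/bsg.h from this era; verify them against your own kernel headers.

```c
/* Hypothetical example, not from this patch: send an INQUIRY through a bsg
 * node using the queue (write) / reap (read) interface shown in bsg.c above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/bsg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, 96-byte allocation length */
	unsigned char data[96], sense[32];
	struct sg_io_v4 hdr;
	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);		/* device path is an assumption */

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';					/* marks a v4 header */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request = (unsigned long) cdb;			/* cdb pointer */
	hdr.request_len = sizeof(cdb);
	hdr.din_xferp = (unsigned long) data;			/* data-in buffer */
	hdr.din_xfer_len = sizeof(data);
	hdr.response = (unsigned long) sense;			/* sense data on error */
	hdr.max_response_len = sizeof(sense);
	hdr.timeout = 30000;					/* milliseconds */

	/* queue one command, then collect its completed header */
	if (write(fd, &hdr, sizeof(hdr)) < 0 ||
	    read(fd, &hdr, sizeof(hdr)) < 0) {
		close(fd);
		return 1;
	}

	printf("device_status=%u, %u response bytes\n",
	       hdr.device_status, hdr.response_len);
	close(fd);
	return 0;
}
```

The write/read pair is what allows several sg_io_v4 headers to be queued before reaping; the synchronous SG_IO ioctl handled in bsg_ioctl() should behave equivalently for a single command.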
