| author    | Jiri Kosina <jkosina@suse.cz> | 2010-06-16 12:08:13 -0400 |
| committer | Jiri Kosina <jkosina@suse.cz> | 2010-06-16 12:08:13 -0400 |
| commit    | f1bbbb6912662b9f6070c5bfc4ca9eb1f06a9d5b (patch) | |
| tree      | c2c130a74be25b0b2dff992e1a195e2728bdaadd /drivers/scsi | |
| parent    | fd0961ff67727482bb20ca7e8ea97b83e9de2ddb (diff) | |
| parent    | 7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff) | |
Merge branch 'master' into for-next
Diffstat (limited to 'drivers/scsi')
40 files changed, 1544 insertions, 943 deletions
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 1bb774becf25..e20b7bdd4c78 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -125,7 +125,7 @@ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H | |||
125 | static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id); | 125 | static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id); |
126 | static char *twa_aen_severity_lookup(unsigned char severity_code); | 126 | static char *twa_aen_severity_lookup(unsigned char severity_code); |
127 | static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id); | 127 | static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id); |
128 | static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); | 128 | static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg); |
129 | static int twa_chrdev_open(struct inode *inode, struct file *file); | 129 | static int twa_chrdev_open(struct inode *inode, struct file *file); |
130 | static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host); | 130 | static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host); |
131 | static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id); | 131 | static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id); |
@@ -220,7 +220,7 @@ static struct device_attribute *twa_host_attrs[] = { | |||
220 | /* File operations struct for character device */ | 220 | /* File operations struct for character device */ |
221 | static const struct file_operations twa_fops = { | 221 | static const struct file_operations twa_fops = { |
222 | .owner = THIS_MODULE, | 222 | .owner = THIS_MODULE, |
223 | .ioctl = twa_chrdev_ioctl, | 223 | .unlocked_ioctl = twa_chrdev_ioctl, |
224 | .open = twa_chrdev_open, | 224 | .open = twa_chrdev_open, |
225 | .release = NULL | 225 | .release = NULL |
226 | }; | 226 | }; |
@@ -637,8 +637,9 @@ out: | |||
637 | } /* End twa_check_srl() */ | 637 | } /* End twa_check_srl() */ |
638 | 638 | ||
639 | /* This function handles ioctl for the character device */ | 639 | /* This function handles ioctl for the character device */ |
640 | static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) | 640 | static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
641 | { | 641 | { |
642 | struct inode *inode = file->f_path.dentry->d_inode; | ||
642 | long timeout; | 643 | long timeout; |
643 | unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0; | 644 | unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0; |
644 | dma_addr_t dma_handle; | 645 | dma_addr_t dma_handle; |
@@ -657,6 +658,8 @@ static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int | |||
657 | int retval = TW_IOCTL_ERROR_OS_EFAULT; | 658 | int retval = TW_IOCTL_ERROR_OS_EFAULT; |
658 | void __user *argp = (void __user *)arg; | 659 | void __user *argp = (void __user *)arg; |
659 | 660 | ||
661 | lock_kernel(); | ||
662 | |||
660 | /* Only let one of these through at a time */ | 663 | /* Only let one of these through at a time */ |
661 | if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { | 664 | if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { |
662 | retval = TW_IOCTL_ERROR_OS_EINTR; | 665 | retval = TW_IOCTL_ERROR_OS_EINTR; |
@@ -876,6 +879,7 @@ out3: | |||
876 | out2: | 879 | out2: |
877 | mutex_unlock(&tw_dev->ioctl_lock); | 880 | mutex_unlock(&tw_dev->ioctl_lock); |
878 | out: | 881 | out: |
882 | unlock_kernel(); | ||
879 | return retval; | 883 | return retval; |
880 | } /* End twa_chrdev_ioctl() */ | 884 | } /* End twa_chrdev_ioctl() */ |
881 | 885 | ||
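The three 3w-9xxx.c hunks above are a standard big-kernel-lock pushdown: the handler moves from the old `.ioctl` file operation to `.unlocked_ioctl`, so its prototype loses the `struct inode *` argument and returns `long`, the inode is recovered from `file->f_path.dentry->d_inode`, and `lock_kernel()`/`unlock_kernel()` are taken inside the driver instead of by the VFS. A minimal sketch of the same conversion for a hypothetical character device driver (the `foo` names and the `do_foo_ioctl()` helper are placeholders, not code from this patch):

```c
#include <linux/fs.h>         /* struct file, struct file_operations, iminor() */
#include <linux/module.h>
#include <linux/smp_lock.h>   /* lock_kernel()/unlock_kernel() on pre-2.6.37 kernels */

/* Hypothetical helper standing in for the existing ioctl body. */
static long do_foo_ioctl(struct inode *inode, struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

/*
 * New-style entry point: the VFS no longer holds the BKL when calling it,
 * so the handler takes the lock itself and recovers the inode that the
 * old ".ioctl" prototype used to receive as its first argument.
 */
static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	long retval;

	lock_kernel();
	retval = do_foo_ioctl(inode, file, cmd, arg);
	unlock_kernel();

	return retval;
}

static const struct file_operations foo_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = foo_unlocked_ioctl,   /* was: .ioctl = foo_ioctl */
};
```

Taking the BKL explicitly keeps behaviour unchanged for now; the real serialization here is the driver's own `tw_dev->ioctl_lock` mutex, which is the kind of driver-local locking that later lets the `lock_kernel()` calls be removed entirely.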
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index d38000db9237..f481e734aad4 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -750,19 +750,22 @@ static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm | |||
750 | 750 | ||
751 | /* This function handles ioctl for the character device | 751 | /* This function handles ioctl for the character device |
752 | This interface is used by smartmontools open source software */ | 752 | This interface is used by smartmontools open source software */ |
753 | static int twl_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) | 753 | static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
754 | { | 754 | { |
755 | long timeout; | 755 | long timeout; |
756 | unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0; | 756 | unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0; |
757 | dma_addr_t dma_handle; | 757 | dma_addr_t dma_handle; |
758 | int request_id = 0; | 758 | int request_id = 0; |
759 | TW_Ioctl_Driver_Command driver_command; | 759 | TW_Ioctl_Driver_Command driver_command; |
760 | struct inode *inode = file->f_dentry->d_inode; | ||
760 | TW_Ioctl_Buf_Apache *tw_ioctl; | 761 | TW_Ioctl_Buf_Apache *tw_ioctl; |
761 | TW_Command_Full *full_command_packet; | 762 | TW_Command_Full *full_command_packet; |
762 | TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)]; | 763 | TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)]; |
763 | int retval = -EFAULT; | 764 | int retval = -EFAULT; |
764 | void __user *argp = (void __user *)arg; | 765 | void __user *argp = (void __user *)arg; |
765 | 766 | ||
767 | lock_kernel(); | ||
768 | |||
766 | /* Only let one of these through at a time */ | 769 | /* Only let one of these through at a time */ |
767 | if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { | 770 | if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { |
768 | retval = -EINTR; | 771 | retval = -EINTR; |
@@ -858,6 +861,7 @@ out3: | |||
858 | out2: | 861 | out2: |
859 | mutex_unlock(&tw_dev->ioctl_lock); | 862 | mutex_unlock(&tw_dev->ioctl_lock); |
860 | out: | 863 | out: |
864 | unlock_kernel(); | ||
861 | return retval; | 865 | return retval; |
862 | } /* End twl_chrdev_ioctl() */ | 866 | } /* End twl_chrdev_ioctl() */ |
863 | 867 | ||
@@ -884,7 +888,7 @@ out: | |||
884 | /* File operations struct for character device */ | 888 | /* File operations struct for character device */ |
885 | static const struct file_operations twl_fops = { | 889 | static const struct file_operations twl_fops = { |
886 | .owner = THIS_MODULE, | 890 | .owner = THIS_MODULE, |
887 | .ioctl = twl_chrdev_ioctl, | 891 | .unlocked_ioctl = twl_chrdev_ioctl, |
888 | .open = twl_chrdev_open, | 892 | .open = twl_chrdev_open, |
889 | .release = NULL | 893 | .release = NULL |
890 | }; | 894 | }; |
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index d119a614bf7d..30d735ad35b5 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -881,7 +881,7 @@ static int tw_allocate_memory(TW_Device_Extension *tw_dev, int size, int which) | |||
881 | } /* End tw_allocate_memory() */ | 881 | } /* End tw_allocate_memory() */ |
882 | 882 | ||
883 | /* This function handles ioctl for the character device */ | 883 | /* This function handles ioctl for the character device */ |
884 | static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) | 884 | static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
885 | { | 885 | { |
886 | int request_id; | 886 | int request_id; |
887 | dma_addr_t dma_handle; | 887 | dma_addr_t dma_handle; |
@@ -889,6 +889,7 @@ static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int | |||
889 | unsigned long flags; | 889 | unsigned long flags; |
890 | unsigned int data_buffer_length = 0; | 890 | unsigned int data_buffer_length = 0; |
891 | unsigned long data_buffer_length_adjusted = 0; | 891 | unsigned long data_buffer_length_adjusted = 0; |
892 | struct inode *inode = file->f_dentry->d_inode; | ||
892 | unsigned long *cpu_addr; | 893 | unsigned long *cpu_addr; |
893 | long timeout; | 894 | long timeout; |
894 | TW_New_Ioctl *tw_ioctl; | 895 | TW_New_Ioctl *tw_ioctl; |
@@ -899,9 +900,12 @@ static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int | |||
899 | 900 | ||
900 | dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl()\n"); | 901 | dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl()\n"); |
901 | 902 | ||
903 | lock_kernel(); | ||
902 | /* Only let one of these through at a time */ | 904 | /* Only let one of these through at a time */ |
903 | if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) | 905 | if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { |
906 | unlock_kernel(); | ||
904 | return -EINTR; | 907 | return -EINTR; |
908 | } | ||
905 | 909 | ||
906 | /* First copy down the buffer length */ | 910 | /* First copy down the buffer length */ |
907 | if (copy_from_user(&data_buffer_length, argp, sizeof(unsigned int))) | 911 | if (copy_from_user(&data_buffer_length, argp, sizeof(unsigned int))) |
@@ -1030,6 +1034,7 @@ out2: | |||
1030 | dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, cpu_addr, dma_handle); | 1034 | dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, cpu_addr, dma_handle); |
1031 | out: | 1035 | out: |
1032 | mutex_unlock(&tw_dev->ioctl_lock); | 1036 | mutex_unlock(&tw_dev->ioctl_lock); |
1037 | unlock_kernel(); | ||
1033 | return retval; | 1038 | return retval; |
1034 | } /* End tw_chrdev_ioctl() */ | 1039 | } /* End tw_chrdev_ioctl() */ |
1035 | 1040 | ||
@@ -1052,7 +1057,7 @@ static int tw_chrdev_open(struct inode *inode, struct file *file) | |||
1052 | /* File operations struct for character device */ | 1057 | /* File operations struct for character device */ |
1053 | static const struct file_operations tw_fops = { | 1058 | static const struct file_operations tw_fops = { |
1054 | .owner = THIS_MODULE, | 1059 | .owner = THIS_MODULE, |
1055 | .ioctl = tw_chrdev_ioctl, | 1060 | .unlocked_ioctl = tw_chrdev_ioctl, |
1056 | .open = tw_chrdev_open, | 1061 | .open = tw_chrdev_open, |
1057 | .release = NULL | 1062 | .release = NULL |
1058 | }; | 1063 | }; |
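The 3w-xxxx.c hunk shows the part of this conversion that is easiest to get wrong: once `lock_kernel()` is taken inside the handler, every exit path has to release it, which is why the bare `return -EINTR` grows braces and an `unlock_kernel()` call. A compact sketch of that ordering against a hypothetical per-adapter mutex (`struct foo_dev` and its `ioctl_lock` are illustrative):

```c
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/smp_lock.h>

struct foo_dev {                        /* hypothetical per-adapter state */
	struct mutex ioctl_lock;
};

static long foo_chrdev_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct foo_dev *dev = file->private_data;
	long retval = -ENOTTY;

	lock_kernel();
	/* Only let one of these through at a time. */
	if (mutex_lock_interruptible(&dev->ioctl_lock)) {
		unlock_kernel();        /* the early return must drop the BKL too */
		return -EINTR;
	}

	/* ... command handling under both locks ... */

	mutex_unlock(&dev->ioctl_lock);
	unlock_kernel();
	return retval;
}
```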
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 308541ff85cf..1bb5d3f0e260 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -1,34 +1,31 @@ | |||
1 | #include <linux/types.h> | 1 | #include <linux/types.h> |
2 | #include <linux/mm.h> | ||
3 | #include <linux/slab.h> | ||
4 | #include <linux/blkdev.h> | ||
5 | #include <linux/init.h> | 2 | #include <linux/init.h> |
6 | #include <linux/interrupt.h> | 3 | #include <linux/interrupt.h> |
4 | #include <linux/mm.h> | ||
5 | #include <linux/slab.h> | ||
6 | #include <linux/spinlock.h> | ||
7 | #include <linux/zorro.h> | ||
7 | 8 | ||
8 | #include <asm/setup.h> | ||
9 | #include <asm/page.h> | 9 | #include <asm/page.h> |
10 | #include <asm/pgtable.h> | 10 | #include <asm/pgtable.h> |
11 | #include <asm/amigaints.h> | 11 | #include <asm/amigaints.h> |
12 | #include <asm/amigahw.h> | 12 | #include <asm/amigahw.h> |
13 | #include <linux/zorro.h> | ||
14 | #include <asm/irq.h> | ||
15 | #include <linux/spinlock.h> | ||
16 | 13 | ||
17 | #include "scsi.h" | 14 | #include "scsi.h" |
18 | #include <scsi/scsi_host.h> | ||
19 | #include "wd33c93.h" | 15 | #include "wd33c93.h" |
20 | #include "a2091.h" | 16 | #include "a2091.h" |
21 | 17 | ||
22 | #include <linux/stat.h> | ||
23 | |||
24 | 18 | ||
25 | static int a2091_release(struct Scsi_Host *instance); | 19 | struct a2091_hostdata { |
20 | struct WD33C93_hostdata wh; | ||
21 | struct a2091_scsiregs *regs; | ||
22 | }; | ||
26 | 23 | ||
27 | static irqreturn_t a2091_intr(int irq, void *data) | 24 | static irqreturn_t a2091_intr(int irq, void *data) |
28 | { | 25 | { |
29 | struct Scsi_Host *instance = data; | 26 | struct Scsi_Host *instance = data; |
30 | a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); | 27 | struct a2091_hostdata *hdata = shost_priv(instance); |
31 | unsigned int status = regs->ISTR; | 28 | unsigned int status = hdata->regs->ISTR; |
32 | unsigned long flags; | 29 | unsigned long flags; |
33 | 30 | ||
34 | if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS)) | 31 | if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS)) |
@@ -43,38 +40,39 @@ static irqreturn_t a2091_intr(int irq, void *data) | |||
43 | static int dma_setup(struct scsi_cmnd *cmd, int dir_in) | 40 | static int dma_setup(struct scsi_cmnd *cmd, int dir_in) |
44 | { | 41 | { |
45 | struct Scsi_Host *instance = cmd->device->host; | 42 | struct Scsi_Host *instance = cmd->device->host; |
46 | struct WD33C93_hostdata *hdata = shost_priv(instance); | 43 | struct a2091_hostdata *hdata = shost_priv(instance); |
47 | a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); | 44 | struct WD33C93_hostdata *wh = &hdata->wh; |
45 | struct a2091_scsiregs *regs = hdata->regs; | ||
48 | unsigned short cntr = CNTR_PDMD | CNTR_INTEN; | 46 | unsigned short cntr = CNTR_PDMD | CNTR_INTEN; |
49 | unsigned long addr = virt_to_bus(cmd->SCp.ptr); | 47 | unsigned long addr = virt_to_bus(cmd->SCp.ptr); |
50 | 48 | ||
51 | /* don't allow DMA if the physical address is bad */ | 49 | /* don't allow DMA if the physical address is bad */ |
52 | if (addr & A2091_XFER_MASK) { | 50 | if (addr & A2091_XFER_MASK) { |
53 | hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; | 51 | wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; |
54 | hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len, | 52 | wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, |
55 | GFP_KERNEL); | 53 | GFP_KERNEL); |
56 | 54 | ||
57 | /* can't allocate memory; use PIO */ | 55 | /* can't allocate memory; use PIO */ |
58 | if (!hdata->dma_bounce_buffer) { | 56 | if (!wh->dma_bounce_buffer) { |
59 | hdata->dma_bounce_len = 0; | 57 | wh->dma_bounce_len = 0; |
60 | return 1; | 58 | return 1; |
61 | } | 59 | } |
62 | 60 | ||
63 | /* get the physical address of the bounce buffer */ | 61 | /* get the physical address of the bounce buffer */ |
64 | addr = virt_to_bus(hdata->dma_bounce_buffer); | 62 | addr = virt_to_bus(wh->dma_bounce_buffer); |
65 | 63 | ||
66 | /* the bounce buffer may not be in the first 16M of physmem */ | 64 | /* the bounce buffer may not be in the first 16M of physmem */ |
67 | if (addr & A2091_XFER_MASK) { | 65 | if (addr & A2091_XFER_MASK) { |
68 | /* we could use chipmem... maybe later */ | 66 | /* we could use chipmem... maybe later */ |
69 | kfree(hdata->dma_bounce_buffer); | 67 | kfree(wh->dma_bounce_buffer); |
70 | hdata->dma_bounce_buffer = NULL; | 68 | wh->dma_bounce_buffer = NULL; |
71 | hdata->dma_bounce_len = 0; | 69 | wh->dma_bounce_len = 0; |
72 | return 1; | 70 | return 1; |
73 | } | 71 | } |
74 | 72 | ||
75 | if (!dir_in) { | 73 | if (!dir_in) { |
76 | /* copy to bounce buffer for a write */ | 74 | /* copy to bounce buffer for a write */ |
77 | memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, | 75 | memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr, |
78 | cmd->SCp.this_residual); | 76 | cmd->SCp.this_residual); |
79 | } | 77 | } |
80 | } | 78 | } |
@@ -84,7 +82,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) | |||
84 | cntr |= CNTR_DDIR; | 82 | cntr |= CNTR_DDIR; |
85 | 83 | ||
86 | /* remember direction */ | 84 | /* remember direction */ |
87 | hdata->dma_dir = dir_in; | 85 | wh->dma_dir = dir_in; |
88 | 86 | ||
89 | regs->CNTR = cntr; | 87 | regs->CNTR = cntr; |
90 | 88 | ||
@@ -108,20 +106,21 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) | |||
108 | static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, | 106 | static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, |
109 | int status) | 107 | int status) |
110 | { | 108 | { |
111 | struct WD33C93_hostdata *hdata = shost_priv(instance); | 109 | struct a2091_hostdata *hdata = shost_priv(instance); |
112 | a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); | 110 | struct WD33C93_hostdata *wh = &hdata->wh; |
111 | struct a2091_scsiregs *regs = hdata->regs; | ||
113 | 112 | ||
114 | /* disable SCSI interrupts */ | 113 | /* disable SCSI interrupts */ |
115 | unsigned short cntr = CNTR_PDMD; | 114 | unsigned short cntr = CNTR_PDMD; |
116 | 115 | ||
117 | if (!hdata->dma_dir) | 116 | if (!wh->dma_dir) |
118 | cntr |= CNTR_DDIR; | 117 | cntr |= CNTR_DDIR; |
119 | 118 | ||
120 | /* disable SCSI interrupts */ | 119 | /* disable SCSI interrupts */ |
121 | regs->CNTR = cntr; | 120 | regs->CNTR = cntr; |
122 | 121 | ||
123 | /* flush if we were reading */ | 122 | /* flush if we were reading */ |
124 | if (hdata->dma_dir) { | 123 | if (wh->dma_dir) { |
125 | regs->FLUSH = 1; | 124 | regs->FLUSH = 1; |
126 | while (!(regs->ISTR & ISTR_FE_FLG)) | 125 | while (!(regs->ISTR & ISTR_FE_FLG)) |
127 | ; | 126 | ; |
@@ -137,95 +136,37 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, | |||
137 | regs->CNTR = CNTR_PDMD | CNTR_INTEN; | 136 | regs->CNTR = CNTR_PDMD | CNTR_INTEN; |
138 | 137 | ||
139 | /* copy from a bounce buffer, if necessary */ | 138 | /* copy from a bounce buffer, if necessary */ |
140 | if (status && hdata->dma_bounce_buffer) { | 139 | if (status && wh->dma_bounce_buffer) { |
141 | if (hdata->dma_dir) | 140 | if (wh->dma_dir) |
142 | memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer, | 141 | memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer, |
143 | SCpnt->SCp.this_residual); | 142 | SCpnt->SCp.this_residual); |
144 | kfree(hdata->dma_bounce_buffer); | 143 | kfree(wh->dma_bounce_buffer); |
145 | hdata->dma_bounce_buffer = NULL; | 144 | wh->dma_bounce_buffer = NULL; |
146 | hdata->dma_bounce_len = 0; | 145 | wh->dma_bounce_len = 0; |
147 | } | ||
148 | } | ||
149 | |||
150 | static int __init a2091_detect(struct scsi_host_template *tpnt) | ||
151 | { | ||
152 | static unsigned char called = 0; | ||
153 | struct Scsi_Host *instance; | ||
154 | unsigned long address; | ||
155 | struct zorro_dev *z = NULL; | ||
156 | wd33c93_regs wdregs; | ||
157 | a2091_scsiregs *regs; | ||
158 | struct WD33C93_hostdata *hdata; | ||
159 | int num_a2091 = 0; | ||
160 | |||
161 | if (!MACH_IS_AMIGA || called) | ||
162 | return 0; | ||
163 | called = 1; | ||
164 | |||
165 | tpnt->proc_name = "A2091"; | ||
166 | tpnt->proc_info = &wd33c93_proc_info; | ||
167 | |||
168 | while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { | ||
169 | if (z->id != ZORRO_PROD_CBM_A590_A2091_1 && | ||
170 | z->id != ZORRO_PROD_CBM_A590_A2091_2) | ||
171 | continue; | ||
172 | address = z->resource.start; | ||
173 | if (!request_mem_region(address, 256, "wd33c93")) | ||
174 | continue; | ||
175 | |||
176 | instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); | ||
177 | if (instance == NULL) | ||
178 | goto release; | ||
179 | instance->base = ZTWO_VADDR(address); | ||
180 | instance->irq = IRQ_AMIGA_PORTS; | ||
181 | instance->unique_id = z->slotaddr; | ||
182 | regs = (a2091_scsiregs *)(instance->base); | ||
183 | regs->DAWR = DAWR_A2091; | ||
184 | wdregs.SASR = ®s->SASR; | ||
185 | wdregs.SCMD = ®s->SCMD; | ||
186 | hdata = shost_priv(instance); | ||
187 | hdata->no_sync = 0xff; | ||
188 | hdata->fast = 0; | ||
189 | hdata->dma_mode = CTRL_DMA; | ||
190 | wd33c93_init(instance, wdregs, dma_setup, dma_stop, | ||
191 | WD33C93_FS_8_10); | ||
192 | if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, | ||
193 | "A2091 SCSI", instance)) | ||
194 | goto unregister; | ||
195 | regs->CNTR = CNTR_PDMD | CNTR_INTEN; | ||
196 | num_a2091++; | ||
197 | continue; | ||
198 | |||
199 | unregister: | ||
200 | scsi_unregister(instance); | ||
201 | release: | ||
202 | release_mem_region(address, 256); | ||
203 | } | 146 | } |
204 | |||
205 | return num_a2091; | ||
206 | } | 147 | } |
207 | 148 | ||
208 | static int a2091_bus_reset(struct scsi_cmnd *cmd) | 149 | static int a2091_bus_reset(struct scsi_cmnd *cmd) |
209 | { | 150 | { |
151 | struct Scsi_Host *instance = cmd->device->host; | ||
152 | |||
210 | /* FIXME perform bus-specific reset */ | 153 | /* FIXME perform bus-specific reset */ |
211 | 154 | ||
212 | /* FIXME 2: kill this function, and let midlayer fall back | 155 | /* FIXME 2: kill this function, and let midlayer fall back |
213 | to the same action, calling wd33c93_host_reset() */ | 156 | to the same action, calling wd33c93_host_reset() */ |
214 | 157 | ||
215 | spin_lock_irq(cmd->device->host->host_lock); | 158 | spin_lock_irq(instance->host_lock); |
216 | wd33c93_host_reset(cmd); | 159 | wd33c93_host_reset(cmd); |
217 | spin_unlock_irq(cmd->device->host->host_lock); | 160 | spin_unlock_irq(instance->host_lock); |
218 | 161 | ||
219 | return SUCCESS; | 162 | return SUCCESS; |
220 | } | 163 | } |
221 | 164 | ||
222 | #define HOSTS_C | 165 | static struct scsi_host_template a2091_scsi_template = { |
223 | 166 | .module = THIS_MODULE, | |
224 | static struct scsi_host_template driver_template = { | ||
225 | .proc_name = "A2901", | ||
226 | .name = "Commodore A2091/A590 SCSI", | 167 | .name = "Commodore A2091/A590 SCSI", |
227 | .detect = a2091_detect, | 168 | .proc_info = wd33c93_proc_info, |
228 | .release = a2091_release, | 169 | .proc_name = "A2901", |
229 | .queuecommand = wd33c93_queuecommand, | 170 | .queuecommand = wd33c93_queuecommand, |
230 | .eh_abort_handler = wd33c93_abort, | 171 | .eh_abort_handler = wd33c93_abort, |
231 | .eh_bus_reset_handler = a2091_bus_reset, | 172 | .eh_bus_reset_handler = a2091_bus_reset, |
@@ -237,19 +178,103 @@ static struct scsi_host_template driver_template = { | |||
237 | .use_clustering = DISABLE_CLUSTERING | 178 | .use_clustering = DISABLE_CLUSTERING |
238 | }; | 179 | }; |
239 | 180 | ||
181 | static int __devinit a2091_probe(struct zorro_dev *z, | ||
182 | const struct zorro_device_id *ent) | ||
183 | { | ||
184 | struct Scsi_Host *instance; | ||
185 | int error; | ||
186 | struct a2091_scsiregs *regs; | ||
187 | wd33c93_regs wdregs; | ||
188 | struct a2091_hostdata *hdata; | ||
240 | 189 | ||
241 | #include "scsi_module.c" | 190 | if (!request_mem_region(z->resource.start, 256, "wd33c93")) |
191 | return -EBUSY; | ||
242 | 192 | ||
243 | static int a2091_release(struct Scsi_Host *instance) | 193 | instance = scsi_host_alloc(&a2091_scsi_template, |
194 | sizeof(struct a2091_hostdata)); | ||
195 | if (!instance) { | ||
196 | error = -ENOMEM; | ||
197 | goto fail_alloc; | ||
198 | } | ||
199 | |||
200 | instance->irq = IRQ_AMIGA_PORTS; | ||
201 | instance->unique_id = z->slotaddr; | ||
202 | |||
203 | regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start); | ||
204 | regs->DAWR = DAWR_A2091; | ||
205 | |||
206 | wdregs.SASR = ®s->SASR; | ||
207 | wdregs.SCMD = ®s->SCMD; | ||
208 | |||
209 | hdata = shost_priv(instance); | ||
210 | hdata->wh.no_sync = 0xff; | ||
211 | hdata->wh.fast = 0; | ||
212 | hdata->wh.dma_mode = CTRL_DMA; | ||
213 | hdata->regs = regs; | ||
214 | |||
215 | wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_8_10); | ||
216 | error = request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, | ||
217 | "A2091 SCSI", instance); | ||
218 | if (error) | ||
219 | goto fail_irq; | ||
220 | |||
221 | regs->CNTR = CNTR_PDMD | CNTR_INTEN; | ||
222 | |||
223 | error = scsi_add_host(instance, NULL); | ||
224 | if (error) | ||
225 | goto fail_host; | ||
226 | |||
227 | zorro_set_drvdata(z, instance); | ||
228 | |||
229 | scsi_scan_host(instance); | ||
230 | return 0; | ||
231 | |||
232 | fail_host: | ||
233 | free_irq(IRQ_AMIGA_PORTS, instance); | ||
234 | fail_irq: | ||
235 | scsi_host_put(instance); | ||
236 | fail_alloc: | ||
237 | release_mem_region(z->resource.start, 256); | ||
238 | return error; | ||
239 | } | ||
240 | |||
241 | static void __devexit a2091_remove(struct zorro_dev *z) | ||
244 | { | 242 | { |
245 | #ifdef MODULE | 243 | struct Scsi_Host *instance = zorro_get_drvdata(z); |
246 | a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); | 244 | struct a2091_hostdata *hdata = shost_priv(instance); |
247 | 245 | ||
248 | regs->CNTR = 0; | 246 | hdata->regs->CNTR = 0; |
249 | release_mem_region(ZTWO_PADDR(instance->base), 256); | 247 | scsi_remove_host(instance); |
250 | free_irq(IRQ_AMIGA_PORTS, instance); | 248 | free_irq(IRQ_AMIGA_PORTS, instance); |
251 | #endif | 249 | scsi_host_put(instance); |
252 | return 1; | 250 | release_mem_region(z->resource.start, 256); |
251 | } | ||
252 | |||
253 | static struct zorro_device_id a2091_zorro_tbl[] __devinitdata = { | ||
254 | { ZORRO_PROD_CBM_A590_A2091_1 }, | ||
255 | { ZORRO_PROD_CBM_A590_A2091_2 }, | ||
256 | { 0 } | ||
257 | }; | ||
258 | MODULE_DEVICE_TABLE(zorro, a2091_zorro_tbl); | ||
259 | |||
260 | static struct zorro_driver a2091_driver = { | ||
261 | .name = "a2091", | ||
262 | .id_table = a2091_zorro_tbl, | ||
263 | .probe = a2091_probe, | ||
264 | .remove = __devexit_p(a2091_remove), | ||
265 | }; | ||
266 | |||
267 | static int __init a2091_init(void) | ||
268 | { | ||
269 | return zorro_register_driver(&a2091_driver); | ||
270 | } | ||
271 | module_init(a2091_init); | ||
272 | |||
273 | static void __exit a2091_exit(void) | ||
274 | { | ||
275 | zorro_unregister_driver(&a2091_driver); | ||
253 | } | 276 | } |
277 | module_exit(a2091_exit); | ||
254 | 278 | ||
279 | MODULE_DESCRIPTION("Commodore A2091/A590 SCSI"); | ||
255 | MODULE_LICENSE("GPL"); | 280 | MODULE_LICENSE("GPL"); |
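Beyond the line-by-line churn, the a2091.c change is a wholesale move from the old `detect()`/`release()` entry points and the `scsi_module.c` boilerplate to a real Zorro bus driver: an ID table, a `probe()` that allocates the host with `scsi_host_alloc()`, registers it with `scsi_add_host()` and `scsi_scan_host()`, and keeps per-host state in `struct a2091_hostdata` via `shost_priv()`, plus a `remove()` that tears everything down in reverse order. Reduced to its registration plumbing, the new shape looks roughly like the sketch below (placeholder `foo` names; only the Zorro IDs are taken from the patch):

```c
#include <linux/init.h>
#include <linux/module.h>
#include <linux/zorro.h>

static int __devinit foo_probe(struct zorro_dev *z,
			       const struct zorro_device_id *ent)
{
	/* request_mem_region(), scsi_host_alloc(), wd33c93_init(),
	 * request_irq(), scsi_add_host(), scsi_scan_host() ... */
	return 0;
}

static void __devexit foo_remove(struct zorro_dev *z)
{
	/* scsi_remove_host(), free_irq(), scsi_host_put(),
	 * release_mem_region() -- the reverse of probe() */
}

static struct zorro_device_id foo_zorro_tbl[] __devinitdata = {
	{ ZORRO_PROD_CBM_A590_A2091_1 },
	{ ZORRO_PROD_CBM_A590_A2091_2 },
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, foo_zorro_tbl);

static struct zorro_driver foo_driver = {
	.name     = "foo",
	.id_table = foo_zorro_tbl,
	.probe    = foo_probe,
	.remove   = __devexit_p(foo_remove),
};

static int __init foo_init(void)
{
	return zorro_register_driver(&foo_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	zorro_unregister_driver(&foo_driver);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");
```

The pay-off is that the `MACH_IS_AMIGA` check, the `called` guard and the hand-rolled device scan disappear: the Zorro core matches devices against the ID table and hands each one to `probe()`.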
diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h
index 1c3daa1fd754..794b8e65c711 100644
--- a/drivers/scsi/a2091.h
+++ b/drivers/scsi/a2091.h
@@ -25,7 +25,7 @@ | |||
25 | */ | 25 | */ |
26 | #define A2091_XFER_MASK (0xff000001) | 26 | #define A2091_XFER_MASK (0xff000001) |
27 | 27 | ||
28 | typedef struct { | 28 | struct a2091_scsiregs { |
29 | unsigned char pad1[64]; | 29 | unsigned char pad1[64]; |
30 | volatile unsigned short ISTR; | 30 | volatile unsigned short ISTR; |
31 | volatile unsigned short CNTR; | 31 | volatile unsigned short CNTR; |
@@ -44,7 +44,7 @@ typedef struct { | |||
44 | volatile unsigned short CINT; | 44 | volatile unsigned short CINT; |
45 | unsigned char pad7[2]; | 45 | unsigned char pad7[2]; |
46 | volatile unsigned short FLUSH; | 46 | volatile unsigned short FLUSH; |
47 | } a2091_scsiregs; | 47 | }; |
48 | 48 | ||
49 | #define DAWR_A2091 (3) | 49 | #define DAWR_A2091 (3) |
50 | 50 | ||
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index bc6eb69f5fd0..d9468027fb61 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -1,53 +1,52 @@ | |||
1 | #include <linux/types.h> | 1 | #include <linux/types.h> |
2 | #include <linux/mm.h> | 2 | #include <linux/mm.h> |
3 | #include <linux/slab.h> | ||
4 | #include <linux/blkdev.h> | ||
5 | #include <linux/ioport.h> | 3 | #include <linux/ioport.h> |
6 | #include <linux/init.h> | 4 | #include <linux/init.h> |
5 | #include <linux/slab.h> | ||
7 | #include <linux/spinlock.h> | 6 | #include <linux/spinlock.h> |
8 | #include <linux/interrupt.h> | 7 | #include <linux/interrupt.h> |
8 | #include <linux/platform_device.h> | ||
9 | 9 | ||
10 | #include <asm/setup.h> | ||
11 | #include <asm/page.h> | 10 | #include <asm/page.h> |
12 | #include <asm/pgtable.h> | 11 | #include <asm/pgtable.h> |
13 | #include <asm/amigaints.h> | 12 | #include <asm/amigaints.h> |
14 | #include <asm/amigahw.h> | 13 | #include <asm/amigahw.h> |
15 | #include <asm/irq.h> | ||
16 | 14 | ||
17 | #include "scsi.h" | 15 | #include "scsi.h" |
18 | #include <scsi/scsi_host.h> | ||
19 | #include "wd33c93.h" | 16 | #include "wd33c93.h" |
20 | #include "a3000.h" | 17 | #include "a3000.h" |
21 | 18 | ||
22 | #include <linux/stat.h> | ||
23 | |||
24 | 19 | ||
25 | #define DMA(ptr) ((a3000_scsiregs *)((ptr)->base)) | 20 | struct a3000_hostdata { |
26 | 21 | struct WD33C93_hostdata wh; | |
27 | static struct Scsi_Host *a3000_host = NULL; | 22 | struct a3000_scsiregs *regs; |
28 | 23 | }; | |
29 | static int a3000_release(struct Scsi_Host *instance); | ||
30 | 24 | ||
31 | static irqreturn_t a3000_intr(int irq, void *dummy) | 25 | static irqreturn_t a3000_intr(int irq, void *data) |
32 | { | 26 | { |
27 | struct Scsi_Host *instance = data; | ||
28 | struct a3000_hostdata *hdata = shost_priv(instance); | ||
29 | unsigned int status = hdata->regs->ISTR; | ||
33 | unsigned long flags; | 30 | unsigned long flags; |
34 | unsigned int status = DMA(a3000_host)->ISTR; | ||
35 | 31 | ||
36 | if (!(status & ISTR_INT_P)) | 32 | if (!(status & ISTR_INT_P)) |
37 | return IRQ_NONE; | 33 | return IRQ_NONE; |
38 | if (status & ISTR_INTS) { | 34 | if (status & ISTR_INTS) { |
39 | spin_lock_irqsave(a3000_host->host_lock, flags); | 35 | spin_lock_irqsave(instance->host_lock, flags); |
40 | wd33c93_intr(a3000_host); | 36 | wd33c93_intr(instance); |
41 | spin_unlock_irqrestore(a3000_host->host_lock, flags); | 37 | spin_unlock_irqrestore(instance->host_lock, flags); |
42 | return IRQ_HANDLED; | 38 | return IRQ_HANDLED; |
43 | } | 39 | } |
44 | printk("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status); | 40 | pr_warning("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status); |
45 | return IRQ_NONE; | 41 | return IRQ_NONE; |
46 | } | 42 | } |
47 | 43 | ||
48 | static int dma_setup(struct scsi_cmnd *cmd, int dir_in) | 44 | static int dma_setup(struct scsi_cmnd *cmd, int dir_in) |
49 | { | 45 | { |
50 | struct WD33C93_hostdata *hdata = shost_priv(a3000_host); | 46 | struct Scsi_Host *instance = cmd->device->host; |
47 | struct a3000_hostdata *hdata = shost_priv(instance); | ||
48 | struct WD33C93_hostdata *wh = &hdata->wh; | ||
49 | struct a3000_scsiregs *regs = hdata->regs; | ||
51 | unsigned short cntr = CNTR_PDMD | CNTR_INTEN; | 50 | unsigned short cntr = CNTR_PDMD | CNTR_INTEN; |
52 | unsigned long addr = virt_to_bus(cmd->SCp.ptr); | 51 | unsigned long addr = virt_to_bus(cmd->SCp.ptr); |
53 | 52 | ||
@@ -58,23 +57,23 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) | |||
58 | * buffer | 57 | * buffer |
59 | */ | 58 | */ |
60 | if (addr & A3000_XFER_MASK) { | 59 | if (addr & A3000_XFER_MASK) { |
61 | hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; | 60 | wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; |
62 | hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len, | 61 | wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, |
63 | GFP_KERNEL); | 62 | GFP_KERNEL); |
64 | 63 | ||
65 | /* can't allocate memory; use PIO */ | 64 | /* can't allocate memory; use PIO */ |
66 | if (!hdata->dma_bounce_buffer) { | 65 | if (!wh->dma_bounce_buffer) { |
67 | hdata->dma_bounce_len = 0; | 66 | wh->dma_bounce_len = 0; |
68 | return 1; | 67 | return 1; |
69 | } | 68 | } |
70 | 69 | ||
71 | if (!dir_in) { | 70 | if (!dir_in) { |
72 | /* copy to bounce buffer for a write */ | 71 | /* copy to bounce buffer for a write */ |
73 | memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, | 72 | memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr, |
74 | cmd->SCp.this_residual); | 73 | cmd->SCp.this_residual); |
75 | } | 74 | } |
76 | 75 | ||
77 | addr = virt_to_bus(hdata->dma_bounce_buffer); | 76 | addr = virt_to_bus(wh->dma_bounce_buffer); |
78 | } | 77 | } |
79 | 78 | ||
80 | /* setup dma direction */ | 79 | /* setup dma direction */ |
@@ -82,12 +81,12 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) | |||
82 | cntr |= CNTR_DDIR; | 81 | cntr |= CNTR_DDIR; |
83 | 82 | ||
84 | /* remember direction */ | 83 | /* remember direction */ |
85 | hdata->dma_dir = dir_in; | 84 | wh->dma_dir = dir_in; |
86 | 85 | ||
87 | DMA(a3000_host)->CNTR = cntr; | 86 | regs->CNTR = cntr; |
88 | 87 | ||
89 | /* setup DMA *physical* address */ | 88 | /* setup DMA *physical* address */ |
90 | DMA(a3000_host)->ACR = addr; | 89 | regs->ACR = addr; |
91 | 90 | ||
92 | if (dir_in) { | 91 | if (dir_in) { |
93 | /* invalidate any cache */ | 92 | /* invalidate any cache */ |
@@ -99,7 +98,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) | |||
99 | 98 | ||
100 | /* start DMA */ | 99 | /* start DMA */ |
101 | mb(); /* make sure setup is completed */ | 100 | mb(); /* make sure setup is completed */ |
102 | DMA(a3000_host)->ST_DMA = 1; | 101 | regs->ST_DMA = 1; |
103 | mb(); /* make sure DMA has started before next IO */ | 102 | mb(); /* make sure DMA has started before next IO */ |
104 | 103 | ||
105 | /* return success */ | 104 | /* return success */ |
@@ -109,22 +108,24 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) | |||
109 | static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, | 108 | static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, |
110 | int status) | 109 | int status) |
111 | { | 110 | { |
112 | struct WD33C93_hostdata *hdata = shost_priv(instance); | 111 | struct a3000_hostdata *hdata = shost_priv(instance); |
112 | struct WD33C93_hostdata *wh = &hdata->wh; | ||
113 | struct a3000_scsiregs *regs = hdata->regs; | ||
113 | 114 | ||
114 | /* disable SCSI interrupts */ | 115 | /* disable SCSI interrupts */ |
115 | unsigned short cntr = CNTR_PDMD; | 116 | unsigned short cntr = CNTR_PDMD; |
116 | 117 | ||
117 | if (!hdata->dma_dir) | 118 | if (!wh->dma_dir) |
118 | cntr |= CNTR_DDIR; | 119 | cntr |= CNTR_DDIR; |
119 | 120 | ||
120 | DMA(instance)->CNTR = cntr; | 121 | regs->CNTR = cntr; |
121 | mb(); /* make sure CNTR is updated before next IO */ | 122 | mb(); /* make sure CNTR is updated before next IO */ |
122 | 123 | ||
123 | /* flush if we were reading */ | 124 | /* flush if we were reading */ |
124 | if (hdata->dma_dir) { | 125 | if (wh->dma_dir) { |
125 | DMA(instance)->FLUSH = 1; | 126 | regs->FLUSH = 1; |
126 | mb(); /* don't allow prefetch */ | 127 | mb(); /* don't allow prefetch */ |
127 | while (!(DMA(instance)->ISTR & ISTR_FE_FLG)) | 128 | while (!(regs->ISTR & ISTR_FE_FLG)) |
128 | barrier(); | 129 | barrier(); |
129 | mb(); /* no IO until FLUSH is done */ | 130 | mb(); /* no IO until FLUSH is done */ |
130 | } | 131 | } |
@@ -133,96 +134,54 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, | |||
133 | /* I think that this CINT is only necessary if you are | 134 | /* I think that this CINT is only necessary if you are |
134 | * using the terminal count features. HM 7 Mar 1994 | 135 | * using the terminal count features. HM 7 Mar 1994 |
135 | */ | 136 | */ |
136 | DMA(instance)->CINT = 1; | 137 | regs->CINT = 1; |
137 | 138 | ||
138 | /* stop DMA */ | 139 | /* stop DMA */ |
139 | DMA(instance)->SP_DMA = 1; | 140 | regs->SP_DMA = 1; |
140 | mb(); /* make sure DMA is stopped before next IO */ | 141 | mb(); /* make sure DMA is stopped before next IO */ |
141 | 142 | ||
142 | /* restore the CONTROL bits (minus the direction flag) */ | 143 | /* restore the CONTROL bits (minus the direction flag) */ |
143 | DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN; | 144 | regs->CNTR = CNTR_PDMD | CNTR_INTEN; |
144 | mb(); /* make sure CNTR is updated before next IO */ | 145 | mb(); /* make sure CNTR is updated before next IO */ |
145 | 146 | ||
146 | /* copy from a bounce buffer, if necessary */ | 147 | /* copy from a bounce buffer, if necessary */ |
147 | if (status && hdata->dma_bounce_buffer) { | 148 | if (status && wh->dma_bounce_buffer) { |
148 | if (SCpnt) { | 149 | if (SCpnt) { |
149 | if (hdata->dma_dir && SCpnt) | 150 | if (wh->dma_dir && SCpnt) |
150 | memcpy(SCpnt->SCp.ptr, | 151 | memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer, |
151 | hdata->dma_bounce_buffer, | ||
152 | SCpnt->SCp.this_residual); | 152 | SCpnt->SCp.this_residual); |
153 | kfree(hdata->dma_bounce_buffer); | 153 | kfree(wh->dma_bounce_buffer); |
154 | hdata->dma_bounce_buffer = NULL; | 154 | wh->dma_bounce_buffer = NULL; |
155 | hdata->dma_bounce_len = 0; | 155 | wh->dma_bounce_len = 0; |
156 | } else { | 156 | } else { |
157 | kfree(hdata->dma_bounce_buffer); | 157 | kfree(wh->dma_bounce_buffer); |
158 | hdata->dma_bounce_buffer = NULL; | 158 | wh->dma_bounce_buffer = NULL; |
159 | hdata->dma_bounce_len = 0; | 159 | wh->dma_bounce_len = 0; |
160 | } | 160 | } |
161 | } | 161 | } |
162 | } | 162 | } |
163 | 163 | ||
164 | static int __init a3000_detect(struct scsi_host_template *tpnt) | ||
165 | { | ||
166 | wd33c93_regs regs; | ||
167 | struct WD33C93_hostdata *hdata; | ||
168 | |||
169 | if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(A3000_SCSI)) | ||
170 | return 0; | ||
171 | if (!request_mem_region(0xDD0000, 256, "wd33c93")) | ||
172 | return 0; | ||
173 | |||
174 | tpnt->proc_name = "A3000"; | ||
175 | tpnt->proc_info = &wd33c93_proc_info; | ||
176 | |||
177 | a3000_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); | ||
178 | if (a3000_host == NULL) | ||
179 | goto fail_register; | ||
180 | |||
181 | a3000_host->base = ZTWO_VADDR(0xDD0000); | ||
182 | a3000_host->irq = IRQ_AMIGA_PORTS; | ||
183 | DMA(a3000_host)->DAWR = DAWR_A3000; | ||
184 | regs.SASR = &(DMA(a3000_host)->SASR); | ||
185 | regs.SCMD = &(DMA(a3000_host)->SCMD); | ||
186 | hdata = shost_priv(a3000_host); | ||
187 | hdata->no_sync = 0xff; | ||
188 | hdata->fast = 0; | ||
189 | hdata->dma_mode = CTRL_DMA; | ||
190 | wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15); | ||
191 | if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI", | ||
192 | a3000_intr)) | ||
193 | goto fail_irq; | ||
194 | DMA(a3000_host)->CNTR = CNTR_PDMD | CNTR_INTEN; | ||
195 | |||
196 | return 1; | ||
197 | |||
198 | fail_irq: | ||
199 | scsi_unregister(a3000_host); | ||
200 | fail_register: | ||
201 | release_mem_region(0xDD0000, 256); | ||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | static int a3000_bus_reset(struct scsi_cmnd *cmd) | 164 | static int a3000_bus_reset(struct scsi_cmnd *cmd) |
206 | { | 165 | { |
166 | struct Scsi_Host *instance = cmd->device->host; | ||
167 | |||
207 | /* FIXME perform bus-specific reset */ | 168 | /* FIXME perform bus-specific reset */ |
208 | 169 | ||
209 | /* FIXME 2: kill this entire function, which should | 170 | /* FIXME 2: kill this entire function, which should |
210 | cause mid-layer to call wd33c93_host_reset anyway? */ | 171 | cause mid-layer to call wd33c93_host_reset anyway? */ |
211 | 172 | ||
212 | spin_lock_irq(cmd->device->host->host_lock); | 173 | spin_lock_irq(instance->host_lock); |
213 | wd33c93_host_reset(cmd); | 174 | wd33c93_host_reset(cmd); |
214 | spin_unlock_irq(cmd->device->host->host_lock); | 175 | spin_unlock_irq(instance->host_lock); |
215 | 176 | ||
216 | return SUCCESS; | 177 | return SUCCESS; |
217 | } | 178 | } |
218 | 179 | ||
219 | #define HOSTS_C | 180 | static struct scsi_host_template amiga_a3000_scsi_template = { |
220 | 181 | .module = THIS_MODULE, | |
221 | static struct scsi_host_template driver_template = { | ||
222 | .proc_name = "A3000", | ||
223 | .name = "Amiga 3000 built-in SCSI", | 182 | .name = "Amiga 3000 built-in SCSI", |
224 | .detect = a3000_detect, | 183 | .proc_info = wd33c93_proc_info, |
225 | .release = a3000_release, | 184 | .proc_name = "A3000", |
226 | .queuecommand = wd33c93_queuecommand, | 185 | .queuecommand = wd33c93_queuecommand, |
227 | .eh_abort_handler = wd33c93_abort, | 186 | .eh_abort_handler = wd33c93_abort, |
228 | .eh_bus_reset_handler = a3000_bus_reset, | 187 | .eh_bus_reset_handler = a3000_bus_reset, |
@@ -234,15 +193,104 @@ static struct scsi_host_template driver_template = { | |||
234 | .use_clustering = ENABLE_CLUSTERING | 193 | .use_clustering = ENABLE_CLUSTERING |
235 | }; | 194 | }; |
236 | 195 | ||
196 | static int __init amiga_a3000_scsi_probe(struct platform_device *pdev) | ||
197 | { | ||
198 | struct resource *res; | ||
199 | struct Scsi_Host *instance; | ||
200 | int error; | ||
201 | struct a3000_scsiregs *regs; | ||
202 | wd33c93_regs wdregs; | ||
203 | struct a3000_hostdata *hdata; | ||
204 | |||
205 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
206 | if (!res) | ||
207 | return -ENODEV; | ||
208 | |||
209 | if (!request_mem_region(res->start, resource_size(res), "wd33c93")) | ||
210 | return -EBUSY; | ||
211 | |||
212 | instance = scsi_host_alloc(&amiga_a3000_scsi_template, | ||
213 | sizeof(struct a3000_hostdata)); | ||
214 | if (!instance) { | ||
215 | error = -ENOMEM; | ||
216 | goto fail_alloc; | ||
217 | } | ||
218 | |||
219 | instance->irq = IRQ_AMIGA_PORTS; | ||
237 | 220 | ||
238 | #include "scsi_module.c" | 221 | regs = (struct a3000_scsiregs *)ZTWO_VADDR(res->start); |
222 | regs->DAWR = DAWR_A3000; | ||
223 | |||
224 | wdregs.SASR = ®s->SASR; | ||
225 | wdregs.SCMD = ®s->SCMD; | ||
226 | |||
227 | hdata = shost_priv(instance); | ||
228 | hdata->wh.no_sync = 0xff; | ||
229 | hdata->wh.fast = 0; | ||
230 | hdata->wh.dma_mode = CTRL_DMA; | ||
231 | hdata->regs = regs; | ||
232 | |||
233 | wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_12_15); | ||
234 | error = request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, | ||
235 | "A3000 SCSI", instance); | ||
236 | if (error) | ||
237 | goto fail_irq; | ||
238 | |||
239 | regs->CNTR = CNTR_PDMD | CNTR_INTEN; | ||
240 | |||
241 | error = scsi_add_host(instance, NULL); | ||
242 | if (error) | ||
243 | goto fail_host; | ||
244 | |||
245 | platform_set_drvdata(pdev, instance); | ||
246 | |||
247 | scsi_scan_host(instance); | ||
248 | return 0; | ||
249 | |||
250 | fail_host: | ||
251 | free_irq(IRQ_AMIGA_PORTS, instance); | ||
252 | fail_irq: | ||
253 | scsi_host_put(instance); | ||
254 | fail_alloc: | ||
255 | release_mem_region(res->start, resource_size(res)); | ||
256 | return error; | ||
257 | } | ||
258 | |||
259 | static int __exit amiga_a3000_scsi_remove(struct platform_device *pdev) | ||
260 | { | ||
261 | struct Scsi_Host *instance = platform_get_drvdata(pdev); | ||
262 | struct a3000_hostdata *hdata = shost_priv(instance); | ||
263 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
264 | |||
265 | hdata->regs->CNTR = 0; | ||
266 | scsi_remove_host(instance); | ||
267 | free_irq(IRQ_AMIGA_PORTS, instance); | ||
268 | scsi_host_put(instance); | ||
269 | release_mem_region(res->start, resource_size(res)); | ||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | static struct platform_driver amiga_a3000_scsi_driver = { | ||
274 | .remove = __exit_p(amiga_a3000_scsi_remove), | ||
275 | .driver = { | ||
276 | .name = "amiga-a3000-scsi", | ||
277 | .owner = THIS_MODULE, | ||
278 | }, | ||
279 | }; | ||
280 | |||
281 | static int __init amiga_a3000_scsi_init(void) | ||
282 | { | ||
283 | return platform_driver_probe(&amiga_a3000_scsi_driver, | ||
284 | amiga_a3000_scsi_probe); | ||
285 | } | ||
286 | module_init(amiga_a3000_scsi_init); | ||
239 | 287 | ||
240 | static int a3000_release(struct Scsi_Host *instance) | 288 | static void __exit amiga_a3000_scsi_exit(void) |
241 | { | 289 | { |
242 | DMA(instance)->CNTR = 0; | 290 | platform_driver_unregister(&amiga_a3000_scsi_driver); |
243 | release_mem_region(0xDD0000, 256); | ||
244 | free_irq(IRQ_AMIGA_PORTS, a3000_intr); | ||
245 | return 1; | ||
246 | } | 291 | } |
292 | module_exit(amiga_a3000_scsi_exit); | ||
247 | 293 | ||
294 | MODULE_DESCRIPTION("Amiga 3000 built-in SCSI"); | ||
248 | MODULE_LICENSE("GPL"); | 295 | MODULE_LICENSE("GPL"); |
296 | MODULE_ALIAS("platform:amiga-a3000-scsi"); | ||
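The a3000.c rewrite follows the same pattern but on the platform bus, and it drops the `a3000_host` global in favour of per-host data reached through `shost_priv()`. Because the A3000's built-in SCSI cannot be hot-plugged, the probe function is `__init` and the driver is registered with `platform_driver_probe()`, which probes exactly once at initialization time and lets the probe code be discarded afterwards; `.probe` is intentionally absent from the `platform_driver` structure in this scheme. A trimmed sketch of that registration idiom (hypothetical `foo` names):

```c
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* One-shot probe: may live in .init.text because platform_driver_probe()
 * guarantees it is never called again after boot. */
static int __init foo_probe(struct platform_device *pdev)
{
	return 0;
}

static int __exit foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	/* no .probe here -- platform_driver_probe() supplies it once */
	.remove = __exit_p(foo_remove),
	.driver = {
		.name  = "foo",
		.owner = THIS_MODULE,
	},
};

static int __init foo_init(void)
{
	return platform_driver_probe(&foo_driver, foo_probe);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	platform_driver_unregister(&foo_driver);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:foo");
```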
diff --git a/drivers/scsi/a3000.h b/drivers/scsi/a3000.h
index 684813ee378c..49db4a335aab 100644
--- a/drivers/scsi/a3000.h
+++ b/drivers/scsi/a3000.h
@@ -25,7 +25,7 @@ | |||
25 | */ | 25 | */ |
26 | #define A3000_XFER_MASK (0x00000003) | 26 | #define A3000_XFER_MASK (0x00000003) |
27 | 27 | ||
28 | typedef struct { | 28 | struct a3000_scsiregs { |
29 | unsigned char pad1[2]; | 29 | unsigned char pad1[2]; |
30 | volatile unsigned short DAWR; | 30 | volatile unsigned short DAWR; |
31 | volatile unsigned int WTC; | 31 | volatile unsigned int WTC; |
@@ -46,7 +46,7 @@ typedef struct { | |||
46 | volatile unsigned char SASR; | 46 | volatile unsigned char SASR; |
47 | unsigned char pad9; | 47 | unsigned char pad9; |
48 | volatile unsigned char SCMD; | 48 | volatile unsigned char SCMD; |
49 | } a3000_scsiregs; | 49 | }; |
50 | 50 | ||
51 | #define DAWR_A3000 (3) | 51 | #define DAWR_A3000 (3) |
52 | 52 | ||
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
index 11ae6be8aeaf..23c76f41883c 100644
--- a/drivers/scsi/a4000t.c
+++ b/drivers/scsi/a4000t.c
@@ -20,10 +20,6 @@ | |||
20 | 20 | ||
21 | #include "53c700.h" | 21 | #include "53c700.h" |
22 | 22 | ||
23 | MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / Kars de Jong <jongk@linux-m68k.org>"); | ||
24 | MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver"); | ||
25 | MODULE_LICENSE("GPL"); | ||
26 | |||
27 | 23 | ||
28 | static struct scsi_host_template a4000t_scsi_driver_template = { | 24 | static struct scsi_host_template a4000t_scsi_driver_template = { |
29 | .name = "A4000T builtin SCSI", | 25 | .name = "A4000T builtin SCSI", |
@@ -32,30 +28,35 @@ static struct scsi_host_template a4000t_scsi_driver_template = { | |||
32 | .module = THIS_MODULE, | 28 | .module = THIS_MODULE, |
33 | }; | 29 | }; |
34 | 30 | ||
35 | static struct platform_device *a4000t_scsi_device; | ||
36 | 31 | ||
37 | #define A4000T_SCSI_ADDR 0xdd0040 | 32 | #define A4000T_SCSI_OFFSET 0x40 |
38 | 33 | ||
39 | static int __devinit a4000t_probe(struct platform_device *dev) | 34 | static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev) |
40 | { | 35 | { |
41 | struct Scsi_Host *host; | 36 | struct resource *res; |
37 | phys_addr_t scsi_addr; | ||
42 | struct NCR_700_Host_Parameters *hostdata; | 38 | struct NCR_700_Host_Parameters *hostdata; |
39 | struct Scsi_Host *host; | ||
43 | 40 | ||
44 | if (!(MACH_IS_AMIGA && AMIGAHW_PRESENT(A4000_SCSI))) | 41 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
45 | goto out; | 42 | if (!res) |
43 | return -ENODEV; | ||
46 | 44 | ||
47 | if (!request_mem_region(A4000T_SCSI_ADDR, 0x1000, | 45 | if (!request_mem_region(res->start, resource_size(res), |
48 | "A4000T builtin SCSI")) | 46 | "A4000T builtin SCSI")) |
49 | goto out; | 47 | return -EBUSY; |
50 | 48 | ||
51 | hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); | 49 | hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), |
50 | GFP_KERNEL); | ||
52 | if (!hostdata) { | 51 | if (!hostdata) { |
53 | printk(KERN_ERR "a4000t-scsi: Failed to allocate host data\n"); | 52 | dev_err(&pdev->dev, "Failed to allocate host data\n"); |
54 | goto out_release; | 53 | goto out_release; |
55 | } | 54 | } |
56 | 55 | ||
56 | scsi_addr = res->start + A4000T_SCSI_OFFSET; | ||
57 | |||
57 | /* Fill in the required pieces of hostdata */ | 58 | /* Fill in the required pieces of hostdata */ |
58 | hostdata->base = (void __iomem *)ZTWO_VADDR(A4000T_SCSI_ADDR); | 59 | hostdata->base = (void __iomem *)ZTWO_VADDR(scsi_addr); |
59 | hostdata->clock = 50; | 60 | hostdata->clock = 50; |
60 | hostdata->chip710 = 1; | 61 | hostdata->chip710 = 1; |
61 | hostdata->dmode_extra = DMODE_FC2; | 62 | hostdata->dmode_extra = DMODE_FC2; |
@@ -63,26 +64,25 @@ static int __devinit a4000t_probe(struct platform_device *dev) | |||
63 | 64 | ||
64 | /* and register the chip */ | 65 | /* and register the chip */ |
65 | host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata, | 66 | host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata, |
66 | &dev->dev); | 67 | &pdev->dev); |
67 | if (!host) { | 68 | if (!host) { |
68 | printk(KERN_ERR "a4000t-scsi: No host detected; " | 69 | dev_err(&pdev->dev, |
69 | "board configuration problem?\n"); | 70 | "No host detected; board configuration problem?\n"); |
70 | goto out_free; | 71 | goto out_free; |
71 | } | 72 | } |
72 | 73 | ||
73 | host->this_id = 7; | 74 | host->this_id = 7; |
74 | host->base = A4000T_SCSI_ADDR; | 75 | host->base = scsi_addr; |
75 | host->irq = IRQ_AMIGA_PORTS; | 76 | host->irq = IRQ_AMIGA_PORTS; |
76 | 77 | ||
77 | if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi", | 78 | if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi", |
78 | host)) { | 79 | host)) { |
79 | printk(KERN_ERR "a4000t-scsi: request_irq failed\n"); | 80 | dev_err(&pdev->dev, "request_irq failed\n"); |
80 | goto out_put_host; | 81 | goto out_put_host; |
81 | } | 82 | } |
82 | 83 | ||
83 | platform_set_drvdata(dev, host); | 84 | platform_set_drvdata(pdev, host); |
84 | scsi_scan_host(host); | 85 | scsi_scan_host(host); |
85 | |||
86 | return 0; | 86 | return 0; |
87 | 87 | ||
88 | out_put_host: | 88 | out_put_host: |
@@ -90,58 +90,49 @@ static int __devinit a4000t_probe(struct platform_device *dev) | |||
90 | out_free: | 90 | out_free: |
91 | kfree(hostdata); | 91 | kfree(hostdata); |
92 | out_release: | 92 | out_release: |
93 | release_mem_region(A4000T_SCSI_ADDR, 0x1000); | 93 | release_mem_region(res->start, resource_size(res)); |
94 | out: | ||
95 | return -ENODEV; | 94 | return -ENODEV; |
96 | } | 95 | } |
97 | 96 | ||
98 | static __devexit int a4000t_device_remove(struct platform_device *dev) | 97 | static int __exit amiga_a4000t_scsi_remove(struct platform_device *pdev) |
99 | { | 98 | { |
100 | struct Scsi_Host *host = platform_get_drvdata(dev); | 99 | struct Scsi_Host *host = platform_get_drvdata(pdev); |
101 | struct NCR_700_Host_Parameters *hostdata = shost_priv(host); | 100 | struct NCR_700_Host_Parameters *hostdata = shost_priv(host); |
101 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
102 | 102 | ||
103 | scsi_remove_host(host); | 103 | scsi_remove_host(host); |
104 | |||
105 | NCR_700_release(host); | 104 | NCR_700_release(host); |
106 | kfree(hostdata); | 105 | kfree(hostdata); |
107 | free_irq(host->irq, host); | 106 | free_irq(host->irq, host); |
108 | release_mem_region(A4000T_SCSI_ADDR, 0x1000); | 107 | release_mem_region(res->start, resource_size(res)); |
109 | |||
110 | return 0; | 108 | return 0; |
111 | } | 109 | } |
112 | 110 | ||
113 | static struct platform_driver a4000t_scsi_driver = { | 111 | static struct platform_driver amiga_a4000t_scsi_driver = { |
114 | .driver = { | 112 | .remove = __exit_p(amiga_a4000t_scsi_remove), |
115 | .name = "a4000t-scsi", | 113 | .driver = { |
116 | .owner = THIS_MODULE, | 114 | .name = "amiga-a4000t-scsi", |
115 | .owner = THIS_MODULE, | ||
117 | }, | 116 | }, |
118 | .probe = a4000t_probe, | ||
119 | .remove = __devexit_p(a4000t_device_remove), | ||
120 | }; | 117 | }; |
121 | 118 | ||
122 | static int __init a4000t_scsi_init(void) | 119 | static int __init amiga_a4000t_scsi_init(void) |
123 | { | 120 | { |
124 | int err; | 121 | return platform_driver_probe(&amiga_a4000t_scsi_driver, |
125 | 122 | amiga_a4000t_scsi_probe); | |
126 | err = platform_driver_register(&a4000t_scsi_driver); | ||
127 | if (err) | ||
128 | return err; | ||
129 | |||
130 | a4000t_scsi_device = platform_device_register_simple("a4000t-scsi", | ||
131 | -1, NULL, 0); | ||
132 | if (IS_ERR(a4000t_scsi_device)) { | ||
133 | platform_driver_unregister(&a4000t_scsi_driver); | ||
134 | return PTR_ERR(a4000t_scsi_device); | ||
135 | } | ||
136 | |||
137 | return err; | ||
138 | } | 123 | } |
139 | 124 | ||
140 | static void __exit a4000t_scsi_exit(void) | 125 | module_init(amiga_a4000t_scsi_init); |
126 | |||
127 | static void __exit amiga_a4000t_scsi_exit(void) | ||
141 | { | 128 | { |
142 | platform_device_unregister(a4000t_scsi_device); | 129 | platform_driver_unregister(&amiga_a4000t_scsi_driver); |
143 | platform_driver_unregister(&a4000t_scsi_driver); | ||
144 | } | 130 | } |
145 | 131 | ||
146 | module_init(a4000t_scsi_init); | 132 | module_exit(amiga_a4000t_scsi_exit); |
147 | module_exit(a4000t_scsi_exit); | 133 | |
134 | MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / " | ||
135 | "Kars de Jong <jongk@linux-m68k.org>"); | ||
136 | MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver"); | ||
137 | MODULE_LICENSE("GPL"); | ||
138 | MODULE_ALIAS("platform:amiga-a4000t-scsi"); | ||
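In a4000t.c the hard-coded `A4000T_SCSI_ADDR` goes away: the MMIO window now comes from the memory resource attached to the platform device, so the base address and size live in one place and the driver derives the chip address from `res->start` plus a small offset. The resource-handling shape, reduced to a sketch (hypothetical names):

```c
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int __init foo_probe(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* claim exactly the region the platform device describes */
	if (!request_mem_region(res->start, resource_size(res), "foo"))
		return -EBUSY;

	/* ... map and program the chip relative to res->start ... */
	return 0;
}

static int __exit foo_remove(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* ... tear the device down ... */
	release_mem_region(res->start, resource_size(res));
	return 0;
}
```

Failure paths release the region with the same `release_mem_region(res->start, resource_size(res))` call, which is what the `out_release` label above now does.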
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 9c0c91178538..1a5bf5724750 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -655,9 +655,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
655 | /* Does this really need to be GFP_DMA? */ | 655 | /* Does this really need to be GFP_DMA? */ |
656 | p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); | 656 | p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); |
657 | if(!p) { | 657 | if(!p) { |
658 | kfree (usg); | 658 | dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", |
659 | dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", | ||
660 | usg->sg[i].count,i,usg->count)); | 659 | usg->sg[i].count,i,usg->count)); |
660 | kfree(usg); | ||
661 | rcode = -ENOMEM; | 661 | rcode = -ENOMEM; |
662 | goto cleanup; | 662 | goto cleanup; |
663 | } | 663 | } |
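The only functional point of the commctrl.c hunk is ordering: the debug message dereferences `usg->sg[i].count` and `usg->count`, so `kfree(usg)` has to come after it, not before, or the print reads freed memory whenever the driver's debug output is compiled in. The corrected error path, with the ordering spelled out in comments:

```c
	/* Does this really need to be GFP_DMA? */
	p = kmalloc(usg->sg[i].count, GFP_KERNEL | __GFP_DMA);
	if (!p) {
		/* log while 'usg' is still valid ... */
		dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
			usg->sg[i].count, i, usg->count));
		kfree(usg);		/* ... and only then free it */
		rcode = -ENOMEM;
		goto cleanup;
	}
```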
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index e9373a2d14fa..33898b61fdb5 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -705,12 +705,17 @@ static int aac_cfg_open(struct inode *inode, struct file *file) | |||
705 | * Bugs: Needs to handle hot plugging | 705 | * Bugs: Needs to handle hot plugging |
706 | */ | 706 | */ |
707 | 707 | ||
708 | static int aac_cfg_ioctl(struct inode *inode, struct file *file, | 708 | static long aac_cfg_ioctl(struct file *file, |
709 | unsigned int cmd, unsigned long arg) | 709 | unsigned int cmd, unsigned long arg) |
710 | { | 710 | { |
711 | int ret; | ||
711 | if (!capable(CAP_SYS_RAWIO)) | 712 | if (!capable(CAP_SYS_RAWIO)) |
712 | return -EPERM; | 713 | return -EPERM; |
713 | return aac_do_ioctl(file->private_data, cmd, (void __user *)arg); | 714 | lock_kernel(); |
715 | ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg); | ||
716 | unlock_kernel(); | ||
717 | |||
718 | return ret; | ||
714 | } | 719 | } |
715 | 720 | ||
716 | #ifdef CONFIG_COMPAT | 721 | #ifdef CONFIG_COMPAT |
@@ -1029,7 +1034,7 @@ ssize_t aac_get_serial_number(struct device *device, char *buf) | |||
1029 | 1034 | ||
1030 | static const struct file_operations aac_cfg_fops = { | 1035 | static const struct file_operations aac_cfg_fops = { |
1031 | .owner = THIS_MODULE, | 1036 | .owner = THIS_MODULE, |
1032 | .ioctl = aac_cfg_ioctl, | 1037 | .unlocked_ioctl = aac_cfg_ioctl, |
1033 | #ifdef CONFIG_COMPAT | 1038 | #ifdef CONFIG_COMPAT |
1034 | .compat_ioctl = aac_compat_cfg_ioctl, | 1039 | .compat_ioctl = aac_compat_cfg_ioctl, |
1035 | #endif | 1040 | #endif |
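aacraid gets the same treatment as the 3ware drivers: `aac_cfg_ioctl()` becomes an `unlocked_ioctl` handler that wraps the shared `aac_do_ioctl()` helper in `lock_kernel()`/`unlock_kernel()`, and the existing `.compat_ioctl` entry stays in place for 32-bit userspace. For drivers whose ioctl argument is a plain user pointer, a compat shim is often just a pointer translation like the sketch below, reusing the `foo_unlocked_ioctl()` from the earlier sketch; aacraid's own `aac_compat_cfg_ioctl()` is not part of this hunk and may do more than this:

```c
#ifdef CONFIG_COMPAT
#include <linux/compat.h>	/* compat_ptr() */

/* Hypothetical 32-bit entry point forwarding to the native handler. */
static long foo_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	return foo_unlocked_ioctl(file, cmd,
				  (unsigned long)compat_ptr(arg));
}
#endif
```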
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index ab646e580d64..ce5371b3cdd5 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -48,7 +48,7 @@ struct device_attribute; | |||
48 | /*The limit of outstanding scsi command that firmware can handle*/ | 48 | /*The limit of outstanding scsi command that firmware can handle*/ |
49 | #define ARCMSR_MAX_OUTSTANDING_CMD 256 | 49 | #define ARCMSR_MAX_OUTSTANDING_CMD 256 |
50 | #define ARCMSR_MAX_FREECCB_NUM 320 | 50 | #define ARCMSR_MAX_FREECCB_NUM 320 |
51 | #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/02/27" | 51 | #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/11/03" |
52 | #define ARCMSR_SCSI_INITIATOR_ID 255 | 52 | #define ARCMSR_SCSI_INITIATOR_ID 255 |
53 | #define ARCMSR_MAX_XFER_SECTORS 512 | 53 | #define ARCMSR_MAX_XFER_SECTORS 512 |
54 | #define ARCMSR_MAX_XFER_SECTORS_B 4096 | 54 | #define ARCMSR_MAX_XFER_SECTORS_B 4096 |
@@ -110,6 +110,8 @@ struct CMD_MESSAGE_FIELD | |||
110 | #define FUNCTION_SAY_HELLO 0x0807 | 110 | #define FUNCTION_SAY_HELLO 0x0807 |
111 | #define FUNCTION_SAY_GOODBYE 0x0808 | 111 | #define FUNCTION_SAY_GOODBYE 0x0808 |
112 | #define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809 | 112 | #define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809 |
113 | #define FUNCTION_GET_FIRMWARE_STATUS 0x080A | ||
114 | #define FUNCTION_HARDWARE_RESET 0x080B | ||
113 | /* ARECA IO CONTROL CODE*/ | 115 | /* ARECA IO CONTROL CODE*/ |
114 | #define ARCMSR_MESSAGE_READ_RQBUFFER \ | 116 | #define ARCMSR_MESSAGE_READ_RQBUFFER \ |
115 | ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER | 117 | ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER |
@@ -133,6 +135,7 @@ struct CMD_MESSAGE_FIELD | |||
133 | #define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001 | 135 | #define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001 |
134 | #define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006 | 136 | #define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006 |
135 | #define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F | 137 | #define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F |
138 | #define ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON 0x00000088 | ||
136 | /* | 139 | /* |
137 | ************************************************************* | 140 | ************************************************************* |
138 | ** structure for holding DMA address data | 141 | ** structure for holding DMA address data |
@@ -341,13 +344,13 @@ struct MessageUnit_B | |||
341 | uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; | 344 | uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; |
342 | uint32_t postq_index; | 345 | uint32_t postq_index; |
343 | uint32_t doneq_index; | 346 | uint32_t doneq_index; |
344 | void __iomem *drv2iop_doorbell_reg; | 347 | uint32_t __iomem *drv2iop_doorbell_reg; |
345 | void __iomem *drv2iop_doorbell_mask_reg; | 348 | uint32_t __iomem *drv2iop_doorbell_mask_reg; |
346 | void __iomem *iop2drv_doorbell_reg; | 349 | uint32_t __iomem *iop2drv_doorbell_reg; |
347 | void __iomem *iop2drv_doorbell_mask_reg; | 350 | uint32_t __iomem *iop2drv_doorbell_mask_reg; |
348 | void __iomem *msgcode_rwbuffer_reg; | 351 | uint32_t __iomem *msgcode_rwbuffer_reg; |
349 | void __iomem *ioctl_wbuffer_reg; | 352 | uint32_t __iomem *ioctl_wbuffer_reg; |
350 | void __iomem *ioctl_rbuffer_reg; | 353 | uint32_t __iomem *ioctl_rbuffer_reg; |
351 | }; | 354 | }; |
352 | 355 | ||
353 | /* | 356 | /* |
@@ -375,6 +378,7 @@ struct AdapterControlBlock | |||
375 | /* message unit ATU inbound base address0 */ | 378 | /* message unit ATU inbound base address0 */ |
376 | 379 | ||
377 | uint32_t acb_flags; | 380 | uint32_t acb_flags; |
381 | uint8_t adapter_index; | ||
378 | #define ACB_F_SCSISTOPADAPTER 0x0001 | 382 | #define ACB_F_SCSISTOPADAPTER 0x0001 |
379 | #define ACB_F_MSG_STOP_BGRB 0x0002 | 383 | #define ACB_F_MSG_STOP_BGRB 0x0002 |
380 | /* stop RAID background rebuild */ | 384 | /* stop RAID background rebuild */ |
@@ -390,7 +394,7 @@ struct AdapterControlBlock | |||
390 | #define ACB_F_BUS_RESET 0x0080 | 394 | #define ACB_F_BUS_RESET 0x0080 |
391 | #define ACB_F_IOP_INITED 0x0100 | 395 | #define ACB_F_IOP_INITED 0x0100 |
392 | /* iop init */ | 396 | /* iop init */ |
393 | 397 | #define ACB_F_FIRMWARE_TRAP 0x0400 | |
394 | struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM]; | 398 | struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM]; |
395 | /* used for memory free */ | 399 | /* used for memory free */ |
396 | struct list_head ccb_free_list; | 400 | struct list_head ccb_free_list; |
@@ -423,12 +427,19 @@ struct AdapterControlBlock | |||
423 | #define ARECA_RAID_GOOD 0xaa | 427 | #define ARECA_RAID_GOOD 0xaa |
424 | uint32_t num_resets; | 428 | uint32_t num_resets; |
425 | uint32_t num_aborts; | 429 | uint32_t num_aborts; |
430 | uint32_t signature; | ||
426 | uint32_t firm_request_len; | 431 | uint32_t firm_request_len; |
427 | uint32_t firm_numbers_queue; | 432 | uint32_t firm_numbers_queue; |
428 | uint32_t firm_sdram_size; | 433 | uint32_t firm_sdram_size; |
429 | uint32_t firm_hd_channels; | 434 | uint32_t firm_hd_channels; |
430 | char firm_model[12]; | 435 | char firm_model[12]; |
431 | char firm_version[20]; | 436 | char firm_version[20]; |
437 | char device_map[20]; /*21,84-99*/ | ||
438 | struct work_struct arcmsr_do_message_isr_bh; | ||
439 | struct timer_list eternal_timer; | ||
440 | unsigned short fw_state; | ||
441 | atomic_t rq_map_token; | ||
442 | int ante_token_value; | ||
432 | };/* HW_DEVICE_EXTENSION */ | 443 | };/* HW_DEVICE_EXTENSION */ |
433 | /* | 444 | /* |
434 | ******************************************************************************* | 445 | ******************************************************************************* |
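Editor's note: the arcmsr.h changes above retype the MessageUnit_B doorbell and rwbuffer pointers from void __iomem * to uint32_t __iomem * and add the state needed by the new device-map polling (device_map cache, work_struct, periodic timer, and the rq_map_token/fw_state heartbeat fields). With a 32-bit-typed rwbuffer pointer the word offsets used later in the patch can be indexed without extra casts; a small illustrative helper (read_device_map_byte is hypothetical, not driver code):

        #include <linux/io.h>
        #include <linux/types.h>

        /* device_map occupies rwbuffer words 21..24 (bytes 84-99) per the
         * comments in the patch; a uint32_t __iomem * can be indexed directly. */
        static u8 read_device_map_byte(uint32_t __iomem *msgcode_rwbuffer, int offset)
        {
                char __iomem *devicemap = (char __iomem *)&msgcode_rwbuffer[21];

                return readb(devicemap + offset);
        }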
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c index a4e04c50c436..07fdfe57e38e 100644 --- a/drivers/scsi/arcmsr/arcmsr_attr.c +++ b/drivers/scsi/arcmsr/arcmsr_attr.c | |||
@@ -192,6 +192,7 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = { | |||
192 | .attr = { | 192 | .attr = { |
193 | .name = "mu_read", | 193 | .name = "mu_read", |
194 | .mode = S_IRUSR , | 194 | .mode = S_IRUSR , |
195 | .owner = THIS_MODULE, | ||
195 | }, | 196 | }, |
196 | .size = 1032, | 197 | .size = 1032, |
197 | .read = arcmsr_sysfs_iop_message_read, | 198 | .read = arcmsr_sysfs_iop_message_read, |
@@ -201,6 +202,7 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = { | |||
201 | .attr = { | 202 | .attr = { |
202 | .name = "mu_write", | 203 | .name = "mu_write", |
203 | .mode = S_IWUSR, | 204 | .mode = S_IWUSR, |
205 | .owner = THIS_MODULE, | ||
204 | }, | 206 | }, |
205 | .size = 1032, | 207 | .size = 1032, |
206 | .write = arcmsr_sysfs_iop_message_write, | 208 | .write = arcmsr_sysfs_iop_message_write, |
@@ -210,6 +212,7 @@ static struct bin_attribute arcmsr_sysfs_message_clear_attr = { | |||
210 | .attr = { | 212 | .attr = { |
211 | .name = "mu_clear", | 213 | .name = "mu_clear", |
212 | .mode = S_IWUSR, | 214 | .mode = S_IWUSR, |
215 | .owner = THIS_MODULE, | ||
213 | }, | 216 | }, |
214 | .size = 1, | 217 | .size = 1, |
215 | .write = arcmsr_sysfs_iop_message_clear, | 218 | .write = arcmsr_sysfs_iop_message_clear, |
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index ffbe2192da3c..ffa54792bb33 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c | |||
@@ -72,8 +72,16 @@ | |||
72 | #include <scsi/scsicam.h> | 72 | #include <scsi/scsicam.h> |
73 | #include "arcmsr.h" | 73 | #include "arcmsr.h" |
74 | 74 | ||
75 | #ifdef CONFIG_SCSI_ARCMSR_RESET | ||
76 | static int sleeptime = 20; | ||
77 | static int retrycount = 12; | ||
78 | module_param(sleeptime, int, S_IRUGO|S_IWUSR); | ||
79 | MODULE_PARM_DESC(sleeptime, "The waiting period for FW ready while bus reset"); | ||
80 | module_param(retrycount, int, S_IRUGO|S_IWUSR); | ||
81 | MODULE_PARM_DESC(retrycount, "The retry count for FW ready while bus reset"); | ||
82 | #endif | ||
75 | MODULE_AUTHOR("Erich Chen <support@areca.com.tw>"); | 83 | MODULE_AUTHOR("Erich Chen <support@areca.com.tw>"); |
76 | MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter"); | 84 | MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Bus Adapter"); |
77 | MODULE_LICENSE("Dual BSD/GPL"); | 85 | MODULE_LICENSE("Dual BSD/GPL"); |
78 | MODULE_VERSION(ARCMSR_DRIVER_VERSION); | 86 | MODULE_VERSION(ARCMSR_DRIVER_VERSION); |
79 | 87 | ||
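Editor's note: when CONFIG_SCSI_ARCMSR_RESET is enabled, the hunk above exposes two tunables that the new bus-reset path further down in this file consults while waiting for the firmware to come back. A sketch of the same declaration idiom follows; with S_IRUGO|S_IWUSR the values can be set at load time (for example "modprobe arcmsr sleeptime=30 retrycount=6") and changed by root at runtime through /sys/module/arcmsr/parameters/.

        #include <linux/module.h>
        #include <linux/moduleparam.h>
        #include <linux/stat.h>

        /* Illustrative sketch only, mirroring the hunk above. */
        static int sleeptime = 20;
        static int retrycount = 12;
        module_param(sleeptime, int, S_IRUGO | S_IWUSR);
        MODULE_PARM_DESC(sleeptime, "seconds to wait for firmware after a bus reset");
        module_param(retrycount, int, S_IRUGO | S_IWUSR);
        MODULE_PARM_DESC(retrycount, "number of firmware-ready polls after a bus reset");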
@@ -96,6 +104,13 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb); | |||
96 | static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); | 104 | static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); |
97 | static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb); | 105 | static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb); |
98 | static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb); | 106 | static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb); |
107 | static void arcmsr_request_device_map(unsigned long pacb); | ||
108 | static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb); | ||
109 | static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb); | ||
110 | static void arcmsr_message_isr_bh_fn(struct work_struct *work); | ||
111 | static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode); | ||
112 | static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); | ||
113 | |||
99 | static const char *arcmsr_info(struct Scsi_Host *); | 114 | static const char *arcmsr_info(struct Scsi_Host *); |
100 | static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); | 115 | static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); |
101 | static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, | 116 | static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, |
@@ -112,7 +127,7 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, | |||
112 | 127 | ||
113 | static struct scsi_host_template arcmsr_scsi_host_template = { | 128 | static struct scsi_host_template arcmsr_scsi_host_template = { |
114 | .module = THIS_MODULE, | 129 | .module = THIS_MODULE, |
115 | .name = "ARCMSR ARECA SATA/SAS RAID HOST Adapter" | 130 | .name = "ARCMSR ARECA SATA/SAS RAID Host Bus Adapter" |
116 | ARCMSR_DRIVER_VERSION, | 131 | ARCMSR_DRIVER_VERSION, |
117 | .info = arcmsr_info, | 132 | .info = arcmsr_info, |
118 | .queuecommand = arcmsr_queue_command, | 133 | .queuecommand = arcmsr_queue_command, |
@@ -128,16 +143,6 @@ static struct scsi_host_template arcmsr_scsi_host_template = { | |||
128 | .use_clustering = ENABLE_CLUSTERING, | 143 | .use_clustering = ENABLE_CLUSTERING, |
129 | .shost_attrs = arcmsr_host_attrs, | 144 | .shost_attrs = arcmsr_host_attrs, |
130 | }; | 145 | }; |
131 | #ifdef CONFIG_SCSI_ARCMSR_AER | ||
132 | static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev); | ||
133 | static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev, | ||
134 | pci_channel_state_t state); | ||
135 | |||
136 | static struct pci_error_handlers arcmsr_pci_error_handlers = { | ||
137 | .error_detected = arcmsr_pci_error_detected, | ||
138 | .slot_reset = arcmsr_pci_slot_reset, | ||
139 | }; | ||
140 | #endif | ||
141 | static struct pci_device_id arcmsr_device_id_table[] = { | 146 | static struct pci_device_id arcmsr_device_id_table[] = { |
142 | {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)}, | 147 | {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)}, |
143 | {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)}, | 148 | {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)}, |
@@ -166,9 +171,6 @@ static struct pci_driver arcmsr_pci_driver = { | |||
166 | .probe = arcmsr_probe, | 171 | .probe = arcmsr_probe, |
167 | .remove = arcmsr_remove, | 172 | .remove = arcmsr_remove, |
168 | .shutdown = arcmsr_shutdown, | 173 | .shutdown = arcmsr_shutdown, |
169 | #ifdef CONFIG_SCSI_ARCMSR_AER | ||
170 | .err_handler = &arcmsr_pci_error_handlers, | ||
171 | #endif | ||
172 | }; | 174 | }; |
173 | 175 | ||
174 | static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) | 176 | static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) |
@@ -236,10 +238,9 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
236 | void *dma_coherent; | 238 | void *dma_coherent; |
237 | dma_addr_t dma_coherent_handle, dma_addr; | 239 | dma_addr_t dma_coherent_handle, dma_addr; |
238 | struct CommandControlBlock *ccb_tmp; | 240 | struct CommandControlBlock *ccb_tmp; |
239 | uint32_t intmask_org; | ||
240 | int i, j; | 241 | int i, j; |
241 | 242 | ||
242 | acb->pmuA = pci_ioremap_bar(pdev, 0); | 243 | acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); |
243 | if (!acb->pmuA) { | 244 | if (!acb->pmuA) { |
244 | printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", | 245 | printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", |
245 | acb->host->host_no); | 246 | acb->host->host_no); |
@@ -281,12 +282,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
281 | for (i = 0; i < ARCMSR_MAX_TARGETID; i++) | 282 | for (i = 0; i < ARCMSR_MAX_TARGETID; i++) |
282 | for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) | 283 | for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) |
283 | acb->devstate[i][j] = ARECA_RAID_GONE; | 284 | acb->devstate[i][j] = ARECA_RAID_GONE; |
284 | |||
285 | /* | ||
286 | ** here we need to tell iop 331 our ccb_tmp.HighPart | ||
287 | ** if ccb_tmp.HighPart is not zero | ||
288 | */ | ||
289 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
290 | } | 285 | } |
291 | break; | 286 | break; |
292 | 287 | ||
@@ -297,7 +292,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
297 | void __iomem *mem_base0, *mem_base1; | 292 | void __iomem *mem_base0, *mem_base1; |
298 | void *dma_coherent; | 293 | void *dma_coherent; |
299 | dma_addr_t dma_coherent_handle, dma_addr; | 294 | dma_addr_t dma_coherent_handle, dma_addr; |
300 | uint32_t intmask_org; | ||
301 | struct CommandControlBlock *ccb_tmp; | 295 | struct CommandControlBlock *ccb_tmp; |
302 | int i, j; | 296 | int i, j; |
303 | 297 | ||
@@ -333,11 +327,13 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
333 | reg = (struct MessageUnit_B *)(dma_coherent + | 327 | reg = (struct MessageUnit_B *)(dma_coherent + |
334 | ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); | 328 | ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); |
335 | acb->pmuB = reg; | 329 | acb->pmuB = reg; |
336 | mem_base0 = pci_ioremap_bar(pdev, 0); | 330 | mem_base0 = ioremap(pci_resource_start(pdev, 0), |
331 | pci_resource_len(pdev, 0)); | ||
337 | if (!mem_base0) | 332 | if (!mem_base0) |
338 | goto out; | 333 | goto out; |
339 | 334 | ||
340 | mem_base1 = pci_ioremap_bar(pdev, 2); | 335 | mem_base1 = ioremap(pci_resource_start(pdev, 2), |
336 | pci_resource_len(pdev, 2)); | ||
341 | if (!mem_base1) { | 337 | if (!mem_base1) { |
342 | iounmap(mem_base0); | 338 | iounmap(mem_base0); |
343 | goto out; | 339 | goto out; |
@@ -357,12 +353,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
357 | for (i = 0; i < ARCMSR_MAX_TARGETID; i++) | 353 | for (i = 0; i < ARCMSR_MAX_TARGETID; i++) |
358 | for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) | 354 | for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) |
359 | acb->devstate[i][j] = ARECA_RAID_GOOD; | 355 | acb->devstate[i][j] = ARECA_RAID_GOOD; |
360 | |||
361 | /* | ||
362 | ** here we need to tell iop 331 our ccb_tmp.HighPart | ||
363 | ** if ccb_tmp.HighPart is not zero | ||
364 | */ | ||
365 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
366 | } | 356 | } |
367 | break; | 357 | break; |
368 | } | 358 | } |
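Editor's note: in arcmsr_alloc_ccb_pool() the BAR mappings switch from pci_ioremap_bar() to a plain ioremap() over pci_resource_start()/pci_resource_len(), and the premature arcmsr_disable_outbound_ints() call, whose result was discarded, is dropped. A minimal sketch of the mapping idiom, assuming a hypothetical map_bar() helper:

        #include <linux/pci.h>
        #include <linux/io.h>

        /* ioremap() over the raw BAR start/length is what the hunk above does;
         * the mapping is released with iounmap() on teardown. */
        static void __iomem *map_bar(struct pci_dev *pdev, int bar)
        {
                return ioremap(pci_resource_start(pdev, bar),
                               pci_resource_len(pdev, bar));
        }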
@@ -374,6 +364,88 @@ out: | |||
374 | sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); | 364 | sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); |
375 | return -ENOMEM; | 365 | return -ENOMEM; |
376 | } | 366 | } |
367 | static void arcmsr_message_isr_bh_fn(struct work_struct *work) | ||
368 | { | ||
369 | struct AdapterControlBlock *acb = container_of(work, struct AdapterControlBlock, arcmsr_do_message_isr_bh); | ||
370 | |||
371 | switch (acb->adapter_type) { | ||
372 | case ACB_ADAPTER_TYPE_A: { | ||
373 | |||
374 | struct MessageUnit_A __iomem *reg = acb->pmuA; | ||
375 | char *acb_dev_map = (char *)acb->device_map; | ||
376 | uint32_t __iomem *signature = (uint32_t __iomem *) (&reg->message_rwbuffer[0]); | ||
377 | char __iomem *devicemap = (char __iomem *) (&reg->message_rwbuffer[21]); | ||
378 | int target, lun; | ||
379 | struct scsi_device *psdev; | ||
380 | char diff; | ||
381 | |||
382 | atomic_inc(&acb->rq_map_token); | ||
383 | if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { | ||
384 | for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { | ||
385 | diff = (*acb_dev_map)^readb(devicemap); | ||
386 | if (diff != 0) { | ||
387 | char temp; | ||
388 | *acb_dev_map = readb(devicemap); | ||
389 | temp = *acb_dev_map; | ||
390 | for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { | ||
391 | if ((temp & 0x01) == 1 && (diff & 0x01) == 1) { | ||
392 | scsi_add_device(acb->host, 0, target, lun); | ||
393 | } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) { | ||
394 | psdev = scsi_device_lookup(acb->host, 0, target, lun); | ||
395 | if (psdev != NULL) { | ||
396 | scsi_remove_device(psdev); | ||
397 | scsi_device_put(psdev); | ||
398 | } | ||
399 | } | ||
400 | temp >>= 1; | ||
401 | diff >>= 1; | ||
402 | } | ||
403 | } | ||
404 | devicemap++; | ||
405 | acb_dev_map++; | ||
406 | } | ||
407 | } | ||
408 | break; | ||
409 | } | ||
410 | |||
411 | case ACB_ADAPTER_TYPE_B: { | ||
412 | struct MessageUnit_B *reg = acb->pmuB; | ||
413 | char *acb_dev_map = (char *)acb->device_map; | ||
414 | uint32_t __iomem *signature = (uint32_t __iomem *)(®->msgcode_rwbuffer_reg[0]); | ||
415 | char __iomem *devicemap = (char __iomem *)(®->msgcode_rwbuffer_reg[21]); | ||
416 | int target, lun; | ||
417 | struct scsi_device *psdev; | ||
418 | char diff; | ||
419 | |||
420 | atomic_inc(&acb->rq_map_token); | ||
421 | if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { | ||
422 | for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { | ||
423 | diff = (*acb_dev_map)^readb(devicemap); | ||
424 | if (diff != 0) { | ||
425 | char temp; | ||
426 | *acb_dev_map = readb(devicemap); | ||
427 | temp = *acb_dev_map; | ||
428 | for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { | ||
429 | if ((temp & 0x01) == 1 && (diff & 0x01) == 1) { | ||
430 | scsi_add_device(acb->host, 0, target, lun); | ||
431 | } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) { | ||
432 | psdev = scsi_device_lookup(acb->host, 0, target, lun); | ||
433 | if (psdev != NULL) { | ||
434 | scsi_remove_device(psdev); | ||
435 | scsi_device_put(psdev); | ||
436 | } | ||
437 | } | ||
438 | temp >>= 1; | ||
439 | diff >>= 1; | ||
440 | } | ||
441 | } | ||
442 | devicemap++; | ||
443 | acb_dev_map++; | ||
444 | } | ||
445 | } | ||
446 | } | ||
447 | } | ||
448 | } | ||
377 | 449 | ||
378 | static int arcmsr_probe(struct pci_dev *pdev, | 450 | static int arcmsr_probe(struct pci_dev *pdev, |
379 | const struct pci_device_id *id) | 451 | const struct pci_device_id *id) |
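Editor's note: arcmsr_message_isr_bh_fn(), added above, runs in process context and reconciles the SCSI mid-layer's view with the per-target LUN bitmap the firmware reports after GET_CONFIG; a set bit in (cached XOR current) means a LUN appeared or vanished. A condensed, illustrative version of that bit walk (sync_target_luns is a hypothetical helper, not driver code):

        #include <scsi/scsi_device.h>
        #include <scsi/scsi_host.h>

        static void sync_target_luns(struct Scsi_Host *host, int target, u8 old, u8 cur)
        {
                u8 diff = old ^ cur;
                int lun;

                for (lun = 0; lun < 8; lun++, cur >>= 1, diff >>= 1) {
                        if (!(diff & 0x01))
                                continue;       /* no change for this LUN */
                        if (cur & 0x01) {
                                scsi_add_device(host, 0, target, lun);  /* hot add */
                        } else {
                                struct scsi_device *sdev =
                                        scsi_device_lookup(host, 0, target, lun);
                                if (sdev) {
                                        scsi_remove_device(sdev);       /* hot remove */
                                        scsi_device_put(sdev);
                                }
                        }
                }
        }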
@@ -432,17 +504,17 @@ static int arcmsr_probe(struct pci_dev *pdev, | |||
432 | ACB_F_MESSAGE_WQBUFFER_READED); | 504 | ACB_F_MESSAGE_WQBUFFER_READED); |
433 | acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; | 505 | acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; |
434 | INIT_LIST_HEAD(&acb->ccb_free_list); | 506 | INIT_LIST_HEAD(&acb->ccb_free_list); |
435 | 507 | INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn); | |
436 | error = arcmsr_alloc_ccb_pool(acb); | 508 | error = arcmsr_alloc_ccb_pool(acb); |
437 | if (error) | 509 | if (error) |
438 | goto out_release_regions; | 510 | goto out_release_regions; |
439 | 511 | ||
512 | arcmsr_iop_init(acb); | ||
440 | error = request_irq(pdev->irq, arcmsr_do_interrupt, | 513 | error = request_irq(pdev->irq, arcmsr_do_interrupt, |
441 | IRQF_SHARED, "arcmsr", acb); | 514 | IRQF_SHARED, "arcmsr", acb); |
442 | if (error) | 515 | if (error) |
443 | goto out_free_ccb_pool; | 516 | goto out_free_ccb_pool; |
444 | 517 | ||
445 | arcmsr_iop_init(acb); | ||
446 | pci_set_drvdata(pdev, host); | 518 | pci_set_drvdata(pdev, host); |
447 | if (strncmp(acb->firm_version, "V1.42", 5) >= 0) | 519 | if (strncmp(acb->firm_version, "V1.42", 5) >= 0) |
448 | host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B; | 520 | host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B; |
@@ -459,6 +531,14 @@ static int arcmsr_probe(struct pci_dev *pdev, | |||
459 | #ifdef CONFIG_SCSI_ARCMSR_AER | 531 | #ifdef CONFIG_SCSI_ARCMSR_AER |
460 | pci_enable_pcie_error_reporting(pdev); | 532 | pci_enable_pcie_error_reporting(pdev); |
461 | #endif | 533 | #endif |
534 | atomic_set(&acb->rq_map_token, 16); | ||
535 | acb->fw_state = true; | ||
536 | init_timer(&acb->eternal_timer); | ||
537 | acb->eternal_timer.expires = jiffies + msecs_to_jiffies(10*HZ); | ||
538 | acb->eternal_timer.data = (unsigned long) acb; | ||
539 | acb->eternal_timer.function = &arcmsr_request_device_map; | ||
540 | add_timer(&acb->eternal_timer); | ||
541 | |||
462 | return 0; | 542 | return 0; |
463 | out_free_sysfs: | 543 | out_free_sysfs: |
464 | out_free_irq: | 544 | out_free_irq: |
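Editor's note: probe now also starts the recurring "eternal" timer using the old (pre-timer_setup()) API: init_timer(), then expires/data/function filled in by hand, then add_timer(); each run later re-arms itself with mod_timer(). A self-contained sketch of that idiom with hypothetical names (my_dev, poll_fn):

        #include <linux/timer.h>
        #include <linux/jiffies.h>

        struct my_dev {
                struct timer_list poll_timer;
        };

        static void poll_fn(unsigned long data)
        {
                struct my_dev *dev = (struct my_dev *)data;

                /* ... poke the hardware ... then re-arm for the next period */
                mod_timer(&dev->poll_timer, jiffies + msecs_to_jiffies(6000));
        }

        static void start_polling(struct my_dev *dev)
        {
                init_timer(&dev->poll_timer);
                dev->poll_timer.expires  = jiffies + msecs_to_jiffies(10 * 1000);
                dev->poll_timer.data     = (unsigned long)dev;
                dev->poll_timer.function = poll_fn;
                add_timer(&dev->poll_timer);
        }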
@@ -518,40 +598,48 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) | |||
518 | return 0xff; | 598 | return 0xff; |
519 | } | 599 | } |
520 | 600 | ||
521 | static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) | 601 | static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) |
522 | { | 602 | { |
523 | struct MessageUnit_A __iomem *reg = acb->pmuA; | 603 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
524 | 604 | ||
525 | writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); | 605 | writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); |
526 | if (arcmsr_hba_wait_msgint_ready(acb)) | 606 | if (arcmsr_hba_wait_msgint_ready(acb)) { |
527 | printk(KERN_NOTICE | 607 | printk(KERN_NOTICE |
528 | "arcmsr%d: wait 'abort all outstanding command' timeout \n" | 608 | "arcmsr%d: wait 'abort all outstanding command' timeout \n" |
529 | , acb->host->host_no); | 609 | , acb->host->host_no); |
610 | return 0xff; | ||
611 | } | ||
612 | return 0x00; | ||
530 | } | 613 | } |
531 | 614 | ||
532 | static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) | 615 | static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) |
533 | { | 616 | { |
534 | struct MessageUnit_B *reg = acb->pmuB; | 617 | struct MessageUnit_B *reg = acb->pmuB; |
535 | 618 | ||
536 | writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg); | 619 | writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg); |
537 | if (arcmsr_hbb_wait_msgint_ready(acb)) | 620 | if (arcmsr_hbb_wait_msgint_ready(acb)) { |
538 | printk(KERN_NOTICE | 621 | printk(KERN_NOTICE |
539 | "arcmsr%d: wait 'abort all outstanding command' timeout \n" | 622 | "arcmsr%d: wait 'abort all outstanding command' timeout \n" |
540 | , acb->host->host_no); | 623 | , acb->host->host_no); |
624 | return 0xff; | ||
625 | } | ||
626 | return 0x00; | ||
541 | } | 627 | } |
542 | 628 | ||
543 | static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) | 629 | static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) |
544 | { | 630 | { |
631 | uint8_t rtnval = 0; | ||
545 | switch (acb->adapter_type) { | 632 | switch (acb->adapter_type) { |
546 | case ACB_ADAPTER_TYPE_A: { | 633 | case ACB_ADAPTER_TYPE_A: { |
547 | arcmsr_abort_hba_allcmd(acb); | 634 | rtnval = arcmsr_abort_hba_allcmd(acb); |
548 | } | 635 | } |
549 | break; | 636 | break; |
550 | 637 | ||
551 | case ACB_ADAPTER_TYPE_B: { | 638 | case ACB_ADAPTER_TYPE_B: { |
552 | arcmsr_abort_hbb_allcmd(acb); | 639 | rtnval = arcmsr_abort_hbb_allcmd(acb); |
553 | } | 640 | } |
554 | } | 641 | } |
642 | return rtnval; | ||
555 | } | 643 | } |
556 | 644 | ||
557 | static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) | 645 | static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) |
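Editor's note: the abort helpers above change from void to uint8_t so the caller can tell whether the IOP acknowledged the "abort all outstanding commands" message (0x00) or the wait timed out (0xff); arcmsr_iop_reset() propagates that value and the reworked arcmsr_bus_reset() uses it to decide whether to escalate. A tiny illustrative caller sketch, leaning on the driver's own declarations (arcmsr_reset_or_escalate is not in the patch):

        /* Sketch only: escalate to a hard reset when the soft abort timed out. */
        static void arcmsr_reset_or_escalate(struct AdapterControlBlock *acb)
        {
                if (arcmsr_abort_allcmd(acb))   /* nonzero (0xff) == message timeout */
                        arcmsr_hardware_reset(acb);
        }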
@@ -649,8 +737,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) | |||
649 | 737 | ||
650 | case ACB_ADAPTER_TYPE_A : { | 738 | case ACB_ADAPTER_TYPE_A : { |
651 | struct MessageUnit_A __iomem *reg = acb->pmuA; | 739 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
652 | orig_mask = readl(&reg->outbound_intmask)|\ | 740 | orig_mask = readl(&reg->outbound_intmask); |
653 | ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE; | ||
654 | writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ | 741 | writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ |
655 | &reg->outbound_intmask); | 742 | &reg->outbound_intmask); |
656 | } | 743 | } |
@@ -658,8 +745,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) | |||
658 | 745 | ||
659 | case ACB_ADAPTER_TYPE_B : { | 746 | case ACB_ADAPTER_TYPE_B : { |
660 | struct MessageUnit_B *reg = acb->pmuB; | 747 | struct MessageUnit_B *reg = acb->pmuB; |
661 | orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \ | 748 | orig_mask = readl(reg->iop2drv_doorbell_mask_reg); |
662 | (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); | ||
663 | writel(0, reg->iop2drv_doorbell_mask_reg); | 749 | writel(0, reg->iop2drv_doorbell_mask_reg); |
664 | } | 750 | } |
665 | break; | 751 | break; |
@@ -795,12 +881,13 @@ static void arcmsr_remove(struct pci_dev *pdev) | |||
795 | struct AdapterControlBlock *acb = | 881 | struct AdapterControlBlock *acb = |
796 | (struct AdapterControlBlock *) host->hostdata; | 882 | (struct AdapterControlBlock *) host->hostdata; |
797 | int poll_count = 0; | 883 | int poll_count = 0; |
798 | |||
799 | arcmsr_free_sysfs_attr(acb); | 884 | arcmsr_free_sysfs_attr(acb); |
800 | scsi_remove_host(host); | 885 | scsi_remove_host(host); |
886 | flush_scheduled_work(); | ||
887 | del_timer_sync(&acb->eternal_timer); | ||
888 | arcmsr_disable_outbound_ints(acb); | ||
801 | arcmsr_stop_adapter_bgrb(acb); | 889 | arcmsr_stop_adapter_bgrb(acb); |
802 | arcmsr_flush_adapter_cache(acb); | 890 | arcmsr_flush_adapter_cache(acb); |
803 | arcmsr_disable_outbound_ints(acb); | ||
804 | acb->acb_flags |= ACB_F_SCSISTOPADAPTER; | 891 | acb->acb_flags |= ACB_F_SCSISTOPADAPTER; |
805 | acb->acb_flags &= ~ACB_F_IOP_INITED; | 892 | acb->acb_flags &= ~ACB_F_IOP_INITED; |
806 | 893 | ||
@@ -841,7 +928,9 @@ static void arcmsr_shutdown(struct pci_dev *pdev) | |||
841 | struct Scsi_Host *host = pci_get_drvdata(pdev); | 928 | struct Scsi_Host *host = pci_get_drvdata(pdev); |
842 | struct AdapterControlBlock *acb = | 929 | struct AdapterControlBlock *acb = |
843 | (struct AdapterControlBlock *)host->hostdata; | 930 | (struct AdapterControlBlock *)host->hostdata; |
844 | 931 | del_timer_sync(&acb->eternal_timer); | |
932 | arcmsr_disable_outbound_ints(acb); | ||
933 | flush_scheduled_work(); | ||
845 | arcmsr_stop_adapter_bgrb(acb); | 934 | arcmsr_stop_adapter_bgrb(acb); |
846 | arcmsr_flush_adapter_cache(acb); | 935 | arcmsr_flush_adapter_cache(acb); |
847 | } | 936 | } |
@@ -861,7 +950,7 @@ static void arcmsr_module_exit(void) | |||
861 | module_init(arcmsr_module_init); | 950 | module_init(arcmsr_module_init); |
862 | module_exit(arcmsr_module_exit); | 951 | module_exit(arcmsr_module_exit); |
863 | 952 | ||
864 | static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ | 953 | static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, |
865 | u32 intmask_org) | 954 | u32 intmask_org) |
866 | { | 955 | { |
867 | u32 mask; | 956 | u32 mask; |
@@ -871,7 +960,8 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ | |||
871 | case ACB_ADAPTER_TYPE_A : { | 960 | case ACB_ADAPTER_TYPE_A : { |
872 | struct MessageUnit_A __iomem *reg = acb->pmuA; | 961 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
873 | mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | | 962 | mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | |
874 | ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); | 963 | ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE| |
964 | ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE); | ||
875 | writel(mask, &reg->outbound_intmask); | 965 | writel(mask, &reg->outbound_intmask); |
876 | acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; | 966 | acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; |
877 | } | 967 | } |
@@ -879,8 +969,10 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ | |||
879 | 969 | ||
880 | case ACB_ADAPTER_TYPE_B : { | 970 | case ACB_ADAPTER_TYPE_B : { |
881 | struct MessageUnit_B *reg = acb->pmuB; | 971 | struct MessageUnit_B *reg = acb->pmuB; |
882 | mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \ | 972 | mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | |
883 | ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE); | 973 | ARCMSR_IOP2DRV_DATA_READ_OK | |
974 | ARCMSR_IOP2DRV_CDB_DONE | | ||
975 | ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); | ||
884 | writel(mask, reg->iop2drv_doorbell_mask_reg); | 976 | writel(mask, reg->iop2drv_doorbell_mask_reg); |
885 | acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; | 977 | acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; |
886 | } | 978 | } |
@@ -1048,8 +1140,8 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) | |||
1048 | } | 1140 | } |
1049 | case ACB_ADAPTER_TYPE_B: { | 1141 | case ACB_ADAPTER_TYPE_B: { |
1050 | struct MessageUnit_B *reg = acb->pmuB; | 1142 | struct MessageUnit_B *reg = acb->pmuB; |
1051 | iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL); | 1143 | iounmap((u8 *)reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL); |
1052 | iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER); | 1144 | iounmap((u8 *)reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER); |
1053 | dma_free_coherent(&acb->pdev->dev, | 1145 | dma_free_coherent(&acb->pdev->dev, |
1054 | (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 + | 1146 | (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 + |
1055 | sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); | 1147 | sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); |
@@ -1249,13 +1341,36 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) | |||
1249 | reg->doneq_index = index; | 1341 | reg->doneq_index = index; |
1250 | } | 1342 | } |
1251 | } | 1343 | } |
1344 | /* | ||
1345 | ********************************************************************************** | ||
1346 | ** Handle a message interrupt | ||
1347 | ** | ||
1348 | ** The only message interrupt we expect is in response to a query for the current adapter config. | ||
1349 | ** We want this in order to compare the drivemap so that we can detect newly-attached drives. | ||
1350 | ********************************************************************************** | ||
1351 | */ | ||
1352 | static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) | ||
1353 | { | ||
1354 | struct MessageUnit_A *reg = acb->pmuA; | ||
1355 | |||
1356 | /*clear interrupt and message state*/ | ||
1357 | writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus); | ||
1358 | schedule_work(&acb->arcmsr_do_message_isr_bh); | ||
1359 | } | ||
1360 | static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) | ||
1361 | { | ||
1362 | struct MessageUnit_B *reg = acb->pmuB; | ||
1252 | 1363 | ||
1364 | /*clear interrupt and message state*/ | ||
1365 | writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg); | ||
1366 | schedule_work(&acb->arcmsr_do_message_isr_bh); | ||
1367 | } | ||
1253 | static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) | 1368 | static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) |
1254 | { | 1369 | { |
1255 | uint32_t outbound_intstatus; | 1370 | uint32_t outbound_intstatus; |
1256 | struct MessageUnit_A __iomem *reg = acb->pmuA; | 1371 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1257 | 1372 | ||
1258 | outbound_intstatus = readl(&reg->outbound_intstatus) & \ | 1373 | outbound_intstatus = readl(&reg->outbound_intstatus) & |
1259 | acb->outbound_int_enable; | 1374 | acb->outbound_int_enable; |
1260 | if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) { | 1375 | if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) { |
1261 | return 1; | 1376 | return 1; |
@@ -1267,6 +1382,10 @@ static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) | |||
1267 | if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { | 1382 | if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { |
1268 | arcmsr_hba_postqueue_isr(acb); | 1383 | arcmsr_hba_postqueue_isr(acb); |
1269 | } | 1384 | } |
1385 | if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { | ||
1386 | /* messenger of "driver to iop commands" */ | ||
1387 | arcmsr_hba_message_isr(acb); | ||
1388 | } | ||
1270 | return 0; | 1389 | return 0; |
1271 | } | 1390 | } |
1272 | 1391 | ||
@@ -1275,13 +1394,14 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) | |||
1275 | uint32_t outbound_doorbell; | 1394 | uint32_t outbound_doorbell; |
1276 | struct MessageUnit_B *reg = acb->pmuB; | 1395 | struct MessageUnit_B *reg = acb->pmuB; |
1277 | 1396 | ||
1278 | outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \ | 1397 | outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & |
1279 | acb->outbound_int_enable; | 1398 | acb->outbound_int_enable; |
1280 | if (!outbound_doorbell) | 1399 | if (!outbound_doorbell) |
1281 | return 1; | 1400 | return 1; |
1282 | 1401 | ||
1283 | writel(~outbound_doorbell, reg->iop2drv_doorbell_reg); | 1402 | writel(~outbound_doorbell, reg->iop2drv_doorbell_reg); |
1284 | /*in case the last action of doorbell interrupt clearance is cached, this action can push HW to write down the clear bit*/ | 1403 | /*in case the last action of doorbell interrupt clearance is cached, |
1404 | this action can push HW to write down the clear bit*/ | ||
1285 | readl(reg->iop2drv_doorbell_reg); | 1405 | readl(reg->iop2drv_doorbell_reg); |
1286 | writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg); | 1406 | writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg); |
1287 | if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { | 1407 | if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { |
@@ -1293,6 +1413,10 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) | |||
1293 | if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { | 1413 | if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { |
1294 | arcmsr_hbb_postqueue_isr(acb); | 1414 | arcmsr_hbb_postqueue_isr(acb); |
1295 | } | 1415 | } |
1416 | if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { | ||
1417 | /* messenger of "driver to iop commands" */ | ||
1418 | arcmsr_hbb_message_isr(acb); | ||
1419 | } | ||
1296 | 1420 | ||
1297 | return 0; | 1421 | return 0; |
1298 | } | 1422 | } |
@@ -1360,7 +1484,7 @@ void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb) | |||
1360 | } | 1484 | } |
1361 | } | 1485 | } |
1362 | 1486 | ||
1363 | static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | 1487 | static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, |
1364 | struct scsi_cmnd *cmd) | 1488 | struct scsi_cmnd *cmd) |
1365 | { | 1489 | { |
1366 | struct CMD_MESSAGE_FIELD *pcmdmessagefld; | 1490 | struct CMD_MESSAGE_FIELD *pcmdmessagefld; |
@@ -1398,6 +1522,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
1398 | retvalue = ARCMSR_MESSAGE_FAIL; | 1522 | retvalue = ARCMSR_MESSAGE_FAIL; |
1399 | goto message_out; | 1523 | goto message_out; |
1400 | } | 1524 | } |
1525 | |||
1526 | if (!acb->fw_state) { | ||
1527 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
1528 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
1529 | goto message_out; | ||
1530 | } | ||
1531 | |||
1401 | ptmpQbuffer = ver_addr; | 1532 | ptmpQbuffer = ver_addr; |
1402 | while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) | 1533 | while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) |
1403 | && (allxfer_len < 1031)) { | 1534 | && (allxfer_len < 1031)) { |
@@ -1444,6 +1575,12 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
1444 | retvalue = ARCMSR_MESSAGE_FAIL; | 1575 | retvalue = ARCMSR_MESSAGE_FAIL; |
1445 | goto message_out; | 1576 | goto message_out; |
1446 | } | 1577 | } |
1578 | if (!acb->fw_state) { | ||
1579 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
1580 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
1581 | goto message_out; | ||
1582 | } | ||
1583 | |||
1447 | ptmpuserbuffer = ver_addr; | 1584 | ptmpuserbuffer = ver_addr; |
1448 | user_len = pcmdmessagefld->cmdmessage.Length; | 1585 | user_len = pcmdmessagefld->cmdmessage.Length; |
1449 | memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); | 1586 | memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); |
@@ -1496,6 +1633,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
1496 | 1633 | ||
1497 | case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { | 1634 | case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { |
1498 | uint8_t *pQbuffer = acb->rqbuffer; | 1635 | uint8_t *pQbuffer = acb->rqbuffer; |
1636 | if (!acb->fw_state) { | ||
1637 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
1638 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
1639 | goto message_out; | ||
1640 | } | ||
1499 | 1641 | ||
1500 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { | 1642 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { |
1501 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; | 1643 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; |
@@ -1511,6 +1653,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
1511 | 1653 | ||
1512 | case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { | 1654 | case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { |
1513 | uint8_t *pQbuffer = acb->wqbuffer; | 1655 | uint8_t *pQbuffer = acb->wqbuffer; |
1656 | if (!acb->fw_state) { | ||
1657 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
1658 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
1659 | goto message_out; | ||
1660 | } | ||
1514 | 1661 | ||
1515 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { | 1662 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { |
1516 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; | 1663 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; |
@@ -1529,6 +1676,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
1529 | 1676 | ||
1530 | case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { | 1677 | case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { |
1531 | uint8_t *pQbuffer; | 1678 | uint8_t *pQbuffer; |
1679 | if (!acb->fw_state) { | ||
1680 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
1681 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
1682 | goto message_out; | ||
1683 | } | ||
1532 | 1684 | ||
1533 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { | 1685 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { |
1534 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; | 1686 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; |
@@ -1551,13 +1703,22 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
1551 | break; | 1703 | break; |
1552 | 1704 | ||
1553 | case ARCMSR_MESSAGE_RETURN_CODE_3F: { | 1705 | case ARCMSR_MESSAGE_RETURN_CODE_3F: { |
1706 | if (!acb->fw_state) { | ||
1707 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
1708 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
1709 | goto message_out; | ||
1710 | } | ||
1554 | pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; | 1711 | pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; |
1555 | } | 1712 | } |
1556 | break; | 1713 | break; |
1557 | 1714 | ||
1558 | case ARCMSR_MESSAGE_SAY_HELLO: { | 1715 | case ARCMSR_MESSAGE_SAY_HELLO: { |
1559 | int8_t *hello_string = "Hello! I am ARCMSR"; | 1716 | int8_t *hello_string = "Hello! I am ARCMSR"; |
1560 | 1717 | if (!acb->fw_state) { | |
1718 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
1719 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
1720 | goto message_out; | ||
1721 | } | ||
1561 | memcpy(pcmdmessagefld->messagedatabuffer, hello_string | 1722 | memcpy(pcmdmessagefld->messagedatabuffer, hello_string |
1562 | , (int16_t)strlen(hello_string)); | 1723 | , (int16_t)strlen(hello_string)); |
1563 | pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; | 1724 | pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; |
@@ -1565,10 +1726,20 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
1565 | break; | 1726 | break; |
1566 | 1727 | ||
1567 | case ARCMSR_MESSAGE_SAY_GOODBYE: | 1728 | case ARCMSR_MESSAGE_SAY_GOODBYE: |
1729 | if (!acb->fw_state) { | ||
1730 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
1731 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
1732 | goto message_out; | ||
1733 | } | ||
1568 | arcmsr_iop_parking(acb); | 1734 | arcmsr_iop_parking(acb); |
1569 | break; | 1735 | break; |
1570 | 1736 | ||
1571 | case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: | 1737 | case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: |
1738 | if (!acb->fw_state) { | ||
1739 | pcmdmessagefld->cmdmessage.ReturnCode = | ||
1740 | ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; | ||
1741 | goto message_out; | ||
1742 | } | ||
1572 | arcmsr_flush_adapter_cache(acb); | 1743 | arcmsr_flush_adapter_cache(acb); |
1573 | break; | 1744 | break; |
1574 | 1745 | ||
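Editor's note: every message-type case in arcmsr_iop_message_xfer() now begins with the same guard: once the heartbeat has cleared acb->fw_state, the ioctl is answered immediately with ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON instead of touching the hung IOP. Purely as a hypothetical refactor sketch (arcmsr_fw_alive is not in the patch, and it relies on the driver's own types from arcmsr.h), the repeated check could be hoisted into one helper:

        static bool arcmsr_fw_alive(struct AdapterControlBlock *acb,
                                    struct CMD_MESSAGE_FIELD *pcmdmessagefld)
        {
                if (acb->fw_state)
                        return true;

                pcmdmessagefld->cmdmessage.ReturnCode =
                        ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
                return false;
        }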
@@ -1651,16 +1822,57 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd, | |||
1651 | struct CommandControlBlock *ccb; | 1822 | struct CommandControlBlock *ccb; |
1652 | int target = cmd->device->id; | 1823 | int target = cmd->device->id; |
1653 | int lun = cmd->device->lun; | 1824 | int lun = cmd->device->lun; |
1654 | 1825 | uint8_t scsicmd = cmd->cmnd[0]; | |
1655 | cmd->scsi_done = done; | 1826 | cmd->scsi_done = done; |
1656 | cmd->host_scribble = NULL; | 1827 | cmd->host_scribble = NULL; |
1657 | cmd->result = 0; | 1828 | cmd->result = 0; |
1829 | |||
1830 | if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) { | ||
1831 | if (acb->devstate[target][lun] == ARECA_RAID_GONE) { | ||
1832 | cmd->result = (DID_NO_CONNECT << 16); | ||
1833 | } | ||
1834 | cmd->scsi_done(cmd); | ||
1835 | return 0; | ||
1836 | } | ||
1837 | |||
1658 | if (acb->acb_flags & ACB_F_BUS_RESET) { | 1838 | if (acb->acb_flags & ACB_F_BUS_RESET) { |
1659 | printk(KERN_NOTICE "arcmsr%d: bus reset" | 1839 | switch (acb->adapter_type) { |
1660 | " and return busy \n" | 1840 | case ACB_ADAPTER_TYPE_A: { |
1661 | , acb->host->host_no); | 1841 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1842 | uint32_t intmask_org, outbound_doorbell; | ||
1843 | |||
1844 | if ((readl(&reg->outbound_msgaddr1) & | ||
1845 | ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { | ||
1846 | printk(KERN_NOTICE "arcmsr%d: bus reset and return busy\n", | ||
1847 | acb->host->host_no); | ||
1662 | return SCSI_MLQUEUE_HOST_BUSY; | 1848 | return SCSI_MLQUEUE_HOST_BUSY; |
1663 | } | 1849 | } |
1850 | |||
1851 | acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP; | ||
1852 | printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok\n", | ||
1853 | acb->host->host_no); | ||
1854 | /* disable all outbound interrupt */ | ||
1855 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
1856 | arcmsr_get_firmware_spec(acb, 1); | ||
1857 | /*start background rebuild*/ | ||
1858 | arcmsr_start_adapter_bgrb(acb); | ||
1859 | /* clear Qbuffer if door bell ringed */ | ||
1860 | outbound_doorbell = readl(&reg->outbound_doorbell); | ||
1861 | /*clear interrupt */ | ||
1862 | writel(outbound_doorbell, &reg->outbound_doorbell); | ||
1863 | writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, | ||
1864 | &reg->inbound_doorbell); | ||
1865 | /* enable outbound Post Queue,outbound doorbell Interrupt */ | ||
1866 | arcmsr_enable_outbound_ints(acb, intmask_org); | ||
1867 | acb->acb_flags |= ACB_F_IOP_INITED; | ||
1868 | acb->acb_flags &= ~ACB_F_BUS_RESET; | ||
1869 | } | ||
1870 | break; | ||
1871 | case ACB_ADAPTER_TYPE_B: { | ||
1872 | } | ||
1873 | } | ||
1874 | } | ||
1875 | |||
1664 | if (target == 16) { | 1876 | if (target == 16) { |
1665 | /* virtual device for iop message transfer */ | 1877 | /* virtual device for iop message transfer */ |
1666 | arcmsr_handle_virtual_command(acb, cmd); | 1878 | arcmsr_handle_virtual_command(acb, cmd); |
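Editor's note: queuecommand gains two things above: cache-flush and diagnostic commands aimed at a volume the firmware reports as gone are completed immediately with DID_NO_CONNECT instead of being queued, and a type-A adapter caught mid bus reset re-initializes itself inline once the firmware signals ready. A condensed sketch of the first part (complete_if_gone is a hypothetical helper, not driver code):

        #include <scsi/scsi.h>
        #include <scsi/scsi_cmnd.h>

        static int complete_if_gone(struct scsi_cmnd *cmd, int device_gone)
        {
                u8 op = cmd->cmnd[0];

                if (op != SYNCHRONIZE_CACHE && op != SEND_DIAGNOSTIC)
                        return 0;               /* queue it normally */

                if (device_gone)
                        cmd->result = (DID_NO_CONNECT << 16);
                cmd->scsi_done(cmd);            /* finish it right away */
                return 1;
        }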
@@ -1699,21 +1911,25 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd, | |||
1699 | return 0; | 1911 | return 0; |
1700 | } | 1912 | } |
1701 | 1913 | ||
1702 | static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) | 1914 | static void *arcmsr_get_hba_config(struct AdapterControlBlock *acb, int mode) |
1703 | { | 1915 | { |
1704 | struct MessageUnit_A __iomem *reg = acb->pmuA; | 1916 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1705 | char *acb_firm_model = acb->firm_model; | 1917 | char *acb_firm_model = acb->firm_model; |
1706 | char *acb_firm_version = acb->firm_version; | 1918 | char *acb_firm_version = acb->firm_version; |
1919 | char *acb_device_map = acb->device_map; | ||
1707 | char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); | 1920 | char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); |
1708 | char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); | 1921 | char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); |
1922 | char __iomem *iop_device_map = (char __iomem *) (&reg->message_rwbuffer[21]); | ||
1709 | int count; | 1923 | int count; |
1710 | 1924 | ||
1711 | writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); | 1925 | writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); |
1712 | if (arcmsr_hba_wait_msgint_ready(acb)) { | 1926 | if (arcmsr_hba_wait_msgint_ready(acb)) { |
1713 | printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ | 1927 | printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ |
1714 | miscellaneous data' timeout \n", acb->host->host_no); | 1928 | miscellaneous data' timeout \n", acb->host->host_no); |
1929 | return NULL; | ||
1715 | } | 1930 | } |
1716 | 1931 | ||
1932 | if (mode == 1) { | ||
1717 | count = 8; | 1933 | count = 8; |
1718 | while (count) { | 1934 | while (count) { |
1719 | *acb_firm_model = readb(iop_firm_model); | 1935 | *acb_firm_model = readb(iop_firm_model); |
@@ -1730,34 +1946,48 @@ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) | |||
1730 | count--; | 1946 | count--; |
1731 | } | 1947 | } |
1732 | 1948 | ||
1949 | count = 16; | ||
1950 | while (count) { | ||
1951 | *acb_device_map = readb(iop_device_map); | ||
1952 | acb_device_map++; | ||
1953 | iop_device_map++; | ||
1954 | count--; | ||
1955 | } | ||
1956 | |||
1733 | printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n" | 1957 | printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n" |
1734 | , acb->host->host_no | 1958 | , acb->host->host_no |
1735 | , acb->firm_version); | 1959 | , acb->firm_version); |
1736 | 1960 | acb->signature = readl(&reg->message_rwbuffer[0]); | |
1737 | acb->firm_request_len = readl(&reg->message_rwbuffer[1]); | 1961 | acb->firm_request_len = readl(&reg->message_rwbuffer[1]); |
1738 | acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]); | 1962 | acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]); |
1739 | acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]); | 1963 | acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]); |
1740 | acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]); | 1964 | acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]); |
1741 | } | 1965 | } |
1742 | 1966 | return reg->message_rwbuffer; | |
1743 | static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) | 1967 | } |
1968 | static void __iomem *arcmsr_get_hbb_config(struct AdapterControlBlock *acb, int mode) | ||
1744 | { | 1969 | { |
1745 | struct MessageUnit_B *reg = acb->pmuB; | 1970 | struct MessageUnit_B *reg = acb->pmuB; |
1746 | uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg; | 1971 | uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg; |
1747 | char *acb_firm_model = acb->firm_model; | 1972 | char *acb_firm_model = acb->firm_model; |
1748 | char *acb_firm_version = acb->firm_version; | 1973 | char *acb_firm_version = acb->firm_version; |
1974 | char *acb_device_map = acb->device_map; | ||
1749 | char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]); | 1975 | char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]); |
1750 | /*firm_model,15,60-67*/ | 1976 | /*firm_model,15,60-67*/ |
1751 | char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]); | 1977 | char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]); |
1752 | /*firm_version,17,68-83*/ | 1978 | /*firm_version,17,68-83*/ |
1979 | char __iomem *iop_device_map = (char __iomem *) (&lrwbuffer[21]); | ||
1980 | /*firm_version,21,84-99*/ | ||
1753 | int count; | 1981 | int count; |
1754 | 1982 | ||
1755 | writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg); | 1983 | writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg); |
1756 | if (arcmsr_hbb_wait_msgint_ready(acb)) { | 1984 | if (arcmsr_hbb_wait_msgint_ready(acb)) { |
1757 | printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ | 1985 | printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ |
1758 | miscellaneous data' timeout \n", acb->host->host_no); | 1986 | miscellaneous data' timeout \n", acb->host->host_no); |
1987 | return NULL; | ||
1759 | } | 1988 | } |
1760 | 1989 | ||
1990 | if (mode == 1) { | ||
1761 | count = 8; | 1991 | count = 8; |
1762 | while (count) | 1992 | while (count) |
1763 | { | 1993 | { |
@@ -1776,11 +2006,20 @@ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) | |||
1776 | count--; | 2006 | count--; |
1777 | } | 2007 | } |
1778 | 2008 | ||
2009 | count = 16; | ||
2010 | while (count) { | ||
2011 | *acb_device_map = readb(iop_device_map); | ||
2012 | acb_device_map++; | ||
2013 | iop_device_map++; | ||
2014 | count--; | ||
2015 | } | ||
2016 | |||
1779 | printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", | 2017 | printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", |
1780 | acb->host->host_no, | 2018 | acb->host->host_no, |
1781 | acb->firm_version); | 2019 | acb->firm_version); |
1782 | 2020 | ||
1783 | lrwbuffer++; | 2021 | acb->signature = readl(lrwbuffer++); |
2022 | /*firm_signature,1,00-03*/ | ||
1784 | acb->firm_request_len = readl(lrwbuffer++); | 2023 | acb->firm_request_len = readl(lrwbuffer++); |
1785 | /*firm_request_len,1,04-07*/ | 2024 | /*firm_request_len,1,04-07*/ |
1786 | acb->firm_numbers_queue = readl(lrwbuffer++); | 2025 | acb->firm_numbers_queue = readl(lrwbuffer++); |
@@ -1790,20 +2029,23 @@ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) | |||
1790 | acb->firm_hd_channels = readl(lrwbuffer); | 2029 | acb->firm_hd_channels = readl(lrwbuffer); |
1791 | /*firm_ide_channels,4,16-19*/ | 2030 | /*firm_ide_channels,4,16-19*/ |
1792 | } | 2031 | } |
1793 | 2032 | return reg->msgcode_rwbuffer_reg; | |
1794 | static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) | 2033 | } |
2034 | static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode) | ||
1795 | { | 2035 | { |
2036 | void *rtnval = 0; | ||
1796 | switch (acb->adapter_type) { | 2037 | switch (acb->adapter_type) { |
1797 | case ACB_ADAPTER_TYPE_A: { | 2038 | case ACB_ADAPTER_TYPE_A: { |
1798 | arcmsr_get_hba_config(acb); | 2039 | rtnval = arcmsr_get_hba_config(acb, mode); |
1799 | } | 2040 | } |
1800 | break; | 2041 | break; |
1801 | 2042 | ||
1802 | case ACB_ADAPTER_TYPE_B: { | 2043 | case ACB_ADAPTER_TYPE_B: { |
1803 | arcmsr_get_hbb_config(acb); | 2044 | rtnval = arcmsr_get_hbb_config(acb, mode); |
1804 | } | 2045 | } |
1805 | break; | 2046 | break; |
1806 | } | 2047 | } |
2048 | return rtnval; | ||
1807 | } | 2049 | } |
1808 | 2050 | ||
1809 | static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, | 2051 | static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, |
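Editor's note: arcmsr_get_firmware_spec() and the per-chip config readers now take a mode flag and return the message rwbuffer pointer (NULL when the GET_CONFIG handshake times out); with mode == 1 they copy model, version, device map and geometry as before, otherwise they skip the copy and only perform the handshake. A minimal usage sketch using the driver's helper (the wrapper name is hypothetical):

        /* Sketch only: treat a NULL return as a firmware handshake timeout. */
        static int arcmsr_refresh_firmware_info(struct AdapterControlBlock *acb)
        {
                if (!arcmsr_get_firmware_spec(acb, 1))  /* 1 = full copy of fw info */
                        return -ETIMEDOUT;
                return 0;
        }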
@@ -2043,6 +2285,66 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) | |||
2043 | } | 2285 | } |
2044 | } | 2286 | } |
2045 | 2287 | ||
2288 | static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb) | ||
2289 | { | ||
2290 | struct MessageUnit_A __iomem *reg = acb->pmuA; | ||
2291 | |||
2292 | if (unlikely(atomic_read(&acb->rq_map_token) == 0)) { | ||
2293 | acb->fw_state = false; | ||
2294 | } else { | ||
2295 | /*to prevent rq_map_token from changing by other interrupt, then | ||
2296 | avoid the dead-lock*/ | ||
2297 | acb->fw_state = true; | ||
2298 | atomic_dec(&acb->rq_map_token); | ||
2299 | if (!(acb->fw_state) || | ||
2300 | (acb->ante_token_value == atomic_read(&acb->rq_map_token))) { | ||
2301 | atomic_set(&acb->rq_map_token, 16); | ||
2302 | } | ||
2303 | acb->ante_token_value = atomic_read(&acb->rq_map_token); | ||
2304 | writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); | ||
2305 | } | ||
2306 | mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000)); | ||
2307 | return; | ||
2308 | } | ||
2309 | |||
2310 | static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb) | ||
2311 | { | ||
2312 | struct MessageUnit_B __iomem *reg = acb->pmuB; | ||
2313 | |||
2314 | if (unlikely(atomic_read(&acb->rq_map_token) == 0)) { | ||
2315 | acb->fw_state = false; | ||
2316 | } else { | ||
2317 | /*to prevent rq_map_token from changing by other interrupt, then | ||
2318 | avoid the dead-lock*/ | ||
2319 | acb->fw_state = true; | ||
2320 | atomic_dec(&acb->rq_map_token); | ||
2321 | if (!(acb->fw_state) || | ||
2322 | (acb->ante_token_value == atomic_read(&acb->rq_map_token))) { | ||
2323 | atomic_set(&acb->rq_map_token, 16); | ||
2324 | } | ||
2325 | acb->ante_token_value = atomic_read(&acb->rq_map_token); | ||
2326 | writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg); | ||
2327 | } | ||
2328 | mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000)); | ||
2329 | return; | ||
2330 | } | ||
2331 | |||
2332 | static void arcmsr_request_device_map(unsigned long pacb) | ||
2333 | { | ||
2334 | struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb; | ||
2335 | |||
2336 | switch (acb->adapter_type) { | ||
2337 | case ACB_ADAPTER_TYPE_A: { | ||
2338 | arcmsr_request_hba_device_map(acb); | ||
2339 | } | ||
2340 | break; | ||
2341 | case ACB_ADAPTER_TYPE_B: { | ||
2342 | arcmsr_request_hbb_device_map(acb); | ||
2343 | } | ||
2344 | break; | ||
2345 | } | ||
2346 | } | ||
2347 | |||
2046 | static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) | 2348 | static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) |
2047 | { | 2349 | { |
2048 | struct MessageUnit_A __iomem *reg = acb->pmuA; | 2350 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
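Editor's note: arcmsr_request_device_map() is the timer half of the new firmware heartbeat. Every 6 seconds it decrements rq_map_token and sends another GET_CONFIG; the message-interrupt bottom half increments the token when a reply arrives, and when ante_token_value matches the current value the budget is topped back up to 16. If the token ever drains to zero, fw_state is cleared and the ioctl paths start reporting BUS_HANG_ON. A condensed in-driver sketch of one tick (heartbeat_tick is an illustrative name; the fields are the driver's own):

        static void heartbeat_tick(struct AdapterControlBlock *acb)
        {
                if (atomic_read(&acb->rq_map_token) == 0) {
                        acb->fw_state = false;          /* no replies for 16 periods */
                } else {
                        acb->fw_state = true;
                        atomic_dec(&acb->rq_map_token);
                        if (acb->ante_token_value == atomic_read(&acb->rq_map_token))
                                atomic_set(&acb->rq_map_token, 16);
                        acb->ante_token_value = atomic_read(&acb->rq_map_token);
                        /* re-issue the GET_CONFIG message to the IOP here */
                }
                mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000));
        }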
@@ -2121,6 +2423,60 @@ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) | |||
2121 | return; | 2423 | return; |
2122 | } | 2424 | } |
2123 | 2425 | ||
2426 | static void arcmsr_hardware_reset(struct AdapterControlBlock *acb) | ||
2427 | { | ||
2428 | uint8_t value[64]; | ||
2429 | int i; | ||
2430 | |||
2431 | /* backup pci config data */ | ||
2432 | for (i = 0; i < 64; i++) { | ||
2433 | pci_read_config_byte(acb->pdev, i, &value[i]); | ||
2434 | } | ||
2435 | /* hardware reset signal */ | ||
2436 | pci_write_config_byte(acb->pdev, 0x84, 0x20); | ||
2437 | msleep(1000); | ||
2438 | /* write back pci config data */ | ||
2439 | for (i = 0; i < 64; i++) { | ||
2440 | pci_write_config_byte(acb->pdev, i, value[i]); | ||
2441 | } | ||
2442 | msleep(1000); | ||
2443 | return; | ||
2444 | } | ||
2445 | /* | ||
2446 | **************************************************************************** | ||
2447 | **************************************************************************** | ||
2448 | */ | ||
2449 | #ifdef CONFIG_SCSI_ARCMSR_RESET | ||
2450 | int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd) | ||
2451 | { | ||
2452 | struct Scsi_Host *shost = NULL; | ||
2453 | spinlock_t *host_lock = NULL; | ||
2454 | int i, isleep; | ||
2455 | |||
2456 | shost = cmd->device->host; | ||
2457 | host_lock = shost->host_lock; | ||
2458 | |||
2459 | printk(KERN_NOTICE "Host %d bus reset over, sleep %d seconds (busy %d, can queue %d) ...........\n", | ||
2460 | shost->host_no, sleeptime, shost->host_busy, shost->can_queue); | ||
2461 | isleep = sleeptime / 10; | ||
2462 | spin_unlock_irq(host_lock); | ||
2463 | if (isleep > 0) { | ||
2464 | for (i = 0; i < isleep; i++) { | ||
2465 | msleep(10000); | ||
2466 | printk(KERN_NOTICE "^%d^\n", i); | ||
2467 | } | ||
2468 | } | ||
2469 | |||
2470 | isleep = sleeptime % 10; | ||
2471 | if (isleep > 0) { | ||
2472 | msleep(isleep * 1000); | ||
2473 | printk(KERN_NOTICE "^v^\n"); | ||
2474 | } | ||
2475 | spin_lock_irq(host_lock); | ||
2476 | printk(KERN_NOTICE "***** wake up *****\n"); | ||
2477 | return 0; | ||
2478 | } | ||
2479 | #endif | ||
2124 | static void arcmsr_iop_init(struct AdapterControlBlock *acb) | 2480 | static void arcmsr_iop_init(struct AdapterControlBlock *acb) |
2125 | { | 2481 | { |
2126 | uint32_t intmask_org; | 2482 | uint32_t intmask_org; |
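Editor's note: arcmsr_hardware_reset() saves the first 64 bytes of PCI config space, pokes the adapter's reset register at config offset 0x84, waits a second, and writes the saved bytes back because the reset clobbers the standard header; arcmsr_sleep_for_bus_reset() then drops the host lock while it sleeps for the module-parameter-controlled sleeptime. A generic sketch of the save/poke/restore pattern (pci_cfg_byte_reset, reset_reg and reset_val are placeholders, not driver names):

        #include <linux/pci.h>
        #include <linux/delay.h>

        static void pci_cfg_byte_reset(struct pci_dev *pdev, int reset_reg, u8 reset_val)
        {
                u8 cfg[64];
                int i;

                for (i = 0; i < 64; i++)        /* back up the standard header */
                        pci_read_config_byte(pdev, i, &cfg[i]);

                pci_write_config_byte(pdev, reset_reg, reset_val);
                msleep(1000);                   /* give the controller time to reset */

                for (i = 0; i < 64; i++)        /* restore what the reset wiped */
                        pci_write_config_byte(pdev, i, cfg[i]);
                msleep(1000);
        }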
@@ -2129,7 +2485,7 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb) | |||
2129 | intmask_org = arcmsr_disable_outbound_ints(acb); | 2485 | intmask_org = arcmsr_disable_outbound_ints(acb); |
2130 | arcmsr_wait_firmware_ready(acb); | 2486 | arcmsr_wait_firmware_ready(acb); |
2131 | arcmsr_iop_confirm(acb); | 2487 | arcmsr_iop_confirm(acb); |
2132 | arcmsr_get_firmware_spec(acb); | 2488 | arcmsr_get_firmware_spec(acb, 1); |
2133 | /*start background rebuild*/ | 2489 | /*start background rebuild*/ |
2134 | arcmsr_start_adapter_bgrb(acb); | 2490 | arcmsr_start_adapter_bgrb(acb); |
2135 | /* empty doorbell Qbuffer if door bell ringed */ | 2491 | /* empty doorbell Qbuffer if door bell ringed */ |
@@ -2140,51 +2496,110 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb) | |||
2140 | acb->acb_flags |= ACB_F_IOP_INITED; | 2496 | acb->acb_flags |= ACB_F_IOP_INITED; |
2141 | } | 2497 | } |
2142 | 2498 | ||
2143 | static void arcmsr_iop_reset(struct AdapterControlBlock *acb) | 2499 | static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb) |
2144 | { | 2500 | { |
2145 | struct CommandControlBlock *ccb; | 2501 | struct CommandControlBlock *ccb; |
2146 | uint32_t intmask_org; | 2502 | uint32_t intmask_org; |
2503 | uint8_t rtnval = 0x00; | ||
2147 | int i = 0; | 2504 | int i = 0; |
2148 | 2505 | ||
2149 | if (atomic_read(&acb->ccboutstandingcount) != 0) { | 2506 | if (atomic_read(&acb->ccboutstandingcount) != 0) { |
2507 | /* disable all outbound interrupt */ | ||
2508 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
2150 | /* talk to iop 331 outstanding command aborted */ | 2509 | /* talk to iop 331 outstanding command aborted */ |
2151 | arcmsr_abort_allcmd(acb); | 2510 | rtnval = arcmsr_abort_allcmd(acb); |
2152 | |||
2153 | /* wait for 3 sec for all command aborted*/ | 2511 | /* wait for 3 sec for all command aborted*/ |
2154 | ssleep(3); | 2512 | ssleep(3); |
2155 | |||
2156 | /* disable all outbound interrupt */ | ||
2157 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
2158 | /* clear all outbound posted Q */ | 2513 | /* clear all outbound posted Q */ |
2159 | arcmsr_done4abort_postqueue(acb); | 2514 | arcmsr_done4abort_postqueue(acb); |
2160 | for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { | 2515 | for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { |
2161 | ccb = acb->pccb_pool[i]; | 2516 | ccb = acb->pccb_pool[i]; |
2162 | if (ccb->startdone == ARCMSR_CCB_START) { | 2517 | if (ccb->startdone == ARCMSR_CCB_START) { |
2163 | ccb->startdone = ARCMSR_CCB_ABORTED; | ||
2164 | arcmsr_ccb_complete(ccb, 1); | 2518 | arcmsr_ccb_complete(ccb, 1); |
2165 | } | 2519 | } |
2166 | } | 2520 | } |
2521 | atomic_set(&acb->ccboutstandingcount, 0); | ||
2167 | /* enable all outbound interrupt */ | 2522 | /* enable all outbound interrupt */ |
2168 | arcmsr_enable_outbound_ints(acb, intmask_org); | 2523 | arcmsr_enable_outbound_ints(acb, intmask_org); |
2524 | return rtnval; | ||
2169 | } | 2525 | } |
2526 | return rtnval; | ||
2170 | } | 2527 | } |
2171 | 2528 | ||
2172 | static int arcmsr_bus_reset(struct scsi_cmnd *cmd) | 2529 | static int arcmsr_bus_reset(struct scsi_cmnd *cmd) |
2173 | { | 2530 | { |
2174 | struct AdapterControlBlock *acb = | 2531 | struct AdapterControlBlock *acb = |
2175 | (struct AdapterControlBlock *)cmd->device->host->hostdata; | 2532 | (struct AdapterControlBlock *)cmd->device->host->hostdata; |
2176 | int i; | 2533 | int retry = 0; |
2177 | 2534 | ||
2178 | acb->num_resets++; | 2535 | if (acb->acb_flags & ACB_F_BUS_RESET) |
2536 | return SUCCESS; | ||
2537 | |||
2538 | printk(KERN_NOTICE "arcmsr%d: bus reset ..... \n", acb->adapter_index); | ||
2179 | acb->acb_flags |= ACB_F_BUS_RESET; | 2539 | acb->acb_flags |= ACB_F_BUS_RESET; |
2180 | for (i = 0; i < 400; i++) { | 2540 | acb->num_resets++; |
2181 | if (!atomic_read(&acb->ccboutstandingcount)) | 2541 | while (atomic_read(&acb->ccboutstandingcount) != 0 && retry < 4) { |
2542 | arcmsr_interrupt(acb); | ||
2543 | retry++; | ||
2544 | } | ||
2545 | |||
2546 | if (arcmsr_iop_reset(acb)) { | ||
2547 | switch (acb->adapter_type) { | ||
2548 | case ACB_ADAPTER_TYPE_A: { | ||
2549 | printk(KERN_NOTICE "arcmsr%d: do hardware bus reset, num_resets = %d num_aborts = %d \n", | ||
2550 | acb->adapter_index, acb->num_resets, acb->num_aborts); | ||
2551 | arcmsr_hardware_reset(acb); | ||
2552 | acb->acb_flags |= ACB_F_FIRMWARE_TRAP; | ||
2553 | acb->acb_flags &= ~ACB_F_IOP_INITED; | ||
2554 | #ifdef CONFIG_SCSI_ARCMSR_RESET | ||
2555 | struct MessageUnit_A __iomem *reg = acb->pmuA; | ||
2556 | uint32_t intmask_org, outbound_doorbell; | ||
2557 | int retry_count = 0; | ||
2558 | sleep_again: | ||
2559 | arcmsr_sleep_for_bus_reset(cmd); | ||
2560 | if ((readl(®->outbound_msgaddr1) & | ||
2561 | ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { | ||
2562 | printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry=%d \n", | ||
2563 | acb->host->host_no, retry_count); | ||
2564 | if (retry_count > retrycount) { | ||
2565 | printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry aborted \n", | ||
2566 | acb->host->host_no); | ||
2567 | return SUCCESS; | ||
2568 | } | ||
2569 | retry_count++; | ||
2570 | goto sleep_again; | ||
2571 | } | ||
2572 | acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP; | ||
2573 | acb->acb_flags |= ACB_F_IOP_INITED; | ||
2574 | acb->acb_flags &= ~ACB_F_BUS_RESET; | ||
2575 | printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok \n", | ||
2576 | acb->host->host_no); | ||
2577 | /* disable all outbound interrupt */ | ||
2578 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
2579 | arcmsr_get_firmware_spec(acb, 1); | ||
2580 | /*start background rebuild*/ | ||
2581 | arcmsr_start_adapter_bgrb(acb); | ||
2582 | /* clear Qbuffer if door bell ringed */ | ||
2583 | outbound_doorbell = readl(®->outbound_doorbell); | ||
2584 | writel(outbound_doorbell, ®->outbound_doorbell); /*clear interrupt */ | ||
2585 | writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); | ||
2586 | /* enable outbound Post Queue,outbound doorbell Interrupt */ | ||
2587 | arcmsr_enable_outbound_ints(acb, intmask_org); | ||
2588 | atomic_set(&acb->rq_map_token, 16); | ||
2589 | init_timer(&acb->eternal_timer); | ||
2590 | acb->eternal_timer.expires = jiffies + msecs_to_jiffies(20*HZ); | ||
2591 | acb->eternal_timer.data = (unsigned long) acb; | ||
2592 | acb->eternal_timer.function = &arcmsr_request_device_map; | ||
2593 | add_timer(&acb->eternal_timer); | ||
2594 | #endif | ||
2595 | } | ||
2182 | break; | 2596 | break; |
2183 | arcmsr_interrupt(acb);/* FIXME: need spinlock */ | 2597 | case ACB_ADAPTER_TYPE_B: { |
2184 | msleep(25); | ||
2185 | } | 2598 | } |
2186 | arcmsr_iop_reset(acb); | 2599 | } |
2600 | } else { | ||
2187 | acb->acb_flags &= ~ACB_F_BUS_RESET; | 2601 | acb->acb_flags &= ~ACB_F_BUS_RESET; |
2602 | } | ||
2188 | return SUCCESS; | 2603 | return SUCCESS; |
2189 | } | 2604 | } |
2190 | 2605 | ||
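When arcmsr_iop_reset() reports that the abort did not complete cleanly, the new Type A recovery path above performs a full hardware reset, polls the outbound message register until the firmware reports ready (bounded by the `retrycount` module parameter), and then re-initialises the adapter: firmware spec, background rebuild, doorbell clearing, interrupt unmasking, and finally re-arming acb->eternal_timer so device-map polling resumes. The timer re-arm uses the open-coded timer API of this era; a generic sketch of that idiom follows (for a 20-second period the expiry is conventionally written jiffies + 20 * HZ or jiffies + msecs_to_jiffies(20000)):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	/* Sketch of the pre-timer_setup() API: fill the fields by hand,
	 * then init_timer()/add_timer(). */
	static void example_arm_timer(struct timer_list *t,
				      void (*fn)(unsigned long), unsigned long data)
	{
		init_timer(t);
		t->expires  = jiffies + 20 * HZ;   /* fire in 20 seconds */
		t->data     = data;                /* passed to fn() on expiry */
		t->function = fn;
		add_timer(t);
	}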
@@ -2277,98 +2692,3 @@ static const char *arcmsr_info(struct Scsi_Host *host) | |||
2277 | ARCMSR_DRIVER_VERSION); | 2692 | ARCMSR_DRIVER_VERSION); |
2278 | return buf; | 2693 | return buf; |
2279 | } | 2694 | } |
2280 | #ifdef CONFIG_SCSI_ARCMSR_AER | ||
2281 | static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev) | ||
2282 | { | ||
2283 | struct Scsi_Host *host = pci_get_drvdata(pdev); | ||
2284 | struct AdapterControlBlock *acb = | ||
2285 | (struct AdapterControlBlock *) host->hostdata; | ||
2286 | uint32_t intmask_org; | ||
2287 | int i, j; | ||
2288 | |||
2289 | if (pci_enable_device(pdev)) { | ||
2290 | return PCI_ERS_RESULT_DISCONNECT; | ||
2291 | } | ||
2292 | pci_set_master(pdev); | ||
2293 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
2294 | acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | | ||
2295 | ACB_F_MESSAGE_RQBUFFER_CLEARED | | ||
2296 | ACB_F_MESSAGE_WQBUFFER_READED); | ||
2297 | acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; | ||
2298 | for (i = 0; i < ARCMSR_MAX_TARGETID; i++) | ||
2299 | for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) | ||
2300 | acb->devstate[i][j] = ARECA_RAID_GONE; | ||
2301 | |||
2302 | arcmsr_wait_firmware_ready(acb); | ||
2303 | arcmsr_iop_confirm(acb); | ||
2304 | /* disable all outbound interrupt */ | ||
2305 | arcmsr_get_firmware_spec(acb); | ||
2306 | /*start background rebuild*/ | ||
2307 | arcmsr_start_adapter_bgrb(acb); | ||
2308 | /* empty doorbell Qbuffer if door bell ringed */ | ||
2309 | arcmsr_clear_doorbell_queue_buffer(acb); | ||
2310 | arcmsr_enable_eoi_mode(acb); | ||
2311 | /* enable outbound Post Queue,outbound doorbell Interrupt */ | ||
2312 | arcmsr_enable_outbound_ints(acb, intmask_org); | ||
2313 | acb->acb_flags |= ACB_F_IOP_INITED; | ||
2314 | |||
2315 | pci_enable_pcie_error_reporting(pdev); | ||
2316 | return PCI_ERS_RESULT_RECOVERED; | ||
2317 | } | ||
2318 | |||
2319 | static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev) | ||
2320 | { | ||
2321 | struct Scsi_Host *host = pci_get_drvdata(pdev); | ||
2322 | struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata; | ||
2323 | struct CommandControlBlock *ccb; | ||
2324 | uint32_t intmask_org; | ||
2325 | int i = 0; | ||
2326 | |||
2327 | if (atomic_read(&acb->ccboutstandingcount) != 0) { | ||
2328 | /* talk to iop 331 outstanding command aborted */ | ||
2329 | arcmsr_abort_allcmd(acb); | ||
2330 | /* wait for 3 sec for all command aborted*/ | ||
2331 | ssleep(3); | ||
2332 | /* disable all outbound interrupt */ | ||
2333 | intmask_org = arcmsr_disable_outbound_ints(acb); | ||
2334 | /* clear all outbound posted Q */ | ||
2335 | arcmsr_done4abort_postqueue(acb); | ||
2336 | for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { | ||
2337 | ccb = acb->pccb_pool[i]; | ||
2338 | if (ccb->startdone == ARCMSR_CCB_START) { | ||
2339 | ccb->startdone = ARCMSR_CCB_ABORTED; | ||
2340 | arcmsr_ccb_complete(ccb, 1); | ||
2341 | } | ||
2342 | } | ||
2343 | /* enable all outbound interrupt */ | ||
2344 | arcmsr_enable_outbound_ints(acb, intmask_org); | ||
2345 | } | ||
2346 | pci_disable_device(pdev); | ||
2347 | } | ||
2348 | |||
2349 | static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev) | ||
2350 | { | ||
2351 | struct Scsi_Host *host = pci_get_drvdata(pdev); | ||
2352 | struct AdapterControlBlock *acb = \ | ||
2353 | (struct AdapterControlBlock *)host->hostdata; | ||
2354 | |||
2355 | arcmsr_stop_adapter_bgrb(acb); | ||
2356 | arcmsr_flush_adapter_cache(acb); | ||
2357 | } | ||
2358 | |||
2359 | static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev, | ||
2360 | pci_channel_state_t state) | ||
2361 | { | ||
2362 | switch (state) { | ||
2363 | case pci_channel_io_frozen: | ||
2364 | arcmsr_pci_ers_need_reset_forepart(pdev); | ||
2365 | return PCI_ERS_RESULT_NEED_RESET; | ||
2366 | case pci_channel_io_perm_failure: | ||
2367 | arcmsr_pci_ers_disconnect_forepart(pdev); | ||
2368 | return PCI_ERS_RESULT_DISCONNECT; | ||
2369 | break; | ||
2370 | default: | ||
2371 | return PCI_ERS_RESULT_NEED_RESET; | ||
2372 | } | ||
2373 | } | ||
2374 | #endif | ||
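The block deleted above was the driver's CONFIG_SCSI_ARCMSR_AER error-recovery code: a slot_reset handler that re-enabled and re-initialised the adapter, plus error_detected/forepart helpers that quiesced it. For reference, callbacks of this kind are normally wired into a PCI driver through a struct pci_error_handlers pointed to by the driver's .err_handler field; a minimal, generic sketch (callback bodies reduced to the bare decision, names illustrative) looks like this:

	#include <linux/pci.h>

	static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
						       pci_channel_state_t state)
	{
		/* permanent failure: give up; otherwise ask the core for a slot reset */
		return state == pci_channel_io_perm_failure ?
			PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
	}

	static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
	{
		/* re-enable the device; real drivers also re-init their hardware here */
		return pci_enable_device(pdev) ?
			PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
	}

	static struct pci_error_handlers example_err_handler = {
		.error_detected = example_error_detected,
		.slot_reset     = example_slot_reset,
	};
	/* ...and in the pci_driver definition: .err_handler = &example_err_handler */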
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index e641922f20bc..350cbeaae160 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c | |||
@@ -167,10 +167,9 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, | |||
167 | &nonemb_cmd.dma); | 167 | &nonemb_cmd.dma); |
168 | if (nonemb_cmd.va == NULL) { | 168 | if (nonemb_cmd.va == NULL) { |
169 | SE_DEBUG(DBG_LVL_1, | 169 | SE_DEBUG(DBG_LVL_1, |
170 | "Failed to allocate memory for" | 170 | "Failed to allocate memory for mgmt_invalidate_icds\n"); |
171 | "mgmt_invalidate_icds \n"); | ||
172 | spin_unlock(&ctrl->mbox_lock); | 171 | spin_unlock(&ctrl->mbox_lock); |
173 | return -1; | 172 | return 0; |
174 | } | 173 | } |
175 | nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); | 174 | nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); |
176 | req = nonemb_cmd.va; | 175 | req = nonemb_cmd.va; |
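Besides joining the split log message, this hunk changes the allocation-failure return from -1 to 0. The hunk header shows mgmt_invalidate_icds() returns an unsigned char, so "return -1" actually handed callers 255, which a caller testing for a zero/non-zero result would not see as a failure; returning 0 makes the failure unambiguous. A tiny stand-alone C demonstration of the trap (not driver code):

	#include <stdio.h>

	/* Returning -1 from an unsigned char function yields 255. */
	static unsigned char fail_old(void) { return -1; }  /* looks "truthy" to callers */
	static unsigned char fail_new(void) { return 0; }    /* unambiguous failure */

	int main(void)
	{
		printf("old: %u  new: %u\n", fail_old(), fail_new());
		return 0;
	}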
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 0c08e185a766..3a7b3f88932f 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c | |||
@@ -84,11 +84,32 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) | |||
84 | for (i = 0; hal_mods[i]; i++) | 84 | for (i = 0; hal_mods[i]; i++) |
85 | hal_mods[i]->meminfo(cfg, &km_len, &dm_len); | 85 | hal_mods[i]->meminfo(cfg, &km_len, &dm_len); |
86 | 86 | ||
87 | dm_len += bfa_port_meminfo(); | ||
87 | 88 | ||
88 | meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len; | 89 | meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len; |
89 | meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; | 90 | meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; |
90 | } | 91 | } |
91 | 92 | ||
93 | static void | ||
94 | bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi) | ||
95 | { | ||
96 | struct bfa_port_s *port = &bfa->modules.port; | ||
97 | uint32_t dm_len; | ||
98 | uint8_t *dm_kva; | ||
99 | uint64_t dm_pa; | ||
100 | |||
101 | dm_len = bfa_port_meminfo(); | ||
102 | dm_kva = bfa_meminfo_dma_virt(mi); | ||
103 | dm_pa = bfa_meminfo_dma_phys(mi); | ||
104 | |||
105 | memset(port, 0, sizeof(struct bfa_port_s)); | ||
106 | bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod, bfa->logm); | ||
107 | bfa_port_mem_claim(port, dm_kva, dm_pa); | ||
108 | |||
109 | bfa_meminfo_dma_virt(mi) = dm_kva + dm_len; | ||
110 | bfa_meminfo_dma_phys(mi) = dm_pa + dm_len; | ||
111 | } | ||
112 | |||
92 | /** | 113 | /** |
93 | * Use this function to do attach the driver instance with the BFA | 114 | * Use this function to do attach the driver instance with the BFA |
94 | * library. This function will not trigger any HW initialization | 115 | * library. This function will not trigger any HW initialization |
@@ -140,6 +161,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
140 | for (i = 0; hal_mods[i]; i++) | 161 | for (i = 0; hal_mods[i]; i++) |
141 | hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev); | 162 | hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev); |
142 | 163 | ||
164 | bfa_com_port_attach(bfa, meminfo); | ||
143 | } | 165 | } |
144 | 166 | ||
145 | /** | 167 | /** |
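The bfa change pairs a reservation with a claim: bfa_cfg_get_meminfo() now adds bfa_port_meminfo() to the DMA length it reports, and the new bfa_com_port_attach() later takes that slice by reading the meminfo block's current kva/pa cursors, handing them to the port module via bfa_port_mem_claim(), and advancing both cursors past the claimed length so the next consumer starts behind it. The cursor-advance idiom in isolation, with hypothetical names standing in for bfa_meminfo_dma_virt()/bfa_meminfo_dma_phys(), is:

	#include <linux/types.h>

	/* Hypothetical cursor pair over a preallocated DMA region. */
	struct dma_cursor {
		u8  *kva;   /* next free kernel virtual address */
		u64  pa;    /* matching DMA/physical address */
	};

	/* Claim 'len' bytes for one module and advance the cursors so the
	 * next claimer starts right after it. */
	static void *dma_cursor_claim(struct dma_cursor *cur, u32 len, u64 *pa)
	{
		void *kva = cur->kva;

		*pa = cur->pa;
		cur->kva += len;
		cur->pa  += len;
		return kva;
	}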
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 0435d044c9da..b0c576f84b28 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c | |||
@@ -114,12 +114,13 @@ static int hba_count = 0; | |||
114 | 114 | ||
115 | static struct class *adpt_sysfs_class; | 115 | static struct class *adpt_sysfs_class; |
116 | 116 | ||
117 | static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long); | ||
117 | #ifdef CONFIG_COMPAT | 118 | #ifdef CONFIG_COMPAT |
118 | static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long); | 119 | static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long); |
119 | #endif | 120 | #endif |
120 | 121 | ||
121 | static const struct file_operations adpt_fops = { | 122 | static const struct file_operations adpt_fops = { |
122 | .ioctl = adpt_ioctl, | 123 | .unlocked_ioctl = adpt_unlocked_ioctl, |
123 | .open = adpt_open, | 124 | .open = adpt_open, |
124 | .release = adpt_close, | 125 | .release = adpt_close, |
125 | #ifdef CONFIG_COMPAT | 126 | #ifdef CONFIG_COMPAT |
@@ -2069,8 +2070,7 @@ static int adpt_system_info(void __user *buffer) | |||
2069 | return 0; | 2070 | return 0; |
2070 | } | 2071 | } |
2071 | 2072 | ||
2072 | static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, | 2073 | static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) |
2073 | ulong arg) | ||
2074 | { | 2074 | { |
2075 | int minor; | 2075 | int minor; |
2076 | int error = 0; | 2076 | int error = 0; |
@@ -2153,6 +2153,20 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, | |||
2153 | return error; | 2153 | return error; |
2154 | } | 2154 | } |
2155 | 2155 | ||
2156 | static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg) | ||
2157 | { | ||
2158 | struct inode *inode; | ||
2159 | long ret; | ||
2160 | |||
2161 | inode = file->f_dentry->d_inode; | ||
2162 | |||
2163 | lock_kernel(); | ||
2164 | ret = adpt_ioctl(inode, file, cmd, arg); | ||
2165 | unlock_kernel(); | ||
2166 | |||
2167 | return ret; | ||
2168 | } | ||
2169 | |||
2156 | #ifdef CONFIG_COMPAT | 2170 | #ifdef CONFIG_COMPAT |
2157 | static long compat_adpt_ioctl(struct file *file, | 2171 | static long compat_adpt_ioctl(struct file *file, |
2158 | unsigned int cmd, unsigned long arg) | 2172 | unsigned int cmd, unsigned long arg) |
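dpt_i2o keeps the body of adpt_ioctl() untouched and reaches it through the new adpt_unlocked_ioctl() wrapper, so the file_operations entry can move from .ioctl to .unlocked_ioctl while the handler still runs under the Big Kernel Lock it implicitly relied on. The generic shape of that push-down, with legacy_ioctl() standing in for a driver's original inode-taking handler (lock_kernel()/unlock_kernel() come from <linux/smp_lock.h> in kernels of this vintage), is:

	#include <linux/fs.h>
	#include <linux/smp_lock.h>

	/* Stand-in for the driver's original handler. */
	static int legacy_ioctl(struct inode *inode, struct file *file,
				unsigned int cmd, unsigned long arg);

	/* Take the BKL explicitly now that the .ioctl hook no longer does it. */
	static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
					   unsigned long arg)
	{
		long ret;

		lock_kernel();
		ret = legacy_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
		unlock_kernel();

		return ret;
	}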
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index bc39542481a4..fe568effe967 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c | |||
@@ -688,7 +688,7 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev) | |||
688 | } | 688 | } |
689 | 689 | ||
690 | if (!lport->vport) | 690 | if (!lport->vport) |
691 | fc_host_max_npiv_vports(lport->host) = USHORT_MAX; | 691 | fc_host_max_npiv_vports(lport->host) = USHRT_MAX; |
692 | 692 | ||
693 | snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE, | 693 | snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE, |
694 | "%s v%s over %s", FCOE_NAME, FCOE_VERSION, | 694 | "%s v%s over %s", FCOE_NAME, FCOE_VERSION, |
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index a765fe7a55c3..f672d6213eea 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c | |||
@@ -180,8 +180,8 @@ static const char *gdth_ctr_name(gdth_ha_str *ha); | |||
180 | 180 | ||
181 | static int gdth_open(struct inode *inode, struct file *filep); | 181 | static int gdth_open(struct inode *inode, struct file *filep); |
182 | static int gdth_close(struct inode *inode, struct file *filep); | 182 | static int gdth_close(struct inode *inode, struct file *filep); |
183 | static int gdth_ioctl(struct inode *inode, struct file *filep, | 183 | static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd, |
184 | unsigned int cmd, unsigned long arg); | 184 | unsigned long arg); |
185 | 185 | ||
186 | static void gdth_flush(gdth_ha_str *ha); | 186 | static void gdth_flush(gdth_ha_str *ha); |
187 | static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)); | 187 | static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)); |
@@ -369,7 +369,7 @@ MODULE_LICENSE("GPL"); | |||
369 | 369 | ||
370 | /* ioctl interface */ | 370 | /* ioctl interface */ |
371 | static const struct file_operations gdth_fops = { | 371 | static const struct file_operations gdth_fops = { |
372 | .ioctl = gdth_ioctl, | 372 | .unlocked_ioctl = gdth_unlocked_ioctl, |
373 | .open = gdth_open, | 373 | .open = gdth_open, |
374 | .release = gdth_close, | 374 | .release = gdth_close, |
375 | }; | 375 | }; |
@@ -4462,8 +4462,7 @@ free_fail: | |||
4462 | return rc; | 4462 | return rc; |
4463 | } | 4463 | } |
4464 | 4464 | ||
4465 | static int gdth_ioctl(struct inode *inode, struct file *filep, | 4465 | static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) |
4466 | unsigned int cmd, unsigned long arg) | ||
4467 | { | 4466 | { |
4468 | gdth_ha_str *ha; | 4467 | gdth_ha_str *ha; |
4469 | Scsi_Cmnd *scp; | 4468 | Scsi_Cmnd *scp; |
@@ -4611,6 +4610,17 @@ static int gdth_ioctl(struct inode *inode, struct file *filep, | |||
4611 | return 0; | 4610 | return 0; |
4612 | } | 4611 | } |
4613 | 4612 | ||
4613 | static long gdth_unlocked_ioctl(struct file *file, unsigned int cmd, | ||
4614 | unsigned long arg) | ||
4615 | { | ||
4616 | int ret; | ||
4617 | |||
4618 | lock_kernel(); | ||
4619 | ret = gdth_ioctl(file, cmd, arg); | ||
4620 | unlock_kernel(); | ||
4621 | |||
4622 | return ret; | ||
4623 | } | ||
4614 | 4624 | ||
4615 | /* flush routine */ | 4625 | /* flush routine */ |
4616 | static void gdth_flush(gdth_ha_str *ha) | 4626 | static void gdth_flush(gdth_ha_str *ha) |
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c index 18b7102bb80e..2ce26eb7a1ec 100644 --- a/drivers/scsi/gvp11.c +++ b/drivers/scsi/gvp11.c | |||
@@ -1,36 +1,35 @@ | |||
1 | #include <linux/types.h> | 1 | #include <linux/types.h> |
2 | #include <linux/mm.h> | ||
3 | #include <linux/slab.h> | ||
4 | #include <linux/blkdev.h> | ||
5 | #include <linux/init.h> | 2 | #include <linux/init.h> |
6 | #include <linux/interrupt.h> | 3 | #include <linux/interrupt.h> |
4 | #include <linux/mm.h> | ||
5 | #include <linux/slab.h> | ||
6 | #include <linux/spinlock.h> | ||
7 | #include <linux/zorro.h> | ||
7 | 8 | ||
8 | #include <asm/setup.h> | ||
9 | #include <asm/page.h> | 9 | #include <asm/page.h> |
10 | #include <asm/pgtable.h> | 10 | #include <asm/pgtable.h> |
11 | #include <asm/amigaints.h> | 11 | #include <asm/amigaints.h> |
12 | #include <asm/amigahw.h> | 12 | #include <asm/amigahw.h> |
13 | #include <linux/zorro.h> | ||
14 | #include <asm/irq.h> | ||
15 | #include <linux/spinlock.h> | ||
16 | 13 | ||
17 | #include "scsi.h" | 14 | #include "scsi.h" |
18 | #include <scsi/scsi_host.h> | ||
19 | #include "wd33c93.h" | 15 | #include "wd33c93.h" |
20 | #include "gvp11.h" | 16 | #include "gvp11.h" |
21 | 17 | ||
22 | #include <linux/stat.h> | ||
23 | 18 | ||
19 | #define CHECK_WD33C93 | ||
24 | 20 | ||
25 | #define DMA(ptr) ((gvp11_scsiregs *)((ptr)->base)) | 21 | struct gvp11_hostdata { |
22 | struct WD33C93_hostdata wh; | ||
23 | struct gvp11_scsiregs *regs; | ||
24 | }; | ||
26 | 25 | ||
27 | static irqreturn_t gvp11_intr(int irq, void *_instance) | 26 | static irqreturn_t gvp11_intr(int irq, void *data) |
28 | { | 27 | { |
28 | struct Scsi_Host *instance = data; | ||
29 | struct gvp11_hostdata *hdata = shost_priv(instance); | ||
30 | unsigned int status = hdata->regs->CNTR; | ||
29 | unsigned long flags; | 31 | unsigned long flags; |
30 | unsigned int status; | ||
31 | struct Scsi_Host *instance = (struct Scsi_Host *)_instance; | ||
32 | 32 | ||
33 | status = DMA(instance)->CNTR; | ||
34 | if (!(status & GVP11_DMAC_INT_PENDING)) | 33 | if (!(status & GVP11_DMAC_INT_PENDING)) |
35 | return IRQ_NONE; | 34 | return IRQ_NONE; |
36 | 35 | ||
@@ -50,64 +49,66 @@ void gvp11_setup(char *str, int *ints) | |||
50 | static int dma_setup(struct scsi_cmnd *cmd, int dir_in) | 49 | static int dma_setup(struct scsi_cmnd *cmd, int dir_in) |
51 | { | 50 | { |
52 | struct Scsi_Host *instance = cmd->device->host; | 51 | struct Scsi_Host *instance = cmd->device->host; |
53 | struct WD33C93_hostdata *hdata = shost_priv(instance); | 52 | struct gvp11_hostdata *hdata = shost_priv(instance); |
53 | struct WD33C93_hostdata *wh = &hdata->wh; | ||
54 | struct gvp11_scsiregs *regs = hdata->regs; | ||
54 | unsigned short cntr = GVP11_DMAC_INT_ENABLE; | 55 | unsigned short cntr = GVP11_DMAC_INT_ENABLE; |
55 | unsigned long addr = virt_to_bus(cmd->SCp.ptr); | 56 | unsigned long addr = virt_to_bus(cmd->SCp.ptr); |
56 | int bank_mask; | 57 | int bank_mask; |
57 | static int scsi_alloc_out_of_range = 0; | 58 | static int scsi_alloc_out_of_range = 0; |
58 | 59 | ||
59 | /* use bounce buffer if the physical address is bad */ | 60 | /* use bounce buffer if the physical address is bad */ |
60 | if (addr & hdata->dma_xfer_mask) { | 61 | if (addr & wh->dma_xfer_mask) { |
61 | hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; | 62 | wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; |
62 | 63 | ||
63 | if (!scsi_alloc_out_of_range) { | 64 | if (!scsi_alloc_out_of_range) { |
64 | hdata->dma_bounce_buffer = | 65 | wh->dma_bounce_buffer = |
65 | kmalloc(hdata->dma_bounce_len, GFP_KERNEL); | 66 | kmalloc(wh->dma_bounce_len, GFP_KERNEL); |
66 | hdata->dma_buffer_pool = BUF_SCSI_ALLOCED; | 67 | wh->dma_buffer_pool = BUF_SCSI_ALLOCED; |
67 | } | 68 | } |
68 | 69 | ||
69 | if (scsi_alloc_out_of_range || | 70 | if (scsi_alloc_out_of_range || |
70 | !hdata->dma_bounce_buffer) { | 71 | !wh->dma_bounce_buffer) { |
71 | hdata->dma_bounce_buffer = | 72 | wh->dma_bounce_buffer = |
72 | amiga_chip_alloc(hdata->dma_bounce_len, | 73 | amiga_chip_alloc(wh->dma_bounce_len, |
73 | "GVP II SCSI Bounce Buffer"); | 74 | "GVP II SCSI Bounce Buffer"); |
74 | 75 | ||
75 | if (!hdata->dma_bounce_buffer) { | 76 | if (!wh->dma_bounce_buffer) { |
76 | hdata->dma_bounce_len = 0; | 77 | wh->dma_bounce_len = 0; |
77 | return 1; | 78 | return 1; |
78 | } | 79 | } |
79 | 80 | ||
80 | hdata->dma_buffer_pool = BUF_CHIP_ALLOCED; | 81 | wh->dma_buffer_pool = BUF_CHIP_ALLOCED; |
81 | } | 82 | } |
82 | 83 | ||
83 | /* check if the address of the bounce buffer is OK */ | 84 | /* check if the address of the bounce buffer is OK */ |
84 | addr = virt_to_bus(hdata->dma_bounce_buffer); | 85 | addr = virt_to_bus(wh->dma_bounce_buffer); |
85 | 86 | ||
86 | if (addr & hdata->dma_xfer_mask) { | 87 | if (addr & wh->dma_xfer_mask) { |
87 | /* fall back to Chip RAM if address out of range */ | 88 | /* fall back to Chip RAM if address out of range */ |
88 | if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED) { | 89 | if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) { |
89 | kfree(hdata->dma_bounce_buffer); | 90 | kfree(wh->dma_bounce_buffer); |
90 | scsi_alloc_out_of_range = 1; | 91 | scsi_alloc_out_of_range = 1; |
91 | } else { | 92 | } else { |
92 | amiga_chip_free(hdata->dma_bounce_buffer); | 93 | amiga_chip_free(wh->dma_bounce_buffer); |
93 | } | 94 | } |
94 | 95 | ||
95 | hdata->dma_bounce_buffer = | 96 | wh->dma_bounce_buffer = |
96 | amiga_chip_alloc(hdata->dma_bounce_len, | 97 | amiga_chip_alloc(wh->dma_bounce_len, |
97 | "GVP II SCSI Bounce Buffer"); | 98 | "GVP II SCSI Bounce Buffer"); |
98 | 99 | ||
99 | if (!hdata->dma_bounce_buffer) { | 100 | if (!wh->dma_bounce_buffer) { |
100 | hdata->dma_bounce_len = 0; | 101 | wh->dma_bounce_len = 0; |
101 | return 1; | 102 | return 1; |
102 | } | 103 | } |
103 | 104 | ||
104 | addr = virt_to_bus(hdata->dma_bounce_buffer); | 105 | addr = virt_to_bus(wh->dma_bounce_buffer); |
105 | hdata->dma_buffer_pool = BUF_CHIP_ALLOCED; | 106 | wh->dma_buffer_pool = BUF_CHIP_ALLOCED; |
106 | } | 107 | } |
107 | 108 | ||
108 | if (!dir_in) { | 109 | if (!dir_in) { |
109 | /* copy to bounce buffer for a write */ | 110 | /* copy to bounce buffer for a write */ |
110 | memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, | 111 | memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr, |
111 | cmd->SCp.this_residual); | 112 | cmd->SCp.this_residual); |
112 | } | 113 | } |
113 | } | 114 | } |
@@ -116,11 +117,11 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) | |||
116 | if (!dir_in) | 117 | if (!dir_in) |
117 | cntr |= GVP11_DMAC_DIR_WRITE; | 118 | cntr |= GVP11_DMAC_DIR_WRITE; |
118 | 119 | ||
119 | hdata->dma_dir = dir_in; | 120 | wh->dma_dir = dir_in; |
120 | DMA(cmd->device->host)->CNTR = cntr; | 121 | regs->CNTR = cntr; |
121 | 122 | ||
122 | /* setup DMA *physical* address */ | 123 | /* setup DMA *physical* address */ |
123 | DMA(cmd->device->host)->ACR = addr; | 124 | regs->ACR = addr; |
124 | 125 | ||
125 | if (dir_in) { | 126 | if (dir_in) { |
126 | /* invalidate any cache */ | 127 | /* invalidate any cache */ |
@@ -130,12 +131,12 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) | |||
130 | cache_push(addr, cmd->SCp.this_residual); | 131 | cache_push(addr, cmd->SCp.this_residual); |
131 | } | 132 | } |
132 | 133 | ||
133 | bank_mask = (~hdata->dma_xfer_mask >> 18) & 0x01c0; | 134 | bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0; |
134 | if (bank_mask) | 135 | if (bank_mask) |
135 | DMA(cmd->device->host)->BANK = bank_mask & (addr >> 18); | 136 | regs->BANK = bank_mask & (addr >> 18); |
136 | 137 | ||
137 | /* start DMA */ | 138 | /* start DMA */ |
138 | DMA(cmd->device->host)->ST_DMA = 1; | 139 | regs->ST_DMA = 1; |
139 | 140 | ||
140 | /* return success */ | 141 | /* return success */ |
141 | return 0; | 142 | return 0; |
@@ -144,236 +145,53 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) | |||
144 | static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, | 145 | static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, |
145 | int status) | 146 | int status) |
146 | { | 147 | { |
147 | struct WD33C93_hostdata *hdata = shost_priv(instance); | 148 | struct gvp11_hostdata *hdata = shost_priv(instance); |
149 | struct WD33C93_hostdata *wh = &hdata->wh; | ||
150 | struct gvp11_scsiregs *regs = hdata->regs; | ||
148 | 151 | ||
149 | /* stop DMA */ | 152 | /* stop DMA */ |
150 | DMA(instance)->SP_DMA = 1; | 153 | regs->SP_DMA = 1; |
151 | /* remove write bit from CONTROL bits */ | 154 | /* remove write bit from CONTROL bits */ |
152 | DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE; | 155 | regs->CNTR = GVP11_DMAC_INT_ENABLE; |
153 | 156 | ||
154 | /* copy from a bounce buffer, if necessary */ | 157 | /* copy from a bounce buffer, if necessary */ |
155 | if (status && hdata->dma_bounce_buffer) { | 158 | if (status && wh->dma_bounce_buffer) { |
156 | if (hdata->dma_dir && SCpnt) | 159 | if (wh->dma_dir && SCpnt) |
157 | memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer, | 160 | memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer, |
158 | SCpnt->SCp.this_residual); | 161 | SCpnt->SCp.this_residual); |
159 | 162 | ||
160 | if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED) | 163 | if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) |
161 | kfree(hdata->dma_bounce_buffer); | 164 | kfree(wh->dma_bounce_buffer); |
162 | else | ||
163 | amiga_chip_free(hdata->dma_bounce_buffer); | ||
164 | |||
165 | hdata->dma_bounce_buffer = NULL; | ||
166 | hdata->dma_bounce_len = 0; | ||
167 | } | ||
168 | } | ||
169 | |||
170 | #define CHECK_WD33C93 | ||
171 | |||
172 | int __init gvp11_detect(struct scsi_host_template *tpnt) | ||
173 | { | ||
174 | static unsigned char called = 0; | ||
175 | struct Scsi_Host *instance; | ||
176 | unsigned long address; | ||
177 | unsigned int epc; | ||
178 | struct zorro_dev *z = NULL; | ||
179 | unsigned int default_dma_xfer_mask; | ||
180 | struct WD33C93_hostdata *hdata; | ||
181 | wd33c93_regs regs; | ||
182 | int num_gvp11 = 0; | ||
183 | #ifdef CHECK_WD33C93 | ||
184 | volatile unsigned char *sasr_3393, *scmd_3393; | ||
185 | unsigned char save_sasr; | ||
186 | unsigned char q, qq; | ||
187 | #endif | ||
188 | |||
189 | if (!MACH_IS_AMIGA || called) | ||
190 | return 0; | ||
191 | called = 1; | ||
192 | |||
193 | tpnt->proc_name = "GVP11"; | ||
194 | tpnt->proc_info = &wd33c93_proc_info; | ||
195 | |||
196 | while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { | ||
197 | /* | ||
198 | * This should (hopefully) be the correct way to identify | ||
199 | * all the different GVP SCSI controllers (except for the | ||
200 | * SERIES I though). | ||
201 | */ | ||
202 | |||
203 | if (z->id == ZORRO_PROD_GVP_COMBO_030_R3_SCSI || | ||
204 | z->id == ZORRO_PROD_GVP_SERIES_II) | ||
205 | default_dma_xfer_mask = ~0x00ffffff; | ||
206 | else if (z->id == ZORRO_PROD_GVP_GFORCE_030_SCSI || | ||
207 | z->id == ZORRO_PROD_GVP_A530_SCSI || | ||
208 | z->id == ZORRO_PROD_GVP_COMBO_030_R4_SCSI) | ||
209 | default_dma_xfer_mask = ~0x01ffffff; | ||
210 | else if (z->id == ZORRO_PROD_GVP_A1291 || | ||
211 | z->id == ZORRO_PROD_GVP_GFORCE_040_SCSI_1) | ||
212 | default_dma_xfer_mask = ~0x07ffffff; | ||
213 | else | 165 | else |
214 | continue; | 166 | amiga_chip_free(wh->dma_bounce_buffer); |
215 | |||
216 | /* | ||
217 | * Rumors state that some GVP ram boards use the same product | ||
218 | * code as the SCSI controllers. Therefore if the board-size | ||
219 | * is not 64KB we assume it is a ram board and bail out. | ||
220 | */ | ||
221 | if (z->resource.end - z->resource.start != 0xffff) | ||
222 | continue; | ||
223 | 167 | ||
224 | address = z->resource.start; | 168 | wh->dma_bounce_buffer = NULL; |
225 | if (!request_mem_region(address, 256, "wd33c93")) | 169 | wh->dma_bounce_len = 0; |
226 | continue; | ||
227 | |||
228 | #ifdef CHECK_WD33C93 | ||
229 | |||
230 | /* | ||
231 | * These darn GVP boards are a problem - it can be tough to tell | ||
232 | * whether or not they include a SCSI controller. This is the | ||
233 | * ultimate Yet-Another-GVP-Detection-Hack in that it actually | ||
234 | * probes for a WD33c93 chip: If we find one, it's extremely | ||
235 | * likely that this card supports SCSI, regardless of Product_ | ||
236 | * Code, Board_Size, etc. | ||
237 | */ | ||
238 | |||
239 | /* Get pointers to the presumed register locations and save contents */ | ||
240 | |||
241 | sasr_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SASR); | ||
242 | scmd_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SCMD); | ||
243 | save_sasr = *sasr_3393; | ||
244 | |||
245 | /* First test the AuxStatus Reg */ | ||
246 | |||
247 | q = *sasr_3393; /* read it */ | ||
248 | if (q & 0x08) /* bit 3 should always be clear */ | ||
249 | goto release; | ||
250 | *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */ | ||
251 | if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */ | ||
252 | *sasr_3393 = save_sasr; /* Oops - restore this byte */ | ||
253 | goto release; | ||
254 | } | ||
255 | if (*sasr_3393 != q) { /* should still read the same */ | ||
256 | *sasr_3393 = save_sasr; /* Oops - restore this byte */ | ||
257 | goto release; | ||
258 | } | ||
259 | if (*scmd_3393 != q) /* and so should the image at 0x1f */ | ||
260 | goto release; | ||
261 | |||
262 | /* | ||
263 | * Ok, we probably have a wd33c93, but let's check a few other places | ||
264 | * for good measure. Make sure that this works for both 'A and 'B | ||
265 | * chip versions. | ||
266 | */ | ||
267 | |||
268 | *sasr_3393 = WD_SCSI_STATUS; | ||
269 | q = *scmd_3393; | ||
270 | *sasr_3393 = WD_SCSI_STATUS; | ||
271 | *scmd_3393 = ~q; | ||
272 | *sasr_3393 = WD_SCSI_STATUS; | ||
273 | qq = *scmd_3393; | ||
274 | *sasr_3393 = WD_SCSI_STATUS; | ||
275 | *scmd_3393 = q; | ||
276 | if (qq != q) /* should be read only */ | ||
277 | goto release; | ||
278 | *sasr_3393 = 0x1e; /* this register is unimplemented */ | ||
279 | q = *scmd_3393; | ||
280 | *sasr_3393 = 0x1e; | ||
281 | *scmd_3393 = ~q; | ||
282 | *sasr_3393 = 0x1e; | ||
283 | qq = *scmd_3393; | ||
284 | *sasr_3393 = 0x1e; | ||
285 | *scmd_3393 = q; | ||
286 | if (qq != q || qq != 0xff) /* should be read only, all 1's */ | ||
287 | goto release; | ||
288 | *sasr_3393 = WD_TIMEOUT_PERIOD; | ||
289 | q = *scmd_3393; | ||
290 | *sasr_3393 = WD_TIMEOUT_PERIOD; | ||
291 | *scmd_3393 = ~q; | ||
292 | *sasr_3393 = WD_TIMEOUT_PERIOD; | ||
293 | qq = *scmd_3393; | ||
294 | *sasr_3393 = WD_TIMEOUT_PERIOD; | ||
295 | *scmd_3393 = q; | ||
296 | if (qq != (~q & 0xff)) /* should be read/write */ | ||
297 | goto release; | ||
298 | #endif | ||
299 | |||
300 | instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); | ||
301 | if (instance == NULL) | ||
302 | goto release; | ||
303 | instance->base = ZTWO_VADDR(address); | ||
304 | instance->irq = IRQ_AMIGA_PORTS; | ||
305 | instance->unique_id = z->slotaddr; | ||
306 | |||
307 | hdata = shost_priv(instance); | ||
308 | if (gvp11_xfer_mask) | ||
309 | hdata->dma_xfer_mask = gvp11_xfer_mask; | ||
310 | else | ||
311 | hdata->dma_xfer_mask = default_dma_xfer_mask; | ||
312 | |||
313 | DMA(instance)->secret2 = 1; | ||
314 | DMA(instance)->secret1 = 0; | ||
315 | DMA(instance)->secret3 = 15; | ||
316 | while (DMA(instance)->CNTR & GVP11_DMAC_BUSY) | ||
317 | ; | ||
318 | DMA(instance)->CNTR = 0; | ||
319 | |||
320 | DMA(instance)->BANK = 0; | ||
321 | |||
322 | epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000); | ||
323 | |||
324 | /* | ||
325 | * Check for 14MHz SCSI clock | ||
326 | */ | ||
327 | regs.SASR = &(DMA(instance)->SASR); | ||
328 | regs.SCMD = &(DMA(instance)->SCMD); | ||
329 | hdata->no_sync = 0xff; | ||
330 | hdata->fast = 0; | ||
331 | hdata->dma_mode = CTRL_DMA; | ||
332 | wd33c93_init(instance, regs, dma_setup, dma_stop, | ||
333 | (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10 | ||
334 | : WD33C93_FS_12_15); | ||
335 | |||
336 | if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, | ||
337 | "GVP11 SCSI", instance)) | ||
338 | goto unregister; | ||
339 | DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE; | ||
340 | num_gvp11++; | ||
341 | continue; | ||
342 | |||
343 | unregister: | ||
344 | scsi_unregister(instance); | ||
345 | release: | ||
346 | release_mem_region(address, 256); | ||
347 | } | 170 | } |
348 | |||
349 | return num_gvp11; | ||
350 | } | 171 | } |
351 | 172 | ||
352 | static int gvp11_bus_reset(struct scsi_cmnd *cmd) | 173 | static int gvp11_bus_reset(struct scsi_cmnd *cmd) |
353 | { | 174 | { |
175 | struct Scsi_Host *instance = cmd->device->host; | ||
176 | |||
354 | /* FIXME perform bus-specific reset */ | 177 | /* FIXME perform bus-specific reset */ |
355 | 178 | ||
356 | /* FIXME 2: shouldn't we no-op this function (return | 179 | /* FIXME 2: shouldn't we no-op this function (return |
357 | FAILED), and fall back to host reset function, | 180 | FAILED), and fall back to host reset function, |
358 | wd33c93_host_reset ? */ | 181 | wd33c93_host_reset ? */ |
359 | 182 | ||
360 | spin_lock_irq(cmd->device->host->host_lock); | 183 | spin_lock_irq(instance->host_lock); |
361 | wd33c93_host_reset(cmd); | 184 | wd33c93_host_reset(cmd); |
362 | spin_unlock_irq(cmd->device->host->host_lock); | 185 | spin_unlock_irq(instance->host_lock); |
363 | 186 | ||
364 | return SUCCESS; | 187 | return SUCCESS; |
365 | } | 188 | } |
366 | 189 | ||
367 | 190 | static struct scsi_host_template gvp11_scsi_template = { | |
368 | #define HOSTS_C | 191 | .module = THIS_MODULE, |
369 | |||
370 | #include "gvp11.h" | ||
371 | |||
372 | static struct scsi_host_template driver_template = { | ||
373 | .proc_name = "GVP11", | ||
374 | .name = "GVP Series II SCSI", | 192 | .name = "GVP Series II SCSI", |
375 | .detect = gvp11_detect, | 193 | .proc_info = wd33c93_proc_info, |
376 | .release = gvp11_release, | 194 | .proc_name = "GVP11", |
377 | .queuecommand = wd33c93_queuecommand, | 195 | .queuecommand = wd33c93_queuecommand, |
378 | .eh_abort_handler = wd33c93_abort, | 196 | .eh_abort_handler = wd33c93_abort, |
379 | .eh_bus_reset_handler = gvp11_bus_reset, | 197 | .eh_bus_reset_handler = gvp11_bus_reset, |
@@ -385,17 +203,230 @@ static struct scsi_host_template driver_template = { | |||
385 | .use_clustering = DISABLE_CLUSTERING | 203 | .use_clustering = DISABLE_CLUSTERING |
386 | }; | 204 | }; |
387 | 205 | ||
206 | static int __devinit check_wd33c93(struct gvp11_scsiregs *regs) | ||
207 | { | ||
208 | #ifdef CHECK_WD33C93 | ||
209 | volatile unsigned char *sasr_3393, *scmd_3393; | ||
210 | unsigned char save_sasr; | ||
211 | unsigned char q, qq; | ||
388 | 212 | ||
389 | #include "scsi_module.c" | 213 | /* |
214 | * These darn GVP boards are a problem - it can be tough to tell | ||
215 | * whether or not they include a SCSI controller. This is the | ||
216 | * ultimate Yet-Another-GVP-Detection-Hack in that it actually | ||
217 | * probes for a WD33c93 chip: If we find one, it's extremely | ||
218 | * likely that this card supports SCSI, regardless of Product_ | ||
219 | * Code, Board_Size, etc. | ||
220 | */ | ||
221 | |||
222 | /* Get pointers to the presumed register locations and save contents */ | ||
223 | |||
224 | sasr_3393 = ®s->SASR; | ||
225 | scmd_3393 = ®s->SCMD; | ||
226 | save_sasr = *sasr_3393; | ||
227 | |||
228 | /* First test the AuxStatus Reg */ | ||
229 | |||
230 | q = *sasr_3393; /* read it */ | ||
231 | if (q & 0x08) /* bit 3 should always be clear */ | ||
232 | return -ENODEV; | ||
233 | *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */ | ||
234 | if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */ | ||
235 | *sasr_3393 = save_sasr; /* Oops - restore this byte */ | ||
236 | return -ENODEV; | ||
237 | } | ||
238 | if (*sasr_3393 != q) { /* should still read the same */ | ||
239 | *sasr_3393 = save_sasr; /* Oops - restore this byte */ | ||
240 | return -ENODEV; | ||
241 | } | ||
242 | if (*scmd_3393 != q) /* and so should the image at 0x1f */ | ||
243 | return -ENODEV; | ||
244 | |||
245 | /* | ||
246 | * Ok, we probably have a wd33c93, but let's check a few other places | ||
247 | * for good measure. Make sure that this works for both 'A and 'B | ||
248 | * chip versions. | ||
249 | */ | ||
250 | |||
251 | *sasr_3393 = WD_SCSI_STATUS; | ||
252 | q = *scmd_3393; | ||
253 | *sasr_3393 = WD_SCSI_STATUS; | ||
254 | *scmd_3393 = ~q; | ||
255 | *sasr_3393 = WD_SCSI_STATUS; | ||
256 | qq = *scmd_3393; | ||
257 | *sasr_3393 = WD_SCSI_STATUS; | ||
258 | *scmd_3393 = q; | ||
259 | if (qq != q) /* should be read only */ | ||
260 | return -ENODEV; | ||
261 | *sasr_3393 = 0x1e; /* this register is unimplemented */ | ||
262 | q = *scmd_3393; | ||
263 | *sasr_3393 = 0x1e; | ||
264 | *scmd_3393 = ~q; | ||
265 | *sasr_3393 = 0x1e; | ||
266 | qq = *scmd_3393; | ||
267 | *sasr_3393 = 0x1e; | ||
268 | *scmd_3393 = q; | ||
269 | if (qq != q || qq != 0xff) /* should be read only, all 1's */ | ||
270 | return -ENODEV; | ||
271 | *sasr_3393 = WD_TIMEOUT_PERIOD; | ||
272 | q = *scmd_3393; | ||
273 | *sasr_3393 = WD_TIMEOUT_PERIOD; | ||
274 | *scmd_3393 = ~q; | ||
275 | *sasr_3393 = WD_TIMEOUT_PERIOD; | ||
276 | qq = *scmd_3393; | ||
277 | *sasr_3393 = WD_TIMEOUT_PERIOD; | ||
278 | *scmd_3393 = q; | ||
279 | if (qq != (~q & 0xff)) /* should be read/write */ | ||
280 | return -ENODEV; | ||
281 | #endif /* CHECK_WD33C93 */ | ||
390 | 282 | ||
391 | int gvp11_release(struct Scsi_Host *instance) | 283 | return 0; |
284 | } | ||
285 | |||
286 | static int __devinit gvp11_probe(struct zorro_dev *z, | ||
287 | const struct zorro_device_id *ent) | ||
392 | { | 288 | { |
393 | #ifdef MODULE | 289 | struct Scsi_Host *instance; |
394 | DMA(instance)->CNTR = 0; | 290 | unsigned long address; |
395 | release_mem_region(ZTWO_PADDR(instance->base), 256); | 291 | int error; |
292 | unsigned int epc; | ||
293 | unsigned int default_dma_xfer_mask; | ||
294 | struct gvp11_hostdata *hdata; | ||
295 | struct gvp11_scsiregs *regs; | ||
296 | wd33c93_regs wdregs; | ||
297 | |||
298 | default_dma_xfer_mask = ent->driver_data; | ||
299 | |||
300 | /* | ||
301 | * Rumors state that some GVP ram boards use the same product | ||
302 | * code as the SCSI controllers. Therefore if the board-size | ||
303 | * is not 64KB we assume it is a ram board and bail out. | ||
304 | */ | ||
305 | if (zorro_resource_len(z) != 0x10000) | ||
306 | return -ENODEV; | ||
307 | |||
308 | address = z->resource.start; | ||
309 | if (!request_mem_region(address, 256, "wd33c93")) | ||
310 | return -EBUSY; | ||
311 | |||
312 | regs = (struct gvp11_scsiregs *)(ZTWO_VADDR(address)); | ||
313 | |||
314 | error = check_wd33c93(regs); | ||
315 | if (error) | ||
316 | goto fail_check_or_alloc; | ||
317 | |||
318 | instance = scsi_host_alloc(&gvp11_scsi_template, | ||
319 | sizeof(struct gvp11_hostdata)); | ||
320 | if (!instance) { | ||
321 | error = -ENOMEM; | ||
322 | goto fail_check_or_alloc; | ||
323 | } | ||
324 | |||
325 | instance->irq = IRQ_AMIGA_PORTS; | ||
326 | instance->unique_id = z->slotaddr; | ||
327 | |||
328 | regs->secret2 = 1; | ||
329 | regs->secret1 = 0; | ||
330 | regs->secret3 = 15; | ||
331 | while (regs->CNTR & GVP11_DMAC_BUSY) | ||
332 | ; | ||
333 | regs->CNTR = 0; | ||
334 | regs->BANK = 0; | ||
335 | |||
336 | wdregs.SASR = ®s->SASR; | ||
337 | wdregs.SCMD = ®s->SCMD; | ||
338 | |||
339 | hdata = shost_priv(instance); | ||
340 | if (gvp11_xfer_mask) | ||
341 | hdata->wh.dma_xfer_mask = gvp11_xfer_mask; | ||
342 | else | ||
343 | hdata->wh.dma_xfer_mask = default_dma_xfer_mask; | ||
344 | |||
345 | hdata->wh.no_sync = 0xff; | ||
346 | hdata->wh.fast = 0; | ||
347 | hdata->wh.dma_mode = CTRL_DMA; | ||
348 | hdata->regs = regs; | ||
349 | |||
350 | /* | ||
351 | * Check for 14MHz SCSI clock | ||
352 | */ | ||
353 | epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000); | ||
354 | wd33c93_init(instance, wdregs, dma_setup, dma_stop, | ||
355 | (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10 | ||
356 | : WD33C93_FS_12_15); | ||
357 | |||
358 | error = request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, | ||
359 | "GVP11 SCSI", instance); | ||
360 | if (error) | ||
361 | goto fail_irq; | ||
362 | |||
363 | regs->CNTR = GVP11_DMAC_INT_ENABLE; | ||
364 | |||
365 | error = scsi_add_host(instance, NULL); | ||
366 | if (error) | ||
367 | goto fail_host; | ||
368 | |||
369 | zorro_set_drvdata(z, instance); | ||
370 | scsi_scan_host(instance); | ||
371 | return 0; | ||
372 | |||
373 | fail_host: | ||
396 | free_irq(IRQ_AMIGA_PORTS, instance); | 374 | free_irq(IRQ_AMIGA_PORTS, instance); |
397 | #endif | 375 | fail_irq: |
398 | return 1; | 376 | scsi_host_put(instance); |
377 | fail_check_or_alloc: | ||
378 | release_mem_region(address, 256); | ||
379 | return error; | ||
380 | } | ||
381 | |||
382 | static void __devexit gvp11_remove(struct zorro_dev *z) | ||
383 | { | ||
384 | struct Scsi_Host *instance = zorro_get_drvdata(z); | ||
385 | struct gvp11_hostdata *hdata = shost_priv(instance); | ||
386 | |||
387 | hdata->regs->CNTR = 0; | ||
388 | scsi_remove_host(instance); | ||
389 | free_irq(IRQ_AMIGA_PORTS, instance); | ||
390 | scsi_host_put(instance); | ||
391 | release_mem_region(z->resource.start, 256); | ||
392 | } | ||
393 | |||
394 | /* | ||
395 | * This should (hopefully) be the correct way to identify | ||
396 | * all the different GVP SCSI controllers (except for the | ||
397 | * SERIES I though). | ||
398 | */ | ||
399 | |||
400 | static struct zorro_device_id gvp11_zorro_tbl[] __devinitdata = { | ||
401 | { ZORRO_PROD_GVP_COMBO_030_R3_SCSI, ~0x00ffffff }, | ||
402 | { ZORRO_PROD_GVP_SERIES_II, ~0x00ffffff }, | ||
403 | { ZORRO_PROD_GVP_GFORCE_030_SCSI, ~0x01ffffff }, | ||
404 | { ZORRO_PROD_GVP_A530_SCSI, ~0x01ffffff }, | ||
405 | { ZORRO_PROD_GVP_COMBO_030_R4_SCSI, ~0x01ffffff }, | ||
406 | { ZORRO_PROD_GVP_A1291, ~0x07ffffff }, | ||
407 | { ZORRO_PROD_GVP_GFORCE_040_SCSI_1, ~0x07ffffff }, | ||
408 | { 0 } | ||
409 | }; | ||
410 | MODULE_DEVICE_TABLE(zorro, gvp11_zorro_tbl); | ||
411 | |||
412 | static struct zorro_driver gvp11_driver = { | ||
413 | .name = "gvp11", | ||
414 | .id_table = gvp11_zorro_tbl, | ||
415 | .probe = gvp11_probe, | ||
416 | .remove = __devexit_p(gvp11_remove), | ||
417 | }; | ||
418 | |||
419 | static int __init gvp11_init(void) | ||
420 | { | ||
421 | return zorro_register_driver(&gvp11_driver); | ||
422 | } | ||
423 | module_init(gvp11_init); | ||
424 | |||
425 | static void __exit gvp11_exit(void) | ||
426 | { | ||
427 | zorro_unregister_driver(&gvp11_driver); | ||
399 | } | 428 | } |
429 | module_exit(gvp11_exit); | ||
400 | 430 | ||
431 | MODULE_DESCRIPTION("GVP Series II SCSI"); | ||
401 | MODULE_LICENSE("GPL"); | 432 | MODULE_LICENSE("GPL"); |
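Two things happen in the gvp11.c rewrite: the detect/release/scsi_module.c probing style is replaced by a zorro_driver with probe/remove callbacks matched against a device-ID table (the per-board DMA masks move into the table's driver_data), and the Scsi_Host private area stops being a bare struct WD33C93_hostdata and becomes a wrapper that also carries the board register pointer, with the wd33c93 core state kept as the first member, presumably so the shared wd33c93 code still finds it where it expects. The wrapper pattern, reduced to a sketch (types come from the driver's own headers; the accessor is illustrative, not a function in this patch):

	#include <scsi/scsi_host.h>
	#include "wd33c93.h"
	#include "gvp11.h"

	/* Keep the core-library state first and carry the board registers
	 * alongside it in one private structure. */
	struct example_hostdata {
		struct WD33C93_hostdata wh;
		struct gvp11_scsiregs *regs;
	};

	/* Any host callback can recover both pieces from the private area
	 * that scsi_host_alloc() sized for the wrapper. */
	static struct gvp11_scsiregs *example_regs(struct Scsi_Host *instance)
	{
		struct example_hostdata *hdata = shost_priv(instance);

		return hdata->regs;
	}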
diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h index e2efdf9601ef..852913cde5dd 100644 --- a/drivers/scsi/gvp11.h +++ b/drivers/scsi/gvp11.h | |||
@@ -11,9 +11,6 @@ | |||
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | 13 | ||
14 | int gvp11_detect(struct scsi_host_template *); | ||
15 | int gvp11_release(struct Scsi_Host *); | ||
16 | |||
17 | #ifndef CMD_PER_LUN | 14 | #ifndef CMD_PER_LUN |
18 | #define CMD_PER_LUN 2 | 15 | #define CMD_PER_LUN 2 |
19 | #endif | 16 | #endif |
@@ -22,15 +19,13 @@ int gvp11_release(struct Scsi_Host *); | |||
22 | #define CAN_QUEUE 16 | 19 | #define CAN_QUEUE 16 |
23 | #endif | 20 | #endif |
24 | 21 | ||
25 | #ifndef HOSTS_C | ||
26 | |||
27 | /* | 22 | /* |
28 | * if the transfer address ANDed with this results in a non-zero | 23 | * if the transfer address ANDed with this results in a non-zero |
29 | * result, then we can't use DMA. | 24 | * result, then we can't use DMA. |
30 | */ | 25 | */ |
31 | #define GVP11_XFER_MASK (0xff000001) | 26 | #define GVP11_XFER_MASK (0xff000001) |
32 | 27 | ||
33 | typedef struct { | 28 | struct gvp11_scsiregs { |
34 | unsigned char pad1[64]; | 29 | unsigned char pad1[64]; |
35 | volatile unsigned short CNTR; | 30 | volatile unsigned short CNTR; |
36 | unsigned char pad2[31]; | 31 | unsigned char pad2[31]; |
@@ -46,7 +41,7 @@ typedef struct { | |||
46 | volatile unsigned short SP_DMA; | 41 | volatile unsigned short SP_DMA; |
47 | volatile unsigned short secret2; /* store 1 here */ | 42 | volatile unsigned short secret2; /* store 1 here */ |
48 | volatile unsigned short secret3; /* store 15 here */ | 43 | volatile unsigned short secret3; /* store 15 here */ |
49 | } gvp11_scsiregs; | 44 | }; |
50 | 45 | ||
51 | /* bits in CNTR */ | 46 | /* bits in CNTR */ |
52 | #define GVP11_DMAC_BUSY (1<<0) | 47 | #define GVP11_DMAC_BUSY (1<<0) |
@@ -54,6 +49,4 @@ typedef struct { | |||
54 | #define GVP11_DMAC_INT_ENABLE (1<<3) | 49 | #define GVP11_DMAC_INT_ENABLE (1<<3) |
55 | #define GVP11_DMAC_DIR_WRITE (1<<4) | 50 | #define GVP11_DMAC_DIR_WRITE (1<<4) |
56 | 51 | ||
57 | #endif /* else def HOSTS_C */ | ||
58 | |||
59 | #endif /* GVP11_H */ | 52 | #endif /* GVP11_H */ |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 3eb2b7b3d8b0..fef49521cbc3 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
@@ -1157,7 +1157,7 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost) | |||
1157 | static void ibmvfc_set_login_info(struct ibmvfc_host *vhost) | 1157 | static void ibmvfc_set_login_info(struct ibmvfc_host *vhost) |
1158 | { | 1158 | { |
1159 | struct ibmvfc_npiv_login *login_info = &vhost->login_info; | 1159 | struct ibmvfc_npiv_login *login_info = &vhost->login_info; |
1160 | struct device_node *of_node = vhost->dev->archdata.of_node; | 1160 | struct device_node *of_node = vhost->dev->of_node; |
1161 | const char *location; | 1161 | const char *location; |
1162 | 1162 | ||
1163 | memset(login_info, 0, sizeof(*login_info)); | 1163 | memset(login_info, 0, sizeof(*login_info)); |
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 88bad0e81bdd..aad35cc41e49 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -932,7 +932,7 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) | |||
932 | struct viosrp_capabilities *req; | 932 | struct viosrp_capabilities *req; |
933 | struct srp_event_struct *evt_struct; | 933 | struct srp_event_struct *evt_struct; |
934 | unsigned long flags; | 934 | unsigned long flags; |
935 | struct device_node *of_node = hostdata->dev->archdata.of_node; | 935 | struct device_node *of_node = hostdata->dev->of_node; |
936 | const char *location; | 936 | const char *location; |
937 | 937 | ||
938 | evt_struct = get_event_struct(&hostdata->pool); | 938 | evt_struct = get_event_struct(&hostdata->pool); |
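The ibmvfc and ibmvscsi hunks are the same one-line adjustment: the firmware device-tree node is now found at dev->of_node instead of dev->archdata.of_node, and the surrounding code goes on to look up a location string from it. A minimal sketch of pulling a string property off that node (the property name below is a placeholder, not necessarily the one these drivers read):

	#include <linux/device.h>
	#include <linux/of.h>

	/* Sketch: fetch a string property from the device's OF node, if present. */
	static const char *example_location(struct device *dev)
	{
		struct device_node *of_node = dev->of_node;

		if (!of_node)
			return NULL;
		return of_get_property(of_node, "example,loc-code", NULL);
	}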
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 6a6661c35b2f..82ea4a8226b0 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -567,7 +567,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, | |||
567 | static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) | 567 | static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) |
568 | { | 568 | { |
569 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | 569 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; |
570 | struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; | 570 | struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; |
571 | struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; | ||
571 | dma_addr_t dma_addr = ipr_cmd->dma_addr; | 572 | dma_addr_t dma_addr = ipr_cmd->dma_addr; |
572 | 573 | ||
573 | memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); | 574 | memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); |
@@ -576,19 +577,19 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) | |||
576 | ioarcb->ioadl_len = 0; | 577 | ioarcb->ioadl_len = 0; |
577 | ioarcb->read_ioadl_len = 0; | 578 | ioarcb->read_ioadl_len = 0; |
578 | 579 | ||
579 | if (ipr_cmd->ioa_cfg->sis64) | 580 | if (ipr_cmd->ioa_cfg->sis64) { |
580 | ioarcb->u.sis64_addr_data.data_ioadl_addr = | 581 | ioarcb->u.sis64_addr_data.data_ioadl_addr = |
581 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); | 582 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); |
582 | else { | 583 | ioasa64->u.gata.status = 0; |
584 | } else { | ||
583 | ioarcb->write_ioadl_addr = | 585 | ioarcb->write_ioadl_addr = |
584 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); | 586 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); |
585 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; | 587 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; |
588 | ioasa->u.gata.status = 0; | ||
586 | } | 589 | } |
587 | 590 | ||
588 | ioasa->ioasc = 0; | 591 | ioasa->hdr.ioasc = 0; |
589 | ioasa->residual_data_len = 0; | 592 | ioasa->hdr.residual_data_len = 0; |
590 | ioasa->u.gata.status = 0; | ||
591 | |||
592 | ipr_cmd->scsi_cmd = NULL; | 593 | ipr_cmd->scsi_cmd = NULL; |
593 | ipr_cmd->qc = NULL; | 594 | ipr_cmd->qc = NULL; |
594 | ipr_cmd->sense_buffer[0] = 0; | 595 | ipr_cmd->sense_buffer[0] = 0; |
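The ipr changes in this and the following hunks all stem from one structural move: the status area is no longer a single ipr_cmd->ioasa but lives in a union, ipr_cmd->s.ioasa for the 32-bit layout and ipr_cmd->s.ioasa64 for SIS-64 adapters, with the shared fields gathered under a common hdr so reads like the ioasc accesses above work for both formats, while format-specific paths pick a member based on ioa_cfg->sis64. Conceptually (all names below are simplified stand-ins, not the driver's real definitions):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Simplified stand-ins for the two response formats. */
	struct example_ioasa_hdr { __be32 ioasc; __be32 residual_data_len; };
	struct example_ioasa    { struct example_ioasa_hdr hdr; /* 32-bit extras */ };
	struct example_ioasa64  { struct example_ioasa_hdr hdr; /* SIS-64 extras  */ };

	struct example_cmnd {
		union {
			struct example_ioasa   ioasa;
			struct example_ioasa64 ioasa64;
		} s;    /* both members start with the common header */
	};

	/* Common fields are read through the header regardless of format. */
	static u32 example_ioasc(struct example_cmnd *cmd)
	{
		return be32_to_cpu(cmd->s.ioasa.hdr.ioasc);
	}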
@@ -768,8 +769,8 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) | |||
768 | list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) { | 769 | list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) { |
769 | list_del(&ipr_cmd->queue); | 770 | list_del(&ipr_cmd->queue); |
770 | 771 | ||
771 | ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET); | 772 | ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET); |
772 | ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID); | 773 | ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID); |
773 | 774 | ||
774 | if (ipr_cmd->scsi_cmd) | 775 | if (ipr_cmd->scsi_cmd) |
775 | ipr_cmd->done = ipr_scsi_eh_done; | 776 | ipr_cmd->done = ipr_scsi_eh_done; |
@@ -1040,7 +1041,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res, | |||
1040 | proto = cfgtew->u.cfgte64->proto; | 1041 | proto = cfgtew->u.cfgte64->proto; |
1041 | res->res_flags = cfgtew->u.cfgte64->res_flags; | 1042 | res->res_flags = cfgtew->u.cfgte64->res_flags; |
1042 | res->qmodel = IPR_QUEUEING_MODEL64(res); | 1043 | res->qmodel = IPR_QUEUEING_MODEL64(res); |
1043 | res->type = cfgtew->u.cfgte64->res_type & 0x0f; | 1044 | res->type = cfgtew->u.cfgte64->res_type; |
1044 | 1045 | ||
1045 | memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, | 1046 | memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, |
1046 | sizeof(res->res_path)); | 1047 | sizeof(res->res_path)); |
@@ -1319,7 +1320,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd) | |||
1319 | { | 1320 | { |
1320 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 1321 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
1321 | struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; | 1322 | struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; |
1322 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 1323 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
1323 | 1324 | ||
1324 | list_del(&hostrcb->queue); | 1325 | list_del(&hostrcb->queue); |
1325 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); | 1326 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); |
@@ -2354,7 +2355,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd) | |||
2354 | { | 2355 | { |
2355 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 2356 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
2356 | struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; | 2357 | struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; |
2357 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 2358 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
2358 | u32 fd_ioasc; | 2359 | u32 fd_ioasc; |
2359 | 2360 | ||
2360 | if (ioa_cfg->sis64) | 2361 | if (ioa_cfg->sis64) |
@@ -4509,11 +4510,16 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, | |||
4509 | } | 4510 | } |
4510 | 4511 | ||
4511 | ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); | 4512 | ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); |
4512 | ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 4513 | ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
4513 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); | 4514 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); |
4514 | if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) | 4515 | if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) { |
4515 | memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata, | 4516 | if (ipr_cmd->ioa_cfg->sis64) |
4516 | sizeof(struct ipr_ioasa_gata)); | 4517 | memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, |
4518 | sizeof(struct ipr_ioasa_gata)); | ||
4519 | else | ||
4520 | memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, | ||
4521 | sizeof(struct ipr_ioasa_gata)); | ||
4522 | } | ||
4517 | 4523 | ||
4518 | LEAVE; | 4524 | LEAVE; |
4519 | return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0); | 4525 | return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0); |
@@ -4768,7 +4774,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd) | |||
4768 | scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n", | 4774 | scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n", |
4769 | scsi_cmd->cmnd[0]); | 4775 | scsi_cmd->cmnd[0]); |
4770 | ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT); | 4776 | ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT); |
4771 | ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 4777 | ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
4772 | 4778 | ||
4773 | /* | 4779 | /* |
4774 | * If the abort task timed out and we sent a bus reset, we will get | 4780 | * If the abort task timed out and we sent a bus reset, we will get |
@@ -4812,15 +4818,39 @@ static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd) | |||
4812 | /** | 4818 | /** |
4813 | * ipr_handle_other_interrupt - Handle "other" interrupts | 4819 | * ipr_handle_other_interrupt - Handle "other" interrupts |
4814 | * @ioa_cfg: ioa config struct | 4820 | * @ioa_cfg: ioa config struct |
4815 | * @int_reg: interrupt register | ||
4816 | * | 4821 | * |
4817 | * Return value: | 4822 | * Return value: |
4818 | * IRQ_NONE / IRQ_HANDLED | 4823 | * IRQ_NONE / IRQ_HANDLED |
4819 | **/ | 4824 | **/ |
4820 | static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, | 4825 | static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg) |
4821 | volatile u32 int_reg) | ||
4822 | { | 4826 | { |
4823 | irqreturn_t rc = IRQ_HANDLED; | 4827 | irqreturn_t rc = IRQ_HANDLED; |
4828 | volatile u32 int_reg, int_mask_reg; | ||
4829 | |||
4830 | int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); | ||
4831 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; | ||
4832 | |||
4833 | /* If an interrupt on the adapter did not occur, ignore it. | ||
4834 | * Or in the case of SIS 64, check for a stage change interrupt. | ||
4835 | */ | ||
4836 | if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) { | ||
4837 | if (ioa_cfg->sis64) { | ||
4838 | int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); | ||
4839 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; | ||
4840 | if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { | ||
4841 | |||
4842 | /* clear stage change */ | ||
4843 | writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); | ||
4844 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; | ||
4845 | list_del(&ioa_cfg->reset_cmd->queue); | ||
4846 | del_timer(&ioa_cfg->reset_cmd->timer); | ||
4847 | ipr_reset_ioa_job(ioa_cfg->reset_cmd); | ||
4848 | return IRQ_HANDLED; | ||
4849 | } | ||
4850 | } | ||
4851 | |||
4852 | return IRQ_NONE; | ||
4853 | } | ||
4824 | 4854 | ||
4825 | if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { | 4855 | if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { |
4826 | /* Mask the interrupt */ | 4856 | /* Mask the interrupt */ |
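The hunk above moves the "no operational interrupt" / SIS-64 stage-change handling out of ipr_isr() and into ipr_handle_other_interrupt(), which now reads the mask and sense registers itself instead of receiving int_reg from the caller. A minimal sketch of the resulting control flow; ctrl and the three helpers are hypothetical stand-ins, not ipr symbols (assumes <linux/interrupt.h> for irqreturn_t):

    struct ctrl;
    static int  queue_has_completions(struct ctrl *c);
    static void process_completion(struct ctrl *c);
    static irqreturn_t handle_other_interrupt(struct ctrl *c);

    static irqreturn_t isr_sketch(struct ctrl *c)
    {
            irqreturn_t rc = IRQ_NONE;

            while (queue_has_completions(c)) {      /* drain the HRRQ first */
                    process_completion(c);
                    rc = IRQ_HANDLED;
            }
            if (rc == IRQ_NONE)                     /* no queued work: defer to the helper, */
                    rc = handle_other_interrupt(c); /* which re-reads the interrupt registers */
            return rc;
    }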
@@ -4881,7 +4911,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
4881 | { | 4911 | { |
4882 | struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; | 4912 | struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; |
4883 | unsigned long lock_flags = 0; | 4913 | unsigned long lock_flags = 0; |
4884 | volatile u32 int_reg, int_mask_reg; | 4914 | volatile u32 int_reg; |
4885 | u32 ioasc; | 4915 | u32 ioasc; |
4886 | u16 cmd_index; | 4916 | u16 cmd_index; |
4887 | int num_hrrq = 0; | 4917 | int num_hrrq = 0; |
@@ -4896,33 +4926,6 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
4896 | return IRQ_NONE; | 4926 | return IRQ_NONE; |
4897 | } | 4927 | } |
4898 | 4928 | ||
4899 | int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); | ||
4900 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; | ||
4901 | |||
4902 | /* If an interrupt on the adapter did not occur, ignore it. | ||
4903 | * Or in the case of SIS 64, check for a stage change interrupt. | ||
4904 | */ | ||
4905 | if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) { | ||
4906 | if (ioa_cfg->sis64) { | ||
4907 | int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); | ||
4908 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; | ||
4909 | if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { | ||
4910 | |||
4911 | /* clear stage change */ | ||
4912 | writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); | ||
4913 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; | ||
4914 | list_del(&ioa_cfg->reset_cmd->queue); | ||
4915 | del_timer(&ioa_cfg->reset_cmd->timer); | ||
4916 | ipr_reset_ioa_job(ioa_cfg->reset_cmd); | ||
4917 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
4918 | return IRQ_HANDLED; | ||
4919 | } | ||
4920 | } | ||
4921 | |||
4922 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
4923 | return IRQ_NONE; | ||
4924 | } | ||
4925 | |||
4926 | while (1) { | 4929 | while (1) { |
4927 | ipr_cmd = NULL; | 4930 | ipr_cmd = NULL; |
4928 | 4931 | ||
@@ -4940,7 +4943,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
4940 | 4943 | ||
4941 | ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; | 4944 | ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; |
4942 | 4945 | ||
4943 | ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 4946 | ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
4944 | 4947 | ||
4945 | ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); | 4948 | ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); |
4946 | 4949 | ||
@@ -4962,7 +4965,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
4962 | /* Clear the PCI interrupt */ | 4965 | /* Clear the PCI interrupt */ |
4963 | do { | 4966 | do { |
4964 | writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); | 4967 | writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); |
4965 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; | 4968 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); |
4966 | } while (int_reg & IPR_PCII_HRRQ_UPDATED && | 4969 | } while (int_reg & IPR_PCII_HRRQ_UPDATED && |
4967 | num_hrrq++ < IPR_MAX_HRRQ_RETRIES); | 4970 | num_hrrq++ < IPR_MAX_HRRQ_RETRIES); |
4968 | 4971 | ||
@@ -4977,7 +4980,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
4977 | } | 4980 | } |
4978 | 4981 | ||
4979 | if (unlikely(rc == IRQ_NONE)) | 4982 | if (unlikely(rc == IRQ_NONE)) |
4980 | rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); | 4983 | rc = ipr_handle_other_interrupt(ioa_cfg); |
4981 | 4984 | ||
4982 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 4985 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
4983 | return rc; | 4986 | return rc; |
@@ -5014,6 +5017,10 @@ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, | |||
5014 | 5017 | ||
5015 | ipr_cmd->dma_use_sg = nseg; | 5018 | ipr_cmd->dma_use_sg = nseg; |
5016 | 5019 | ||
5020 | ioarcb->data_transfer_length = cpu_to_be32(length); | ||
5021 | ioarcb->ioadl_len = | ||
5022 | cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); | ||
5023 | |||
5017 | if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { | 5024 | if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { |
5018 | ioadl_flags = IPR_IOADL_FLAGS_WRITE; | 5025 | ioadl_flags = IPR_IOADL_FLAGS_WRITE; |
5019 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; | 5026 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; |
@@ -5135,7 +5142,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) | |||
5135 | struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; | 5142 | struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; |
5136 | struct ipr_resource_entry *res = scsi_cmd->device->hostdata; | 5143 | struct ipr_resource_entry *res = scsi_cmd->device->hostdata; |
5137 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 5144 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
5138 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 5145 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
5139 | 5146 | ||
5140 | if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { | 5147 | if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { |
5141 | scsi_cmd->result |= (DID_ERROR << 16); | 5148 | scsi_cmd->result |= (DID_ERROR << 16); |
@@ -5166,7 +5173,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) | |||
5166 | static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) | 5173 | static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) |
5167 | { | 5174 | { |
5168 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | 5175 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; |
5169 | struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; | 5176 | struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; |
5170 | dma_addr_t dma_addr = ipr_cmd->dma_addr; | 5177 | dma_addr_t dma_addr = ipr_cmd->dma_addr; |
5171 | 5178 | ||
5172 | memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); | 5179 | memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); |
@@ -5174,8 +5181,8 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) | |||
5174 | ioarcb->read_data_transfer_length = 0; | 5181 | ioarcb->read_data_transfer_length = 0; |
5175 | ioarcb->ioadl_len = 0; | 5182 | ioarcb->ioadl_len = 0; |
5176 | ioarcb->read_ioadl_len = 0; | 5183 | ioarcb->read_ioadl_len = 0; |
5177 | ioasa->ioasc = 0; | 5184 | ioasa->hdr.ioasc = 0; |
5178 | ioasa->residual_data_len = 0; | 5185 | ioasa->hdr.residual_data_len = 0; |
5179 | 5186 | ||
5180 | if (ipr_cmd->ioa_cfg->sis64) | 5187 | if (ipr_cmd->ioa_cfg->sis64) |
5181 | ioarcb->u.sis64_addr_data.data_ioadl_addr = | 5188 | ioarcb->u.sis64_addr_data.data_ioadl_addr = |
@@ -5200,7 +5207,7 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) | |||
5200 | static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) | 5207 | static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) |
5201 | { | 5208 | { |
5202 | struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; | 5209 | struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; |
5203 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 5210 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
5204 | 5211 | ||
5205 | if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { | 5212 | if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { |
5206 | ipr_erp_done(ipr_cmd); | 5213 | ipr_erp_done(ipr_cmd); |
@@ -5277,12 +5284,12 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, | |||
5277 | int i; | 5284 | int i; |
5278 | u16 data_len; | 5285 | u16 data_len; |
5279 | u32 ioasc, fd_ioasc; | 5286 | u32 ioasc, fd_ioasc; |
5280 | struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; | 5287 | struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; |
5281 | __be32 *ioasa_data = (__be32 *)ioasa; | 5288 | __be32 *ioasa_data = (__be32 *)ioasa; |
5282 | int error_index; | 5289 | int error_index; |
5283 | 5290 | ||
5284 | ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK; | 5291 | ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK; |
5285 | fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK; | 5292 | fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK; |
5286 | 5293 | ||
5287 | if (0 == ioasc) | 5294 | if (0 == ioasc) |
5288 | return; | 5295 | return; |
@@ -5297,7 +5304,7 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, | |||
5297 | 5304 | ||
5298 | if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { | 5305 | if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { |
5299 | /* Don't log an error if the IOA already logged one */ | 5306 | /* Don't log an error if the IOA already logged one */ |
5300 | if (ioasa->ilid != 0) | 5307 | if (ioasa->hdr.ilid != 0) |
5301 | return; | 5308 | return; |
5302 | 5309 | ||
5303 | if (!ipr_is_gscsi(res)) | 5310 | if (!ipr_is_gscsi(res)) |
@@ -5309,10 +5316,11 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, | |||
5309 | 5316 | ||
5310 | ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); | 5317 | ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); |
5311 | 5318 | ||
5312 | if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len)) | 5319 | data_len = be16_to_cpu(ioasa->hdr.ret_stat_len); |
5320 | if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) | ||
5321 | data_len = sizeof(struct ipr_ioasa64); | ||
5322 | else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) | ||
5313 | data_len = sizeof(struct ipr_ioasa); | 5323 | data_len = sizeof(struct ipr_ioasa); |
5314 | else | ||
5315 | data_len = be16_to_cpu(ioasa->ret_stat_len); | ||
5316 | 5324 | ||
5317 | ipr_err("IOASA Dump:\n"); | 5325 | ipr_err("IOASA Dump:\n"); |
5318 | 5326 | ||
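The replaced lines change the dump-length clamp so that the adapter-reported ret_stat_len is bounded by whichever status format is in use (ipr_ioasa64 on SIS-64, ipr_ioasa otherwise). The same bound restated as a stand-alone helper, with plain parameters in place of the driver structures:

    #include <stddef.h>

    /* Hypothetical helper: trust the reported length only up to the size of
     * the IOASA format actually in use. */
    static size_t clamp_ioasa_dump_len(size_t reported, int sis64,
                                       size_t size32, size_t size64)
    {
            size_t limit = sis64 ? size64 : size32;

            return reported > limit ? limit : reported;
    }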
@@ -5338,8 +5346,8 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) | |||
5338 | u32 failing_lba; | 5346 | u32 failing_lba; |
5339 | u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; | 5347 | u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; |
5340 | struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; | 5348 | struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; |
5341 | struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; | 5349 | struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; |
5342 | u32 ioasc = be32_to_cpu(ioasa->ioasc); | 5350 | u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc); |
5343 | 5351 | ||
5344 | memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); | 5352 | memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); |
5345 | 5353 | ||
@@ -5382,7 +5390,7 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) | |||
5382 | 5390 | ||
5383 | /* Illegal request */ | 5391 | /* Illegal request */ |
5384 | if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && | 5392 | if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && |
5385 | (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) { | 5393 | (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) { |
5386 | sense_buf[7] = 10; /* additional length */ | 5394 | sense_buf[7] = 10; /* additional length */ |
5387 | 5395 | ||
5388 | /* IOARCB was in error */ | 5396 | /* IOARCB was in error */ |
@@ -5393,10 +5401,10 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) | |||
5393 | 5401 | ||
5394 | sense_buf[16] = | 5402 | sense_buf[16] = |
5395 | ((IPR_FIELD_POINTER_MASK & | 5403 | ((IPR_FIELD_POINTER_MASK & |
5396 | be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff; | 5404 | be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff; |
5397 | sense_buf[17] = | 5405 | sense_buf[17] = |
5398 | (IPR_FIELD_POINTER_MASK & | 5406 | (IPR_FIELD_POINTER_MASK & |
5399 | be32_to_cpu(ioasa->ioasc_specific)) & 0xff; | 5407 | be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff; |
5400 | } else { | 5408 | } else { |
5401 | if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { | 5409 | if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { |
5402 | if (ipr_is_vset_device(res)) | 5410 | if (ipr_is_vset_device(res)) |
@@ -5428,14 +5436,20 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) | |||
5428 | **/ | 5436 | **/ |
5429 | static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) | 5437 | static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) |
5430 | { | 5438 | { |
5431 | struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; | 5439 | struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; |
5440 | struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; | ||
5432 | 5441 | ||
5433 | if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) | 5442 | if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) |
5434 | return 0; | 5443 | return 0; |
5435 | 5444 | ||
5436 | memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, | 5445 | if (ipr_cmd->ioa_cfg->sis64) |
5437 | min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), | 5446 | memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data, |
5438 | SCSI_SENSE_BUFFERSIZE)); | 5447 | min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len), |
5448 | SCSI_SENSE_BUFFERSIZE)); | ||
5449 | else | ||
5450 | memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, | ||
5451 | min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), | ||
5452 | SCSI_SENSE_BUFFERSIZE)); | ||
5439 | return 1; | 5453 | return 1; |
5440 | } | 5454 | } |
5441 | 5455 | ||
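ipr_get_autosense() now copies the sense bytes from whichever IOASA layout the adapter filled in, still capping the length at the sense buffer size. A compact sketch of that selection, using hypothetical layouts in place of ipr_ioasa/ipr_ioasa64:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical layouts: both formats carry auto-sense data, the 64-bit
     * one behind an extra resource-path field, so the offsets differ. */
    struct sense32 { uint16_t auto_sense_len; uint8_t data[96]; };
    struct sense64 { uint8_t fd_res_path[8]; uint16_t auto_sense_len; uint8_t data[96]; };

    static size_t copy_autosense(uint8_t *dst, size_t dst_len,
                                 const void *status, int sis64)
    {
            const uint8_t *src;
            size_t n;

            if (sis64) {
                    const struct sense64 *s = status;
                    src = s->data;
                    n   = s->auto_sense_len;
            } else {
                    const struct sense32 *s = status;
                    src = s->data;
                    n   = s->auto_sense_len;
            }
            if (n > dst_len)        /* same role as min_t(..., SCSI_SENSE_BUFFERSIZE) */
                    n = dst_len;
            memcpy(dst, src, n);
            return n;
    }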
@@ -5455,7 +5469,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, | |||
5455 | { | 5469 | { |
5456 | struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; | 5470 | struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; |
5457 | struct ipr_resource_entry *res = scsi_cmd->device->hostdata; | 5471 | struct ipr_resource_entry *res = scsi_cmd->device->hostdata; |
5458 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 5472 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
5459 | u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; | 5473 | u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; |
5460 | 5474 | ||
5461 | if (!res) { | 5475 | if (!res) { |
@@ -5547,9 +5561,9 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) | |||
5547 | { | 5561 | { |
5548 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 5562 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
5549 | struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; | 5563 | struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; |
5550 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 5564 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
5551 | 5565 | ||
5552 | scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len)); | 5566 | scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); |
5553 | 5567 | ||
5554 | if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { | 5568 | if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { |
5555 | scsi_dma_unmap(ipr_cmd->scsi_cmd); | 5569 | scsi_dma_unmap(ipr_cmd->scsi_cmd); |
@@ -5839,19 +5853,23 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd) | |||
5839 | struct ata_queued_cmd *qc = ipr_cmd->qc; | 5853 | struct ata_queued_cmd *qc = ipr_cmd->qc; |
5840 | struct ipr_sata_port *sata_port = qc->ap->private_data; | 5854 | struct ipr_sata_port *sata_port = qc->ap->private_data; |
5841 | struct ipr_resource_entry *res = sata_port->res; | 5855 | struct ipr_resource_entry *res = sata_port->res; |
5842 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 5856 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
5843 | 5857 | ||
5844 | memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata, | 5858 | if (ipr_cmd->ioa_cfg->sis64) |
5845 | sizeof(struct ipr_ioasa_gata)); | 5859 | memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, |
5860 | sizeof(struct ipr_ioasa_gata)); | ||
5861 | else | ||
5862 | memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, | ||
5863 | sizeof(struct ipr_ioasa_gata)); | ||
5846 | ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); | 5864 | ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); |
5847 | 5865 | ||
5848 | if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) | 5866 | if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) |
5849 | scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); | 5867 | scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); |
5850 | 5868 | ||
5851 | if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) | 5869 | if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) |
5852 | qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status); | 5870 | qc->err_mask |= __ac_err_mask(sata_port->ioasa.status); |
5853 | else | 5871 | else |
5854 | qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status); | 5872 | qc->err_mask |= ac_err_mask(sata_port->ioasa.status); |
5855 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); | 5873 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); |
5856 | ata_qc_complete(qc); | 5874 | ata_qc_complete(qc); |
5857 | } | 5875 | } |
@@ -6520,7 +6538,7 @@ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, | |||
6520 | static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) | 6538 | static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) |
6521 | { | 6539 | { |
6522 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 6540 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
6523 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 6541 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
6524 | 6542 | ||
6525 | dev_err(&ioa_cfg->pdev->dev, | 6543 | dev_err(&ioa_cfg->pdev->dev, |
6526 | "0x%02X failed with IOASC: 0x%08X\n", | 6544 | "0x%02X failed with IOASC: 0x%08X\n", |
@@ -6544,7 +6562,7 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) | |||
6544 | static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) | 6562 | static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) |
6545 | { | 6563 | { |
6546 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 6564 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
6547 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 6565 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
6548 | 6566 | ||
6549 | if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { | 6567 | if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { |
6550 | ipr_cmd->job_step = ipr_set_supported_devs; | 6568 | ipr_cmd->job_step = ipr_set_supported_devs; |
@@ -6634,7 +6652,7 @@ static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd) | |||
6634 | **/ | 6652 | **/ |
6635 | static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd) | 6653 | static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd) |
6636 | { | 6654 | { |
6637 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 6655 | u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
6638 | 6656 | ||
6639 | if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { | 6657 | if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { |
6640 | ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; | 6658 | ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; |
@@ -6706,7 +6724,7 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) | |||
6706 | list_move_tail(&res->queue, &old_res); | 6724 | list_move_tail(&res->queue, &old_res); |
6707 | 6725 | ||
6708 | if (ioa_cfg->sis64) | 6726 | if (ioa_cfg->sis64) |
6709 | entries = ioa_cfg->u.cfg_table64->hdr64.num_entries; | 6727 | entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); |
6710 | else | 6728 | else |
6711 | entries = ioa_cfg->u.cfg_table->hdr.num_entries; | 6729 | entries = ioa_cfg->u.cfg_table->hdr.num_entries; |
6712 | 6730 | ||
@@ -6792,6 +6810,7 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) | |||
6792 | ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); | 6810 | ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); |
6793 | 6811 | ||
6794 | ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; | 6812 | ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; |
6813 | ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; | ||
6795 | ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; | 6814 | ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; |
6796 | ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; | 6815 | ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; |
6797 | 6816 | ||
@@ -7122,7 +7141,9 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd) | |||
7122 | ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); | 7141 | ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); |
7123 | 7142 | ||
7124 | /* sanity check the stage_time value */ | 7143 | /* sanity check the stage_time value */ |
7125 | if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) | 7144 | if (stage_time == 0) |
7145 | stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME; | ||
7146 | else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) | ||
7126 | stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; | 7147 | stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; |
7127 | else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) | 7148 | else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) |
7128 | stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; | 7149 | stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; |
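The reworked sanity check treats a stage time of zero as "not reported" and substitutes the new default before applying the existing minimum/maximum clamp. Restated as a stand-alone helper, with the IPR_* constants passed in as parameters:

    /* Hypothetical helper mirroring the check above. */
    static unsigned long sanitize_stage_time(unsigned long stage_time,
                                             unsigned long def,
                                             unsigned long min,
                                             unsigned long max)
    {
            if (stage_time == 0)
                    return def;     /* adapter reported nothing usable */
            if (stage_time < min)
                    return min;
            if (stage_time > max)
                    return max;
            return stage_time;
    }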
@@ -7165,13 +7186,14 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) | |||
7165 | { | 7186 | { |
7166 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 7187 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
7167 | volatile u32 int_reg; | 7188 | volatile u32 int_reg; |
7189 | volatile u64 maskval; | ||
7168 | 7190 | ||
7169 | ENTER; | 7191 | ENTER; |
7170 | ipr_cmd->job_step = ipr_ioafp_identify_hrrq; | 7192 | ipr_cmd->job_step = ipr_ioafp_identify_hrrq; |
7171 | ipr_init_ioa_mem(ioa_cfg); | 7193 | ipr_init_ioa_mem(ioa_cfg); |
7172 | 7194 | ||
7173 | ioa_cfg->allow_interrupts = 1; | 7195 | ioa_cfg->allow_interrupts = 1; |
7174 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); | 7196 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); |
7175 | 7197 | ||
7176 | if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { | 7198 | if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { |
7177 | writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), | 7199 | writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), |
@@ -7183,9 +7205,12 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) | |||
7183 | /* Enable destructive diagnostics on IOA */ | 7205 | /* Enable destructive diagnostics on IOA */ |
7184 | writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); | 7206 | writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); |
7185 | 7207 | ||
7186 | writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); | 7208 | if (ioa_cfg->sis64) { |
7187 | if (ioa_cfg->sis64) | 7209 | maskval = IPR_PCII_IPL_STAGE_CHANGE; |
7188 | writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg); | 7210 | maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS; |
7211 | writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); | ||
7212 | } else | ||
7213 | writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); | ||
7189 | 7214 | ||
7190 | int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); | 7215 | int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); |
7191 | 7216 | ||
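On SIS-64 the enable path now clears the stage-change and operational-interrupt masks with a single 64-bit write: the stage-change bit is promoted, shifted into the upper half, and OR-ed with the 32-bit operational bits before the writeq(). The composition on its own, with placeholder constants standing in for IPR_PCII_IPL_STAGE_CHANGE and IPR_PCII_OPER_INTERRUPTS:

    #include <stdint.h>

    static uint64_t build_clr_mask(uint32_t stage_change, uint32_t oper_bits)
    {
            uint64_t maskval = stage_change;        /* promote before shifting */

            maskval = (maskval << 32) | oper_bits;  /* upper: stage change, lower: oper */
            return maskval;
    }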
@@ -7332,12 +7357,12 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) | |||
7332 | rc = pci_restore_state(ioa_cfg->pdev); | 7357 | rc = pci_restore_state(ioa_cfg->pdev); |
7333 | 7358 | ||
7334 | if (rc != PCIBIOS_SUCCESSFUL) { | 7359 | if (rc != PCIBIOS_SUCCESSFUL) { |
7335 | ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); | 7360 | ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); |
7336 | return IPR_RC_JOB_CONTINUE; | 7361 | return IPR_RC_JOB_CONTINUE; |
7337 | } | 7362 | } |
7338 | 7363 | ||
7339 | if (ipr_set_pcix_cmd_reg(ioa_cfg)) { | 7364 | if (ipr_set_pcix_cmd_reg(ioa_cfg)) { |
7340 | ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); | 7365 | ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); |
7341 | return IPR_RC_JOB_CONTINUE; | 7366 | return IPR_RC_JOB_CONTINUE; |
7342 | } | 7367 | } |
7343 | 7368 | ||
@@ -7364,7 +7389,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) | |||
7364 | } | 7389 | } |
7365 | } | 7390 | } |
7366 | 7391 | ||
7367 | ENTER; | 7392 | LEAVE; |
7368 | return IPR_RC_JOB_CONTINUE; | 7393 | return IPR_RC_JOB_CONTINUE; |
7369 | } | 7394 | } |
7370 | 7395 | ||
@@ -7406,7 +7431,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) | |||
7406 | 7431 | ||
7407 | if (rc != PCIBIOS_SUCCESSFUL) { | 7432 | if (rc != PCIBIOS_SUCCESSFUL) { |
7408 | pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev); | 7433 | pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev); |
7409 | ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); | 7434 | ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); |
7410 | rc = IPR_RC_JOB_CONTINUE; | 7435 | rc = IPR_RC_JOB_CONTINUE; |
7411 | } else { | 7436 | } else { |
7412 | ipr_cmd->job_step = ipr_reset_bist_done; | 7437 | ipr_cmd->job_step = ipr_reset_bist_done; |
@@ -7665,7 +7690,7 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) | |||
7665 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 7690 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
7666 | 7691 | ||
7667 | do { | 7692 | do { |
7668 | ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | 7693 | ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); |
7669 | 7694 | ||
7670 | if (ioa_cfg->reset_cmd != ipr_cmd) { | 7695 | if (ioa_cfg->reset_cmd != ipr_cmd) { |
7671 | /* | 7696 | /* |
@@ -8048,13 +8073,13 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) | |||
8048 | ioarcb->u.sis64_addr_data.data_ioadl_addr = | 8073 | ioarcb->u.sis64_addr_data.data_ioadl_addr = |
8049 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); | 8074 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); |
8050 | ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = | 8075 | ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = |
8051 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa)); | 8076 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64)); |
8052 | } else { | 8077 | } else { |
8053 | ioarcb->write_ioadl_addr = | 8078 | ioarcb->write_ioadl_addr = |
8054 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); | 8079 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); |
8055 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; | 8080 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; |
8056 | ioarcb->ioasa_host_pci_addr = | 8081 | ioarcb->ioasa_host_pci_addr = |
8057 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); | 8082 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa)); |
8058 | } | 8083 | } |
8059 | ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); | 8084 | ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); |
8060 | ipr_cmd->cmd_index = i; | 8085 | ipr_cmd->cmd_index = i; |
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 4c267b5e0b96..9ecd2259eb39 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h | |||
@@ -244,6 +244,7 @@ | |||
244 | #define IPR_RUNTIME_RESET 0x40000000 | 244 | #define IPR_RUNTIME_RESET 0x40000000 |
245 | 245 | ||
246 | #define IPR_IPL_INIT_MIN_STAGE_TIME 5 | 246 | #define IPR_IPL_INIT_MIN_STAGE_TIME 5 |
247 | #define IPR_IPL_INIT_DEFAULT_STAGE_TIME 15 | ||
247 | #define IPR_IPL_INIT_STAGE_UNKNOWN 0x0 | 248 | #define IPR_IPL_INIT_STAGE_UNKNOWN 0x0 |
248 | #define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000 | 249 | #define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000 |
249 | #define IPR_IPL_INIT_STAGE_MASK 0xff000000 | 250 | #define IPR_IPL_INIT_STAGE_MASK 0xff000000 |
@@ -613,7 +614,7 @@ struct ipr_auto_sense { | |||
613 | __be32 data[SCSI_SENSE_BUFFERSIZE/sizeof(__be32)]; | 614 | __be32 data[SCSI_SENSE_BUFFERSIZE/sizeof(__be32)]; |
614 | }; | 615 | }; |
615 | 616 | ||
616 | struct ipr_ioasa { | 617 | struct ipr_ioasa_hdr { |
617 | __be32 ioasc; | 618 | __be32 ioasc; |
618 | #define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24) | 619 | #define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24) |
619 | #define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16) | 620 | #define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16) |
@@ -645,6 +646,25 @@ struct ipr_ioasa { | |||
645 | #define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) | 646 | #define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) |
646 | #define IPR_FIELD_POINTER_MASK 0x0000ffff | 647 | #define IPR_FIELD_POINTER_MASK 0x0000ffff |
647 | 648 | ||
649 | }__attribute__((packed, aligned (4))); | ||
650 | |||
651 | struct ipr_ioasa { | ||
652 | struct ipr_ioasa_hdr hdr; | ||
653 | |||
654 | union { | ||
655 | struct ipr_ioasa_vset vset; | ||
656 | struct ipr_ioasa_af_dasd dasd; | ||
657 | struct ipr_ioasa_gpdd gpdd; | ||
658 | struct ipr_ioasa_gata gata; | ||
659 | } u; | ||
660 | |||
661 | struct ipr_auto_sense auto_sense; | ||
662 | }__attribute__((packed, aligned (4))); | ||
663 | |||
664 | struct ipr_ioasa64 { | ||
665 | struct ipr_ioasa_hdr hdr; | ||
666 | u8 fd_res_path[8]; | ||
667 | |||
648 | union { | 668 | union { |
649 | struct ipr_ioasa_vset vset; | 669 | struct ipr_ioasa_vset vset; |
650 | struct ipr_ioasa_af_dasd dasd; | 670 | struct ipr_ioasa_af_dasd dasd; |
@@ -804,7 +824,7 @@ struct ipr_hostrcb_array_data_entry_enhanced { | |||
804 | }__attribute__((packed, aligned (4))); | 824 | }__attribute__((packed, aligned (4))); |
805 | 825 | ||
806 | struct ipr_hostrcb_type_ff_error { | 826 | struct ipr_hostrcb_type_ff_error { |
807 | __be32 ioa_data[502]; | 827 | __be32 ioa_data[758]; |
808 | }__attribute__((packed, aligned (4))); | 828 | }__attribute__((packed, aligned (4))); |
809 | 829 | ||
810 | struct ipr_hostrcb_type_01_error { | 830 | struct ipr_hostrcb_type_01_error { |
@@ -1181,7 +1201,7 @@ struct ipr_resource_entry { | |||
1181 | u8 flags; | 1201 | u8 flags; |
1182 | __be16 res_flags; | 1202 | __be16 res_flags; |
1183 | 1203 | ||
1184 | __be32 type; | 1204 | u8 type; |
1185 | 1205 | ||
1186 | u8 qmodel; | 1206 | u8 qmodel; |
1187 | struct ipr_std_inq_data std_inq_data; | 1207 | struct ipr_std_inq_data std_inq_data; |
@@ -1464,7 +1484,10 @@ struct ipr_cmnd { | |||
1464 | struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; | 1484 | struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; |
1465 | struct ipr_ata64_ioadl ata_ioadl; | 1485 | struct ipr_ata64_ioadl ata_ioadl; |
1466 | } i; | 1486 | } i; |
1467 | struct ipr_ioasa ioasa; | 1487 | union { |
1488 | struct ipr_ioasa ioasa; | ||
1489 | struct ipr_ioasa64 ioasa64; | ||
1490 | } s; | ||
1468 | struct list_head queue; | 1491 | struct list_head queue; |
1469 | struct scsi_cmnd *scsi_cmd; | 1492 | struct scsi_cmnd *scsi_cmd; |
1470 | struct ata_queued_cmd *qc; | 1493 | struct ata_queued_cmd *qc; |
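The header change factors the old struct ipr_ioasa into a shared ipr_ioasa_hdr plus two full layouts, and struct ipr_cmnd keeps the two overlaid in a union so one command block serves either format. A reduced model of that shape, with hypothetical field names rather than the real ipr definitions:

    #include <stdint.h>

    struct hdr { uint32_t ioasc; uint32_t residual_data_len; };

    struct status32 { struct hdr hdr; uint8_t payload[32]; };
    struct status64 { struct hdr hdr; uint8_t fd_res_path[8]; uint8_t payload[32]; };

    struct cmd_block {
            union {
                    struct status32 ioasa;      /* legacy format */
                    struct status64 ioasa64;    /* SIS-64 format */
            } s;
    };

    /* Because the header leads both members, the IOASC can be read through
     * s.ioasa.hdr no matter which format the adapter actually wrote. */
    static uint32_t read_ioasc(const struct cmd_block *cmd)
    {
            return cmd->s.ioasa.hdr.ioasc;
    }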
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index bf55d3057413..fec47de72535 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
@@ -601,10 +601,8 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | |||
601 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); | 601 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); |
602 | write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); | 602 | write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); |
603 | 603 | ||
604 | if (sk_sleep(sock->sk)) { | 604 | sock->sk->sk_err = EIO; |
605 | sock->sk->sk_err = EIO; | 605 | wake_up_interruptible(sk_sleep(sock->sk)); |
606 | wake_up_interruptible(sk_sleep(sock->sk)); | ||
607 | } | ||
608 | 606 | ||
609 | iscsi_conn_stop(cls_conn, flag); | 607 | iscsi_conn_stop(cls_conn, flag); |
610 | iscsi_sw_tcp_release_conn(conn); | 608 | iscsi_sw_tcp_release_conn(conn); |
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c index 18735b39b3d3..3ddb4dc62d5d 100644 --- a/drivers/scsi/mac53c94.c +++ b/drivers/scsi/mac53c94.c | |||
@@ -542,8 +542,11 @@ MODULE_DEVICE_TABLE (of, mac53c94_match); | |||
542 | 542 | ||
543 | static struct macio_driver mac53c94_driver = | 543 | static struct macio_driver mac53c94_driver = |
544 | { | 544 | { |
545 | .name = "mac53c94", | 545 | .driver = { |
546 | .match_table = mac53c94_match, | 546 | .name = "mac53c94", |
547 | .owner = THIS_MODULE, | ||
548 | .of_match_table = mac53c94_match, | ||
549 | }, | ||
547 | .probe = mac53c94_probe, | 550 | .probe = mac53c94_probe, |
548 | .remove = mac53c94_remove, | 551 | .remove = mac53c94_remove, |
549 | }; | 552 | }; |
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 4bf7edca9e69..0b6e3228610a 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c | |||
@@ -91,12 +91,15 @@ static struct proc_dir_entry *mega_proc_dir_entry; | |||
91 | /* For controller re-ordering */ | 91 | /* For controller re-ordering */ |
92 | static struct mega_hbas mega_hbas[MAX_CONTROLLERS]; | 92 | static struct mega_hbas mega_hbas[MAX_CONTROLLERS]; |
93 | 93 | ||
94 | static long | ||
95 | megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); | ||
96 | |||
94 | /* | 97 | /* |
95 | * The File Operations structure for the serial/ioctl interface of the driver | 98 | * The File Operations structure for the serial/ioctl interface of the driver |
96 | */ | 99 | */ |
97 | static const struct file_operations megadev_fops = { | 100 | static const struct file_operations megadev_fops = { |
98 | .owner = THIS_MODULE, | 101 | .owner = THIS_MODULE, |
99 | .ioctl = megadev_ioctl, | 102 | .unlocked_ioctl = megadev_unlocked_ioctl, |
100 | .open = megadev_open, | 103 | .open = megadev_open, |
101 | }; | 104 | }; |
102 | 105 | ||
@@ -3302,8 +3305,7 @@ megadev_open (struct inode *inode, struct file *filep) | |||
3302 | * controller. | 3305 | * controller. |
3303 | */ | 3306 | */ |
3304 | static int | 3307 | static int |
3305 | megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, | 3308 | megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) |
3306 | unsigned long arg) | ||
3307 | { | 3309 | { |
3308 | adapter_t *adapter; | 3310 | adapter_t *adapter; |
3309 | nitioctl_t uioc; | 3311 | nitioctl_t uioc; |
@@ -3694,6 +3696,18 @@ freemem_and_return: | |||
3694 | return 0; | 3696 | return 0; |
3695 | } | 3697 | } |
3696 | 3698 | ||
3699 | static long | ||
3700 | megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) | ||
3701 | { | ||
3702 | int ret; | ||
3703 | |||
3704 | lock_kernel(); | ||
3705 | ret = megadev_ioctl(filep, cmd, arg); | ||
3706 | unlock_kernel(); | ||
3707 | |||
3708 | return ret; | ||
3709 | } | ||
3710 | |||
3697 | /** | 3711 | /** |
3698 | * mega_m_to_n() | 3712 | * mega_m_to_n() |
3699 | * @arg - user address | 3713 | * @arg - user address |
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h index d310f49d077e..2b4a048cadf1 100644 --- a/drivers/scsi/megaraid.h +++ b/drivers/scsi/megaraid.h | |||
@@ -1013,8 +1013,7 @@ static void mega_8_to_40ld (mraid_inquiry *inquiry, | |||
1013 | mega_inquiry3 *enquiry3, mega_product_info *); | 1013 | mega_inquiry3 *enquiry3, mega_product_info *); |
1014 | 1014 | ||
1015 | static int megadev_open (struct inode *, struct file *); | 1015 | static int megadev_open (struct inode *, struct file *); |
1016 | static int megadev_ioctl (struct inode *, struct file *, unsigned int, | 1016 | static int megadev_ioctl (struct file *, unsigned int, unsigned long); |
1017 | unsigned long); | ||
1018 | static int mega_m_to_n(void __user *, nitioctl_t *); | 1017 | static int mega_m_to_n(void __user *, nitioctl_t *); |
1019 | static int mega_n_to_m(void __user *, megacmd_t *); | 1018 | static int mega_n_to_m(void __user *, megacmd_t *); |
1020 | 1019 | ||
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index 36e0b7d05c1d..41f82f76d884 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c | |||
@@ -22,7 +22,7 @@ | |||
22 | 22 | ||
23 | // Entry points for char node driver | 23 | // Entry points for char node driver |
24 | static int mraid_mm_open(struct inode *, struct file *); | 24 | static int mraid_mm_open(struct inode *, struct file *); |
25 | static int mraid_mm_ioctl(struct inode *, struct file *, uint, unsigned long); | 25 | static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long); |
26 | 26 | ||
27 | 27 | ||
28 | // routines to convert to and from the old the format | 28 | // routines to convert to and from the old the format |
@@ -70,7 +70,7 @@ static wait_queue_head_t wait_q; | |||
70 | 70 | ||
71 | static const struct file_operations lsi_fops = { | 71 | static const struct file_operations lsi_fops = { |
72 | .open = mraid_mm_open, | 72 | .open = mraid_mm_open, |
73 | .ioctl = mraid_mm_ioctl, | 73 | .unlocked_ioctl = mraid_mm_unlocked_ioctl, |
74 | #ifdef CONFIG_COMPAT | 74 | #ifdef CONFIG_COMPAT |
75 | .compat_ioctl = mraid_mm_compat_ioctl, | 75 | .compat_ioctl = mraid_mm_compat_ioctl, |
76 | #endif | 76 | #endif |
@@ -110,8 +110,7 @@ mraid_mm_open(struct inode *inode, struct file *filep) | |||
110 | * @arg : user ioctl packet | 110 | * @arg : user ioctl packet |
111 | */ | 111 | */ |
112 | static int | 112 | static int |
113 | mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, | 113 | mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) |
114 | unsigned long arg) | ||
115 | { | 114 | { |
116 | uioc_t *kioc; | 115 | uioc_t *kioc; |
117 | char signature[EXT_IOCTL_SIGN_SZ] = {0}; | 116 | char signature[EXT_IOCTL_SIGN_SZ] = {0}; |
@@ -218,6 +217,19 @@ mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, | |||
218 | return rval; | 217 | return rval; |
219 | } | 218 | } |
220 | 219 | ||
220 | static long | ||
221 | mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd, | ||
222 | unsigned long arg) | ||
223 | { | ||
224 | int err; | ||
225 | |||
226 | /* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */ | ||
227 | lock_kernel(); | ||
228 | err = mraid_mm_ioctl(filep, cmd, arg); | ||
229 | unlock_kernel(); | ||
230 | |||
231 | return err; | ||
232 | } | ||
221 | 233 | ||
222 | /** | 234 | /** |
223 | * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet | 235 | * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet |
@@ -1225,7 +1237,7 @@ mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd, | |||
1225 | { | 1237 | { |
1226 | int err; | 1238 | int err; |
1227 | 1239 | ||
1228 | err = mraid_mm_ioctl(NULL, filep, cmd, arg); | 1240 | err = mraid_mm_ioctl(filep, cmd, arg); |
1229 | 1241 | ||
1230 | return err; | 1242 | return err; |
1231 | } | 1243 | } |
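megaraid and megaraid_mm follow the same conversion as the other character drivers in this merge: the handler loses its inode argument, the file_operations entry moves from .ioctl to .unlocked_ioctl, and a small wrapper keeps the old big-kernel-lock serialization. A generic sketch of that wrapper (assumes the 2.6.35-era lock_kernel()/unlock_kernel() API; my_ioctl and my_fops are placeholders):

    static int my_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);

    static long my_unlocked_ioctl(struct file *filp, unsigned int cmd,
                                  unsigned long arg)
    {
            long ret;

            lock_kernel();                  /* preserve the serialization .ioctl had */
            ret = my_ioctl(filp, cmd, arg);
            unlock_kernel();

            return ret;
    }

    static const struct file_operations my_fops = {
            .owner          = THIS_MODULE,
            .unlocked_ioctl = my_unlocked_ioctl,
    };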
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index a1c97e88068a..1f784fde2510 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c | |||
@@ -2036,8 +2036,11 @@ MODULE_DEVICE_TABLE (of, mesh_match); | |||
2036 | 2036 | ||
2037 | static struct macio_driver mesh_driver = | 2037 | static struct macio_driver mesh_driver = |
2038 | { | 2038 | { |
2039 | .name = "mesh", | 2039 | .driver = { |
2040 | .match_table = mesh_match, | 2040 | .name = "mesh", |
2041 | .owner = THIS_MODULE, | ||
2042 | .of_match_table = mesh_match, | ||
2043 | }, | ||
2041 | .probe = mesh_probe, | 2044 | .probe = mesh_probe, |
2042 | .remove = mesh_remove, | 2045 | .remove = mesh_remove, |
2043 | .shutdown = mesh_shutdown, | 2046 | .shutdown = mesh_shutdown, |
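Both mac53c94 and mesh pick up the same registration change: the name, module owner, and OF match table move into the embedded struct device_driver, with the match table now under .of_match_table. The resulting shape, with placeholder callbacks:

    /* my_match, my_probe and my_remove are placeholders. */
    static struct macio_driver my_driver = {
            .driver = {
                    .name           = "example",
                    .owner          = THIS_MODULE,
                    .of_match_table = my_match,
            },
            .probe  = my_probe,
            .remove = my_remove,
    };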
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index b830d61684dd..0ec1ed389c20 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
@@ -3757,7 +3757,7 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) | |||
3757 | if (ioc->config_cmds.status & MPT2_CMD_PENDING) { | 3757 | if (ioc->config_cmds.status & MPT2_CMD_PENDING) { |
3758 | ioc->config_cmds.status |= MPT2_CMD_RESET; | 3758 | ioc->config_cmds.status |= MPT2_CMD_RESET; |
3759 | mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid); | 3759 | mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid); |
3760 | ioc->config_cmds.smid = USHORT_MAX; | 3760 | ioc->config_cmds.smid = USHRT_MAX; |
3761 | complete(&ioc->config_cmds.done); | 3761 | complete(&ioc->config_cmds.done); |
3762 | } | 3762 | } |
3763 | break; | 3763 | break; |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c index e762dd3e2fcb..c65442982d7b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c | |||
@@ -258,7 +258,7 @@ mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |||
258 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 258 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
259 | _config_display_some_debug(ioc, smid, "config_done", mpi_reply); | 259 | _config_display_some_debug(ioc, smid, "config_done", mpi_reply); |
260 | #endif | 260 | #endif |
261 | ioc->config_cmds.smid = USHORT_MAX; | 261 | ioc->config_cmds.smid = USHRT_MAX; |
262 | complete(&ioc->config_cmds.done); | 262 | complete(&ioc->config_cmds.done); |
263 | return 1; | 263 | return 1; |
264 | } | 264 | } |
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c index 716d1785cda7..c29d0dbb9660 100644 --- a/drivers/scsi/mvme147.c +++ b/drivers/scsi/mvme147.c | |||
@@ -16,12 +16,12 @@ | |||
16 | #include <linux/stat.h> | 16 | #include <linux/stat.h> |
17 | 17 | ||
18 | 18 | ||
19 | static struct Scsi_Host *mvme147_host = NULL; | 19 | static irqreturn_t mvme147_intr(int irq, void *data) |
20 | |||
21 | static irqreturn_t mvme147_intr(int irq, void *dummy) | ||
22 | { | 20 | { |
21 | struct Scsi_Host *instance = data; | ||
22 | |||
23 | if (irq == MVME147_IRQ_SCSI_PORT) | 23 | if (irq == MVME147_IRQ_SCSI_PORT) |
24 | wd33c93_intr(mvme147_host); | 24 | wd33c93_intr(instance); |
25 | else | 25 | else |
26 | m147_pcc->dma_intr = 0x89; /* Ack and enable ints */ | 26 | m147_pcc->dma_intr = 0x89; /* Ack and enable ints */ |
27 | return IRQ_HANDLED; | 27 | return IRQ_HANDLED; |
@@ -29,7 +29,8 @@ static irqreturn_t mvme147_intr(int irq, void *dummy) | |||
29 | 29 | ||
30 | static int dma_setup(struct scsi_cmnd *cmd, int dir_in) | 30 | static int dma_setup(struct scsi_cmnd *cmd, int dir_in) |
31 | { | 31 | { |
32 | struct WD33C93_hostdata *hdata = shost_priv(mvme147_host); | 32 | struct Scsi_Host *instance = cmd->device->host; |
33 | struct WD33C93_hostdata *hdata = shost_priv(instance); | ||
33 | unsigned char flags = 0x01; | 34 | unsigned char flags = 0x01; |
34 | unsigned long addr = virt_to_bus(cmd->SCp.ptr); | 35 | unsigned long addr = virt_to_bus(cmd->SCp.ptr); |
35 | 36 | ||
@@ -66,6 +67,7 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, | |||
66 | int mvme147_detect(struct scsi_host_template *tpnt) | 67 | int mvme147_detect(struct scsi_host_template *tpnt) |
67 | { | 68 | { |
68 | static unsigned char called = 0; | 69 | static unsigned char called = 0; |
70 | struct Scsi_Host *instance; | ||
69 | wd33c93_regs regs; | 71 | wd33c93_regs regs; |
70 | struct WD33C93_hostdata *hdata; | 72 | struct WD33C93_hostdata *hdata; |
71 | 73 | ||
@@ -76,25 +78,25 @@ int mvme147_detect(struct scsi_host_template *tpnt) | |||
76 | tpnt->proc_name = "MVME147"; | 78 | tpnt->proc_name = "MVME147"; |
77 | tpnt->proc_info = &wd33c93_proc_info; | 79 | tpnt->proc_info = &wd33c93_proc_info; |
78 | 80 | ||
79 | mvme147_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); | 81 | instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); |
80 | if (!mvme147_host) | 82 | if (!instance) |
81 | goto err_out; | 83 | goto err_out; |
82 | 84 | ||
83 | mvme147_host->base = 0xfffe4000; | 85 | instance->base = 0xfffe4000; |
84 | mvme147_host->irq = MVME147_IRQ_SCSI_PORT; | 86 | instance->irq = MVME147_IRQ_SCSI_PORT; |
85 | regs.SASR = (volatile unsigned char *)0xfffe4000; | 87 | regs.SASR = (volatile unsigned char *)0xfffe4000; |
86 | regs.SCMD = (volatile unsigned char *)0xfffe4001; | 88 | regs.SCMD = (volatile unsigned char *)0xfffe4001; |
87 | hdata = shost_priv(mvme147_host); | 89 | hdata = shost_priv(instance); |
88 | hdata->no_sync = 0xff; | 90 | hdata->no_sync = 0xff; |
89 | hdata->fast = 0; | 91 | hdata->fast = 0; |
90 | hdata->dma_mode = CTRL_DMA; | 92 | hdata->dma_mode = CTRL_DMA; |
91 | wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10); | 93 | wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10); |
92 | 94 | ||
93 | if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, | 95 | if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, |
94 | "MVME147 SCSI PORT", mvme147_intr)) | 96 | "MVME147 SCSI PORT", instance)) |
95 | goto err_unregister; | 97 | goto err_unregister; |
96 | if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0, | 98 | if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0, |
97 | "MVME147 SCSI DMA", mvme147_intr)) | 99 | "MVME147 SCSI DMA", instance)) |
98 | goto err_free_irq; | 100 | goto err_free_irq; |
99 | #if 0 /* Disabled; causes problems booting */ | 101 | #if 0 /* Disabled; causes problems booting */ |
100 | m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */ | 102 | m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */ |
@@ -113,7 +115,7 @@ int mvme147_detect(struct scsi_host_template *tpnt) | |||
113 | err_free_irq: | 115 | err_free_irq: |
114 | free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr); | 116 | free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr); |
115 | err_unregister: | 117 | err_unregister: |
116 | scsi_unregister(mvme147_host); | 118 | scsi_unregister(instance); |
117 | err_out: | 119 | err_out: |
118 | return 0; | 120 | return 0; |
119 | } | 121 | } |
@@ -132,9 +134,6 @@ static int mvme147_bus_reset(struct scsi_cmnd *cmd) | |||
132 | return SUCCESS; | 134 | return SUCCESS; |
133 | } | 135 | } |
134 | 136 | ||
135 | #define HOSTS_C | ||
136 | |||
137 | #include "mvme147.h" | ||
138 | 137 | ||
139 | static struct scsi_host_template driver_template = { | 138 | static struct scsi_host_template driver_template = { |
140 | .proc_name = "MVME147", | 139 | .proc_name = "MVME147", |
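The mvme147 changes retire the file-scope mvme147_host pointer: the host is handed to request_irq() as the dev_id cookie and recovered in the handler, while dma_setup() reaches it through cmd->device->host. A minimal sketch of the dev_id half of that pattern (EXAMPLE_IRQ and the probe snippet are illustrative only):

    /* Interrupt handler: the data argument is whatever was passed as dev_id. */
    static irqreturn_t example_intr(int irq, void *data)
    {
            struct Scsi_Host *instance = data;

            wd33c93_intr(instance);         /* no global host pointer needed */
            return IRQ_HANDLED;
    }

    /* At probe time, pass the host itself as dev_id:
     *     request_irq(EXAMPLE_IRQ, example_intr, 0, "example scsi", instance);
     * and hand the same pointer to free_irq() on the teardown path.
     */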
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index b219118f8bd6..d64b7178fa08 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c | |||
@@ -3587,7 +3587,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name | |||
3587 | if (i == (-ENOSPC)) { | 3587 | if (i == (-ENOSPC)) { |
3588 | transfer = STp->buffer->writing; /* FIXME -- check this logic */ | 3588 | transfer = STp->buffer->writing; /* FIXME -- check this logic */ |
3589 | if (transfer <= do_count) { | 3589 | if (transfer <= do_count) { |
3590 | filp->f_pos += do_count - transfer; | 3590 | *ppos += do_count - transfer; |
3591 | count -= do_count - transfer; | 3591 | count -= do_count - transfer; |
3592 | if (STps->drv_block >= 0) { | 3592 | if (STps->drv_block >= 0) { |
3593 | STps->drv_block += (do_count - transfer) / STp->block_size; | 3593 | STps->drv_block += (do_count - transfer) / STp->block_size; |
@@ -3625,7 +3625,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name | |||
3625 | goto out; | 3625 | goto out; |
3626 | } | 3626 | } |
3627 | 3627 | ||
3628 | filp->f_pos += do_count; | 3628 | *ppos += do_count; |
3629 | b_point += do_count; | 3629 | b_point += do_count; |
3630 | count -= do_count; | 3630 | count -= do_count; |
3631 | if (STps->drv_block >= 0) { | 3631 | if (STps->drv_block >= 0) { |
@@ -3647,7 +3647,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name | |||
3647 | if (STps->drv_block >= 0) { | 3647 | if (STps->drv_block >= 0) { |
3648 | STps->drv_block += blks; | 3648 | STps->drv_block += blks; |
3649 | } | 3649 | } |
3650 | filp->f_pos += count; | 3650 | *ppos += count; |
3651 | count = 0; | 3651 | count = 0; |
3652 | } | 3652 | } |
3653 | 3653 | ||
@@ -3823,7 +3823,7 @@ static ssize_t osst_read(struct file * filp, char __user * buf, size_t count, lo | |||
3823 | } | 3823 | } |
3824 | STp->logical_blk_num += transfer / STp->block_size; | 3824 | STp->logical_blk_num += transfer / STp->block_size; |
3825 | STps->drv_block += transfer / STp->block_size; | 3825 | STps->drv_block += transfer / STp->block_size; |
3826 | filp->f_pos += transfer; | 3826 | *ppos += transfer; |
3827 | buf += transfer; | 3827 | buf += transfer; |
3828 | total += transfer; | 3828 | total += transfer; |
3829 | } | 3829 | } |
@@ -4932,7 +4932,7 @@ static int os_scsi_tape_close(struct inode * inode, struct file * filp) | |||
4932 | 4932 | ||
4933 | 4933 | ||
4934 | /* The ioctl command */ | 4934 | /* The ioctl command */ |
4935 | static int osst_ioctl(struct inode * inode,struct file * file, | 4935 | static long osst_ioctl(struct file * file, |
4936 | unsigned int cmd_in, unsigned long arg) | 4936 | unsigned int cmd_in, unsigned long arg) |
4937 | { | 4937 | { |
4938 | int i, cmd_nr, cmd_type, blk, retval = 0; | 4938 | int i, cmd_nr, cmd_type, blk, retval = 0; |
@@ -4943,8 +4943,11 @@ static int osst_ioctl(struct inode * inode,struct file * file, | |||
4943 | char * name = tape_name(STp); | 4943 | char * name = tape_name(STp); |
4944 | void __user * p = (void __user *)arg; | 4944 | void __user * p = (void __user *)arg; |
4945 | 4945 | ||
4946 | if (mutex_lock_interruptible(&STp->lock)) | 4946 | lock_kernel(); |
4947 | if (mutex_lock_interruptible(&STp->lock)) { | ||
4948 | unlock_kernel(); | ||
4947 | return -ERESTARTSYS; | 4949 | return -ERESTARTSYS; |
4950 | } | ||
4948 | 4951 | ||
4949 | #if DEBUG | 4952 | #if DEBUG |
4950 | if (debugging && !STp->in_use) { | 4953 | if (debugging && !STp->in_use) { |
@@ -5256,12 +5259,15 @@ static int osst_ioctl(struct inode * inode,struct file * file, | |||
5256 | 5259 | ||
5257 | mutex_unlock(&STp->lock); | 5260 | mutex_unlock(&STp->lock); |
5258 | 5261 | ||
5259 | return scsi_ioctl(STp->device, cmd_in, p); | 5262 | retval = scsi_ioctl(STp->device, cmd_in, p); |
5263 | unlock_kernel(); | ||
5264 | return retval; | ||
5260 | 5265 | ||
5261 | out: | 5266 | out: |
5262 | if (SRpnt) osst_release_request(SRpnt); | 5267 | if (SRpnt) osst_release_request(SRpnt); |
5263 | 5268 | ||
5264 | mutex_unlock(&STp->lock); | 5269 | mutex_unlock(&STp->lock); |
5270 | unlock_kernel(); | ||
5265 | 5271 | ||
5266 | return retval; | 5272 | return retval; |
5267 | } | 5273 | } |
@@ -5613,13 +5619,14 @@ static const struct file_operations osst_fops = { | |||
5613 | .owner = THIS_MODULE, | 5619 | .owner = THIS_MODULE, |
5614 | .read = osst_read, | 5620 | .read = osst_read, |
5615 | .write = osst_write, | 5621 | .write = osst_write, |
5616 | .ioctl = osst_ioctl, | 5622 | .unlocked_ioctl = osst_ioctl, |
5617 | #ifdef CONFIG_COMPAT | 5623 | #ifdef CONFIG_COMPAT |
5618 | .compat_ioctl = osst_compat_ioctl, | 5624 | .compat_ioctl = osst_compat_ioctl, |
5619 | #endif | 5625 | #endif |
5620 | .open = os_scsi_tape_open, | 5626 | .open = os_scsi_tape_open, |
5621 | .flush = os_scsi_tape_flush, | 5627 | .flush = os_scsi_tape_flush, |
5622 | .release = os_scsi_tape_close, | 5628 | .release = os_scsi_tape_close, |
5629 | .llseek = noop_llseek, | ||
5623 | }; | 5630 | }; |
5624 | 5631 | ||
5625 | static int osst_supports(struct scsi_device * SDp) | 5632 | static int osst_supports(struct scsi_device * SDp) |
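Beyond the ioctl conversion, the osst read/write paths stop touching filp->f_pos directly and advance the loff_t *ppos the VFS handed them. A skeletal ->write illustrating only that bookkeeping; the actual transfer to the device is elided:

    static ssize_t example_write(struct file *filp, const char __user *buf,
                                 size_t count, loff_t *ppos)
    {
            size_t done = count;    /* bytes actually accepted in this pass */

            /* ... copy 'done' bytes from buf and queue them to the device ... */

            *ppos += done;          /* was: filp->f_pos += done */
            return done;
    }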
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index aa406497eebc..ca5c15c779cf 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
@@ -755,7 +755,7 @@ static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti) | |||
755 | struct of_device *op = qpti->op; | 755 | struct of_device *op = qpti->op; |
756 | struct device_node *dp; | 756 | struct device_node *dp; |
757 | 757 | ||
758 | dp = op->node; | 758 | dp = op->dev.of_node; |
759 | 759 | ||
760 | qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1); | 760 | qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1); |
761 | if (qpti->scsi_id == -1) | 761 | if (qpti->scsi_id == -1) |
@@ -776,8 +776,8 @@ static void qpti_get_bursts(struct qlogicpti *qpti) | |||
776 | struct of_device *op = qpti->op; | 776 | struct of_device *op = qpti->op; |
777 | u8 bursts, bmask; | 777 | u8 bursts, bmask; |
778 | 778 | ||
779 | bursts = of_getintprop_default(op->node, "burst-sizes", 0xff); | 779 | bursts = of_getintprop_default(op->dev.of_node, "burst-sizes", 0xff); |
780 | bmask = of_getintprop_default(op->node->parent, "burst-sizes", 0xff); | 780 | bmask = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0xff); |
781 | if (bmask != 0xff) | 781 | if (bmask != 0xff) |
782 | bursts &= bmask; | 782 | bursts &= bmask; |
783 | if (bursts == 0xff || | 783 | if (bursts == 0xff || |
@@ -1293,7 +1293,7 @@ static struct scsi_host_template qpti_template = { | |||
1293 | static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_device_id *match) | 1293 | static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_device_id *match) |
1294 | { | 1294 | { |
1295 | struct scsi_host_template *tpnt = match->data; | 1295 | struct scsi_host_template *tpnt = match->data; |
1296 | struct device_node *dp = op->node; | 1296 | struct device_node *dp = op->dev.of_node; |
1297 | struct Scsi_Host *host; | 1297 | struct Scsi_Host *host; |
1298 | struct qlogicpti *qpti; | 1298 | struct qlogicpti *qpti; |
1299 | static int nqptis; | 1299 | static int nqptis; |
@@ -1315,7 +1315,7 @@ static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_devic | |||
1315 | qpti->qhost = host; | 1315 | qpti->qhost = host; |
1316 | qpti->op = op; | 1316 | qpti->op = op; |
1317 | qpti->qpti_id = nqptis; | 1317 | qpti->qpti_id = nqptis; |
1318 | strcpy(qpti->prom_name, op->node->name); | 1318 | strcpy(qpti->prom_name, op->dev.of_node->name); |
1319 | qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp"); | 1319 | qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp"); |
1320 | 1320 | ||
1321 | if (qpti_map_regs(qpti) < 0) | 1321 | if (qpti_map_regs(qpti) < 0) |
@@ -1456,8 +1456,11 @@ static const struct of_device_id qpti_match[] = { | |||
1456 | MODULE_DEVICE_TABLE(of, qpti_match); | 1456 | MODULE_DEVICE_TABLE(of, qpti_match); |
1457 | 1457 | ||
1458 | static struct of_platform_driver qpti_sbus_driver = { | 1458 | static struct of_platform_driver qpti_sbus_driver = { |
1459 | .name = "qpti", | 1459 | .driver = { |
1460 | .match_table = qpti_match, | 1460 | .name = "qpti", |
1461 | .owner = THIS_MODULE, | ||
1462 | .of_match_table = qpti_match, | ||
1463 | }, | ||
1461 | .probe = qpti_sbus_probe, | 1464 | .probe = qpti_sbus_probe, |
1462 | .remove = __devexit_p(qpti_sbus_remove), | 1465 | .remove = __devexit_p(qpti_sbus_remove), |
1463 | }; | 1466 | }; |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index c992ecf4e372..1c027a97d8b9 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -492,19 +492,20 @@ void scsi_target_reap(struct scsi_target *starget) | |||
492 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | 492 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
493 | unsigned long flags; | 493 | unsigned long flags; |
494 | enum scsi_target_state state; | 494 | enum scsi_target_state state; |
495 | int empty; | 495 | int empty = 0; |
496 | 496 | ||
497 | spin_lock_irqsave(shost->host_lock, flags); | 497 | spin_lock_irqsave(shost->host_lock, flags); |
498 | state = starget->state; | 498 | state = starget->state; |
499 | empty = --starget->reap_ref == 0 && | 499 | if (--starget->reap_ref == 0 && list_empty(&starget->devices)) { |
500 | list_empty(&starget->devices) ? 1 : 0; | 500 | empty = 1; |
501 | starget->state = STARGET_DEL; | ||
502 | } | ||
501 | spin_unlock_irqrestore(shost->host_lock, flags); | 503 | spin_unlock_irqrestore(shost->host_lock, flags); |
502 | 504 | ||
503 | if (!empty) | 505 | if (!empty) |
504 | return; | 506 | return; |
505 | 507 | ||
506 | BUG_ON(state == STARGET_DEL); | 508 | BUG_ON(state == STARGET_DEL); |
507 | starget->state = STARGET_DEL; | ||
508 | if (state == STARGET_CREATED) | 509 | if (state == STARGET_CREATED) |
509 | scsi_target_destroy(starget); | 510 | scsi_target_destroy(starget); |
510 | else | 511 | else |
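
The scsi_target_reap() hunk above closes a race by making the decision to reap and the STARGET_DEL transition atomic with respect to other host_lock holders. Reduced to a sketch of just that pattern (fields and names mirror the hunk; teardown details omitted):

/* Sketch: drop the reference, test for emptiness and mark the target
 * as deleting all under one host_lock hold, so a concurrent lookup
 * sees either "live" or "deleting", never the in-between state. */
static void reap_pattern(struct scsi_target *starget, struct Scsi_Host *shost)
{
	unsigned long flags;
	int empty = 0;

	spin_lock_irqsave(shost->host_lock, flags);
	if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
		empty = 1;
		starget->state = STARGET_DEL;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (!empty)
		return;
	/* actual teardown happens outside the lock */
}
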
@@ -1220,7 +1221,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget, | |||
1220 | } | 1221 | } |
1221 | 1222 | ||
1222 | /** | 1223 | /** |
1223 | * scsilun_to_int: convert a scsi_lun to an int | 1224 | * scsilun_to_int - convert a scsi_lun to an int |
1224 | * @scsilun: struct scsi_lun to be converted. | 1225 | * @scsilun: struct scsi_lun to be converted. |
1225 | * | 1226 | * |
1226 | * Description: | 1227 | * Description: |
@@ -1252,7 +1253,7 @@ int scsilun_to_int(struct scsi_lun *scsilun) | |||
1252 | EXPORT_SYMBOL(scsilun_to_int); | 1253 | EXPORT_SYMBOL(scsilun_to_int); |
1253 | 1254 | ||
1254 | /** | 1255 | /** |
1255 | * int_to_scsilun: reverts an int into a scsi_lun | 1256 | * int_to_scsilun - reverts an int into a scsi_lun |
1256 | * @lun: integer to be reverted | 1257 | * @lun: integer to be reverted |
1257 | * @scsilun: struct scsi_lun to be set. | 1258 | * @scsilun: struct scsi_lun to be set. |
1258 | * | 1259 | * |
@@ -1876,12 +1877,9 @@ void scsi_forget_host(struct Scsi_Host *shost) | |||
1876 | spin_unlock_irqrestore(shost->host_lock, flags); | 1877 | spin_unlock_irqrestore(shost->host_lock, flags); |
1877 | } | 1878 | } |
1878 | 1879 | ||
1879 | /* | 1880 | /** |
1880 | * Function: scsi_get_host_dev() | 1881 | * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself |
1881 | * | 1882 | * @shost: Host that needs a scsi_device |
1882 | * Purpose: Create a scsi_device that points to the host adapter itself. | ||
1883 | * | ||
1884 | * Arguments: SHpnt - Host that needs a scsi_device | ||
1885 | * | 1883 | * |
1886 | * Lock status: None assumed. | 1884 | * Lock status: None assumed. |
1887 | * | 1885 | * |
@@ -1894,7 +1892,7 @@ void scsi_forget_host(struct Scsi_Host *shost) | |||
1894 | * | 1892 | * |
1895 | * Note - this device is not accessible from any high-level | 1893 | * Note - this device is not accessible from any high-level |
1896 | * drivers (including generics), which is probably not | 1894 | * drivers (including generics), which is probably not |
1897 | * optimal. We can add hooks later to attach | 1895 | * optimal. We can add hooks later to attach. |
1898 | */ | 1896 | */ |
1899 | struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost) | 1897 | struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost) |
1900 | { | 1898 | { |
@@ -1920,18 +1918,13 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost) | |||
1920 | } | 1918 | } |
1921 | EXPORT_SYMBOL(scsi_get_host_dev); | 1919 | EXPORT_SYMBOL(scsi_get_host_dev); |
1922 | 1920 | ||
1923 | /* | 1921 | /** |
1924 | * Function: scsi_free_host_dev() | 1922 | * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself |
1925 | * | 1923 | * @sdev: Host device to be freed |
1926 | * Purpose: Free a scsi_device that points to the host adapter itself. | ||
1927 | * | ||
1928 | * Arguments: SHpnt - Host that needs a scsi_device | ||
1929 | * | 1924 | * |
1930 | * Lock status: None assumed. | 1925 | * Lock status: None assumed. |
1931 | * | 1926 | * |
1932 | * Returns: Nothing | 1927 | * Returns: Nothing |
1933 | * | ||
1934 | * Notes: | ||
1935 | */ | 1928 | */ |
1936 | void scsi_free_host_dev(struct scsi_device *sdev) | 1929 | void scsi_free_host_dev(struct scsi_device *sdev) |
1937 | { | 1930 | { |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 829cc37abc41..8802e48bc063 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -97,6 +97,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); | |||
97 | #endif | 97 | #endif |
98 | 98 | ||
99 | static int sd_revalidate_disk(struct gendisk *); | 99 | static int sd_revalidate_disk(struct gendisk *); |
100 | static void sd_unlock_native_capacity(struct gendisk *disk); | ||
100 | static int sd_probe(struct device *); | 101 | static int sd_probe(struct device *); |
101 | static int sd_remove(struct device *); | 102 | static int sd_remove(struct device *); |
102 | static void sd_shutdown(struct device *); | 103 | static void sd_shutdown(struct device *); |
@@ -1101,6 +1102,7 @@ static const struct block_device_operations sd_fops = { | |||
1101 | #endif | 1102 | #endif |
1102 | .media_changed = sd_media_changed, | 1103 | .media_changed = sd_media_changed, |
1103 | .revalidate_disk = sd_revalidate_disk, | 1104 | .revalidate_disk = sd_revalidate_disk, |
1105 | .unlock_native_capacity = sd_unlock_native_capacity, | ||
1104 | }; | 1106 | }; |
1105 | 1107 | ||
1106 | static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) | 1108 | static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) |
@@ -2121,6 +2123,26 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
2121 | } | 2123 | } |
2122 | 2124 | ||
2123 | /** | 2125 | /** |
2126 | * sd_unlock_native_capacity - unlock native capacity | ||
2127 | * @disk: struct gendisk to set capacity for | ||
2128 | * | ||
2129 | * Block layer calls this function if it detects that partitions | ||
2130 | * on @disk reach beyond the end of the device. If the SCSI host | ||
2131 | * implements ->unlock_native_capacity() method, it's invoked to | ||
2132 | * give it a chance to adjust the device capacity. | ||
2133 | * | ||
2134 | * CONTEXT: | ||
2135 | * Defined by block layer. Might sleep. | ||
2136 | */ | ||
2137 | static void sd_unlock_native_capacity(struct gendisk *disk) | ||
2138 | { | ||
2139 | struct scsi_device *sdev = scsi_disk(disk)->device; | ||
2140 | |||
2141 | if (sdev->host->hostt->unlock_native_capacity) | ||
2142 | sdev->host->hostt->unlock_native_capacity(sdev); | ||
2143 | } | ||
2144 | |||
2145 | /** | ||
2124 | * sd_format_disk_name - format disk name | 2146 | * sd_format_disk_name - format disk name |
2125 | * @prefix: name prefix - ie. "sd" for SCSI disks | 2147 | * @prefix: name prefix - ie. "sd" for SCSI disks |
2126 | * @index: index of the disk to format name for | 2148 | * @index: index of the disk to format name for |
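
The new sd_unlock_native_capacity() above only forwards the request to the host template. A low-level driver that wants to participate would supply the callback, roughly as in this sketch (hypothetical driver; the callback signature is inferred from the call site in the hunk, taking the scsi_device and returning void):

/* Hypothetical LLD hook: give the block layer a way to ask for the
 * full native capacity when partitions run past the reported end. */
static void example_unlock_native_capacity(struct scsi_device *sdev)
{
	/* e.g. disable HPA-style capacity clipping on the device here */
}

static struct scsi_host_template example_sht = {
	.name			= "example",
	.unlock_native_capacity	= example_unlock_native_capacity,
	/* remaining mandatory scsi_host_template fields omitted */
};
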
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index dee1c96288d4..ef752b248c4d 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -758,8 +758,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, | |||
758 | } | 758 | } |
759 | 759 | ||
760 | static int | 760 | static int |
761 | sg_ioctl(struct inode *inode, struct file *filp, | 761 | sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) |
762 | unsigned int cmd_in, unsigned long arg) | ||
763 | { | 762 | { |
764 | void __user *p = (void __user *)arg; | 763 | void __user *p = (void __user *)arg; |
765 | int __user *ip = p; | 764 | int __user *ip = p; |
@@ -1078,6 +1077,18 @@ sg_ioctl(struct inode *inode, struct file *filp, | |||
1078 | } | 1077 | } |
1079 | } | 1078 | } |
1080 | 1079 | ||
1080 | static long | ||
1081 | sg_unlocked_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) | ||
1082 | { | ||
1083 | int ret; | ||
1084 | |||
1085 | lock_kernel(); | ||
1086 | ret = sg_ioctl(filp, cmd_in, arg); | ||
1087 | unlock_kernel(); | ||
1088 | |||
1089 | return ret; | ||
1090 | } | ||
1091 | |||
1081 | #ifdef CONFIG_COMPAT | 1092 | #ifdef CONFIG_COMPAT |
1082 | static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) | 1093 | static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) |
1083 | { | 1094 | { |
@@ -1322,7 +1333,7 @@ static const struct file_operations sg_fops = { | |||
1322 | .read = sg_read, | 1333 | .read = sg_read, |
1323 | .write = sg_write, | 1334 | .write = sg_write, |
1324 | .poll = sg_poll, | 1335 | .poll = sg_poll, |
1325 | .ioctl = sg_ioctl, | 1336 | .unlocked_ioctl = sg_unlocked_ioctl, |
1326 | #ifdef CONFIG_COMPAT | 1337 | #ifdef CONFIG_COMPAT |
1327 | .compat_ioctl = sg_compat_ioctl, | 1338 | .compat_ioctl = sg_compat_ioctl, |
1328 | #endif | 1339 | #endif |
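
The sg conversion above keeps the existing locking by wrapping the unchanged sg_ioctl() in lock_kernel()/unlock_kernel(). The eventual goal of these .ioctl to .unlocked_ioctl conversions is to drop the BKL entirely; a later follow-up (not part of this merge) might look like this sketch, using a hypothetical driver-private mutex:

static DEFINE_MUTEX(sg_ioctl_mutex);	/* hypothetical, not in this patch */

static long
sg_unlocked_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
	int ret;

	/* Serialize ioctls against each other without the BKL. */
	mutex_lock(&sg_ioctl_mutex);
	ret = sg_ioctl(filp, cmd_in, arg);
	mutex_unlock(&sg_ioctl_mutex);

	return ret;
}
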
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 3ea1a713ef25..24211d0efa6d 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -3962,6 +3962,7 @@ static const struct file_operations st_fops = | |||
3962 | .open = st_open, | 3962 | .open = st_open, |
3963 | .flush = st_flush, | 3963 | .flush = st_flush, |
3964 | .release = st_release, | 3964 | .release = st_release, |
3965 | .llseek = noop_llseek, | ||
3965 | }; | 3966 | }; |
3966 | 3967 | ||
3967 | static int st_probe(struct device *dev) | 3968 | static int st_probe(struct device *dev) |
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c index fc23d273fb1a..386dd9d602b6 100644 --- a/drivers/scsi/sun_esp.c +++ b/drivers/scsi/sun_esp.c | |||
@@ -125,7 +125,7 @@ static void __devinit esp_get_scsi_id(struct esp *esp, struct of_device *espdma) | |||
125 | struct of_device *op = esp->dev; | 125 | struct of_device *op = esp->dev; |
126 | struct device_node *dp; | 126 | struct device_node *dp; |
127 | 127 | ||
128 | dp = op->node; | 128 | dp = op->dev.of_node; |
129 | esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff); | 129 | esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff); |
130 | if (esp->scsi_id != 0xff) | 130 | if (esp->scsi_id != 0xff) |
131 | goto done; | 131 | goto done; |
@@ -134,7 +134,7 @@ static void __devinit esp_get_scsi_id(struct esp *esp, struct of_device *espdma) | |||
134 | if (esp->scsi_id != 0xff) | 134 | if (esp->scsi_id != 0xff) |
135 | goto done; | 135 | goto done; |
136 | 136 | ||
137 | esp->scsi_id = of_getintprop_default(espdma->node, | 137 | esp->scsi_id = of_getintprop_default(espdma->dev.of_node, |
138 | "scsi-initiator-id", 7); | 138 | "scsi-initiator-id", 7); |
139 | 139 | ||
140 | done: | 140 | done: |
@@ -147,7 +147,7 @@ static void __devinit esp_get_differential(struct esp *esp) | |||
147 | struct of_device *op = esp->dev; | 147 | struct of_device *op = esp->dev; |
148 | struct device_node *dp; | 148 | struct device_node *dp; |
149 | 149 | ||
150 | dp = op->node; | 150 | dp = op->dev.of_node; |
151 | if (of_find_property(dp, "differential", NULL)) | 151 | if (of_find_property(dp, "differential", NULL)) |
152 | esp->flags |= ESP_FLAG_DIFFERENTIAL; | 152 | esp->flags |= ESP_FLAG_DIFFERENTIAL; |
153 | else | 153 | else |
@@ -160,7 +160,7 @@ static void __devinit esp_get_clock_params(struct esp *esp) | |||
160 | struct device_node *bus_dp, *dp; | 160 | struct device_node *bus_dp, *dp; |
161 | int fmhz; | 161 | int fmhz; |
162 | 162 | ||
163 | dp = op->node; | 163 | dp = op->dev.of_node; |
164 | bus_dp = dp->parent; | 164 | bus_dp = dp->parent; |
165 | 165 | ||
166 | fmhz = of_getintprop_default(dp, "clock-frequency", 0); | 166 | fmhz = of_getintprop_default(dp, "clock-frequency", 0); |
@@ -172,12 +172,12 @@ static void __devinit esp_get_clock_params(struct esp *esp) | |||
172 | 172 | ||
173 | static void __devinit esp_get_bursts(struct esp *esp, struct of_device *dma_of) | 173 | static void __devinit esp_get_bursts(struct esp *esp, struct of_device *dma_of) |
174 | { | 174 | { |
175 | struct device_node *dma_dp = dma_of->node; | 175 | struct device_node *dma_dp = dma_of->dev.of_node; |
176 | struct of_device *op = esp->dev; | 176 | struct of_device *op = esp->dev; |
177 | struct device_node *dp; | 177 | struct device_node *dp; |
178 | u8 bursts, val; | 178 | u8 bursts, val; |
179 | 179 | ||
180 | dp = op->node; | 180 | dp = op->dev.of_node; |
181 | bursts = of_getintprop_default(dp, "burst-sizes", 0xff); | 181 | bursts = of_getintprop_default(dp, "burst-sizes", 0xff); |
182 | val = of_getintprop_default(dma_dp, "burst-sizes", 0xff); | 182 | val = of_getintprop_default(dma_dp, "burst-sizes", 0xff); |
183 | if (val != 0xff) | 183 | if (val != 0xff) |
@@ -565,7 +565,7 @@ fail: | |||
565 | static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device_id *match) | 565 | static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device_id *match) |
566 | { | 566 | { |
567 | struct device_node *dma_node = NULL; | 567 | struct device_node *dma_node = NULL; |
568 | struct device_node *dp = op->node; | 568 | struct device_node *dp = op->dev.of_node; |
569 | struct of_device *dma_of = NULL; | 569 | struct of_device *dma_of = NULL; |
570 | int hme = 0; | 570 | int hme = 0; |
571 | 571 | ||
@@ -574,7 +574,7 @@ static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device | |||
574 | !strcmp(dp->parent->name, "dma"))) | 574 | !strcmp(dp->parent->name, "dma"))) |
575 | dma_node = dp->parent; | 575 | dma_node = dp->parent; |
576 | else if (!strcmp(dp->name, "SUNW,fas")) { | 576 | else if (!strcmp(dp->name, "SUNW,fas")) { |
577 | dma_node = op->node; | 577 | dma_node = op->dev.of_node; |
578 | hme = 1; | 578 | hme = 1; |
579 | } | 579 | } |
580 | if (dma_node) | 580 | if (dma_node) |
@@ -633,8 +633,11 @@ static const struct of_device_id esp_match[] = { | |||
633 | MODULE_DEVICE_TABLE(of, esp_match); | 633 | MODULE_DEVICE_TABLE(of, esp_match); |
634 | 634 | ||
635 | static struct of_platform_driver esp_sbus_driver = { | 635 | static struct of_platform_driver esp_sbus_driver = { |
636 | .name = "esp", | 636 | .driver = { |
637 | .match_table = esp_match, | 637 | .name = "esp", |
638 | .owner = THIS_MODULE, | ||
639 | .of_match_table = esp_match, | ||
640 | }, | ||
638 | .probe = esp_sbus_probe, | 641 | .probe = esp_sbus_probe, |
639 | .remove = __devexit_p(esp_sbus_remove), | 642 | .remove = __devexit_p(esp_sbus_remove), |
640 | }; | 643 | }; |
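
All of the sun_esp.c hunks, like the qlogicpti.c ones earlier, are the same mechanical rename: the OF device node moved from of_device.node into the embedded struct device as dev.of_node. A property read in a converted driver therefore looks like this sketch (example_read_bursts is a made-up name; of_getintprop_default() is the sparc helper already used in the hunks, and the property names come from them):

static u8 example_read_bursts(struct of_device *op)
{
	struct device_node *dp = op->dev.of_node;	/* was op->node */
	u8 bursts, bmask;

	bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
	bmask  = of_getintprop_default(dp->parent, "burst-sizes", 0xff);
	if (bmask != 0xff)
		bursts &= bmask;
	return bursts;
}
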