133 files changed, 4873 insertions, 4389 deletions
diff --git a/Documentation/scsi/st.txt b/Documentation/scsi/st.txt
index b7be95b5bd24..40752602c050 100644
--- a/Documentation/scsi/st.txt
+++ b/Documentation/scsi/st.txt
@@ -2,7 +2,7 @@ This file contains brief information about the SCSI tape driver.
 The driver is currently maintained by Kai Mäkisara (email
 Kai.Makisara@kolumbus.fi)
 
-Last modified: Mon Mar 7 21:14:44 2005 by kai.makisara
+Last modified: Sun Feb 24 21:59:07 2008 by kai.makisara
 
 
 BASICS
@@ -133,6 +133,11 @@ the defaults set by the user. The value -1 means the default is not set. The
 file 'dev' contains the device numbers corresponding to this device. The links
 'device' and 'driver' point to the SCSI device and driver entries.
 
+Each directory also contains the entry 'options' which shows the currently
+enabled driver and mode options. The value in the file is a bit mask where the
+bit definitions are the same as those used with MTSETDRVBUFFER in setting the
+options.
+
 A link named 'tape' is made from the SCSI device directory to the class
 directory corresponding to the mode 0 auto-rewind device (e.g., st0).
 
@@ -372,6 +377,11 @@ MTSETDRVBUFFER
 	MT_ST_SYSV sets the SYSV semantics (mode)
 	MT_ST_NOWAIT enables immediate mode (i.e., don't wait for
 		the command to finish) for some commands (e.g., rewind)
+	MT_ST_SILI enables setting the SILI bit in SCSI commands when
+		reading in variable block mode to enhance performance when
+		reading blocks shorter than the byte count; set this only
+		if you are sure that the drive supports SILI and the HBA
+		correctly returns transfer residuals
 	MT_ST_DEBUGGING debugging (global; debugging must be
 		compiled into the driver)
 	MT_ST_SETBOOLEANS
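For reference, the new MT_ST_SILI bit is toggled like any other boolean driver option, via the MTSETDRVBUFFER ioctl. The sketch below shows one way to do that from user space; the device node and error handling are illustrative, and the fallback #define is only needed if the installed mtio.h predates the new flag:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mtio.h>

#ifndef MT_ST_SILI
#define MT_ST_SILI 0x4000	/* bit value used by the st driver */
#endif

int main(void)
{
	struct mtop op;
	int fd = open("/dev/nst0", O_RDONLY);	/* illustrative tape node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* turn on a single boolean option without touching the others */
	op.mt_op = MTSETDRVBUFFER;
	op.mt_count = MT_ST_SETBOOLEANS | MT_ST_SILI;
	if (ioctl(fd, MTIOCTOP, &op) < 0)
		perror("MTSETDRVBUFFER");

	close(fd);
	return 0;
}

The currently active bits can then be read back from the new sysfs 'options' attribute described above, e.g. from a path such as /sys/class/scsi_tape/nst0/options (a hexadecimal bit mask).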
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 7661bb065fa5..3a078ad3aa44 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -201,22 +201,6 @@ simscsi_readwrite10 (struct scsi_cmnd *sc, int mode)
 	simscsi_sg_readwrite(sc, mode, offset);
 }
 
-static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
-{
-
-	int i;
-	unsigned thislen;
-	struct scatterlist *slp;
-
-	scsi_for_each_sg(sc, slp, scsi_sg_count(sc), i) {
-		if (!len)
-			break;
-		thislen = min(len, slp->length);
-		memcpy(sg_virt(slp), buf, thislen);
-		len -= thislen;
-	}
-}
-
 static int
 simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
@@ -258,7 +242,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		buf[6] = 0;	/* reserved */
 		buf[7] = 0;	/* various flags */
 		memcpy(buf + 8, "HP SIMULATED DISK 0.00", 28);
-		simscsi_fillresult(sc, buf, 36);
+		scsi_sg_copy_from_buffer(sc, buf, 36);
 		sc->result = GOOD;
 		break;
 
@@ -306,14 +290,15 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		buf[5] = 0;
 		buf[6] = 2;
 		buf[7] = 0;
-		simscsi_fillresult(sc, buf, 8);
+		scsi_sg_copy_from_buffer(sc, buf, 8);
 		sc->result = GOOD;
 		break;
 
 	case MODE_SENSE:
 	case MODE_SENSE_10:
 		/* sd.c uses this to determine whether disk does write-caching. */
-		simscsi_fillresult(sc, (char *)empty_zero_page, scsi_bufflen(sc));
+		scsi_sg_copy_from_buffer(sc, (char *)empty_zero_page,
+					 PAGE_SIZE);
 		sc->result = GOOD;
 		break;
 
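The conversion above drops the driver-private simscsi_fillresult() in favour of the midlayer's scsi_sg_copy_from_buffer() helper. As far as I understand it, the helper is essentially a thin wrapper over the scatterlist library, along these lines (a sketch under that assumption, not the authoritative in-tree definition):

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Copy a flat, kernel-built response (e.g. INQUIRY or READ CAPACITY
 * data) into the scatter-gather list of a scsi_cmnd.  Every LLD used
 * to open-code this per-segment walk; the generic helper does it once. */
static int example_copy_to_sglist(struct scsi_cmnd *cmd, void *buf, int buflen)
{
	return sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
				   buf, buflen);
}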
diff --git a/block/bsg.c b/block/bsg.c
index 8917c5174dc2..302ac1f5af39 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -37,7 +37,6 @@ struct bsg_device {
 	struct list_head done_list;
 	struct hlist_node dev_list;
 	atomic_t ref_count;
-	int minor;
 	int queued_cmds;
 	int done_cmds;
 	wait_queue_head_t wq_done;
@@ -368,7 +367,7 @@ static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
 
 	spin_lock_irq(&bd->lock);
 	if (bd->done_cmds) {
-		bc = list_entry(bd->done_list.next, struct bsg_command, list);
+		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
 		list_del(&bc->list);
 		bd->done_cmds--;
 	}
@@ -468,8 +467,6 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
 
 	dprintk("%s: entered\n", bd->name);
 
-	set_bit(BSG_F_BLOCK, &bd->flags);
-
 	/*
 	 * wait for all commands to complete
 	 */
@@ -705,6 +702,7 @@ static struct bsg_device *bsg_alloc_device(void)
 static int bsg_put_device(struct bsg_device *bd)
 {
 	int ret = 0;
+	struct device *dev = bd->queue->bsg_dev.dev;
 
 	mutex_lock(&bsg_mutex);
 
@@ -730,6 +728,7 @@ static int bsg_put_device(struct bsg_device *bd)
 	kfree(bd);
 out:
 	mutex_unlock(&bsg_mutex);
+	put_device(dev);
 	return ret;
 }
 
@@ -738,22 +737,26 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 					 struct file *file)
 {
 	struct bsg_device *bd;
+	int ret;
 #ifdef BSG_DEBUG
 	unsigned char buf[32];
 #endif
+	ret = blk_get_queue(rq);
+	if (ret)
+		return ERR_PTR(-ENXIO);
 
 	bd = bsg_alloc_device();
-	if (!bd)
+	if (!bd) {
+		blk_put_queue(rq);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	bd->queue = rq;
-	kobject_get(&rq->kobj);
 	bsg_set_block(bd, file);
 
 	atomic_set(&bd->ref_count, 1);
-	bd->minor = iminor(inode);
 	mutex_lock(&bsg_mutex);
-	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(bd->minor));
+	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
 
 	strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
 	dprintk("bound to <%s>, max queue %d\n",
@@ -763,23 +766,21 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 	return bd;
 }
 
-static struct bsg_device *__bsg_get_device(int minor)
+static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 {
-	struct bsg_device *bd = NULL;
+	struct bsg_device *bd;
 	struct hlist_node *entry;
 
 	mutex_lock(&bsg_mutex);
 
-	hlist_for_each(entry, bsg_dev_idx_hash(minor)) {
-		bd = hlist_entry(entry, struct bsg_device, dev_list);
-		if (bd->minor == minor) {
+	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
+		if (bd->queue == q) {
 			atomic_inc(&bd->ref_count);
-			break;
+			goto found;
 		}
-
-		bd = NULL;
 	}
-
+	bd = NULL;
+found:
 	mutex_unlock(&bsg_mutex);
 	return bd;
 }
@@ -789,21 +790,27 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
 	struct bsg_device *bd;
 	struct bsg_class_device *bcd;
 
-	bd = __bsg_get_device(iminor(inode));
-	if (bd)
-		return bd;
-
 	/*
 	 * find the class device
 	 */
 	mutex_lock(&bsg_mutex);
 	bcd = idr_find(&bsg_minor_idr, iminor(inode));
+	if (bcd)
+		get_device(bcd->dev);
 	mutex_unlock(&bsg_mutex);
 
 	if (!bcd)
 		return ERR_PTR(-ENODEV);
 
-	return bsg_add_device(inode, bcd->queue, file);
+	bd = __bsg_get_device(iminor(inode), bcd->queue);
+	if (bd)
+		return bd;
+
+	bd = bsg_add_device(inode, bcd->queue, file);
+	if (IS_ERR(bd))
+		put_device(bcd->dev);
+
+	return bd;
 }
 
 static int bsg_open(struct inode *inode, struct file *file)
@@ -942,7 +949,6 @@ void bsg_unregister_queue(struct request_queue *q)
 	class_device_unregister(bcd->class_dev);
 	put_device(bcd->dev);
 	bcd->class_dev = NULL;
-	bcd->dev = NULL;
 	mutex_unlock(&bsg_mutex);
 }
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
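A side note on the list_first_entry() change in bsg_next_done_cmd() above: the macro is purely a readability helper for "first element of a list known to be non-empty", equivalent to the old list_entry(head->next, ...) form. A minimal, hypothetical usage sketch:

#include <linux/list.h>

/* hypothetical element type, for illustration only */
struct demo_cmd {
	int id;
	struct list_head list;
};

/* Caller must already hold the relevant lock and know the list is
 * non-empty (bsg checks bd->done_cmds under bd->lock before this). */
static struct demo_cmd *demo_first(struct list_head *done_list)
{
	/* same as: list_entry(done_list->next, struct demo_cmd, list) */
	return list_first_entry(done_list, struct demo_cmd, list);
}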
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index c16e3cea1d28..f3c69a8c1103 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2332,11 +2332,7 @@ void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
 {
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
-	cmd->sense_buffer[0] = 0x70;	/* fixed format, current */
-	cmd->sense_buffer[2] = sk;
-	cmd->sense_buffer[7] = 18 - 8;	/* additional sense length */
-	cmd->sense_buffer[12] = asc;
-	cmd->sense_buffer[13] = ascq;
+	scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
 }
 
 /**
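With the first argument 0, scsi_build_sense_buffer() produces the same fixed-format sense bytes that the removed open-coded lines set up. A sketch of that layout, grounded in the lines deleted above rather than in the midlayer source:

#include <linux/types.h>

/* Fixed-format (0x70) sense data as ata_scsi_set_sense() needs it:
 * byte 0 response code, byte 2 sense key, byte 7 additional length,
 * bytes 12/13 ASC/ASCQ.  scsi_build_sense_buffer(0, buf, sk, asc, ascq)
 * fills in the same fields. */
static void example_fixed_sense(u8 *buf, u8 sk, u8 asc, u8 ascq)
{
	buf[0] = 0x70;		/* fixed format, current error */
	buf[2] = sk;
	buf[7] = 18 - 8;	/* additional sense length */
	buf[12] = asc;
	buf[13] = ascq;
}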
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
index 40bca48abc12..cabd0edf2156 100644
--- a/drivers/base/transport_class.c
+++ b/drivers/base/transport_class.c
@@ -108,7 +108,8 @@ EXPORT_SYMBOL_GPL(anon_transport_class_register);
  */
 void anon_transport_class_unregister(struct anon_transport_class *atc)
 {
-	attribute_container_unregister(&atc->container);
+	if (unlikely(attribute_container_unregister(&atc->container)))
+		BUG();
 }
 EXPORT_SYMBOL_GPL(anon_transport_class_unregister);
 
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index c6be6eba7dc3..db3c892f87fb 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -79,7 +79,7 @@ MODULE_VERSION(my_VERSION);
 /*
  *  cmd line parameters
  */
-static int mpt_msi_enable;
+static int mpt_msi_enable = -1;
 module_param(mpt_msi_enable, int, 0);
 MODULE_PARM_DESC(mpt_msi_enable, " MSI Support Enable (default=0)");
 
@@ -1686,6 +1686,11 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 		ioc->bus_type = SAS;
 	}
 
+	if (ioc->bus_type == SAS && mpt_msi_enable == -1)
+		ioc->msi_enable = 1;
+	else
+		ioc->msi_enable = mpt_msi_enable;
+
 	if (ioc->errata_flag_1064)
 		pci_disable_io_access(pdev);
 
@@ -1831,7 +1836,7 @@ mpt_suspend(struct pci_dev *pdev, pm_message_t state)
 	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
 
 	free_irq(ioc->pci_irq, ioc);
-	if (mpt_msi_enable)
+	if (ioc->msi_enable)
 		pci_disable_msi(ioc->pcidev);
 	ioc->pci_irq = -1;
 	pci_save_state(pdev);
@@ -2057,15 +2062,17 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 	if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
 		ioc->pci_irq = -1;
 		if (ioc->pcidev->irq) {
-			if (mpt_msi_enable && !pci_enable_msi(ioc->pcidev))
+			if (ioc->msi_enable && !pci_enable_msi(ioc->pcidev))
 				printk(MYIOC_s_INFO_FMT "PCI-MSI enabled\n",
 					ioc->name);
+			else
+				ioc->msi_enable = 0;
 			rc = request_irq(ioc->pcidev->irq, mpt_interrupt,
 			    IRQF_SHARED, ioc->name, ioc);
 			if (rc < 0) {
 				printk(MYIOC_s_ERR_FMT "Unable to allocate "
 					"interrupt %d!\n", ioc->name, ioc->pcidev->irq);
-				if (mpt_msi_enable)
+				if (ioc->msi_enable)
 					pci_disable_msi(ioc->pcidev);
 				return -EBUSY;
 			}
@@ -2173,7 +2180,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 	/*
 	 * Initalize link list for inactive raid volumes.
 	 */
-	init_MUTEX(&ioc->raid_data.inactive_list_mutex);
+	mutex_init(&ioc->raid_data.inactive_list_mutex);
 	INIT_LIST_HEAD(&ioc->raid_data.inactive_list);
 
 	if (ioc->bus_type == SAS) {
@@ -2261,7 +2268,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
  out:
 	if ((ret != 0) && irq_allocated) {
 		free_irq(ioc->pci_irq, ioc);
-		if (mpt_msi_enable)
+		if (ioc->msi_enable)
 			pci_disable_msi(ioc->pcidev);
 	}
 	return ret;
@@ -2443,7 +2450,7 @@ mpt_adapter_dispose(MPT_ADAPTER *ioc)
 
 	if (ioc->pci_irq != -1) {
 		free_irq(ioc->pci_irq, ioc);
-		if (mpt_msi_enable)
+		if (ioc->msi_enable)
 			pci_disable_msi(ioc->pcidev);
 		ioc->pci_irq = -1;
 	}
@@ -5159,13 +5166,13 @@ mpt_inactive_raid_list_free(MPT_ADAPTER *ioc)
 	if (list_empty(&ioc->raid_data.inactive_list))
 		return;
 
-	down(&ioc->raid_data.inactive_list_mutex);
+	mutex_lock(&ioc->raid_data.inactive_list_mutex);
 	list_for_each_entry_safe(component_info, pNext,
 	    &ioc->raid_data.inactive_list, list) {
 		list_del(&component_info->list);
 		kfree(component_info);
 	}
-	up(&ioc->raid_data.inactive_list_mutex);
+	mutex_unlock(&ioc->raid_data.inactive_list_mutex);
 }
 
 /**
@@ -5224,7 +5231,7 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
 	if (!handle_inactive_volumes)
 		goto out;
 
-	down(&ioc->raid_data.inactive_list_mutex);
+	mutex_lock(&ioc->raid_data.inactive_list_mutex);
 	for (i = 0; i < buffer->NumPhysDisks; i++) {
 		if(mpt_raid_phys_disk_pg0(ioc,
 		    buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
@@ -5244,7 +5251,7 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
 		list_add_tail(&component_info->list,
 		    &ioc->raid_data.inactive_list);
 	}
-	up(&ioc->raid_data.inactive_list_mutex);
+	mutex_unlock(&ioc->raid_data.inactive_list_mutex);
 
  out:
 	if (buffer)
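The inactive_list locking change in this file is the standard semaphore-to-mutex conversion (the semaphore was only ever used as a sleeping lock). A generic before/after sketch of the API mapping, not mpt-specific code:

#include <linux/list.h>
#include <linux/mutex.h>

struct demo_raid_data {
	struct mutex inactive_list_mutex;	/* was: struct semaphore */
	struct list_head inactive_list;
};

static void demo_init(struct demo_raid_data *d)
{
	mutex_init(&d->inactive_list_mutex);	/* was: init_MUTEX() */
	INIT_LIST_HEAD(&d->inactive_list);
}

static void demo_walk(struct demo_raid_data *d)
{
	mutex_lock(&d->inactive_list_mutex);	/* was: down() */
	/* ... walk d->inactive_list ... */
	mutex_unlock(&d->inactive_list_mutex);	/* was: up() */
}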
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index caadc68c3000..a8f617447d22 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -51,6 +51,7 @@
 
 #include <linux/kernel.h>
 #include <linux/pci.h>
+#include <linux/mutex.h>
 
 #include "lsi/mpi_type.h"
 #include "lsi/mpi.h"		/* Fusion MPI(nterface) basic defs */
@@ -531,7 +532,7 @@ struct inactive_raid_component_info {
 typedef struct _RaidCfgData {
 	IOCPage2_t	*pIocPg2;	/* table of Raid Volumes */
 	IOCPage3_t	*pIocPg3;	/* table of physical disks */
-	struct semaphore	inactive_list_mutex;
+	struct mutex	inactive_list_mutex;
 	struct list_head	inactive_list;	/* link list for physical
 						disk that belong in
 						inactive volumes */
@@ -630,6 +631,7 @@ typedef struct _MPT_ADAPTER
 	int		mtrr_reg;
 	struct pci_dev	*pcidev;	/* struct pci_dev pointer */
 	int		bars;	/* bitmask of BAR's that must be configured */
+	int		msi_enable;
 	u8		__iomem *memmap;	/* mmap address */
 	struct Scsi_Host	*sh;	/* Scsi Host pointer */
 	SpiCfgData	spi_data;	/* Scsi config. data */
@@ -693,7 +695,6 @@ typedef struct _MPT_ADAPTER
 	struct mutex	sas_discovery_mutex;
 	u8		sas_discovery_runtime;
 	u8		sas_discovery_ignore_events;
-	u16		handle;
 	int		sas_index;	/* index refrencing */
 	MPT_SAS_MGMT	sas_mgmt;
 	struct work_struct	sas_persist_task;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 78734e25edd5..468480771f13 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -230,6 +230,20 @@ static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy)
 	return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
 }
 
+static struct mptsas_portinfo *
+mptsas_get_hba_portinfo(MPT_ADAPTER *ioc)
+{
+	struct list_head *head = &ioc->sas_topology;
+	struct mptsas_portinfo *pi = NULL;
+
+	/* always the first entry on sas_topology list */
+
+	if (!list_empty(head))
+		pi = list_entry(head->next, struct mptsas_portinfo, list);
+
+	return pi;
+}
+
 /*
  * mptsas_find_portinfo_by_handle
  *
@@ -1290,7 +1304,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 		struct mptsas_portinfo *port_info;
 
 		mutex_lock(&ioc->sas_topology_mutex);
-		port_info = mptsas_find_portinfo_by_handle(ioc, ioc->handle);
+		port_info = mptsas_get_hba_portinfo(ioc);
 		if (port_info && port_info->phy_info)
 			sas_address =
 				port_info->phy_info[0].phy->identify.sas_address;
@@ -2028,8 +2042,7 @@ static int mptsas_probe_one_phy(struct device *dev,
 			int i;
 
 			mutex_lock(&ioc->sas_topology_mutex);
-			port_info = mptsas_find_portinfo_by_handle(ioc,
-								   ioc->handle);
+			port_info = mptsas_get_hba_portinfo(ioc);
 			mutex_unlock(&ioc->sas_topology_mutex);
 
 			for (i = 0; i < port_info->num_phys; i++)
@@ -2099,8 +2112,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
 
 	mptsas_sas_io_unit_pg1(ioc);
 	mutex_lock(&ioc->sas_topology_mutex);
-	ioc->handle = hba->phy_info[0].handle;
-	port_info = mptsas_find_portinfo_by_handle(ioc, ioc->handle);
+	port_info = mptsas_get_hba_portinfo(ioc);
 	if (!port_info) {
 		port_info = hba;
 		list_add_tail(&port_info->list, &ioc->sas_topology);
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index c207bda6723b..89c63147a15d 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2304,14 +2304,14 @@ mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
 	if (list_empty(&ioc->raid_data.inactive_list))
 		goto out;
 
-	down(&ioc->raid_data.inactive_list_mutex);
+	mutex_lock(&ioc->raid_data.inactive_list_mutex);
 	list_for_each_entry(component_info, &ioc->raid_data.inactive_list,
 	    list) {
 		if ((component_info->d.PhysDiskID == id) &&
 		    (component_info->d.PhysDiskBus == channel))
 			rc = 1;
 	}
-	up(&ioc->raid_data.inactive_list_mutex);
+	mutex_unlock(&ioc->raid_data.inactive_list_mutex);
 
  out:
 	return rc;
@@ -2341,14 +2341,14 @@ mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
 	if (list_empty(&ioc->raid_data.inactive_list))
 		goto out;
 
-	down(&ioc->raid_data.inactive_list_mutex);
+	mutex_lock(&ioc->raid_data.inactive_list_mutex);
 	list_for_each_entry(component_info, &ioc->raid_data.inactive_list,
 	    list) {
 		if ((component_info->d.PhysDiskID == id) &&
 		    (component_info->d.PhysDiskBus == channel))
 			rc = component_info->d.PhysDiskNum;
 	}
-	up(&ioc->raid_data.inactive_list_mutex);
+	mutex_unlock(&ioc->raid_data.inactive_list_mutex);
 
  out:
 	return rc;
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 874b55ed00a3..8c7e2b778ef1 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -1030,10 +1030,10 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 
 	/* initialize debug locks */
 
-	spin_lock_init(&adapter->erp_dbf_lock);
 	spin_lock_init(&adapter->hba_dbf_lock);
 	spin_lock_init(&adapter->san_dbf_lock);
 	spin_lock_init(&adapter->scsi_dbf_lock);
+	spin_lock_init(&adapter->rec_dbf_lock);
 
 	retval = zfcp_adapter_debug_register(adapter);
 	if (retval)
@@ -1325,10 +1325,10 @@ zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
 
 #define ZFCP_LOG_AREA			ZFCP_LOG_AREA_FC
 
-static void
-zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
-			   struct fsf_status_read_buffer *status_buffer)
+static void zfcp_fsf_incoming_els_rscn(struct zfcp_fsf_req *fsf_req)
 {
+	struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fcp_rscn_head *fcp_rscn_head;
 	struct fcp_rscn_element *fcp_rscn_element;
 	struct zfcp_port *port;
@@ -1375,7 +1375,8 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
 			ZFCP_LOG_INFO("incoming RSCN, trying to open "
 				      "port 0x%016Lx\n", port->wwpn);
 			zfcp_erp_port_reopen(port,
-					     ZFCP_STATUS_COMMON_ERP_FAILED);
+					     ZFCP_STATUS_COMMON_ERP_FAILED,
+					     82, fsf_req);
 			continue;
 		}
 
@@ -1406,10 +1407,10 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
 	}
 }
 
-static void
-zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
-			    struct fsf_status_read_buffer *status_buffer)
+static void zfcp_fsf_incoming_els_plogi(struct zfcp_fsf_req *fsf_req)
 {
+	struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fsf_plogi *els_plogi;
 	struct zfcp_port *port;
 	unsigned long flags;
@@ -1428,14 +1429,14 @@ zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
 			      status_buffer->d_id,
 			      zfcp_get_busid_by_adapter(adapter));
 	} else {
-		zfcp_erp_port_forced_reopen(port, 0);
+		zfcp_erp_port_forced_reopen(port, 0, 83, fsf_req);
 	}
 }
 
-static void
-zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
-			   struct fsf_status_read_buffer *status_buffer)
+static void zfcp_fsf_incoming_els_logo(struct zfcp_fsf_req *fsf_req)
 {
+	struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fcp_logo *els_logo = (struct fcp_logo *) status_buffer->payload;
 	struct zfcp_port *port;
 	unsigned long flags;
@@ -1453,7 +1454,7 @@ zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
 			      status_buffer->d_id,
 			      zfcp_get_busid_by_adapter(adapter));
 	} else {
-		zfcp_erp_port_forced_reopen(port, 0);
+		zfcp_erp_port_forced_reopen(port, 0, 84, fsf_req);
 	}
 }
 
@@ -1480,12 +1481,12 @@ zfcp_fsf_incoming_els(struct zfcp_fsf_req *fsf_req)
 
 	zfcp_san_dbf_event_incoming_els(fsf_req);
 	if (els_type == LS_PLOGI)
-		zfcp_fsf_incoming_els_plogi(adapter, status_buffer);
+		zfcp_fsf_incoming_els_plogi(fsf_req);
 	else if (els_type == LS_LOGO)
-		zfcp_fsf_incoming_els_logo(adapter, status_buffer);
+		zfcp_fsf_incoming_els_logo(fsf_req);
 	else if ((els_type & 0xffff0000) == LS_RSCN)
 		/* we are only concerned with the command, not the length */
-		zfcp_fsf_incoming_els_rscn(adapter, status_buffer);
+		zfcp_fsf_incoming_els_rscn(fsf_req);
 	else
 		zfcp_fsf_incoming_els_unknown(adapter, status_buffer);
 }
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index edc5015e920d..66d3b88844b0 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -170,9 +170,10 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
 	BUG_ON(!zfcp_reqlist_isempty(adapter));
 	adapter->req_no = 0;
 
-	zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING,
-				       ZFCP_SET);
-	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
+	zfcp_erp_modify_adapter_status(adapter, 10, NULL,
+				       ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 85,
+				NULL);
 	zfcp_erp_wait(adapter);
 	goto out;
 
@@ -197,7 +198,7 @@ zfcp_ccw_set_offline(struct ccw_device *ccw_device)
 
 	down(&zfcp_data.config_sema);
 	adapter = dev_get_drvdata(&ccw_device->dev);
-	zfcp_erp_adapter_shutdown(adapter, 0);
+	zfcp_erp_adapter_shutdown(adapter, 0, 86, NULL);
 	zfcp_erp_wait(adapter);
 	zfcp_erp_thread_kill(adapter);
 	up(&zfcp_data.config_sema);
@@ -223,24 +224,21 @@ zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
 	case CIO_GONE:
 		ZFCP_LOG_NORMAL("adapter %s: device gone\n",
 				zfcp_get_busid_by_adapter(adapter));
-		debug_text_event(adapter->erp_dbf,1,"dev_gone");
-		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL);
 		break;
 	case CIO_NO_PATH:
 		ZFCP_LOG_NORMAL("adapter %s: no path\n",
 				zfcp_get_busid_by_adapter(adapter));
-		debug_text_event(adapter->erp_dbf,1,"no_path");
-		zfcp_erp_adapter_shutdown(adapter, 0);
+		zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL);
 		break;
 	case CIO_OPER:
 		ZFCP_LOG_NORMAL("adapter %s: operational again\n",
 				zfcp_get_busid_by_adapter(adapter));
-		debug_text_event(adapter->erp_dbf,1,"dev_oper");
-		zfcp_erp_modify_adapter_status(adapter,
+		zfcp_erp_modify_adapter_status(adapter, 11, NULL,
 					       ZFCP_STATUS_COMMON_RUNNING,
 					       ZFCP_SET);
-		zfcp_erp_adapter_reopen(adapter,
-					ZFCP_STATUS_COMMON_ERP_FAILED);
+		zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+					89, NULL);
 		break;
 	}
 	zfcp_erp_wait(adapter);
@@ -272,7 +270,7 @@ zfcp_ccw_shutdown(struct ccw_device *cdev)
 
 	down(&zfcp_data.config_sema);
 	adapter = dev_get_drvdata(&cdev->dev);
-	zfcp_erp_adapter_shutdown(adapter, 0);
+	zfcp_erp_adapter_shutdown(adapter, 0, 90, NULL);
 	zfcp_erp_wait(adapter);
 	up(&zfcp_data.config_sema);
 }
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 701046c9bb33..37b85c67b11d 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c | |||
@@ -31,123 +31,128 @@ MODULE_PARM_DESC(dbfsize, | |||
31 | 31 | ||
32 | #define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER | 32 | #define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER |
33 | 33 | ||
34 | static int | 34 | static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len, |
35 | zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck) | 35 | int level, char *from, int from_len) |
36 | { | ||
37 | int offset; | ||
38 | struct zfcp_dbf_dump *dump = to; | ||
39 | int room = to_len - sizeof(*dump); | ||
40 | |||
41 | for (offset = 0; offset < from_len; offset += dump->size) { | ||
42 | memset(to, 0, to_len); | ||
43 | strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); | ||
44 | dump->total_size = from_len; | ||
45 | dump->offset = offset; | ||
46 | dump->size = min(from_len - offset, room); | ||
47 | memcpy(dump->data, from + offset, dump->size); | ||
48 | debug_event(dbf, level, dump, dump->size); | ||
49 | } | ||
50 | } | ||
51 | |||
52 | /* FIXME: this duplicate this code in s390 debug feature */ | ||
53 | static void zfcp_dbf_timestamp(unsigned long long stck, struct timespec *time) | ||
36 | { | 54 | { |
37 | unsigned long long sec; | 55 | unsigned long long sec; |
38 | struct timespec dbftime; | ||
39 | int len = 0; | ||
40 | 56 | ||
41 | stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096); | 57 | stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096); |
42 | sec = stck >> 12; | 58 | sec = stck >> 12; |
43 | do_div(sec, 1000000); | 59 | do_div(sec, 1000000); |
44 | dbftime.tv_sec = sec; | 60 | time->tv_sec = sec; |
45 | stck -= (sec * 1000000) << 12; | 61 | stck -= (sec * 1000000) << 12; |
46 | dbftime.tv_nsec = ((stck * 1000) >> 12); | 62 | time->tv_nsec = ((stck * 1000) >> 12); |
47 | len += sprintf(out_buf + len, "%-24s%011lu:%06lu\n", | ||
48 | label, dbftime.tv_sec, dbftime.tv_nsec); | ||
49 | |||
50 | return len; | ||
51 | } | 63 | } |
52 | 64 | ||
53 | static int zfcp_dbf_tag(char *out_buf, const char *label, const char *tag) | 65 | static void zfcp_dbf_tag(char **p, const char *label, const char *tag) |
54 | { | 66 | { |
55 | int len = 0, i; | 67 | int i; |
56 | 68 | ||
57 | len += sprintf(out_buf + len, "%-24s", label); | 69 | *p += sprintf(*p, "%-24s", label); |
58 | for (i = 0; i < ZFCP_DBF_TAG_SIZE; i++) | 70 | for (i = 0; i < ZFCP_DBF_TAG_SIZE; i++) |
59 | len += sprintf(out_buf + len, "%c", tag[i]); | 71 | *p += sprintf(*p, "%c", tag[i]); |
60 | len += sprintf(out_buf + len, "\n"); | 72 | *p += sprintf(*p, "\n"); |
73 | } | ||
61 | 74 | ||
62 | return len; | 75 | static void zfcp_dbf_outs(char **buf, const char *s1, const char *s2) |
76 | { | ||
77 | *buf += sprintf(*buf, "%-24s%s\n", s1, s2); | ||
63 | } | 78 | } |
64 | 79 | ||
65 | static int | 80 | static void zfcp_dbf_out(char **buf, const char *s, const char *format, ...) |
66 | zfcp_dbf_view(char *out_buf, const char *label, const char *format, ...) | ||
67 | { | 81 | { |
68 | va_list arg; | 82 | va_list arg; |
69 | int len = 0; | ||
70 | 83 | ||
71 | len += sprintf(out_buf + len, "%-24s", label); | 84 | *buf += sprintf(*buf, "%-24s", s); |
72 | va_start(arg, format); | 85 | va_start(arg, format); |
73 | len += vsprintf(out_buf + len, format, arg); | 86 | *buf += vsprintf(*buf, format, arg); |
74 | va_end(arg); | 87 | va_end(arg); |
75 | len += sprintf(out_buf + len, "\n"); | 88 | *buf += sprintf(*buf, "\n"); |
76 | |||
77 | return len; | ||
78 | } | 89 | } |
79 | 90 | ||
80 | static int | 91 | static void zfcp_dbf_outd(char **p, const char *label, char *buffer, |
81 | zfcp_dbf_view_dump(char *out_buf, const char *label, | 92 | int buflen, int offset, int total_size) |
82 | char *buffer, int buflen, int offset, int total_size) | ||
83 | { | 93 | { |
84 | int len = 0; | 94 | if (!offset) |
85 | 95 | *p += sprintf(*p, "%-24s ", label); | |
86 | if (offset == 0) | ||
87 | len += sprintf(out_buf + len, "%-24s ", label); | ||
88 | |||
89 | while (buflen--) { | 96 | while (buflen--) { |
90 | if (offset > 0) { | 97 | if (offset > 0) { |
91 | if ((offset % 32) == 0) | 98 | if ((offset % 32) == 0) |
92 | len += sprintf(out_buf + len, "\n%-24c ", ' '); | 99 | *p += sprintf(*p, "\n%-24c ", ' '); |
93 | else if ((offset % 4) == 0) | 100 | else if ((offset % 4) == 0) |
94 | len += sprintf(out_buf + len, " "); | 101 | *p += sprintf(*p, " "); |
95 | } | 102 | } |
96 | len += sprintf(out_buf + len, "%02x", *buffer++); | 103 | *p += sprintf(*p, "%02x", *buffer++); |
97 | if (++offset == total_size) { | 104 | if (++offset == total_size) { |
98 | len += sprintf(out_buf + len, "\n"); | 105 | *p += sprintf(*p, "\n"); |
99 | break; | 106 | break; |
100 | } | 107 | } |
101 | } | 108 | } |
102 | 109 | if (!total_size) | |
103 | if (total_size == 0) | 110 | *p += sprintf(*p, "\n"); |
104 | len += sprintf(out_buf + len, "\n"); | ||
105 | |||
106 | return len; | ||
107 | } | 111 | } |
108 | 112 | ||
109 | static int | 113 | static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view, |
110 | zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, | 114 | int area, debug_entry_t *entry, char *out_buf) |
111 | debug_entry_t * entry, char *out_buf) | ||
112 | { | 115 | { |
113 | struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)DEBUG_DATA(entry); | 116 | struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)DEBUG_DATA(entry); |
114 | int len = 0; | 117 | struct timespec t; |
118 | char *p = out_buf; | ||
115 | 119 | ||
116 | if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) { | 120 | if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) { |
117 | len += zfcp_dbf_stck(out_buf + len, "timestamp", | 121 | zfcp_dbf_timestamp(entry->id.stck, &t); |
118 | entry->id.stck); | 122 | zfcp_dbf_out(&p, "timestamp", "%011lu:%06lu", |
119 | len += zfcp_dbf_view(out_buf + len, "cpu", "%02i", | 123 | t.tv_sec, t.tv_nsec); |
120 | entry->id.fields.cpuid); | 124 | zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid); |
121 | } else { | 125 | } else { |
122 | len += zfcp_dbf_view_dump(out_buf + len, NULL, | 126 | zfcp_dbf_outd(&p, NULL, dump->data, dump->size, dump->offset, |
123 | dump->data, | 127 | dump->total_size); |
124 | dump->size, | ||
125 | dump->offset, dump->total_size); | ||
126 | if ((dump->offset + dump->size) == dump->total_size) | 128 | if ((dump->offset + dump->size) == dump->total_size) |
127 | len += sprintf(out_buf + len, "\n"); | 129 | p += sprintf(p, "\n"); |
128 | } | 130 | } |
129 | 131 | return p - out_buf; | |
130 | return len; | ||
131 | } | 132 | } |
132 | 133 | ||
134 | /** | ||
135 | * zfcp_hba_dbf_event_fsf_response - trace event for request completion | ||
136 | * @fsf_req: request that has been completed | ||
137 | */ | ||
133 | void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) | 138 | void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) |
134 | { | 139 | { |
135 | struct zfcp_adapter *adapter = fsf_req->adapter; | 140 | struct zfcp_adapter *adapter = fsf_req->adapter; |
136 | struct fsf_qtcb *qtcb = fsf_req->qtcb; | 141 | struct fsf_qtcb *qtcb = fsf_req->qtcb; |
137 | union fsf_prot_status_qual *prot_status_qual = | 142 | union fsf_prot_status_qual *prot_status_qual = |
138 | &qtcb->prefix.prot_status_qual; | 143 | &qtcb->prefix.prot_status_qual; |
139 | union fsf_status_qual *fsf_status_qual = &qtcb->header.fsf_status_qual; | 144 | union fsf_status_qual *fsf_status_qual = &qtcb->header.fsf_status_qual; |
140 | struct scsi_cmnd *scsi_cmnd; | 145 | struct scsi_cmnd *scsi_cmnd; |
141 | struct zfcp_port *port; | 146 | struct zfcp_port *port; |
142 | struct zfcp_unit *unit; | 147 | struct zfcp_unit *unit; |
143 | struct zfcp_send_els *send_els; | 148 | struct zfcp_send_els *send_els; |
144 | struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf; | 149 | struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf; |
145 | struct zfcp_hba_dbf_record_response *response = &rec->type.response; | 150 | struct zfcp_hba_dbf_record_response *response = &rec->u.response; |
146 | int level; | 151 | int level; |
147 | unsigned long flags; | 152 | unsigned long flags; |
148 | 153 | ||
149 | spin_lock_irqsave(&adapter->hba_dbf_lock, flags); | 154 | spin_lock_irqsave(&adapter->hba_dbf_lock, flags); |
150 | memset(rec, 0, sizeof(struct zfcp_hba_dbf_record)); | 155 | memset(rec, 0, sizeof(*rec)); |
151 | strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE); | 156 | strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE); |
152 | 157 | ||
153 | if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && | 158 | if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && |
@@ -161,6 +166,9 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) | |||
161 | (fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) { | 166 | (fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) { |
162 | strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE); | 167 | strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE); |
163 | level = 4; | 168 | level = 4; |
169 | } else if (qtcb->header.log_length) { | ||
170 | strncpy(rec->tag2, "qtcb", ZFCP_DBF_TAG_SIZE); | ||
171 | level = 5; | ||
164 | } else { | 172 | } else { |
165 | strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE); | 173 | strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE); |
166 | level = 6; | 174 | level = 6; |
@@ -188,11 +196,9 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) | |||
188 | if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) | 196 | if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) |
189 | break; | 197 | break; |
190 | scsi_cmnd = (struct scsi_cmnd *)fsf_req->data; | 198 | scsi_cmnd = (struct scsi_cmnd *)fsf_req->data; |
191 | if (scsi_cmnd != NULL) { | 199 | if (scsi_cmnd) { |
192 | response->data.send_fcp.scsi_cmnd | 200 | response->u.fcp.cmnd = (unsigned long)scsi_cmnd; |
193 | = (unsigned long)scsi_cmnd; | 201 | response->u.fcp.serial = scsi_cmnd->serial_number; |
194 | response->data.send_fcp.scsi_serial | ||
195 | = scsi_cmnd->serial_number; | ||
196 | } | 202 | } |
197 | break; | 203 | break; |
198 | 204 | ||
@@ -200,25 +206,25 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) | |||
200 | case FSF_QTCB_CLOSE_PORT: | 206 | case FSF_QTCB_CLOSE_PORT: |
201 | case FSF_QTCB_CLOSE_PHYSICAL_PORT: | 207 | case FSF_QTCB_CLOSE_PHYSICAL_PORT: |
202 | port = (struct zfcp_port *)fsf_req->data; | 208 | port = (struct zfcp_port *)fsf_req->data; |
203 | response->data.port.wwpn = port->wwpn; | 209 | response->u.port.wwpn = port->wwpn; |
204 | response->data.port.d_id = port->d_id; | 210 | response->u.port.d_id = port->d_id; |
205 | response->data.port.port_handle = qtcb->header.port_handle; | 211 | response->u.port.port_handle = qtcb->header.port_handle; |
206 | break; | 212 | break; |
207 | 213 | ||
208 | case FSF_QTCB_OPEN_LUN: | 214 | case FSF_QTCB_OPEN_LUN: |
209 | case FSF_QTCB_CLOSE_LUN: | 215 | case FSF_QTCB_CLOSE_LUN: |
210 | unit = (struct zfcp_unit *)fsf_req->data; | 216 | unit = (struct zfcp_unit *)fsf_req->data; |
211 | port = unit->port; | 217 | port = unit->port; |
212 | response->data.unit.wwpn = port->wwpn; | 218 | response->u.unit.wwpn = port->wwpn; |
213 | response->data.unit.fcp_lun = unit->fcp_lun; | 219 | response->u.unit.fcp_lun = unit->fcp_lun; |
214 | response->data.unit.port_handle = qtcb->header.port_handle; | 220 | response->u.unit.port_handle = qtcb->header.port_handle; |
215 | response->data.unit.lun_handle = qtcb->header.lun_handle; | 221 | response->u.unit.lun_handle = qtcb->header.lun_handle; |
216 | break; | 222 | break; |
217 | 223 | ||
218 | case FSF_QTCB_SEND_ELS: | 224 | case FSF_QTCB_SEND_ELS: |
219 | send_els = (struct zfcp_send_els *)fsf_req->data; | 225 | send_els = (struct zfcp_send_els *)fsf_req->data; |
220 | response->data.send_els.d_id = qtcb->bottom.support.d_id; | 226 | response->u.els.d_id = qtcb->bottom.support.d_id; |
221 | response->data.send_els.ls_code = send_els->ls_code >> 24; | 227 | response->u.els.ls_code = send_els->ls_code >> 24; |
222 | break; | 228 | break; |
223 | 229 | ||
224 | case FSF_QTCB_ABORT_FCP_CMND: | 230 | case FSF_QTCB_ABORT_FCP_CMND: |
@@ -230,39 +236,54 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) | |||
230 | break; | 236 | break; |
231 | } | 237 | } |
232 | 238 | ||
233 | debug_event(adapter->hba_dbf, level, | 239 | debug_event(adapter->hba_dbf, level, rec, sizeof(*rec)); |
234 | rec, sizeof(struct zfcp_hba_dbf_record)); | 240 | |
241 | /* have fcp channel microcode fixed to use as little as possible */ | ||
242 | if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) { | ||
243 | /* adjust length skipping trailing zeros */ | ||
244 | char *buf = (char *)qtcb + qtcb->header.log_start; | ||
245 | int len = qtcb->header.log_length; | ||
246 | for (; len && !buf[len - 1]; len--); | ||
247 | zfcp_dbf_hexdump(adapter->hba_dbf, rec, sizeof(*rec), level, | ||
248 | buf, len); | ||
249 | } | ||
250 | |||
235 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); | 251 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); |
236 | } | 252 | } |
237 | 253 | ||
238 | void | 254 | /** |
239 | zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, | 255 | * zfcp_hba_dbf_event_fsf_unsol - trace event for an unsolicited status buffer |
240 | struct fsf_status_read_buffer *status_buffer) | 256 | * @tag: tag indicating which kind of unsolicited status has been received |
257 | * @adapter: adapter that has issued the unsolicited status buffer | ||
258 | * @status_buffer: buffer containing payload of unsolicited status | ||
259 | */ | ||
260 | void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, | ||
261 | struct fsf_status_read_buffer *status_buffer) | ||
241 | { | 262 | { |
242 | struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf; | 263 | struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf; |
243 | unsigned long flags; | 264 | unsigned long flags; |
244 | 265 | ||
245 | spin_lock_irqsave(&adapter->hba_dbf_lock, flags); | 266 | spin_lock_irqsave(&adapter->hba_dbf_lock, flags); |
246 | memset(rec, 0, sizeof(struct zfcp_hba_dbf_record)); | 267 | memset(rec, 0, sizeof(*rec)); |
247 | strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE); | 268 | strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE); |
248 | strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE); | 269 | strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE); |
249 | 270 | ||
250 | rec->type.status.failed = adapter->status_read_failed; | 271 | rec->u.status.failed = adapter->status_read_failed; |
251 | if (status_buffer != NULL) { | 272 | if (status_buffer != NULL) { |
252 | rec->type.status.status_type = status_buffer->status_type; | 273 | rec->u.status.status_type = status_buffer->status_type; |
253 | rec->type.status.status_subtype = status_buffer->status_subtype; | 274 | rec->u.status.status_subtype = status_buffer->status_subtype; |
254 | memcpy(&rec->type.status.queue_designator, | 275 | memcpy(&rec->u.status.queue_designator, |
255 | &status_buffer->queue_designator, | 276 | &status_buffer->queue_designator, |
256 | sizeof(struct fsf_queue_designator)); | 277 | sizeof(struct fsf_queue_designator)); |
257 | 278 | ||
258 | switch (status_buffer->status_type) { | 279 | switch (status_buffer->status_type) { |
259 | case FSF_STATUS_READ_SENSE_DATA_AVAIL: | 280 | case FSF_STATUS_READ_SENSE_DATA_AVAIL: |
260 | rec->type.status.payload_size = | 281 | rec->u.status.payload_size = |
261 | ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL; | 282 | ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL; |
262 | break; | 283 | break; |
263 | 284 | ||
264 | case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: | 285 | case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: |
265 | rec->type.status.payload_size = | 286 | rec->u.status.payload_size = |
266 | ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD; | 287 | ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD; |
267 | break; | 288 | break; |
268 | 289 | ||
@@ -270,119 +291,101 @@ zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, | |||
270 | switch (status_buffer->status_subtype) { | 291 | switch (status_buffer->status_subtype) { |
271 | case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: | 292 | case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: |
272 | case FSF_STATUS_READ_SUB_FDISC_FAILED: | 293 | case FSF_STATUS_READ_SUB_FDISC_FAILED: |
273 | rec->type.status.payload_size = | 294 | rec->u.status.payload_size = |
274 | sizeof(struct fsf_link_down_info); | 295 | sizeof(struct fsf_link_down_info); |
275 | } | 296 | } |
276 | break; | 297 | break; |
277 | 298 | ||
278 | case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: | 299 | case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: |
279 | rec->type.status.payload_size = | 300 | rec->u.status.payload_size = |
280 | ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT; | 301 | ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT; |
281 | break; | 302 | break; |
282 | } | 303 | } |
283 | memcpy(&rec->type.status.payload, | 304 | memcpy(&rec->u.status.payload, |
284 | &status_buffer->payload, rec->type.status.payload_size); | 305 | &status_buffer->payload, rec->u.status.payload_size); |
285 | } | 306 | } |
286 | 307 | ||
287 | debug_event(adapter->hba_dbf, 2, | 308 | debug_event(adapter->hba_dbf, 2, rec, sizeof(*rec)); |
288 | rec, sizeof(struct zfcp_hba_dbf_record)); | ||
289 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); | 309 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); |
290 | } | 310 | } |
291 | 311 | ||
292 | void | 312 | /** |
293 | zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, | 313 | * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure |
294 | unsigned int qdio_error, unsigned int siga_error, | 314 | * @adapter: adapter affected by this QDIO related event |
295 | int sbal_index, int sbal_count) | 315 | * @status: as passed by qdio module |
316 | * @qdio_error: as passed by qdio module | ||
317 | * @siga_error: as passed by qdio module | ||
318 | * @sbal_index: first buffer with error condition, as passed by qdio module | ||
319 | * @sbal_count: number of buffers affected, as passed by qdio module | ||
320 | */ | ||
321 | void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, | ||
322 | unsigned int qdio_error, unsigned int siga_error, | ||
323 | int sbal_index, int sbal_count) | ||
296 | { | 324 | { |
297 | struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf; | 325 | struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf; |
298 | unsigned long flags; | 326 | unsigned long flags; |
299 | 327 | ||
300 | spin_lock_irqsave(&adapter->hba_dbf_lock, flags); | 328 | spin_lock_irqsave(&adapter->hba_dbf_lock, flags); |
301 | memset(rec, 0, sizeof(struct zfcp_hba_dbf_record)); | 329 | memset(r, 0, sizeof(*r)); |
302 | strncpy(rec->tag, "qdio", ZFCP_DBF_TAG_SIZE); | 330 | strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); |
303 | rec->type.qdio.status = status; | 331 | r->u.qdio.status = status; |
304 | rec->type.qdio.qdio_error = qdio_error; | 332 | r->u.qdio.qdio_error = qdio_error; |
305 | rec->type.qdio.siga_error = siga_error; | 333 | r->u.qdio.siga_error = siga_error; |
306 | rec->type.qdio.sbal_index = sbal_index; | 334 | r->u.qdio.sbal_index = sbal_index; |
307 | rec->type.qdio.sbal_count = sbal_count; | 335 | r->u.qdio.sbal_count = sbal_count; |
308 | debug_event(adapter->hba_dbf, 0, | 336 | debug_event(adapter->hba_dbf, 0, r, sizeof(*r)); |
309 | rec, sizeof(struct zfcp_hba_dbf_record)); | ||
310 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); | 337 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); |
311 | } | 338 | } |
312 | 339 | ||
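Every trace event in this file follows the same shape: take the per-area spinlock, clear the adapter's single shared record buffer, fill it, pass it to debug_event(), and drop the lock. A minimal userspace sketch of that pattern, with a pthread mutex standing in for the spinlock and printf standing in for debug_event(); the struct layout and names below are invented for illustration only:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    /* One shared record buffer per trace area, filled and flushed under a
     * lock; layout and names are invented for this sketch. */
    struct qdio_rec {
        char tag[8];
        unsigned int status;
        int sbal_index;
    };

    static struct qdio_rec rec_buf;
    static pthread_mutex_t rec_lock = PTHREAD_MUTEX_INITIALIZER;

    static void emit(const struct qdio_rec *r)
    {
        /* Stand-in for debug_event(adapter->hba_dbf, level, r, sizeof(*r)). */
        printf("%s status=0x%08x sbal_index=%d\n", r->tag, r->status,
               r->sbal_index);
    }

    static void trace_qdio(unsigned int status, int sbal_index)
    {
        pthread_mutex_lock(&rec_lock);
        memset(&rec_buf, 0, sizeof(rec_buf));
        strncpy(rec_buf.tag, "qdio", sizeof(rec_buf.tag) - 1);
        rec_buf.status = status;
        rec_buf.sbal_index = sbal_index;
        emit(&rec_buf);
        pthread_mutex_unlock(&rec_lock);
    }

    int main(void)
    {
        trace_qdio(0x40, 3);
        return 0;
    }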
313 | static int | 340 | static void zfcp_hba_dbf_view_response(char **p, |
314 | zfcp_hba_dbf_view_response(char *out_buf, | 341 | struct zfcp_hba_dbf_record_response *r) |
315 | struct zfcp_hba_dbf_record_response *rec) | 342 | { |
316 | { | 343 | struct timespec t; |
317 | int len = 0; | 344 | |
318 | 345 | zfcp_dbf_out(p, "fsf_command", "0x%08x", r->fsf_command); | |
319 | len += zfcp_dbf_view(out_buf + len, "fsf_command", "0x%08x", | 346 | zfcp_dbf_out(p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); |
320 | rec->fsf_command); | 347 | zfcp_dbf_out(p, "fsf_seqno", "0x%08x", r->fsf_seqno); |
321 | len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx", | 348 | zfcp_dbf_timestamp(r->fsf_issued, &t); |
322 | rec->fsf_reqid); | 349 | zfcp_dbf_out(p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); |
323 | len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x", | 350 | zfcp_dbf_out(p, "fsf_prot_status", "0x%08x", r->fsf_prot_status); |
324 | rec->fsf_seqno); | 351 | zfcp_dbf_out(p, "fsf_status", "0x%08x", r->fsf_status); |
325 | len += zfcp_dbf_stck(out_buf + len, "fsf_issued", rec->fsf_issued); | 352 | zfcp_dbf_outd(p, "fsf_prot_status_qual", r->fsf_prot_status_qual, |
326 | len += zfcp_dbf_view(out_buf + len, "fsf_prot_status", "0x%08x", | 353 | FSF_PROT_STATUS_QUAL_SIZE, 0, FSF_PROT_STATUS_QUAL_SIZE); |
327 | rec->fsf_prot_status); | 354 | zfcp_dbf_outd(p, "fsf_status_qual", r->fsf_status_qual, |
328 | len += zfcp_dbf_view(out_buf + len, "fsf_status", "0x%08x", | 355 | FSF_STATUS_QUALIFIER_SIZE, 0, FSF_STATUS_QUALIFIER_SIZE); |
329 | rec->fsf_status); | 356 | zfcp_dbf_out(p, "fsf_req_status", "0x%08x", r->fsf_req_status); |
330 | len += zfcp_dbf_view_dump(out_buf + len, "fsf_prot_status_qual", | 357 | zfcp_dbf_out(p, "sbal_first", "0x%02x", r->sbal_first); |
331 | rec->fsf_prot_status_qual, | 358 | zfcp_dbf_out(p, "sbal_curr", "0x%02x", r->sbal_curr); |
332 | FSF_PROT_STATUS_QUAL_SIZE, | 359 | zfcp_dbf_out(p, "sbal_last", "0x%02x", r->sbal_last); |
333 | 0, FSF_PROT_STATUS_QUAL_SIZE); | 360 | zfcp_dbf_out(p, "pool", "0x%02x", r->pool); |
334 | len += zfcp_dbf_view_dump(out_buf + len, "fsf_status_qual", | 361 | |
335 | rec->fsf_status_qual, | 362 | switch (r->fsf_command) { |
336 | FSF_STATUS_QUALIFIER_SIZE, | ||
337 | 0, FSF_STATUS_QUALIFIER_SIZE); | ||
338 | len += zfcp_dbf_view(out_buf + len, "fsf_req_status", "0x%08x", | ||
339 | rec->fsf_req_status); | ||
340 | len += zfcp_dbf_view(out_buf + len, "sbal_first", "0x%02x", | ||
341 | rec->sbal_first); | ||
342 | len += zfcp_dbf_view(out_buf + len, "sbal_curr", "0x%02x", | ||
343 | rec->sbal_curr); | ||
344 | len += zfcp_dbf_view(out_buf + len, "sbal_last", "0x%02x", | ||
345 | rec->sbal_last); | ||
346 | len += zfcp_dbf_view(out_buf + len, "pool", "0x%02x", rec->pool); | ||
347 | |||
348 | switch (rec->fsf_command) { | ||
349 | case FSF_QTCB_FCP_CMND: | 363 | case FSF_QTCB_FCP_CMND: |
350 | if (rec->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) | 364 | if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) |
351 | break; | 365 | break; |
352 | len += zfcp_dbf_view(out_buf + len, "scsi_cmnd", "0x%0Lx", | 366 | zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); |
353 | rec->data.send_fcp.scsi_cmnd); | 367 | zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); |
354 | len += zfcp_dbf_view(out_buf + len, "scsi_serial", "0x%016Lx", | ||
355 | rec->data.send_fcp.scsi_serial); | ||
356 | break; | 368 | break; |
357 | 369 | ||
358 | case FSF_QTCB_OPEN_PORT_WITH_DID: | 370 | case FSF_QTCB_OPEN_PORT_WITH_DID: |
359 | case FSF_QTCB_CLOSE_PORT: | 371 | case FSF_QTCB_CLOSE_PORT: |
360 | case FSF_QTCB_CLOSE_PHYSICAL_PORT: | 372 | case FSF_QTCB_CLOSE_PHYSICAL_PORT: |
361 | len += zfcp_dbf_view(out_buf + len, "wwpn", "0x%016Lx", | 373 | zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.port.wwpn); |
362 | rec->data.port.wwpn); | 374 | zfcp_dbf_out(p, "d_id", "0x%06x", r->u.port.d_id); |
363 | len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x", | 375 | zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.port.port_handle); |
364 | rec->data.port.d_id); | ||
365 | len += zfcp_dbf_view(out_buf + len, "port_handle", "0x%08x", | ||
366 | rec->data.port.port_handle); | ||
367 | break; | 376 | break; |
368 | 377 | ||
369 | case FSF_QTCB_OPEN_LUN: | 378 | case FSF_QTCB_OPEN_LUN: |
370 | case FSF_QTCB_CLOSE_LUN: | 379 | case FSF_QTCB_CLOSE_LUN: |
371 | len += zfcp_dbf_view(out_buf + len, "wwpn", "0x%016Lx", | 380 | zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.unit.wwpn); |
372 | rec->data.unit.wwpn); | 381 | zfcp_dbf_out(p, "fcp_lun", "0x%016Lx", r->u.unit.fcp_lun); |
373 | len += zfcp_dbf_view(out_buf + len, "fcp_lun", "0x%016Lx", | 382 | zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.unit.port_handle); |
374 | rec->data.unit.fcp_lun); | 383 | zfcp_dbf_out(p, "lun_handle", "0x%08x", r->u.unit.lun_handle); |
375 | len += zfcp_dbf_view(out_buf + len, "port_handle", "0x%08x", | ||
376 | rec->data.unit.port_handle); | ||
377 | len += zfcp_dbf_view(out_buf + len, "lun_handle", "0x%08x", | ||
378 | rec->data.unit.lun_handle); | ||
379 | break; | 384 | break; |
380 | 385 | ||
381 | case FSF_QTCB_SEND_ELS: | 386 | case FSF_QTCB_SEND_ELS: |
382 | len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x", | 387 | zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id); |
383 | rec->data.send_els.d_id); | 388 | zfcp_dbf_out(p, "ls_code", "0x%02x", r->u.els.ls_code); |
384 | len += zfcp_dbf_view(out_buf + len, "ls_code", "0x%02x", | ||
385 | rec->data.send_els.ls_code); | ||
386 | break; | 389 | break; |
387 | 390 | ||
388 | case FSF_QTCB_ABORT_FCP_CMND: | 391 | case FSF_QTCB_ABORT_FCP_CMND: |
@@ -393,74 +396,52 @@ zfcp_hba_dbf_view_response(char *out_buf, | |||
393 | case FSF_QTCB_UPLOAD_CONTROL_FILE: | 396 | case FSF_QTCB_UPLOAD_CONTROL_FILE: |
394 | break; | 397 | break; |
395 | } | 398 | } |
396 | |||
397 | return len; | ||
398 | } | 399 | } |
399 | 400 | ||
400 | static int | 401 | static void zfcp_hba_dbf_view_status(char **p, |
401 | zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) | 402 | struct zfcp_hba_dbf_record_status *r) |
402 | { | 403 | { |
403 | int len = 0; | 404 | zfcp_dbf_out(p, "failed", "0x%02x", r->failed); |
404 | 405 | zfcp_dbf_out(p, "status_type", "0x%08x", r->status_type); | |
405 | len += zfcp_dbf_view(out_buf + len, "failed", "0x%02x", rec->failed); | 406 | zfcp_dbf_out(p, "status_subtype", "0x%08x", r->status_subtype); |
406 | len += zfcp_dbf_view(out_buf + len, "status_type", "0x%08x", | 407 | zfcp_dbf_outd(p, "queue_designator", (char *)&r->queue_designator, |
407 | rec->status_type); | 408 | sizeof(struct fsf_queue_designator), 0, |
408 | len += zfcp_dbf_view(out_buf + len, "status_subtype", "0x%08x", | 409 | sizeof(struct fsf_queue_designator)); |
409 | rec->status_subtype); | 410 | zfcp_dbf_outd(p, "payload", (char *)&r->payload, r->payload_size, 0, |
410 | len += zfcp_dbf_view_dump(out_buf + len, "queue_designator", | 411 | r->payload_size); |
411 | (char *)&rec->queue_designator, | ||
412 | sizeof(struct fsf_queue_designator), | ||
413 | 0, sizeof(struct fsf_queue_designator)); | ||
414 | len += zfcp_dbf_view_dump(out_buf + len, "payload", | ||
415 | (char *)&rec->payload, | ||
416 | rec->payload_size, 0, rec->payload_size); | ||
417 | |||
418 | return len; | ||
419 | } | 412 | } |
420 | 413 | ||
421 | static int | 414 | static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r) |
422 | zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec) | ||
423 | { | 415 | { |
424 | int len = 0; | 416 | zfcp_dbf_out(p, "status", "0x%08x", r->status); |
425 | 417 | zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error); | |
426 | len += zfcp_dbf_view(out_buf + len, "status", "0x%08x", rec->status); | 418 | zfcp_dbf_out(p, "siga_error", "0x%08x", r->siga_error); |
427 | len += zfcp_dbf_view(out_buf + len, "qdio_error", "0x%08x", | 419 | zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index); |
428 | rec->qdio_error); | 420 | zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count); |
429 | len += zfcp_dbf_view(out_buf + len, "siga_error", "0x%08x", | ||
430 | rec->siga_error); | ||
431 | len += zfcp_dbf_view(out_buf + len, "sbal_index", "0x%02x", | ||
432 | rec->sbal_index); | ||
433 | len += zfcp_dbf_view(out_buf + len, "sbal_count", "0x%02x", | ||
434 | rec->sbal_count); | ||
435 | |||
436 | return len; | ||
437 | } | 421 | } |
438 | 422 | ||
439 | static int | 423 | static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view, |
440 | zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view, | 424 | char *out_buf, const char *in_buf) |
441 | char *out_buf, const char *in_buf) | ||
442 | { | 425 | { |
443 | struct zfcp_hba_dbf_record *rec = (struct zfcp_hba_dbf_record *)in_buf; | 426 | struct zfcp_hba_dbf_record *r = (struct zfcp_hba_dbf_record *)in_buf; |
444 | int len = 0; | 427 | char *p = out_buf; |
445 | 428 | ||
446 | if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) | 429 | if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) |
447 | return 0; | 430 | return 0; |
448 | 431 | ||
449 | len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag); | 432 | zfcp_dbf_tag(&p, "tag", r->tag); |
450 | if (isalpha(rec->tag2[0])) | 433 | if (isalpha(r->tag2[0])) |
451 | len += zfcp_dbf_tag(out_buf + len, "tag2", rec->tag2); | 434 | zfcp_dbf_tag(&p, "tag2", r->tag2); |
452 | if (strncmp(rec->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0) | ||
453 | len += zfcp_hba_dbf_view_response(out_buf + len, | ||
454 | &rec->type.response); | ||
455 | else if (strncmp(rec->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0) | ||
456 | len += zfcp_hba_dbf_view_status(out_buf + len, | ||
457 | &rec->type.status); | ||
458 | else if (strncmp(rec->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0) | ||
459 | len += zfcp_hba_dbf_view_qdio(out_buf + len, &rec->type.qdio); | ||
460 | 435 | ||
461 | len += sprintf(out_buf + len, "\n"); | 436 | if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0) |
437 | zfcp_hba_dbf_view_response(&p, &r->u.response); | ||
438 | else if (strncmp(r->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0) | ||
439 | zfcp_hba_dbf_view_status(&p, &r->u.status); | ||
440 | else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0) | ||
441 | zfcp_hba_dbf_view_qdio(&p, &r->u.qdio); | ||
462 | 442 | ||
463 | return len; | 443 | p += sprintf(p, "\n"); |
444 | return p - out_buf; | ||
464 | } | 445 | } |
465 | 446 | ||
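The view functions above are converted from accumulating a length with zfcp_dbf_view(out_buf + len, ...) to advancing a char * cursor through helpers such as zfcp_dbf_out(). Those helpers are defined outside this hunk; a small sketch of the cursor-advancing idea, assuming a fixed-width label followed by a printf-formatted value (the exact layout and helper signature are assumptions):

    #include <stdarg.h>
    #include <stdio.h>

    /* Append "label value\n" at *p and advance the cursor; the real
     * zfcp_dbf_out() may format differently, this only shows the
     * pointer-advancing idea. */
    static void dbf_out(char **p, const char *label, const char *fmt, ...)
    {
        va_list args;

        *p += sprintf(*p, "%-20s ", label);
        va_start(args, fmt);
        *p += vsprintf(*p, fmt, args);
        va_end(args);
        *p += sprintf(*p, "\n");
    }

    int main(void)
    {
        char buf[256];
        char *p = buf;

        dbf_out(&p, "fsf_command", "0x%08x", 0x12u);
        dbf_out(&p, "fsf_seqno", "0x%08x", 7u);
        fputs(buf, stdout);
        printf("record length: %d\n", (int)(p - buf));
        return 0;
    }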
466 | static struct debug_view zfcp_hba_dbf_view = { | 447 | static struct debug_view zfcp_hba_dbf_view = { |
@@ -472,219 +453,570 @@ static struct debug_view zfcp_hba_dbf_view = { | |||
472 | NULL | 453 | NULL |
473 | }; | 454 | }; |
474 | 455 | ||
475 | static void | 456 | static const char *zfcp_rec_dbf_tags[] = { |
476 | _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, | 457 | [ZFCP_REC_DBF_ID_THREAD] = "thread", |
477 | u32 s_id, u32 d_id, void *buffer, int buflen) | 458 | [ZFCP_REC_DBF_ID_TARGET] = "target", |
459 | [ZFCP_REC_DBF_ID_TRIGGER] = "trigger", | ||
460 | [ZFCP_REC_DBF_ID_ACTION] = "action", | ||
461 | }; | ||
462 | |||
463 | static const char *zfcp_rec_dbf_ids[] = { | ||
464 | [1] = "new", | ||
465 | [2] = "ready", | ||
466 | [3] = "kill", | ||
467 | [4] = "down sleep", | ||
468 | [5] = "down wakeup", | ||
469 | [6] = "down sleep ecd", | ||
470 | [7] = "down wakeup ecd", | ||
471 | [8] = "down sleep epd", | ||
472 | [9] = "down wakeup epd", | ||
473 | [10] = "online", | ||
474 | [11] = "operational", | ||
475 | [12] = "scsi slave destroy", | ||
476 | [13] = "propagate failed adapter", | ||
477 | [14] = "propagate failed port", | ||
478 | [15] = "block adapter", | ||
479 | [16] = "unblock adapter", | ||
480 | [17] = "block port", | ||
481 | [18] = "unblock port", | ||
482 | [19] = "block unit", | ||
483 | [20] = "unblock unit", | ||
484 | [21] = "unit recovery failed", | ||
485 | [22] = "port recovery failed", | ||
486 | [23] = "adapter recovery failed", | ||
487 | [24] = "qdio queues down", | ||
488 | [25] = "p2p failed", | ||
489 | [26] = "nameserver lookup failed", | ||
490 | [27] = "nameserver port failed", | ||
491 | [28] = "link up", | ||
492 | [29] = "link down", | ||
493 | [30] = "link up status read", | ||
494 | [31] = "open port failed", | ||
495 | [32] = "open port failed", | ||
496 | [33] = "close port", | ||
497 | [34] = "open unit failed", | ||
498 | [35] = "exclusive open unit failed", | ||
499 | [36] = "shared open unit failed", | ||
500 | [37] = "link down", | ||
501 | [38] = "link down status read no link", | ||
502 | [39] = "link down status read fdisc login", | ||
503 | [40] = "link down status read firmware update", | ||
504 | [41] = "link down status read unknown reason", | ||
505 | [42] = "link down ecd incomplete", | ||
506 | [43] = "link down epd incomplete", | ||
507 | [44] = "sysfs adapter recovery", | ||
508 | [45] = "sysfs port recovery", | ||
509 | [46] = "sysfs unit recovery", | ||
510 | [47] = "port boxed abort", | ||
511 | [48] = "unit boxed abort", | ||
512 | [49] = "port boxed ct", | ||
513 | [50] = "port boxed close physical", | ||
514 | [51] = "port boxed open unit", | ||
515 | [52] = "port boxed close unit", | ||
516 | [53] = "port boxed fcp", | ||
517 | [54] = "unit boxed fcp", | ||
518 | [55] = "port access denied ct", | ||
519 | [56] = "port access denied els", | ||
520 | [57] = "port access denied open port", | ||
521 | [58] = "port access denied close physical", | ||
522 | [59] = "unit access denied open unit", | ||
523 | [60] = "shared unit access denied open unit", | ||
524 | [61] = "unit access denied fcp", | ||
525 | [62] = "request timeout", | ||
526 | [63] = "adisc link test reject or timeout", | ||
527 | [64] = "adisc link test d_id changed", | ||
528 | [65] = "adisc link test failed", | ||
529 | [66] = "recovery out of memory", | ||
530 | [67] = "adapter recovery repeated after state change", | ||
531 | [68] = "port recovery repeated after state change", | ||
532 | [69] = "unit recovery repeated after state change", | ||
533 | [70] = "port recovery follow-up after successful adapter recovery", | ||
534 | [71] = "adapter recovery escalation after failed adapter recovery", | ||
535 | [72] = "port recovery follow-up after successful physical port " | ||
536 | "recovery", | ||
537 | [73] = "adapter recovery escalation after failed physical port " | ||
538 | "recovery", | ||
539 | [74] = "unit recovery follow-up after successful port recovery", | ||
540 | [75] = "physical port recovery escalation after failed port " | ||
541 | "recovery", | ||
542 | [76] = "port recovery escalation after failed unit recovery", | ||
543 | [77] = "recovery opening nameserver port", | ||
544 | [78] = "duplicate request id", | ||
545 | [79] = "link down", | ||
546 | [80] = "exclusive read-only unit access unsupported", | ||
547 | [81] = "shared read-write unit access unsupported", | ||
548 | [82] = "incoming rscn", | ||
549 | [83] = "incoming plogi", | ||
550 | [84] = "incoming logo", | ||
551 | [85] = "online", | ||
552 | [86] = "offline", | ||
553 | [87] = "ccw device gone", | ||
554 | [88] = "ccw device no path", | ||
555 | [89] = "ccw device operational", | ||
556 | [90] = "ccw device shutdown", | ||
557 | [91] = "sysfs port addition", | ||
558 | [92] = "sysfs port removal", | ||
559 | [93] = "sysfs adapter recovery", | ||
560 | [94] = "sysfs unit addition", | ||
561 | [95] = "sysfs unit removal", | ||
562 | [96] = "sysfs port recovery", | ||
563 | [97] = "sysfs unit recovery", | ||
564 | [98] = "sequence number mismatch", | ||
565 | [99] = "link up", | ||
566 | [100] = "error state", | ||
567 | [101] = "status read physical port closed", | ||
568 | [102] = "link up status read", | ||
569 | [103] = "too many failed status read buffers", | ||
570 | [104] = "port handle not valid abort", | ||
571 | [105] = "lun handle not valid abort", | ||
572 | [106] = "port handle not valid ct", | ||
573 | [107] = "port handle not valid close port", | ||
574 | [108] = "port handle not valid close physical port", | ||
575 | [109] = "port handle not valid open unit", | ||
576 | [110] = "port handle not valid close unit", | ||
577 | [111] = "lun handle not valid close unit", | ||
578 | [112] = "port handle not valid fcp", | ||
579 | [113] = "lun handle not valid fcp", | ||
580 | [114] = "handle mismatch fcp", | ||
581 | [115] = "lun not valid fcp", | ||
582 | [116] = "qdio send failed", | ||
583 | [117] = "version mismatch", | ||
584 | [118] = "incompatible qtcb type", | ||
585 | [119] = "unknown protocol status", | ||
586 | [120] = "unknown fsf command", | ||
587 | [121] = "no recommendation for status qualifier", | ||
588 | [122] = "status read physical port closed in error", | ||
589 | [123] = "fc service class not supported ct", | ||
590 | [124] = "fc service class not supported els", | ||
591 | [125] = "need newer zfcp", | ||
592 | [126] = "need newer microcode", | ||
593 | [127] = "arbitrated loop not supported", | ||
594 | [128] = "unknown topology", | ||
595 | [129] = "qtcb size mismatch", | ||
596 | [130] = "unknown fsf status ecd", | ||
597 | [131] = "fcp request too big", | ||
598 | [132] = "fc service class not supported fcp", | ||
599 | [133] = "data direction not valid fcp", | ||
600 | [134] = "command length not valid fcp", | ||
601 | [135] = "status read act update", | ||
602 | [136] = "status read cfdc update", | ||
603 | [137] = "hbaapi port open", | ||
604 | [138] = "hbaapi unit open", | ||
605 | [139] = "hbaapi unit shutdown", | ||
606 | [140] = "qdio error", | ||
607 | [141] = "scsi host reset", | ||
608 | [142] = "dismissing fsf request for recovery action", | ||
609 | [143] = "recovery action timed out", | ||
610 | [144] = "recovery action gone", | ||
611 | [145] = "recovery action being processed", | ||
612 | [146] = "recovery action ready for next step", | ||
613 | }; | ||
614 | |||
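The hint table above maps the numeric trigger id recorded with each event to a short description, using C designated initializers so the index is explicit. A self-contained sketch of the lookup pattern; the entries and the bounds check below are illustrative only, as the driver presumably indexes only with ids it generated itself:

    #include <stdio.h>

    /* Illustrative table only; the real entries live in zfcp_rec_dbf_ids[]. */
    static const char *hints[] = {
        [1] = "new",
        [3] = "kill",
        [10] = "online",
    };

    static const char *hint(unsigned int id)
    {
        /* Bounds check added just to keep the sketch safe for any input. */
        if (id >= sizeof(hints) / sizeof(hints[0]) || !hints[id])
            return "unknown";
        return hints[id];
    }

    int main(void)
    {
        printf("%s, %s, %s\n", hint(3), hint(10), hint(2));
        return 0;
    }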
615 | static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view, | ||
616 | char *buf, const char *_rec) | ||
617 | { | ||
618 | struct zfcp_rec_dbf_record *r = (struct zfcp_rec_dbf_record *)_rec; | ||
619 | char *p = buf; | ||
620 | |||
621 | zfcp_dbf_outs(&p, "tag", zfcp_rec_dbf_tags[r->id]); | ||
622 | zfcp_dbf_outs(&p, "hint", zfcp_rec_dbf_ids[r->id2]); | ||
623 | zfcp_dbf_out(&p, "id", "%d", r->id2); | ||
624 | switch (r->id) { | ||
625 | case ZFCP_REC_DBF_ID_THREAD: | ||
626 | zfcp_dbf_out(&p, "total", "%d", r->u.thread.total); | ||
627 | zfcp_dbf_out(&p, "ready", "%d", r->u.thread.ready); | ||
628 | zfcp_dbf_out(&p, "running", "%d", r->u.thread.running); | ||
629 | break; | ||
630 | case ZFCP_REC_DBF_ID_TARGET: | ||
631 | zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.target.ref); | ||
632 | zfcp_dbf_out(&p, "status", "0x%08x", r->u.target.status); | ||
633 | zfcp_dbf_out(&p, "erp_count", "%d", r->u.target.erp_count); | ||
634 | zfcp_dbf_out(&p, "d_id", "0x%06x", r->u.target.d_id); | ||
635 | zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.target.wwpn); | ||
636 | zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.target.fcp_lun); | ||
637 | break; | ||
638 | case ZFCP_REC_DBF_ID_TRIGGER: | ||
639 | zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.trigger.ref); | ||
640 | zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.trigger.action); | ||
641 | zfcp_dbf_out(&p, "requested", "%d", r->u.trigger.want); | ||
642 | zfcp_dbf_out(&p, "executed", "%d", r->u.trigger.need); | ||
643 | zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.trigger.wwpn); | ||
644 | zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun); | ||
645 | zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as); | ||
646 | zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps); | ||
647 | zfcp_dbf_out(&p, "unit_status", "0x%08x", r->u.trigger.us); | ||
648 | break; | ||
649 | case ZFCP_REC_DBF_ID_ACTION: | ||
650 | zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action); | ||
651 | zfcp_dbf_out(&p, "fsf_req", "0x%016Lx", r->u.action.fsf_req); | ||
652 | zfcp_dbf_out(&p, "status", "0x%08Lx", r->u.action.status); | ||
653 | zfcp_dbf_out(&p, "step", "0x%08Lx", r->u.action.step); | ||
654 | break; | ||
655 | } | ||
656 | p += sprintf(p, "\n"); | ||
657 | return p - buf; | ||
658 | } | ||
659 | |||
660 | static struct debug_view zfcp_rec_dbf_view = { | ||
661 | "structured", | ||
662 | NULL, | ||
663 | &zfcp_dbf_view_header, | ||
664 | &zfcp_rec_dbf_view_format, | ||
665 | NULL, | ||
666 | NULL | ||
667 | }; | ||
668 | |||
669 | /** | ||
670 | * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation | ||
671 | * @id2: identifier for event | ||
672 | * @adapter: adapter | ||
673 | * @lock: non-zero value indicates that erp_lock has not yet been acquired | ||
674 | */ | ||
675 | void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter, int lock) | ||
676 | { | ||
677 | struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; | ||
678 | unsigned long flags = 0; | ||
679 | struct list_head *entry; | ||
680 | unsigned ready = 0, running = 0, total; | ||
681 | |||
682 | if (lock) | ||
683 | read_lock_irqsave(&adapter->erp_lock, flags); | ||
684 | list_for_each(entry, &adapter->erp_ready_head) | ||
685 | ready++; | ||
686 | list_for_each(entry, &adapter->erp_running_head) | ||
687 | running++; | ||
688 | total = adapter->erp_total_count; | ||
689 | if (lock) | ||
690 | read_unlock_irqrestore(&adapter->erp_lock, flags); | ||
691 | |||
692 | spin_lock_irqsave(&adapter->rec_dbf_lock, flags); | ||
693 | memset(r, 0, sizeof(*r)); | ||
694 | r->id = ZFCP_REC_DBF_ID_THREAD; | ||
695 | r->id2 = id2; | ||
696 | r->u.thread.total = total; | ||
697 | r->u.thread.ready = ready; | ||
698 | r->u.thread.running = running; | ||
699 | debug_event(adapter->rec_dbf, 5, r, sizeof(*r)); | ||
700 | spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); | ||
701 | } | ||
702 | |||
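zfcp_rec_dbf_event_thread() counts the ready and running lists under erp_lock, but takes that lock itself only when the caller signals through the lock argument that it is not already held. A hedged userspace sketch of the same conditional-locking idea, using a pthread rwlock as a stand-in:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t erp_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int ready_count = 2, running_count = 1;

    /* Take the lock only if the caller does not already hold it, as the
     * 'lock' argument of zfcp_rec_dbf_event_thread() indicates. */
    static void snapshot(int need_lock, int *ready, int *running)
    {
        if (need_lock)
            pthread_rwlock_rdlock(&erp_lock);
        *ready = ready_count;
        *running = running_count;
        if (need_lock)
            pthread_rwlock_unlock(&erp_lock);
    }

    int main(void)
    {
        int ready, running;

        snapshot(1, &ready, &running);
        printf("ready=%d running=%d\n", ready, running);
        return 0;
    }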
703 | static void zfcp_rec_dbf_event_target(u8 id2, void *ref, | ||
704 | struct zfcp_adapter *adapter, | ||
705 | atomic_t *status, atomic_t *erp_count, | ||
706 | u64 wwpn, u32 d_id, u64 fcp_lun) | ||
707 | { | ||
708 | struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; | ||
709 | unsigned long flags; | ||
710 | |||
711 | spin_lock_irqsave(&adapter->rec_dbf_lock, flags); | ||
712 | memset(r, 0, sizeof(*r)); | ||
713 | r->id = ZFCP_REC_DBF_ID_TARGET; | ||
714 | r->id2 = id2; | ||
715 | r->u.target.ref = (unsigned long)ref; | ||
716 | r->u.target.status = atomic_read(status); | ||
717 | r->u.target.wwpn = wwpn; | ||
718 | r->u.target.d_id = d_id; | ||
719 | r->u.target.fcp_lun = fcp_lun; | ||
720 | r->u.target.erp_count = atomic_read(erp_count); | ||
721 | debug_event(adapter->rec_dbf, 3, r, sizeof(*r)); | ||
722 | spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); | ||
723 | } | ||
724 | |||
725 | /** | ||
726 | * zfcp_rec_dbf_event_adapter - trace event for adapter state change | ||
727 | * @id: identifier for trigger of state change | ||
728 | * @ref: additional reference (e.g. request) | ||
729 | * @adapter: adapter | ||
730 | */ | ||
731 | void zfcp_rec_dbf_event_adapter(u8 id, void *ref, struct zfcp_adapter *adapter) | ||
732 | { | ||
733 | zfcp_rec_dbf_event_target(id, ref, adapter, &adapter->status, | ||
734 | &adapter->erp_counter, 0, 0, 0); | ||
735 | } | ||
736 | |||
737 | /** | ||
738 | * zfcp_rec_dbf_event_port - trace event for port state change | ||
739 | * @id: identifier for trigger of state change | ||
740 | * @ref: additional reference (e.g. request) | ||
741 | * @port: port | ||
742 | */ | ||
743 | void zfcp_rec_dbf_event_port(u8 id, void *ref, struct zfcp_port *port) | ||
478 | { | 744 | { |
479 | struct zfcp_send_ct *send_ct = (struct zfcp_send_ct *)fsf_req->data; | ||
480 | struct zfcp_port *port = send_ct->port; | ||
481 | struct zfcp_adapter *adapter = port->adapter; | 745 | struct zfcp_adapter *adapter = port->adapter; |
482 | struct ct_hdr *header = (struct ct_hdr *)buffer; | 746 | |
483 | struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf; | 747 | zfcp_rec_dbf_event_target(id, ref, adapter, &port->status, |
484 | struct zfcp_san_dbf_record_ct *ct = &rec->type.ct; | 748 | &port->erp_counter, port->wwpn, port->d_id, |
749 | 0); | ||
750 | } | ||
751 | |||
752 | /** | ||
753 | * zfcp_rec_dbf_event_unit - trace event for unit state change | ||
754 | * @id: identifier for trigger of state change | ||
755 | * @ref: additional reference (e.g. request) | ||
756 | * @unit: unit | ||
757 | */ | ||
758 | void zfcp_rec_dbf_event_unit(u8 id, void *ref, struct zfcp_unit *unit) | ||
759 | { | ||
760 | struct zfcp_port *port = unit->port; | ||
761 | struct zfcp_adapter *adapter = port->adapter; | ||
762 | |||
763 | zfcp_rec_dbf_event_target(id, ref, adapter, &unit->status, | ||
764 | &unit->erp_counter, port->wwpn, port->d_id, | ||
765 | unit->fcp_lun); | ||
766 | } | ||
767 | |||
768 | /** | ||
769 | * zfcp_rec_dbf_event_trigger - trace event for triggered error recovery | ||
770 | * @id2: identifier for error recovery trigger | ||
771 | * @ref: additional reference (e.g. request) | ||
772 | * @want: originally requested error recovery action | ||
773 | * @need: error recovery action actually initiated | ||
774 | * @action: address of error recovery action struct | ||
775 | * @adapter: adapter | ||
776 | * @port: port | ||
777 | * @unit: unit | ||
778 | */ | ||
779 | void zfcp_rec_dbf_event_trigger(u8 id2, void *ref, u8 want, u8 need, | ||
780 | void *action, struct zfcp_adapter *adapter, | ||
781 | struct zfcp_port *port, struct zfcp_unit *unit) | ||
782 | { | ||
783 | struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; | ||
485 | unsigned long flags; | 784 | unsigned long flags; |
486 | 785 | ||
487 | spin_lock_irqsave(&adapter->san_dbf_lock, flags); | 786 | spin_lock_irqsave(&adapter->rec_dbf_lock, flags); |
488 | memset(rec, 0, sizeof(struct zfcp_san_dbf_record)); | 787 | memset(r, 0, sizeof(*r)); |
489 | strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); | 788 | r->id = ZFCP_REC_DBF_ID_TRIGGER; |
490 | rec->fsf_reqid = (unsigned long)fsf_req; | 789 | r->id2 = id2; |
491 | rec->fsf_seqno = fsf_req->seq_no; | 790 | r->u.trigger.ref = (unsigned long)ref; |
492 | rec->s_id = s_id; | 791 | r->u.trigger.want = want; |
493 | rec->d_id = d_id; | 792 | r->u.trigger.need = need; |
494 | if (strncmp(tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) { | 793 | r->u.trigger.action = (unsigned long)action; |
495 | ct->type.request.cmd_req_code = header->cmd_rsp_code; | 794 | r->u.trigger.as = atomic_read(&adapter->status); |
496 | ct->type.request.revision = header->revision; | 795 | if (port) { |
497 | ct->type.request.gs_type = header->gs_type; | 796 | r->u.trigger.ps = atomic_read(&port->status); |
498 | ct->type.request.gs_subtype = header->gs_subtype; | 797 | r->u.trigger.wwpn = port->wwpn; |
499 | ct->type.request.options = header->options; | ||
500 | ct->type.request.max_res_size = header->max_res_size; | ||
501 | } else if (strncmp(tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) { | ||
502 | ct->type.response.cmd_rsp_code = header->cmd_rsp_code; | ||
503 | ct->type.response.revision = header->revision; | ||
504 | ct->type.response.reason_code = header->reason_code; | ||
505 | ct->type.response.reason_code_expl = header->reason_code_expl; | ||
506 | ct->type.response.vendor_unique = header->vendor_unique; | ||
507 | } | 798 | } |
508 | ct->payload_size = | 799 | if (unit) { |
509 | min(buflen - (int)sizeof(struct ct_hdr), ZFCP_DBF_CT_PAYLOAD); | 800 | r->u.trigger.us = atomic_read(&unit->status); |
510 | memcpy(ct->payload, buffer + sizeof(struct ct_hdr), ct->payload_size); | 801 | r->u.trigger.fcp_lun = unit->fcp_lun; |
511 | debug_event(adapter->san_dbf, 3, | 802 | } |
512 | rec, sizeof(struct zfcp_san_dbf_record)); | 803 | debug_event(adapter->rec_dbf, action ? 1 : 4, r, sizeof(*r)); |
513 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); | 804 | spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); |
514 | } | 805 | } |
515 | 806 | ||
807 | /** | ||
808 | * zfcp_rec_dbf_event_action - trace event showing progress of recovery action | ||
809 | * @id2: identifier | ||
810 | * @erp_action: error recovery action struct pointer | ||
811 | */ | ||
812 | void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action) | ||
813 | { | ||
814 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
815 | struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; | ||
816 | unsigned long flags; | ||
817 | |||
818 | spin_lock_irqsave(&adapter->rec_dbf_lock, flags); | ||
819 | memset(r, 0, sizeof(*r)); | ||
820 | r->id = ZFCP_REC_DBF_ID_ACTION; | ||
821 | r->id2 = id2; | ||
822 | r->u.action.action = (unsigned long)erp_action; | ||
823 | r->u.action.status = erp_action->status; | ||
824 | r->u.action.step = erp_action->step; | ||
825 | r->u.action.fsf_req = (unsigned long)erp_action->fsf_req; | ||
826 | debug_event(adapter->rec_dbf, 4, r, sizeof(*r)); | ||
827 | spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); | ||
828 | } | ||
829 | |||
830 | /** | ||
831 | * zfcp_san_dbf_event_ct_request - trace event for issued CT request | ||
832 | * @fsf_req: request containing issued CT data | ||
833 | */ | ||
516 | void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) | 834 | void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) |
517 | { | 835 | { |
518 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; | 836 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; |
519 | struct zfcp_port *port = ct->port; | 837 | struct zfcp_port *port = ct->port; |
520 | struct zfcp_adapter *adapter = port->adapter; | 838 | struct zfcp_adapter *adapter = port->adapter; |
839 | struct ct_hdr *hdr = zfcp_sg_to_address(ct->req); | ||
840 | struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf; | ||
841 | struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req; | ||
842 | unsigned long flags; | ||
521 | 843 | ||
522 | _zfcp_san_dbf_event_common_ct("octc", fsf_req, | 844 | spin_lock_irqsave(&adapter->san_dbf_lock, flags); |
523 | fc_host_port_id(adapter->scsi_host), | 845 | memset(r, 0, sizeof(*r)); |
524 | port->d_id, zfcp_sg_to_address(ct->req), | 846 | strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); |
525 | ct->req->length); | 847 | r->fsf_reqid = (unsigned long)fsf_req; |
848 | r->fsf_seqno = fsf_req->seq_no; | ||
849 | r->s_id = fc_host_port_id(adapter->scsi_host); | ||
850 | r->d_id = port->d_id; | ||
851 | oct->cmd_req_code = hdr->cmd_rsp_code; | ||
852 | oct->revision = hdr->revision; | ||
853 | oct->gs_type = hdr->gs_type; | ||
854 | oct->gs_subtype = hdr->gs_subtype; | ||
855 | oct->options = hdr->options; | ||
856 | oct->max_res_size = hdr->max_res_size; | ||
857 | oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr), | ||
858 | ZFCP_DBF_CT_PAYLOAD); | ||
859 | memcpy(oct->payload, (void *)hdr + sizeof(struct ct_hdr), oct->len); | ||
860 | debug_event(adapter->san_dbf, 3, r, sizeof(*r)); | ||
861 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); | ||
526 | } | 862 | } |
527 | 863 | ||
864 | /** | ||
865 | * zfcp_san_dbf_event_ct_response - trace event for completion of CT request | ||
866 | * @fsf_req: request containing CT response | ||
867 | */ | ||
528 | void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) | 868 | void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) |
529 | { | 869 | { |
530 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; | 870 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; |
531 | struct zfcp_port *port = ct->port; | 871 | struct zfcp_port *port = ct->port; |
532 | struct zfcp_adapter *adapter = port->adapter; | 872 | struct zfcp_adapter *adapter = port->adapter; |
873 | struct ct_hdr *hdr = zfcp_sg_to_address(ct->resp); | ||
874 | struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf; | ||
875 | struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp; | ||
876 | unsigned long flags; | ||
533 | 877 | ||
534 | _zfcp_san_dbf_event_common_ct("rctc", fsf_req, port->d_id, | 878 | spin_lock_irqsave(&adapter->san_dbf_lock, flags); |
535 | fc_host_port_id(adapter->scsi_host), | 879 | memset(r, 0, sizeof(*r)); |
536 | zfcp_sg_to_address(ct->resp), | 880 | strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); |
537 | ct->resp->length); | 881 | r->fsf_reqid = (unsigned long)fsf_req; |
882 | r->fsf_seqno = fsf_req->seq_no; | ||
883 | r->s_id = port->d_id; | ||
884 | r->d_id = fc_host_port_id(adapter->scsi_host); | ||
885 | rct->cmd_rsp_code = hdr->cmd_rsp_code; | ||
886 | rct->revision = hdr->revision; | ||
887 | rct->reason_code = hdr->reason_code; | ||
888 | rct->expl = hdr->reason_code_expl; | ||
889 | rct->vendor_unique = hdr->vendor_unique; | ||
890 | rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr), | ||
891 | ZFCP_DBF_CT_PAYLOAD); | ||
892 | memcpy(rct->payload, (void *)hdr + sizeof(struct ct_hdr), rct->len); | ||
893 | debug_event(adapter->san_dbf, 3, r, sizeof(*r)); | ||
894 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); | ||
538 | } | 895 | } |
539 | 896 | ||
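Both CT trace events copy only the part of the message that follows the CT header, clamped to ZFCP_DBF_CT_PAYLOAD via min(). A small sketch of that clamping; the constants and names are stand-ins, not the driver's values:

    #include <stdio.h>
    #include <string.h>

    #define HDR_LEN 16      /* stand-in for sizeof(struct ct_hdr) */
    #define MAX_PAYLOAD 24  /* stand-in for ZFCP_DBF_CT_PAYLOAD */

    /* Copy at most MAX_PAYLOAD bytes of whatever follows the header. */
    static int trace_payload(char *dst, const char *msg, int msg_len)
    {
        int len = msg_len - HDR_LEN;

        if (len > MAX_PAYLOAD)
            len = MAX_PAYLOAD;
        if (len < 0)
            len = 0;
        memcpy(dst, msg + HDR_LEN, len);
        return len;
    }

    int main(void)
    {
        char msg[64];
        char out[MAX_PAYLOAD];
        int n;

        memset(msg, 'x', sizeof(msg));
        n = trace_payload(out, msg, sizeof(msg));
        printf("traced %d of %d payload bytes\n", n, (int)sizeof(msg) - HDR_LEN);
        return 0;
    }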
540 | static void | 897 | static void zfcp_san_dbf_event_els(const char *tag, int level, |
541 | _zfcp_san_dbf_event_common_els(const char *tag, int level, | 898 | struct zfcp_fsf_req *fsf_req, u32 s_id, |
542 | struct zfcp_fsf_req *fsf_req, u32 s_id, | 899 | u32 d_id, u8 ls_code, void *buffer, |
543 | u32 d_id, u8 ls_code, void *buffer, int buflen) | 900 | int buflen) |
544 | { | 901 | { |
545 | struct zfcp_adapter *adapter = fsf_req->adapter; | 902 | struct zfcp_adapter *adapter = fsf_req->adapter; |
546 | struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf; | 903 | struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf; |
547 | struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; | ||
548 | unsigned long flags; | 904 | unsigned long flags; |
549 | int offset = 0; | ||
550 | 905 | ||
551 | spin_lock_irqsave(&adapter->san_dbf_lock, flags); | 906 | spin_lock_irqsave(&adapter->san_dbf_lock, flags); |
552 | do { | 907 | memset(rec, 0, sizeof(*rec)); |
553 | memset(rec, 0, sizeof(struct zfcp_san_dbf_record)); | 908 | strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); |
554 | if (offset == 0) { | 909 | rec->fsf_reqid = (unsigned long)fsf_req; |
555 | strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); | 910 | rec->fsf_seqno = fsf_req->seq_no; |
556 | rec->fsf_reqid = (unsigned long)fsf_req; | 911 | rec->s_id = s_id; |
557 | rec->fsf_seqno = fsf_req->seq_no; | 912 | rec->d_id = d_id; |
558 | rec->s_id = s_id; | 913 | rec->u.els.ls_code = ls_code; |
559 | rec->d_id = d_id; | 914 | debug_event(adapter->san_dbf, level, rec, sizeof(*rec)); |
560 | rec->type.els.ls_code = ls_code; | 915 | zfcp_dbf_hexdump(adapter->san_dbf, rec, sizeof(*rec), level, |
561 | buflen = min(buflen, ZFCP_DBF_ELS_MAX_PAYLOAD); | 916 | buffer, min(buflen, ZFCP_DBF_ELS_MAX_PAYLOAD)); |
562 | rec->type.els.payload_size = buflen; | ||
563 | memcpy(rec->type.els.payload, | ||
564 | buffer, min(buflen, ZFCP_DBF_ELS_PAYLOAD)); | ||
565 | offset += min(buflen, ZFCP_DBF_ELS_PAYLOAD); | ||
566 | } else { | ||
567 | strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); | ||
568 | dump->total_size = buflen; | ||
569 | dump->offset = offset; | ||
570 | dump->size = min(buflen - offset, | ||
571 | (int)sizeof(struct zfcp_san_dbf_record) | ||
572 | - (int)sizeof(struct zfcp_dbf_dump)); | ||
573 | memcpy(dump->data, buffer + offset, dump->size); | ||
574 | offset += dump->size; | ||
575 | } | ||
576 | debug_event(adapter->san_dbf, level, | ||
577 | rec, sizeof(struct zfcp_san_dbf_record)); | ||
578 | } while (offset < buflen); | ||
579 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); | 917 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); |
580 | } | 918 | } |
581 | 919 | ||
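The rewritten zfcp_san_dbf_event_els() no longer splits long payloads into follow-on "dump" records by hand; that work is delegated to zfcp_dbf_hexdump(), which is defined outside this hunk. A userspace sketch of the chunking idea the removed do/while loop implemented and the helper presumably still performs; chunk size and output format are assumptions:

    #include <stdio.h>
    #include <string.h>

    #define CHUNK 16  /* stand-in for the per-record payload capacity */

    /* Emit a long buffer as a series of fixed-size records, tracking the
     * running offset the way the removed do/while loop did. */
    static void dump_chunks(const unsigned char *buf, size_t len)
    {
        size_t offset = 0, i;

        while (offset < len) {
            size_t size = len - offset < CHUNK ? len - offset : CHUNK;

            printf("dump offset=%zu size=%zu:", offset, size);
            for (i = 0; i < size; i++)
                printf(" %02x", buf[offset + i]);
            printf("\n");
            offset += size;
        }
    }

    int main(void)
    {
        unsigned char payload[40];

        memset(payload, 0xab, sizeof(payload));
        dump_chunks(payload, sizeof(payload));
        return 0;
    }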
920 | /** | ||
921 | * zfcp_san_dbf_event_els_request - trace event for issued ELS | ||
922 | * @fsf_req: request containing issued ELS | ||
923 | */ | ||
582 | void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) | 924 | void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) |
583 | { | 925 | { |
584 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; | 926 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; |
585 | 927 | ||
586 | _zfcp_san_dbf_event_common_els("oels", 2, fsf_req, | 928 | zfcp_san_dbf_event_els("oels", 2, fsf_req, |
587 | fc_host_port_id(els->adapter->scsi_host), | 929 | fc_host_port_id(els->adapter->scsi_host), |
588 | els->d_id, | 930 | els->d_id, *(u8 *) zfcp_sg_to_address(els->req), |
589 | *(u8 *) zfcp_sg_to_address(els->req), | 931 | zfcp_sg_to_address(els->req), els->req->length); |
590 | zfcp_sg_to_address(els->req), | ||
591 | els->req->length); | ||
592 | } | 932 | } |
593 | 933 | ||
934 | /** | ||
935 | * zfcp_san_dbf_event_els_response - trace event for completed ELS | ||
936 | * @fsf_req: request containing ELS response | ||
937 | */ | ||
594 | void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) | 938 | void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) |
595 | { | 939 | { |
596 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; | 940 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; |
597 | 941 | ||
598 | _zfcp_san_dbf_event_common_els("rels", 2, fsf_req, els->d_id, | 942 | zfcp_san_dbf_event_els("rels", 2, fsf_req, els->d_id, |
599 | fc_host_port_id(els->adapter->scsi_host), | 943 | fc_host_port_id(els->adapter->scsi_host), |
600 | *(u8 *) zfcp_sg_to_address(els->req), | 944 | *(u8 *)zfcp_sg_to_address(els->req), |
601 | zfcp_sg_to_address(els->resp), | 945 | zfcp_sg_to_address(els->resp), |
602 | els->resp->length); | 946 | els->resp->length); |
603 | } | 947 | } |
604 | 948 | ||
949 | /** | ||
950 | * zfcp_san_dbf_event_incoming_els - trace event for incoming ELS | ||
951 | * @fsf_req: request containing unsolicited status buffer with incoming ELS | ||
952 | */ | ||
605 | void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) | 953 | void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) |
606 | { | 954 | { |
607 | struct zfcp_adapter *adapter = fsf_req->adapter; | 955 | struct zfcp_adapter *adapter = fsf_req->adapter; |
608 | struct fsf_status_read_buffer *status_buffer = | 956 | struct fsf_status_read_buffer *buf = |
609 | (struct fsf_status_read_buffer *)fsf_req->data; | 957 | (struct fsf_status_read_buffer *)fsf_req->data; |
610 | int length = (int)status_buffer->length - | 958 | int length = (int)buf->length - |
611 | (int)((void *)&status_buffer->payload - (void *)status_buffer); | 959 | (int)((void *)&buf->payload - (void *)buf); |
612 | 960 | ||
613 | _zfcp_san_dbf_event_common_els("iels", 1, fsf_req, status_buffer->d_id, | 961 | zfcp_san_dbf_event_els("iels", 1, fsf_req, buf->d_id, |
614 | fc_host_port_id(adapter->scsi_host), | 962 | fc_host_port_id(adapter->scsi_host), |
615 | *(u8 *) status_buffer->payload, | 963 | *(u8 *)buf->payload, (void *)buf->payload, |
616 | (void *)status_buffer->payload, length); | 964 | length); |
617 | } | 965 | } |
618 | 966 | ||
619 | static int | 967 | static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view, |
620 | zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view, | 968 | char *out_buf, const char *in_buf) |
621 | char *out_buf, const char *in_buf) | ||
622 | { | 969 | { |
623 | struct zfcp_san_dbf_record *rec = (struct zfcp_san_dbf_record *)in_buf; | 970 | struct zfcp_san_dbf_record *r = (struct zfcp_san_dbf_record *)in_buf; |
624 | char *buffer = NULL; | 971 | char *buffer = NULL; |
625 | int buflen = 0, total = 0; | 972 | int buflen = 0, total = 0; |
626 | int len = 0; | 973 | char *p = out_buf; |
627 | 974 | ||
628 | if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) | 975 | if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) |
629 | return 0; | 976 | return 0; |
630 | 977 | ||
631 | len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag); | 978 | zfcp_dbf_tag(&p, "tag", r->tag); |
632 | len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx", | 979 | zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); |
633 | rec->fsf_reqid); | 980 | zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); |
634 | len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x", | 981 | zfcp_dbf_out(&p, "s_id", "0x%06x", r->s_id); |
635 | rec->fsf_seqno); | 982 | zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id); |
636 | len += zfcp_dbf_view(out_buf + len, "s_id", "0x%06x", rec->s_id); | 983 | |
637 | len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x", rec->d_id); | 984 | if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) { |
638 | 985 | struct zfcp_san_dbf_record_ct_request *ct = &r->u.ct_req; | |
639 | if (strncmp(rec->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) { | 986 | zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code); |
640 | len += zfcp_dbf_view(out_buf + len, "cmd_req_code", "0x%04x", | 987 | zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); |
641 | rec->type.ct.type.request.cmd_req_code); | 988 | zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type); |
642 | len += zfcp_dbf_view(out_buf + len, "revision", "0x%02x", | 989 | zfcp_dbf_out(&p, "gs_subtype", "0x%02x", ct->gs_subtype); |
643 | rec->type.ct.type.request.revision); | 990 | zfcp_dbf_out(&p, "options", "0x%02x", ct->options); |
644 | len += zfcp_dbf_view(out_buf + len, "gs_type", "0x%02x", | 991 | zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size); |
645 | rec->type.ct.type.request.gs_type); | 992 | total = ct->len; |
646 | len += zfcp_dbf_view(out_buf + len, "gs_subtype", "0x%02x", | 993 | buffer = ct->payload; |
647 | rec->type.ct.type.request.gs_subtype); | ||
648 | len += zfcp_dbf_view(out_buf + len, "options", "0x%02x", | ||
649 | rec->type.ct.type.request.options); | ||
650 | len += zfcp_dbf_view(out_buf + len, "max_res_size", "0x%04x", | ||
651 | rec->type.ct.type.request.max_res_size); | ||
652 | total = rec->type.ct.payload_size; | ||
653 | buffer = rec->type.ct.payload; | ||
654 | buflen = min(total, ZFCP_DBF_CT_PAYLOAD); | 994 | buflen = min(total, ZFCP_DBF_CT_PAYLOAD); |
655 | } else if (strncmp(rec->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) { | 995 | } else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) { |
656 | len += zfcp_dbf_view(out_buf + len, "cmd_rsp_code", "0x%04x", | 996 | struct zfcp_san_dbf_record_ct_response *ct = &r->u.ct_resp; |
657 | rec->type.ct.type.response.cmd_rsp_code); | 997 | zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code); |
658 | len += zfcp_dbf_view(out_buf + len, "revision", "0x%02x", | 998 | zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); |
659 | rec->type.ct.type.response.revision); | 999 | zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code); |
660 | len += zfcp_dbf_view(out_buf + len, "reason_code", "0x%02x", | 1000 | zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl); |
661 | rec->type.ct.type.response.reason_code); | 1001 | zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique); |
662 | len += | 1002 | total = ct->len; |
663 | zfcp_dbf_view(out_buf + len, "reason_code_expl", "0x%02x", | 1003 | buffer = ct->payload; |
664 | rec->type.ct.type.response.reason_code_expl); | ||
665 | len += | ||
666 | zfcp_dbf_view(out_buf + len, "vendor_unique", "0x%02x", | ||
667 | rec->type.ct.type.response.vendor_unique); | ||
668 | total = rec->type.ct.payload_size; | ||
669 | buffer = rec->type.ct.payload; | ||
670 | buflen = min(total, ZFCP_DBF_CT_PAYLOAD); | 1004 | buflen = min(total, ZFCP_DBF_CT_PAYLOAD); |
671 | } else if (strncmp(rec->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 || | 1005 | } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 || |
672 | strncmp(rec->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 || | 1006 | strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 || |
673 | strncmp(rec->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) { | 1007 | strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) { |
674 | len += zfcp_dbf_view(out_buf + len, "ls_code", "0x%02x", | 1008 | struct zfcp_san_dbf_record_els *els = &r->u.els; |
675 | rec->type.els.ls_code); | 1009 | zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code); |
676 | total = rec->type.els.payload_size; | 1010 | total = els->len; |
677 | buffer = rec->type.els.payload; | 1011 | buffer = els->payload; |
678 | buflen = min(total, ZFCP_DBF_ELS_PAYLOAD); | 1012 | buflen = min(total, ZFCP_DBF_ELS_PAYLOAD); |
679 | } | 1013 | } |
680 | 1014 | ||
681 | len += zfcp_dbf_view_dump(out_buf + len, "payload", | 1015 | zfcp_dbf_outd(&p, "payload", buffer, buflen, 0, total); |
682 | buffer, buflen, 0, total); | ||
683 | |||
684 | if (buflen == total) | 1016 | if (buflen == total) |
685 | len += sprintf(out_buf + len, "\n"); | 1017 | p += sprintf(p, "\n"); |
686 | 1018 | ||
687 | return len; | 1019 | return p - out_buf; |
688 | } | 1020 | } |
689 | 1021 | ||
690 | static struct debug_view zfcp_san_dbf_view = { | 1022 | static struct debug_view zfcp_san_dbf_view = { |
@@ -696,12 +1028,11 @@ static struct debug_view zfcp_san_dbf_view = { | |||
696 | NULL | 1028 | NULL |
697 | }; | 1029 | }; |
698 | 1030 | ||
699 | static void | 1031 | static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, |
700 | _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, | 1032 | struct zfcp_adapter *adapter, |
701 | struct zfcp_adapter *adapter, | 1033 | struct scsi_cmnd *scsi_cmnd, |
702 | struct scsi_cmnd *scsi_cmnd, | 1034 | struct zfcp_fsf_req *fsf_req, |
703 | struct zfcp_fsf_req *fsf_req, | 1035 | unsigned long old_req_id) |
704 | unsigned long old_req_id) | ||
705 | { | 1036 | { |
706 | struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; | 1037 | struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; |
707 | struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; | 1038 | struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; |
@@ -712,7 +1043,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, | |||
712 | 1043 | ||
713 | spin_lock_irqsave(&adapter->scsi_dbf_lock, flags); | 1044 | spin_lock_irqsave(&adapter->scsi_dbf_lock, flags); |
714 | do { | 1045 | do { |
715 | memset(rec, 0, sizeof(struct zfcp_scsi_dbf_record)); | 1046 | memset(rec, 0, sizeof(*rec)); |
716 | if (offset == 0) { | 1047 | if (offset == 0) { |
717 | strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); | 1048 | strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); |
718 | strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); | 1049 | strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); |
@@ -738,20 +1069,16 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, | |||
738 | fcp_sns_info = | 1069 | fcp_sns_info = |
739 | zfcp_get_fcp_sns_info_ptr(fcp_rsp); | 1070 | zfcp_get_fcp_sns_info_ptr(fcp_rsp); |
740 | 1071 | ||
741 | rec->type.fcp.rsp_validity = | 1072 | rec->rsp_validity = fcp_rsp->validity.value; |
742 | fcp_rsp->validity.value; | 1073 | rec->rsp_scsi_status = fcp_rsp->scsi_status; |
743 | rec->type.fcp.rsp_scsi_status = | 1074 | rec->rsp_resid = fcp_rsp->fcp_resid; |
744 | fcp_rsp->scsi_status; | ||
745 | rec->type.fcp.rsp_resid = fcp_rsp->fcp_resid; | ||
746 | if (fcp_rsp->validity.bits.fcp_rsp_len_valid) | 1075 | if (fcp_rsp->validity.bits.fcp_rsp_len_valid) |
747 | rec->type.fcp.rsp_code = | 1076 | rec->rsp_code = *(fcp_rsp_info + 3); |
748 | *(fcp_rsp_info + 3); | ||
749 | if (fcp_rsp->validity.bits.fcp_sns_len_valid) { | 1077 | if (fcp_rsp->validity.bits.fcp_sns_len_valid) { |
750 | buflen = min((int)fcp_rsp->fcp_sns_len, | 1078 | buflen = min((int)fcp_rsp->fcp_sns_len, |
751 | ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO); | 1079 | ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO); |
752 | rec->type.fcp.sns_info_len = buflen; | 1080 | rec->sns_info_len = buflen; |
753 | memcpy(rec->type.fcp.sns_info, | 1081 | memcpy(rec->sns_info, fcp_sns_info, |
754 | fcp_sns_info, | ||
755 | min(buflen, | 1082 | min(buflen, |
756 | ZFCP_DBF_SCSI_FCP_SNS_INFO)); | 1083 | ZFCP_DBF_SCSI_FCP_SNS_INFO)); |
757 | offset += min(buflen, | 1084 | offset += min(buflen, |
@@ -762,7 +1089,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, | |||
762 | rec->fsf_seqno = fsf_req->seq_no; | 1089 | rec->fsf_seqno = fsf_req->seq_no; |
763 | rec->fsf_issued = fsf_req->issued; | 1090 | rec->fsf_issued = fsf_req->issued; |
764 | } | 1091 | } |
765 | rec->type.old_fsf_reqid = old_req_id; | 1092 | rec->old_fsf_reqid = old_req_id; |
766 | } else { | 1093 | } else { |
767 | strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); | 1094 | strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); |
768 | dump->total_size = buflen; | 1095 | dump->total_size = buflen; |
@@ -774,108 +1101,101 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, | |||
774 | memcpy(dump->data, fcp_sns_info + offset, dump->size); | 1101 | memcpy(dump->data, fcp_sns_info + offset, dump->size); |
775 | offset += dump->size; | 1102 | offset += dump->size; |
776 | } | 1103 | } |
777 | debug_event(adapter->scsi_dbf, level, | 1104 | debug_event(adapter->scsi_dbf, level, rec, sizeof(*rec)); |
778 | rec, sizeof(struct zfcp_scsi_dbf_record)); | ||
779 | } while (offset < buflen); | 1105 | } while (offset < buflen); |
780 | spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); | 1106 | spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); |
781 | } | 1107 | } |
782 | 1108 | ||
783 | void | 1109 | /** |
784 | zfcp_scsi_dbf_event_result(const char *tag, int level, | 1110 | * zfcp_scsi_dbf_event_result - trace event for SCSI command completion |
785 | struct zfcp_adapter *adapter, | 1111 | * @tag: tag indicating success or failure of SCSI command |
786 | struct scsi_cmnd *scsi_cmnd, | 1112 | * @level: trace level applicable for this event |
787 | struct zfcp_fsf_req *fsf_req) | 1113 | * @adapter: adapter that has been used to issue the SCSI command |
1114 | * @scsi_cmnd: SCSI command pointer | ||
1115 | * @fsf_req: request used to issue SCSI command (might be NULL) | ||
1116 | */ | ||
1117 | void zfcp_scsi_dbf_event_result(const char *tag, int level, | ||
1118 | struct zfcp_adapter *adapter, | ||
1119 | struct scsi_cmnd *scsi_cmnd, | ||
1120 | struct zfcp_fsf_req *fsf_req) | ||
788 | { | 1121 | { |
789 | _zfcp_scsi_dbf_event_common("rslt", tag, level, | 1122 | zfcp_scsi_dbf_event("rslt", tag, level, adapter, scsi_cmnd, fsf_req, 0); |
790 | adapter, scsi_cmnd, fsf_req, 0); | ||
791 | } | 1123 | } |
792 | 1124 | ||
793 | void | 1125 | /** |
794 | zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, | 1126 | * zfcp_scsi_dbf_event_abort - trace event for SCSI command abort |
795 | struct scsi_cmnd *scsi_cmnd, | 1127 | * @tag: tag indicating success or failure of abort operation |
796 | struct zfcp_fsf_req *new_fsf_req, | 1128 | * @adapter: adapter that has been used to issue SCSI command to be aborted |
797 | unsigned long old_req_id) | 1129 | * @scsi_cmnd: SCSI command to be aborted |
1130 | * @new_fsf_req: request containing abort (might be NULL) | ||
1131 | * @old_req_id: identifier of request containing SCSI command to be aborted | ||
1132 | */ | ||
1133 | void zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, | ||
1134 | struct scsi_cmnd *scsi_cmnd, | ||
1135 | struct zfcp_fsf_req *new_fsf_req, | ||
1136 | unsigned long old_req_id) | ||
798 | { | 1137 | { |
799 | _zfcp_scsi_dbf_event_common("abrt", tag, 1, | 1138 | zfcp_scsi_dbf_event("abrt", tag, 1, adapter, scsi_cmnd, new_fsf_req, |
800 | adapter, scsi_cmnd, new_fsf_req, old_req_id); | 1139 | old_req_id); |
801 | } | 1140 | } |
802 | 1141 | ||
803 | void | 1142 | /** |
804 | zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, | 1143 | * zfcp_scsi_dbf_event_devreset - trace event for Logical Unit or Target Reset |
805 | struct scsi_cmnd *scsi_cmnd) | 1144 | * @tag: tag indicating success or failure of reset operation |
1145 | * @flag: indicates type of reset (Target Reset, Logical Unit Reset) | ||
1146 | * @unit: unit that needs reset | ||
1147 | * @scsi_cmnd: SCSI command which caused this error recovery | ||
1148 | */ | ||
1149 | void zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, | ||
1150 | struct zfcp_unit *unit, | ||
1151 | struct scsi_cmnd *scsi_cmnd) | ||
806 | { | 1152 | { |
807 | struct zfcp_adapter *adapter = unit->port->adapter; | 1153 | zfcp_scsi_dbf_event(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1, |
808 | 1154 | unit->port->adapter, scsi_cmnd, NULL, 0); | |
809 | _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst", | ||
810 | tag, 1, adapter, scsi_cmnd, NULL, 0); | ||
811 | } | 1155 | } |
812 | 1156 | ||
813 | static int | 1157 | static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view, |
814 | zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view, | 1158 | char *out_buf, const char *in_buf) |
815 | char *out_buf, const char *in_buf) | ||
816 | { | 1159 | { |
817 | struct zfcp_scsi_dbf_record *rec = | 1160 | struct zfcp_scsi_dbf_record *r = (struct zfcp_scsi_dbf_record *)in_buf; |
818 | (struct zfcp_scsi_dbf_record *)in_buf; | 1161 | struct timespec t; |
819 | int len = 0; | 1162 | char *p = out_buf; |
820 | 1163 | ||
821 | if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) | 1164 | if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) |
822 | return 0; | 1165 | return 0; |
823 | 1166 | ||
824 | len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag); | 1167 | zfcp_dbf_tag(&p, "tag", r->tag); |
825 | len += zfcp_dbf_tag(out_buf + len, "tag2", rec->tag2); | 1168 | zfcp_dbf_tag(&p, "tag2", r->tag2); |
826 | len += zfcp_dbf_view(out_buf + len, "scsi_id", "0x%08x", rec->scsi_id); | 1169 | zfcp_dbf_out(&p, "scsi_id", "0x%08x", r->scsi_id); |
827 | len += zfcp_dbf_view(out_buf + len, "scsi_lun", "0x%08x", | 1170 | zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun); |
828 | rec->scsi_lun); | 1171 | zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result); |
829 | len += zfcp_dbf_view(out_buf + len, "scsi_result", "0x%08x", | 1172 | zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd); |
830 | rec->scsi_result); | 1173 | zfcp_dbf_out(&p, "scsi_serial", "0x%016Lx", r->scsi_serial); |
831 | len += zfcp_dbf_view(out_buf + len, "scsi_cmnd", "0x%0Lx", | 1174 | zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE, |
832 | rec->scsi_cmnd); | 1175 | 0, ZFCP_DBF_SCSI_OPCODE); |
833 | len += zfcp_dbf_view(out_buf + len, "scsi_serial", "0x%016Lx", | 1176 | zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries); |
834 | rec->scsi_serial); | 1177 | zfcp_dbf_out(&p, "scsi_allowed", "0x%02x", r->scsi_allowed); |
835 | len += zfcp_dbf_view_dump(out_buf + len, "scsi_opcode", | 1178 | if (strncmp(r->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) |
836 | rec->scsi_opcode, | 1179 | zfcp_dbf_out(&p, "old_fsf_reqid", "0x%0Lx", r->old_fsf_reqid); |
837 | ZFCP_DBF_SCSI_OPCODE, | 1180 | zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); |
838 | 0, ZFCP_DBF_SCSI_OPCODE); | 1181 | zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); |
839 | len += zfcp_dbf_view(out_buf + len, "scsi_retries", "0x%02x", | 1182 | zfcp_dbf_timestamp(r->fsf_issued, &t); |
840 | rec->scsi_retries); | 1183 | zfcp_dbf_out(&p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); |
841 | len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x", | 1184 | |
842 | rec->scsi_allowed); | 1185 | if (strncmp(r->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) { |
843 | if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) { | 1186 | zfcp_dbf_out(&p, "fcp_rsp_validity", "0x%02x", r->rsp_validity); |
844 | len += zfcp_dbf_view(out_buf + len, "old_fsf_reqid", "0x%0Lx", | 1187 | zfcp_dbf_out(&p, "fcp_rsp_scsi_status", "0x%02x", |
845 | rec->type.old_fsf_reqid); | 1188 | r->rsp_scsi_status); |
846 | } | 1189 | zfcp_dbf_out(&p, "fcp_rsp_resid", "0x%08x", r->rsp_resid); |
847 | len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx", | 1190 | zfcp_dbf_out(&p, "fcp_rsp_code", "0x%08x", r->rsp_code); |
848 | rec->fsf_reqid); | 1191 | zfcp_dbf_out(&p, "fcp_sns_info_len", "0x%08x", r->sns_info_len); |
849 | len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x", | 1192 | zfcp_dbf_outd(&p, "fcp_sns_info", r->sns_info, |
850 | rec->fsf_seqno); | 1193 | min((int)r->sns_info_len, |
851 | len += zfcp_dbf_stck(out_buf + len, "fsf_issued", rec->fsf_issued); | 1194 | ZFCP_DBF_SCSI_FCP_SNS_INFO), 0, |
852 | if (strncmp(rec->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) { | 1195 | r->sns_info_len); |
853 | len += | ||
854 | zfcp_dbf_view(out_buf + len, "fcp_rsp_validity", "0x%02x", | ||
855 | rec->type.fcp.rsp_validity); | ||
856 | len += | ||
857 | zfcp_dbf_view(out_buf + len, "fcp_rsp_scsi_status", | ||
858 | "0x%02x", rec->type.fcp.rsp_scsi_status); | ||
859 | len += | ||
860 | zfcp_dbf_view(out_buf + len, "fcp_rsp_resid", "0x%08x", | ||
861 | rec->type.fcp.rsp_resid); | ||
862 | len += | ||
863 | zfcp_dbf_view(out_buf + len, "fcp_rsp_code", "0x%08x", | ||
864 | rec->type.fcp.rsp_code); | ||
865 | len += | ||
866 | zfcp_dbf_view(out_buf + len, "fcp_sns_info_len", "0x%08x", | ||
867 | rec->type.fcp.sns_info_len); | ||
868 | len += | ||
869 | zfcp_dbf_view_dump(out_buf + len, "fcp_sns_info", | ||
870 | rec->type.fcp.sns_info, | ||
871 | min((int)rec->type.fcp.sns_info_len, | ||
872 | ZFCP_DBF_SCSI_FCP_SNS_INFO), 0, | ||
873 | rec->type.fcp.sns_info_len); | ||
874 | } | 1196 | } |
875 | 1197 | p += sprintf(p, "\n"); | |
876 | len += sprintf(out_buf + len, "\n"); | 1198 | return p - out_buf; |
877 | |||
878 | return len; | ||
879 | } | 1199 | } |
880 | 1200 | ||
881 | static struct debug_view zfcp_scsi_dbf_view = { | 1201 | static struct debug_view zfcp_scsi_dbf_view = { |
@@ -897,13 +1217,14 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter) | |||
897 | char dbf_name[DEBUG_MAX_NAME_LEN]; | 1217 | char dbf_name[DEBUG_MAX_NAME_LEN]; |
898 | 1218 | ||
899 | /* debug feature area which records recovery activity */ | 1219 | /* debug feature area which records recovery activity */ |
900 | sprintf(dbf_name, "zfcp_%s_erp", zfcp_get_busid_by_adapter(adapter)); | 1220 | sprintf(dbf_name, "zfcp_%s_rec", zfcp_get_busid_by_adapter(adapter)); |
901 | adapter->erp_dbf = debug_register(dbf_name, dbfsize, 2, | 1221 | adapter->rec_dbf = debug_register(dbf_name, dbfsize, 1, |
902 | sizeof(struct zfcp_erp_dbf_record)); | 1222 | sizeof(struct zfcp_rec_dbf_record)); |
903 | if (!adapter->erp_dbf) | 1223 | if (!adapter->rec_dbf) |
904 | goto failed; | 1224 | goto failed; |
905 | debug_register_view(adapter->erp_dbf, &debug_hex_ascii_view); | 1225 | debug_register_view(adapter->rec_dbf, &debug_hex_ascii_view); |
906 | debug_set_level(adapter->erp_dbf, 3); | 1226 | debug_register_view(adapter->rec_dbf, &zfcp_rec_dbf_view); |
1227 | debug_set_level(adapter->rec_dbf, 3); | ||
907 | 1228 | ||
908 | /* debug feature area which records HBA (FSF and QDIO) conditions */ | 1229 | /* debug feature area which records HBA (FSF and QDIO) conditions */ |
909 | sprintf(dbf_name, "zfcp_%s_hba", zfcp_get_busid_by_adapter(adapter)); | 1230 | sprintf(dbf_name, "zfcp_%s_hba", zfcp_get_busid_by_adapter(adapter)); |
@@ -952,11 +1273,11 @@ void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter) | |||
952 | debug_unregister(adapter->scsi_dbf); | 1273 | debug_unregister(adapter->scsi_dbf); |
953 | debug_unregister(adapter->san_dbf); | 1274 | debug_unregister(adapter->san_dbf); |
954 | debug_unregister(adapter->hba_dbf); | 1275 | debug_unregister(adapter->hba_dbf); |
955 | debug_unregister(adapter->erp_dbf); | 1276 | debug_unregister(adapter->rec_dbf); |
956 | adapter->scsi_dbf = NULL; | 1277 | adapter->scsi_dbf = NULL; |
957 | adapter->san_dbf = NULL; | 1278 | adapter->san_dbf = NULL; |
958 | adapter->hba_dbf = NULL; | 1279 | adapter->hba_dbf = NULL; |
959 | adapter->erp_dbf = NULL; | 1280 | adapter->rec_dbf = NULL; |
960 | } | 1281 | } |
961 | 1282 | ||
962 | #undef ZFCP_LOG_AREA | 1283 | #undef ZFCP_LOG_AREA |
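The hunk above replaces the old erp_dbf area with the new rec_dbf area and attaches both the hex/ASCII view and the new structured zfcp_rec_dbf_view to it. For readers unfamiliar with the s390 debug feature API, the following standalone sketch shows the same register/view/level/unregister pattern in isolation. It is not part of the patch; the my_dbf* names and the record layout are made up for illustration, while debug_register, debug_register_view, debug_set_level, debug_unregister and debug_hex_ascii_view are the real API used above.

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/debug.h>

struct my_dbf_record {                  /* hypothetical trace record */
	u8 tag[4];
	u32 data;
} __attribute__ ((packed));

static debug_info_t *my_dbf;

static int my_dbf_register(int pages)
{
	/* one area, record-sized entries, as done for rec_dbf above */
	my_dbf = debug_register("my_dbf", pages, 1,
				sizeof(struct my_dbf_record));
	if (!my_dbf)
		return -ENOMEM;
	debug_register_view(my_dbf, &debug_hex_ascii_view);
	debug_set_level(my_dbf, 3);     /* record events up to level 3 */
	return 0;
}

static void my_dbf_unregister(void)
{
	debug_unregister(my_dbf);
	my_dbf = NULL;
}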
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h new file mode 100644 index 000000000000..54c34e483457 --- /dev/null +++ b/drivers/s390/scsi/zfcp_dbf.h | |||
@@ -0,0 +1,228 @@ | |||
1 | /* | ||
2 | * This file is part of the zfcp device driver for | ||
3 | * FCP adapters for IBM System z9 and zSeries. | ||
4 | * | ||
5 | * Copyright IBM Corp. 2008, 2008 | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | */ | ||
21 | |||
22 | #ifndef ZFCP_DBF_H | ||
23 | #define ZFCP_DBF_H | ||
24 | |||
25 | #include "zfcp_fsf.h" | ||
26 | |||
27 | #define ZFCP_DBF_TAG_SIZE 4 | ||
28 | |||
29 | struct zfcp_dbf_dump { | ||
30 | u8 tag[ZFCP_DBF_TAG_SIZE]; | ||
31 | u32 total_size; /* size of total dump data */ | ||
32 | u32 offset; /* how much data has already been dumped */ | ||
33 | u32 size; /* how much data comes with this record */ | ||
34 | u8 data[]; /* dump data */ | ||
35 | } __attribute__ ((packed)); | ||
36 | |||
37 | struct zfcp_rec_dbf_record_thread { | ||
38 | u32 total; | ||
39 | u32 ready; | ||
40 | u32 running; | ||
41 | } __attribute__ ((packed)); | ||
42 | |||
43 | struct zfcp_rec_dbf_record_target { | ||
44 | u64 ref; | ||
45 | u32 status; | ||
46 | u32 d_id; | ||
47 | u64 wwpn; | ||
48 | u64 fcp_lun; | ||
49 | u32 erp_count; | ||
50 | } __attribute__ ((packed)); | ||
51 | |||
52 | struct zfcp_rec_dbf_record_trigger { | ||
53 | u8 want; | ||
54 | u8 need; | ||
55 | u32 as; | ||
56 | u32 ps; | ||
57 | u32 us; | ||
58 | u64 ref; | ||
59 | u64 action; | ||
60 | u64 wwpn; | ||
61 | u64 fcp_lun; | ||
62 | } __attribute__ ((packed)); | ||
63 | |||
64 | struct zfcp_rec_dbf_record_action { | ||
65 | u32 status; | ||
66 | u32 step; | ||
67 | u64 action; | ||
68 | u64 fsf_req; | ||
69 | } __attribute__ ((packed)); | ||
70 | |||
71 | struct zfcp_rec_dbf_record { | ||
72 | u8 id; | ||
73 | u8 id2; | ||
74 | union { | ||
75 | struct zfcp_rec_dbf_record_action action; | ||
76 | struct zfcp_rec_dbf_record_thread thread; | ||
77 | struct zfcp_rec_dbf_record_target target; | ||
78 | struct zfcp_rec_dbf_record_trigger trigger; | ||
79 | } u; | ||
80 | } __attribute__ ((packed)); | ||
81 | |||
82 | enum { | ||
83 | ZFCP_REC_DBF_ID_ACTION, | ||
84 | ZFCP_REC_DBF_ID_THREAD, | ||
85 | ZFCP_REC_DBF_ID_TARGET, | ||
86 | ZFCP_REC_DBF_ID_TRIGGER, | ||
87 | }; | ||
88 | |||
89 | struct zfcp_hba_dbf_record_response { | ||
90 | u32 fsf_command; | ||
91 | u64 fsf_reqid; | ||
92 | u32 fsf_seqno; | ||
93 | u64 fsf_issued; | ||
94 | u32 fsf_prot_status; | ||
95 | u32 fsf_status; | ||
96 | u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE]; | ||
97 | u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; | ||
98 | u32 fsf_req_status; | ||
99 | u8 sbal_first; | ||
100 | u8 sbal_curr; | ||
101 | u8 sbal_last; | ||
102 | u8 pool; | ||
103 | u64 erp_action; | ||
104 | union { | ||
105 | struct { | ||
106 | u64 cmnd; | ||
107 | u64 serial; | ||
108 | } fcp; | ||
109 | struct { | ||
110 | u64 wwpn; | ||
111 | u32 d_id; | ||
112 | u32 port_handle; | ||
113 | } port; | ||
114 | struct { | ||
115 | u64 wwpn; | ||
116 | u64 fcp_lun; | ||
117 | u32 port_handle; | ||
118 | u32 lun_handle; | ||
119 | } unit; | ||
120 | struct { | ||
121 | u32 d_id; | ||
122 | u8 ls_code; | ||
123 | } els; | ||
124 | } u; | ||
125 | } __attribute__ ((packed)); | ||
126 | |||
127 | struct zfcp_hba_dbf_record_status { | ||
128 | u8 failed; | ||
129 | u32 status_type; | ||
130 | u32 status_subtype; | ||
131 | struct fsf_queue_designator | ||
132 | queue_designator; | ||
133 | u32 payload_size; | ||
134 | #define ZFCP_DBF_UNSOL_PAYLOAD 80 | ||
135 | #define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL 32 | ||
136 | #define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD 56 | ||
137 | #define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT 2 * sizeof(u32) | ||
138 | u8 payload[ZFCP_DBF_UNSOL_PAYLOAD]; | ||
139 | } __attribute__ ((packed)); | ||
140 | |||
141 | struct zfcp_hba_dbf_record_qdio { | ||
142 | u32 status; | ||
143 | u32 qdio_error; | ||
144 | u32 siga_error; | ||
145 | u8 sbal_index; | ||
146 | u8 sbal_count; | ||
147 | } __attribute__ ((packed)); | ||
148 | |||
149 | struct zfcp_hba_dbf_record { | ||
150 | u8 tag[ZFCP_DBF_TAG_SIZE]; | ||
151 | u8 tag2[ZFCP_DBF_TAG_SIZE]; | ||
152 | union { | ||
153 | struct zfcp_hba_dbf_record_response response; | ||
154 | struct zfcp_hba_dbf_record_status status; | ||
155 | struct zfcp_hba_dbf_record_qdio qdio; | ||
156 | } u; | ||
157 | } __attribute__ ((packed)); | ||
158 | |||
159 | struct zfcp_san_dbf_record_ct_request { | ||
160 | u16 cmd_req_code; | ||
161 | u8 revision; | ||
162 | u8 gs_type; | ||
163 | u8 gs_subtype; | ||
164 | u8 options; | ||
165 | u16 max_res_size; | ||
166 | u32 len; | ||
167 | #define ZFCP_DBF_CT_PAYLOAD 24 | ||
168 | u8 payload[ZFCP_DBF_CT_PAYLOAD]; | ||
169 | } __attribute__ ((packed)); | ||
170 | |||
171 | struct zfcp_san_dbf_record_ct_response { | ||
172 | u16 cmd_rsp_code; | ||
173 | u8 revision; | ||
174 | u8 reason_code; | ||
175 | u8 expl; | ||
176 | u8 vendor_unique; | ||
177 | u32 len; | ||
178 | u8 payload[ZFCP_DBF_CT_PAYLOAD]; | ||
179 | } __attribute__ ((packed)); | ||
180 | |||
181 | struct zfcp_san_dbf_record_els { | ||
182 | u8 ls_code; | ||
183 | u32 len; | ||
184 | #define ZFCP_DBF_ELS_PAYLOAD 32 | ||
185 | #define ZFCP_DBF_ELS_MAX_PAYLOAD 1024 | ||
186 | u8 payload[ZFCP_DBF_ELS_PAYLOAD]; | ||
187 | } __attribute__ ((packed)); | ||
188 | |||
189 | struct zfcp_san_dbf_record { | ||
190 | u8 tag[ZFCP_DBF_TAG_SIZE]; | ||
191 | u64 fsf_reqid; | ||
192 | u32 fsf_seqno; | ||
193 | u32 s_id; | ||
194 | u32 d_id; | ||
195 | union { | ||
196 | struct zfcp_san_dbf_record_ct_request ct_req; | ||
197 | struct zfcp_san_dbf_record_ct_response ct_resp; | ||
198 | struct zfcp_san_dbf_record_els els; | ||
199 | } u; | ||
200 | } __attribute__ ((packed)); | ||
201 | |||
202 | struct zfcp_scsi_dbf_record { | ||
203 | u8 tag[ZFCP_DBF_TAG_SIZE]; | ||
204 | u8 tag2[ZFCP_DBF_TAG_SIZE]; | ||
205 | u32 scsi_id; | ||
206 | u32 scsi_lun; | ||
207 | u32 scsi_result; | ||
208 | u64 scsi_cmnd; | ||
209 | u64 scsi_serial; | ||
210 | #define ZFCP_DBF_SCSI_OPCODE 16 | ||
211 | u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE]; | ||
212 | u8 scsi_retries; | ||
213 | u8 scsi_allowed; | ||
214 | u64 fsf_reqid; | ||
215 | u32 fsf_seqno; | ||
216 | u64 fsf_issued; | ||
217 | u64 old_fsf_reqid; | ||
218 | u8 rsp_validity; | ||
219 | u8 rsp_scsi_status; | ||
220 | u32 rsp_resid; | ||
221 | u8 rsp_code; | ||
222 | #define ZFCP_DBF_SCSI_FCP_SNS_INFO 16 | ||
223 | #define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO 256 | ||
224 | u32 sns_info_len; | ||
225 | u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO]; | ||
226 | } __attribute__ ((packed)); | ||
227 | |||
228 | #endif /* ZFCP_DBF_H */ | ||
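The records above are fixed-size, packed snapshots that the driver copies into the per-adapter trace areas. Below is a minimal sketch of how such a record might be filled and logged; it is illustrative only, the function name is hypothetical, and it assumes the adapter's scsi_dbf area, scsi_dbf_lock and scsi_dbf_buf declared in zfcp_def.h. The real trace routines in zfcp_dbf.c capture far more state, as the formatting function shown earlier suggests.

/* sketch: would live in zfcp_dbf.c, next to the real trace routines */
static void example_scsi_dbf_event(struct zfcp_adapter *adapter,
				   const char *tag, u32 scsi_id, u32 scsi_lun)
{
	struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
	unsigned long flags;

	spin_lock_irqsave(&adapter->scsi_dbf_lock, flags);
	memset(rec, 0, sizeof(*rec));
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
	rec->scsi_id = scsi_id;
	rec->scsi_lun = scsi_lun;
	/* level 1 events are recorded even at the default trace level */
	debug_event(adapter->scsi_dbf, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags);
}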
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 45a7cd98c140..bda8c77b22da 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <asm/qdio.h> | 47 | #include <asm/qdio.h> |
48 | #include <asm/debug.h> | 48 | #include <asm/debug.h> |
49 | #include <asm/ebcdic.h> | 49 | #include <asm/ebcdic.h> |
50 | #include "zfcp_dbf.h" | ||
50 | #include "zfcp_fsf.h" | 51 | #include "zfcp_fsf.h" |
51 | 52 | ||
52 | 53 | ||
@@ -262,167 +263,6 @@ struct fcp_logo { | |||
262 | } __attribute__((packed)); | 263 | } __attribute__((packed)); |
263 | 264 | ||
264 | /* | 265 | /* |
265 | * DBF stuff | ||
266 | */ | ||
267 | #define ZFCP_DBF_TAG_SIZE 4 | ||
268 | |||
269 | struct zfcp_dbf_dump { | ||
270 | u8 tag[ZFCP_DBF_TAG_SIZE]; | ||
271 | u32 total_size; /* size of total dump data */ | ||
272 | u32 offset; /* how much data has being already dumped */ | ||
273 | u32 size; /* how much data comes with this record */ | ||
274 | u8 data[]; /* dump data */ | ||
275 | } __attribute__ ((packed)); | ||
276 | |||
277 | /* FIXME: to be inflated when reworking the erp dbf */ | ||
278 | struct zfcp_erp_dbf_record { | ||
279 | u8 dummy[16]; | ||
280 | } __attribute__ ((packed)); | ||
281 | |||
282 | struct zfcp_hba_dbf_record_response { | ||
283 | u32 fsf_command; | ||
284 | u64 fsf_reqid; | ||
285 | u32 fsf_seqno; | ||
286 | u64 fsf_issued; | ||
287 | u32 fsf_prot_status; | ||
288 | u32 fsf_status; | ||
289 | u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE]; | ||
290 | u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; | ||
291 | u32 fsf_req_status; | ||
292 | u8 sbal_first; | ||
293 | u8 sbal_curr; | ||
294 | u8 sbal_last; | ||
295 | u8 pool; | ||
296 | u64 erp_action; | ||
297 | union { | ||
298 | struct { | ||
299 | u64 scsi_cmnd; | ||
300 | u64 scsi_serial; | ||
301 | } send_fcp; | ||
302 | struct { | ||
303 | u64 wwpn; | ||
304 | u32 d_id; | ||
305 | u32 port_handle; | ||
306 | } port; | ||
307 | struct { | ||
308 | u64 wwpn; | ||
309 | u64 fcp_lun; | ||
310 | u32 port_handle; | ||
311 | u32 lun_handle; | ||
312 | } unit; | ||
313 | struct { | ||
314 | u32 d_id; | ||
315 | u8 ls_code; | ||
316 | } send_els; | ||
317 | } data; | ||
318 | } __attribute__ ((packed)); | ||
319 | |||
320 | struct zfcp_hba_dbf_record_status { | ||
321 | u8 failed; | ||
322 | u32 status_type; | ||
323 | u32 status_subtype; | ||
324 | struct fsf_queue_designator | ||
325 | queue_designator; | ||
326 | u32 payload_size; | ||
327 | #define ZFCP_DBF_UNSOL_PAYLOAD 80 | ||
328 | #define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL 32 | ||
329 | #define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD 56 | ||
330 | #define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT 2 * sizeof(u32) | ||
331 | u8 payload[ZFCP_DBF_UNSOL_PAYLOAD]; | ||
332 | } __attribute__ ((packed)); | ||
333 | |||
334 | struct zfcp_hba_dbf_record_qdio { | ||
335 | u32 status; | ||
336 | u32 qdio_error; | ||
337 | u32 siga_error; | ||
338 | u8 sbal_index; | ||
339 | u8 sbal_count; | ||
340 | } __attribute__ ((packed)); | ||
341 | |||
342 | struct zfcp_hba_dbf_record { | ||
343 | u8 tag[ZFCP_DBF_TAG_SIZE]; | ||
344 | u8 tag2[ZFCP_DBF_TAG_SIZE]; | ||
345 | union { | ||
346 | struct zfcp_hba_dbf_record_response response; | ||
347 | struct zfcp_hba_dbf_record_status status; | ||
348 | struct zfcp_hba_dbf_record_qdio qdio; | ||
349 | } type; | ||
350 | } __attribute__ ((packed)); | ||
351 | |||
352 | struct zfcp_san_dbf_record_ct { | ||
353 | union { | ||
354 | struct { | ||
355 | u16 cmd_req_code; | ||
356 | u8 revision; | ||
357 | u8 gs_type; | ||
358 | u8 gs_subtype; | ||
359 | u8 options; | ||
360 | u16 max_res_size; | ||
361 | } request; | ||
362 | struct { | ||
363 | u16 cmd_rsp_code; | ||
364 | u8 revision; | ||
365 | u8 reason_code; | ||
366 | u8 reason_code_expl; | ||
367 | u8 vendor_unique; | ||
368 | } response; | ||
369 | } type; | ||
370 | u32 payload_size; | ||
371 | #define ZFCP_DBF_CT_PAYLOAD 24 | ||
372 | u8 payload[ZFCP_DBF_CT_PAYLOAD]; | ||
373 | } __attribute__ ((packed)); | ||
374 | |||
375 | struct zfcp_san_dbf_record_els { | ||
376 | u8 ls_code; | ||
377 | u32 payload_size; | ||
378 | #define ZFCP_DBF_ELS_PAYLOAD 32 | ||
379 | #define ZFCP_DBF_ELS_MAX_PAYLOAD 1024 | ||
380 | u8 payload[ZFCP_DBF_ELS_PAYLOAD]; | ||
381 | } __attribute__ ((packed)); | ||
382 | |||
383 | struct zfcp_san_dbf_record { | ||
384 | u8 tag[ZFCP_DBF_TAG_SIZE]; | ||
385 | u64 fsf_reqid; | ||
386 | u32 fsf_seqno; | ||
387 | u32 s_id; | ||
388 | u32 d_id; | ||
389 | union { | ||
390 | struct zfcp_san_dbf_record_ct ct; | ||
391 | struct zfcp_san_dbf_record_els els; | ||
392 | } type; | ||
393 | } __attribute__ ((packed)); | ||
394 | |||
395 | struct zfcp_scsi_dbf_record { | ||
396 | u8 tag[ZFCP_DBF_TAG_SIZE]; | ||
397 | u8 tag2[ZFCP_DBF_TAG_SIZE]; | ||
398 | u32 scsi_id; | ||
399 | u32 scsi_lun; | ||
400 | u32 scsi_result; | ||
401 | u64 scsi_cmnd; | ||
402 | u64 scsi_serial; | ||
403 | #define ZFCP_DBF_SCSI_OPCODE 16 | ||
404 | u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE]; | ||
405 | u8 scsi_retries; | ||
406 | u8 scsi_allowed; | ||
407 | u64 fsf_reqid; | ||
408 | u32 fsf_seqno; | ||
409 | u64 fsf_issued; | ||
410 | union { | ||
411 | u64 old_fsf_reqid; | ||
412 | struct { | ||
413 | u8 rsp_validity; | ||
414 | u8 rsp_scsi_status; | ||
415 | u32 rsp_resid; | ||
416 | u8 rsp_code; | ||
417 | #define ZFCP_DBF_SCSI_FCP_SNS_INFO 16 | ||
418 | #define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO 256 | ||
419 | u32 sns_info_len; | ||
420 | u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO]; | ||
421 | } fcp; | ||
422 | } type; | ||
423 | } __attribute__ ((packed)); | ||
424 | |||
425 | /* | ||
426 | * FC-FS stuff | 266 | * FC-FS stuff |
427 | */ | 267 | */ |
428 | #define R_A_TOV 10 /* seconds */ | 268 | #define R_A_TOV 10 /* seconds */ |
@@ -634,7 +474,6 @@ do { \ | |||
634 | ZFCP_STATUS_PORT_NO_SCSI_ID) | 474 | ZFCP_STATUS_PORT_NO_SCSI_ID) |
635 | 475 | ||
636 | /* logical unit status */ | 476 | /* logical unit status */ |
637 | #define ZFCP_STATUS_UNIT_NOTSUPPUNITRESET 0x00000001 | ||
638 | #define ZFCP_STATUS_UNIT_TEMPORARY 0x00000002 | 477 | #define ZFCP_STATUS_UNIT_TEMPORARY 0x00000002 |
639 | #define ZFCP_STATUS_UNIT_SHARED 0x00000004 | 478 | #define ZFCP_STATUS_UNIT_SHARED 0x00000004 |
640 | #define ZFCP_STATUS_UNIT_READONLY 0x00000008 | 479 | #define ZFCP_STATUS_UNIT_READONLY 0x00000008 |
@@ -917,15 +756,15 @@ struct zfcp_adapter { | |||
917 | u32 erp_low_mem_count; /* nr of erp actions waiting | 756 | u32 erp_low_mem_count; /* nr of erp actions waiting |
918 | for memory */ | 757 | for memory */ |
919 | struct zfcp_port *nameserver_port; /* adapter's nameserver */ | 758 | struct zfcp_port *nameserver_port; /* adapter's nameserver */ |
920 | debug_info_t *erp_dbf; | 759 | debug_info_t *rec_dbf; |
921 | debug_info_t *hba_dbf; | 760 | debug_info_t *hba_dbf; |
922 | debug_info_t *san_dbf; /* debug feature areas */ | 761 | debug_info_t *san_dbf; /* debug feature areas */ |
923 | debug_info_t *scsi_dbf; | 762 | debug_info_t *scsi_dbf; |
924 | spinlock_t erp_dbf_lock; | 763 | spinlock_t rec_dbf_lock; |
925 | spinlock_t hba_dbf_lock; | 764 | spinlock_t hba_dbf_lock; |
926 | spinlock_t san_dbf_lock; | 765 | spinlock_t san_dbf_lock; |
927 | spinlock_t scsi_dbf_lock; | 766 | spinlock_t scsi_dbf_lock; |
928 | struct zfcp_erp_dbf_record erp_dbf_buf; | 767 | struct zfcp_rec_dbf_record rec_dbf_buf; |
929 | struct zfcp_hba_dbf_record hba_dbf_buf; | 768 | struct zfcp_hba_dbf_record hba_dbf_buf; |
930 | struct zfcp_san_dbf_record san_dbf_buf; | 769 | struct zfcp_san_dbf_record san_dbf_buf; |
931 | struct zfcp_scsi_dbf_record scsi_dbf_buf; | 770 | struct zfcp_scsi_dbf_record scsi_dbf_buf; |
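The struct zfcp_adapter hunk above completes the switch from the old erp_dbf area to the new rec_dbf area. The zfcp_erp.c diff that follows threads a one-byte id and a reference pointer through every recovery entry point, apparently so the new rec trace can record which call site requested recovery. A minimal usage sketch of the new calling convention; the function name and the id value 99 are illustrative only and not assigned by the patch, and the call sites visible in the hunks below pass NULL as the reference:

static void example_trigger_adapter_recovery(struct zfcp_adapter *adapter)
{
	/*
	 * Old calling convention (before this patch):
	 *	zfcp_erp_adapter_reopen(adapter, 0);
	 *
	 * New calling convention: numeric trace id plus an optional
	 * reference pointer; 99 is an arbitrary example id.
	 */
	zfcp_erp_adapter_reopen(adapter, 0, 99, NULL);
}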
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 2dc8110ebf74..805484658dd9 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -26,13 +26,17 @@ | |||
26 | static int zfcp_erp_adisc(struct zfcp_port *); | 26 | static int zfcp_erp_adisc(struct zfcp_port *); |
27 | static void zfcp_erp_adisc_handler(unsigned long); | 27 | static void zfcp_erp_adisc_handler(unsigned long); |
28 | 28 | ||
29 | static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *, int); | 29 | static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *, int, u8, |
30 | static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *, int); | 30 | void *); |
31 | static int zfcp_erp_port_reopen_internal(struct zfcp_port *, int); | 31 | static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *, int, u8, |
32 | static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *, int); | 32 | void *); |
33 | 33 | static int zfcp_erp_port_reopen_internal(struct zfcp_port *, int, u8, void *); | |
34 | static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *, int); | 34 | static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *, int, u8, void *); |
35 | static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *, int); | 35 | |
36 | static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *, int, u8, | ||
37 | void *); | ||
38 | static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *, int, u8, | ||
39 | void *); | ||
36 | 40 | ||
37 | static void zfcp_erp_adapter_block(struct zfcp_adapter *, int); | 41 | static void zfcp_erp_adapter_block(struct zfcp_adapter *, int); |
38 | static void zfcp_erp_adapter_unblock(struct zfcp_adapter *); | 42 | static void zfcp_erp_adapter_unblock(struct zfcp_adapter *); |
@@ -97,7 +101,8 @@ static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *); | |||
97 | static void zfcp_erp_action_dismiss(struct zfcp_erp_action *); | 101 | static void zfcp_erp_action_dismiss(struct zfcp_erp_action *); |
98 | 102 | ||
99 | static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *, | 103 | static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *, |
100 | struct zfcp_port *, struct zfcp_unit *); | 104 | struct zfcp_port *, struct zfcp_unit *, |
105 | u8 id, void *ref); | ||
101 | static int zfcp_erp_action_dequeue(struct zfcp_erp_action *); | 106 | static int zfcp_erp_action_dequeue(struct zfcp_erp_action *); |
102 | static void zfcp_erp_action_cleanup(int, struct zfcp_adapter *, | 107 | static void zfcp_erp_action_cleanup(int, struct zfcp_adapter *, |
103 | struct zfcp_port *, struct zfcp_unit *, | 108 | struct zfcp_port *, struct zfcp_unit *, |
@@ -128,11 +133,9 @@ static void zfcp_close_qdio(struct zfcp_adapter *adapter) | |||
128 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); | 133 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); |
129 | write_unlock_irq(&req_queue->queue_lock); | 134 | write_unlock_irq(&req_queue->queue_lock); |
130 | 135 | ||
131 | debug_text_event(adapter->erp_dbf, 3, "qdio_down2a"); | ||
132 | while (qdio_shutdown(adapter->ccw_device, | 136 | while (qdio_shutdown(adapter->ccw_device, |
133 | QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) | 137 | QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) |
134 | ssleep(1); | 138 | ssleep(1); |
135 | debug_text_event(adapter->erp_dbf, 3, "qdio_down2b"); | ||
136 | 139 | ||
137 | /* cleanup used outbound sbals */ | 140 | /* cleanup used outbound sbals */ |
138 | count = atomic_read(&req_queue->free_count); | 141 | count = atomic_read(&req_queue->free_count); |
@@ -163,7 +166,7 @@ static void zfcp_close_fsf(struct zfcp_adapter *adapter) | |||
163 | /* reset FSF request sequence number */ | 166 | /* reset FSF request sequence number */ |
164 | adapter->fsf_req_seq_no = 0; | 167 | adapter->fsf_req_seq_no = 0; |
165 | /* all ports and units are closed */ | 168 | /* all ports and units are closed */ |
166 | zfcp_erp_modify_adapter_status(adapter, | 169 | zfcp_erp_modify_adapter_status(adapter, 24, NULL, |
167 | ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); | 170 | ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); |
168 | } | 171 | } |
169 | 172 | ||
@@ -179,7 +182,8 @@ static void zfcp_close_fsf(struct zfcp_adapter *adapter) | |||
179 | static void zfcp_fsf_request_timeout_handler(unsigned long data) | 182 | static void zfcp_fsf_request_timeout_handler(unsigned long data) |
180 | { | 183 | { |
181 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; | 184 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; |
182 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); | 185 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 62, |
186 | NULL); | ||
183 | } | 187 | } |
184 | 188 | ||
185 | void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout) | 189 | void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout) |
@@ -200,12 +204,11 @@ void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout) | |||
200 | * returns: 0 - initiated action successfully | 204 | * returns: 0 - initiated action successfully |
201 | * <0 - failed to initiate action | 205 | * <0 - failed to initiate action |
202 | */ | 206 | */ |
203 | static int | 207 | static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, |
204 | zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask) | 208 | int clear_mask, u8 id, void *ref) |
205 | { | 209 | { |
206 | int retval; | 210 | int retval; |
207 | 211 | ||
208 | debug_text_event(adapter->erp_dbf, 5, "a_ro"); | ||
209 | ZFCP_LOG_DEBUG("reopen adapter %s\n", | 212 | ZFCP_LOG_DEBUG("reopen adapter %s\n", |
210 | zfcp_get_busid_by_adapter(adapter)); | 213 | zfcp_get_busid_by_adapter(adapter)); |
211 | 214 | ||
@@ -214,14 +217,13 @@ zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask) | |||
214 | if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) { | 217 | if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) { |
215 | ZFCP_LOG_DEBUG("skipped reopen of failed adapter %s\n", | 218 | ZFCP_LOG_DEBUG("skipped reopen of failed adapter %s\n", |
216 | zfcp_get_busid_by_adapter(adapter)); | 219 | zfcp_get_busid_by_adapter(adapter)); |
217 | debug_text_event(adapter->erp_dbf, 5, "a_ro_f"); | ||
218 | /* ensure propagation of failed status to new devices */ | 220 | /* ensure propagation of failed status to new devices */ |
219 | zfcp_erp_adapter_failed(adapter); | 221 | zfcp_erp_adapter_failed(adapter, 13, NULL); |
220 | retval = -EIO; | 222 | retval = -EIO; |
221 | goto out; | 223 | goto out; |
222 | } | 224 | } |
223 | retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, | 225 | retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, |
224 | adapter, NULL, NULL); | 226 | adapter, NULL, NULL, id, ref); |
225 | 227 | ||
226 | out: | 228 | out: |
227 | return retval; | 229 | return retval; |
@@ -236,56 +238,56 @@ zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask) | |||
236 | * returns: 0 - initiated action successfully | 238 | * returns: 0 - initiated action successfully |
237 | * <0 - failed to initiate action | 239 | * <0 - failed to initiate action |
238 | */ | 240 | */ |
239 | int | 241 | int zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear_mask, |
240 | zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear_mask) | 242 | u8 id, void *ref) |
241 | { | 243 | { |
242 | int retval; | 244 | int retval; |
243 | unsigned long flags; | 245 | unsigned long flags; |
244 | 246 | ||
245 | read_lock_irqsave(&zfcp_data.config_lock, flags); | 247 | read_lock_irqsave(&zfcp_data.config_lock, flags); |
246 | write_lock(&adapter->erp_lock); | 248 | write_lock(&adapter->erp_lock); |
247 | retval = zfcp_erp_adapter_reopen_internal(adapter, clear_mask); | 249 | retval = zfcp_erp_adapter_reopen_internal(adapter, clear_mask, id, ref); |
248 | write_unlock(&adapter->erp_lock); | 250 | write_unlock(&adapter->erp_lock); |
249 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); | 251 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); |
250 | 252 | ||
251 | return retval; | 253 | return retval; |
252 | } | 254 | } |
253 | 255 | ||
254 | int | 256 | int zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear_mask, |
255 | zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear_mask) | 257 | u8 id, void *ref) |
256 | { | 258 | { |
257 | int retval; | 259 | int retval; |
258 | 260 | ||
259 | retval = zfcp_erp_adapter_reopen(adapter, | 261 | retval = zfcp_erp_adapter_reopen(adapter, |
260 | ZFCP_STATUS_COMMON_RUNNING | | 262 | ZFCP_STATUS_COMMON_RUNNING | |
261 | ZFCP_STATUS_COMMON_ERP_FAILED | | 263 | ZFCP_STATUS_COMMON_ERP_FAILED | |
262 | clear_mask); | 264 | clear_mask, id, ref); |
263 | 265 | ||
264 | return retval; | 266 | return retval; |
265 | } | 267 | } |
266 | 268 | ||
267 | int | 269 | int zfcp_erp_port_shutdown(struct zfcp_port *port, int clear_mask, u8 id, |
268 | zfcp_erp_port_shutdown(struct zfcp_port *port, int clear_mask) | 270 | void *ref) |
269 | { | 271 | { |
270 | int retval; | 272 | int retval; |
271 | 273 | ||
272 | retval = zfcp_erp_port_reopen(port, | 274 | retval = zfcp_erp_port_reopen(port, |
273 | ZFCP_STATUS_COMMON_RUNNING | | 275 | ZFCP_STATUS_COMMON_RUNNING | |
274 | ZFCP_STATUS_COMMON_ERP_FAILED | | 276 | ZFCP_STATUS_COMMON_ERP_FAILED | |
275 | clear_mask); | 277 | clear_mask, id, ref); |
276 | 278 | ||
277 | return retval; | 279 | return retval; |
278 | } | 280 | } |
279 | 281 | ||
280 | int | 282 | int zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask, u8 id, |
281 | zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask) | 283 | void *ref) |
282 | { | 284 | { |
283 | int retval; | 285 | int retval; |
284 | 286 | ||
285 | retval = zfcp_erp_unit_reopen(unit, | 287 | retval = zfcp_erp_unit_reopen(unit, |
286 | ZFCP_STATUS_COMMON_RUNNING | | 288 | ZFCP_STATUS_COMMON_RUNNING | |
287 | ZFCP_STATUS_COMMON_ERP_FAILED | | 289 | ZFCP_STATUS_COMMON_ERP_FAILED | |
288 | clear_mask); | 290 | clear_mask, id, ref); |
289 | 291 | ||
290 | return retval; | 292 | return retval; |
291 | } | 293 | } |
@@ -399,8 +401,7 @@ zfcp_erp_adisc_handler(unsigned long data) | |||
399 | "force physical port reopen " | 401 | "force physical port reopen " |
400 | "(adapter %s, port d_id=0x%06x)\n", | 402 | "(adapter %s, port d_id=0x%06x)\n", |
401 | zfcp_get_busid_by_adapter(adapter), d_id); | 403 | zfcp_get_busid_by_adapter(adapter), d_id); |
402 | debug_text_event(adapter->erp_dbf, 3, "forcreop"); | 404 | if (zfcp_erp_port_forced_reopen(port, 0, 63, NULL)) |
403 | if (zfcp_erp_port_forced_reopen(port, 0)) | ||
404 | ZFCP_LOG_NORMAL("failed reopen of port " | 405 | ZFCP_LOG_NORMAL("failed reopen of port " |
405 | "(adapter %s, wwpn=0x%016Lx)\n", | 406 | "(adapter %s, wwpn=0x%016Lx)\n", |
406 | zfcp_get_busid_by_port(port), | 407 | zfcp_get_busid_by_port(port), |
@@ -427,7 +428,7 @@ zfcp_erp_adisc_handler(unsigned long data) | |||
427 | "adisc_resp_wwpn=0x%016Lx)\n", | 428 | "adisc_resp_wwpn=0x%016Lx)\n", |
428 | zfcp_get_busid_by_port(port), | 429 | zfcp_get_busid_by_port(port), |
429 | port->wwpn, (wwn_t) adisc->wwpn); | 430 | port->wwpn, (wwn_t) adisc->wwpn); |
430 | if (zfcp_erp_port_reopen(port, 0)) | 431 | if (zfcp_erp_port_reopen(port, 0, 64, NULL)) |
431 | ZFCP_LOG_NORMAL("failed reopen of port " | 432 | ZFCP_LOG_NORMAL("failed reopen of port " |
432 | "(adapter %s, wwpn=0x%016Lx)\n", | 433 | "(adapter %s, wwpn=0x%016Lx)\n", |
433 | zfcp_get_busid_by_port(port), | 434 | zfcp_get_busid_by_port(port), |
@@ -461,7 +462,7 @@ zfcp_test_link(struct zfcp_port *port) | |||
461 | ZFCP_LOG_NORMAL("reopen needed for port 0x%016Lx " | 462 | ZFCP_LOG_NORMAL("reopen needed for port 0x%016Lx " |
462 | "on adapter %s\n ", port->wwpn, | 463 | "on adapter %s\n ", port->wwpn, |
463 | zfcp_get_busid_by_port(port)); | 464 | zfcp_get_busid_by_port(port)); |
464 | retval = zfcp_erp_port_forced_reopen(port, 0); | 465 | retval = zfcp_erp_port_forced_reopen(port, 0, 65, NULL); |
465 | if (retval != 0) { | 466 | if (retval != 0) { |
466 | ZFCP_LOG_NORMAL("reopen of remote port 0x%016Lx " | 467 | ZFCP_LOG_NORMAL("reopen of remote port 0x%016Lx " |
467 | "on adapter %s failed\n", port->wwpn, | 468 | "on adapter %s failed\n", port->wwpn, |
@@ -484,14 +485,11 @@ zfcp_test_link(struct zfcp_port *port) | |||
484 | * returns: 0 - initiated action successfully | 485 | * returns: 0 - initiated action successfully |
485 | * <0 - failed to initiate action | 486 | * <0 - failed to initiate action |
486 | */ | 487 | */ |
487 | static int | 488 | static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port, |
488 | zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port, int clear_mask) | 489 | int clear_mask, u8 id, |
490 | void *ref) | ||
489 | { | 491 | { |
490 | int retval; | 492 | int retval; |
491 | struct zfcp_adapter *adapter = port->adapter; | ||
492 | |||
493 | debug_text_event(adapter->erp_dbf, 5, "pf_ro"); | ||
494 | debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t)); | ||
495 | 493 | ||
496 | ZFCP_LOG_DEBUG("forced reopen of port 0x%016Lx on adapter %s\n", | 494 | ZFCP_LOG_DEBUG("forced reopen of port 0x%016Lx on adapter %s\n", |
497 | port->wwpn, zfcp_get_busid_by_port(port)); | 495 | port->wwpn, zfcp_get_busid_by_port(port)); |
@@ -502,14 +500,12 @@ zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port, int clear_mask) | |||
502 | ZFCP_LOG_DEBUG("skipped forced reopen of failed port 0x%016Lx " | 500 | ZFCP_LOG_DEBUG("skipped forced reopen of failed port 0x%016Lx " |
503 | "on adapter %s\n", port->wwpn, | 501 | "on adapter %s\n", port->wwpn, |
504 | zfcp_get_busid_by_port(port)); | 502 | zfcp_get_busid_by_port(port)); |
505 | debug_text_event(adapter->erp_dbf, 5, "pf_ro_f"); | ||
506 | debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t)); | ||
507 | retval = -EIO; | 503 | retval = -EIO; |
508 | goto out; | 504 | goto out; |
509 | } | 505 | } |
510 | 506 | ||
511 | retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, | 507 | retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, |
512 | port->adapter, port, NULL); | 508 | port->adapter, port, NULL, id, ref); |
513 | 509 | ||
514 | out: | 510 | out: |
515 | return retval; | 511 | return retval; |
@@ -524,8 +520,8 @@ zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port, int clear_mask) | |||
524 | * returns: 0 - initiated action successfully | 520 | * returns: 0 - initiated action successfully |
525 | * <0 - failed to initiate action | 521 | * <0 - failed to initiate action |
526 | */ | 522 | */ |
527 | int | 523 | int zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask, u8 id, |
528 | zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask) | 524 | void *ref) |
529 | { | 525 | { |
530 | int retval; | 526 | int retval; |
531 | unsigned long flags; | 527 | unsigned long flags; |
@@ -534,7 +530,8 @@ zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask) | |||
534 | adapter = port->adapter; | 530 | adapter = port->adapter; |
535 | read_lock_irqsave(&zfcp_data.config_lock, flags); | 531 | read_lock_irqsave(&zfcp_data.config_lock, flags); |
536 | write_lock(&adapter->erp_lock); | 532 | write_lock(&adapter->erp_lock); |
537 | retval = zfcp_erp_port_forced_reopen_internal(port, clear_mask); | 533 | retval = zfcp_erp_port_forced_reopen_internal(port, clear_mask, id, |
534 | ref); | ||
538 | write_unlock(&adapter->erp_lock); | 535 | write_unlock(&adapter->erp_lock); |
539 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); | 536 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); |
540 | 537 | ||
@@ -551,14 +548,10 @@ zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask) | |||
551 | * returns: 0 - initiated action successfully | 548 | * returns: 0 - initiated action successfully |
552 | * <0 - failed to initiate action | 549 | * <0 - failed to initiate action |
553 | */ | 550 | */ |
554 | static int | 551 | static int zfcp_erp_port_reopen_internal(struct zfcp_port *port, int clear_mask, |
555 | zfcp_erp_port_reopen_internal(struct zfcp_port *port, int clear_mask) | 552 | u8 id, void *ref) |
556 | { | 553 | { |
557 | int retval; | 554 | int retval; |
558 | struct zfcp_adapter *adapter = port->adapter; | ||
559 | |||
560 | debug_text_event(adapter->erp_dbf, 5, "p_ro"); | ||
561 | debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t)); | ||
562 | 555 | ||
563 | ZFCP_LOG_DEBUG("reopen of port 0x%016Lx on adapter %s\n", | 556 | ZFCP_LOG_DEBUG("reopen of port 0x%016Lx on adapter %s\n", |
564 | port->wwpn, zfcp_get_busid_by_port(port)); | 557 | port->wwpn, zfcp_get_busid_by_port(port)); |
@@ -569,16 +562,14 @@ zfcp_erp_port_reopen_internal(struct zfcp_port *port, int clear_mask) | |||
569 | ZFCP_LOG_DEBUG("skipped reopen of failed port 0x%016Lx " | 562 | ZFCP_LOG_DEBUG("skipped reopen of failed port 0x%016Lx " |
570 | "on adapter %s\n", port->wwpn, | 563 | "on adapter %s\n", port->wwpn, |
571 | zfcp_get_busid_by_port(port)); | 564 | zfcp_get_busid_by_port(port)); |
572 | debug_text_event(adapter->erp_dbf, 5, "p_ro_f"); | ||
573 | debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t)); | ||
574 | /* ensure propagation of failed status to new devices */ | 565 | /* ensure propagation of failed status to new devices */ |
575 | zfcp_erp_port_failed(port); | 566 | zfcp_erp_port_failed(port, 14, NULL); |
576 | retval = -EIO; | 567 | retval = -EIO; |
577 | goto out; | 568 | goto out; |
578 | } | 569 | } |
579 | 570 | ||
580 | retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, | 571 | retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, |
581 | port->adapter, port, NULL); | 572 | port->adapter, port, NULL, id, ref); |
582 | 573 | ||
583 | out: | 574 | out: |
584 | return retval; | 575 | return retval; |
@@ -594,8 +585,8 @@ zfcp_erp_port_reopen_internal(struct zfcp_port *port, int clear_mask) | |||
594 | * correct locking. An error recovery task is initiated to do the reopen. | 585 | * correct locking. An error recovery task is initiated to do the reopen. |
595 | * To wait for the completion of the reopen zfcp_erp_wait should be used. | 586 | * To wait for the completion of the reopen zfcp_erp_wait should be used. |
596 | */ | 587 | */ |
597 | int | 588 | int zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask, u8 id, |
598 | zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask) | 589 | void *ref) |
599 | { | 590 | { |
600 | int retval; | 591 | int retval; |
601 | unsigned long flags; | 592 | unsigned long flags; |
@@ -603,7 +594,7 @@ zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask) | |||
603 | 594 | ||
604 | read_lock_irqsave(&zfcp_data.config_lock, flags); | 595 | read_lock_irqsave(&zfcp_data.config_lock, flags); |
605 | write_lock(&adapter->erp_lock); | 596 | write_lock(&adapter->erp_lock); |
606 | retval = zfcp_erp_port_reopen_internal(port, clear_mask); | 597 | retval = zfcp_erp_port_reopen_internal(port, clear_mask, id, ref); |
607 | write_unlock(&adapter->erp_lock); | 598 | write_unlock(&adapter->erp_lock); |
608 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); | 599 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); |
609 | 600 | ||
@@ -620,14 +611,12 @@ zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask) | |||
620 | * returns: 0 - initiated action successfully | 611 | * returns: 0 - initiated action successfully |
621 | * <0 - failed to initiate action | 612 | * <0 - failed to initiate action |
622 | */ | 613 | */ |
623 | static int | 614 | static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask, |
624 | zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask) | 615 | u8 id, void *ref) |
625 | { | 616 | { |
626 | int retval; | 617 | int retval; |
627 | struct zfcp_adapter *adapter = unit->port->adapter; | 618 | struct zfcp_adapter *adapter = unit->port->adapter; |
628 | 619 | ||
629 | debug_text_event(adapter->erp_dbf, 5, "u_ro"); | ||
630 | debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t)); | ||
631 | ZFCP_LOG_DEBUG("reopen of unit 0x%016Lx on port 0x%016Lx " | 620 | ZFCP_LOG_DEBUG("reopen of unit 0x%016Lx on port 0x%016Lx " |
632 | "on adapter %s\n", unit->fcp_lun, | 621 | "on adapter %s\n", unit->fcp_lun, |
633 | unit->port->wwpn, zfcp_get_busid_by_unit(unit)); | 622 | unit->port->wwpn, zfcp_get_busid_by_unit(unit)); |
@@ -639,15 +628,12 @@ zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask) | |||
639 | "on port 0x%016Lx on adapter %s\n", | 628 | "on port 0x%016Lx on adapter %s\n", |
640 | unit->fcp_lun, unit->port->wwpn, | 629 | unit->fcp_lun, unit->port->wwpn, |
641 | zfcp_get_busid_by_unit(unit)); | 630 | zfcp_get_busid_by_unit(unit)); |
642 | debug_text_event(adapter->erp_dbf, 5, "u_ro_f"); | ||
643 | debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, | ||
644 | sizeof (fcp_lun_t)); | ||
645 | retval = -EIO; | 631 | retval = -EIO; |
646 | goto out; | 632 | goto out; |
647 | } | 633 | } |
648 | 634 | ||
649 | retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT, | 635 | retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT, |
650 | unit->port->adapter, unit->port, unit); | 636 | adapter, unit->port, unit, id, ref); |
651 | out: | 637 | out: |
652 | return retval; | 638 | return retval; |
653 | } | 639 | } |
@@ -662,8 +648,8 @@ zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask) | |||
662 | * locking. An error recovery task is initiated to do the reopen. | 648 | * locking. An error recovery task is initiated to do the reopen. |
663 | * To wait for the completion of the reopen zfcp_erp_wait should be used. | 649 | * To wait for the completion of the reopen zfcp_erp_wait should be used. |
664 | */ | 650 | */ |
665 | int | 651 | int zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask, u8 id, |
666 | zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask) | 652 | void *ref) |
667 | { | 653 | { |
668 | int retval; | 654 | int retval; |
669 | unsigned long flags; | 655 | unsigned long flags; |
@@ -675,7 +661,7 @@ zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask) | |||
675 | 661 | ||
676 | read_lock_irqsave(&zfcp_data.config_lock, flags); | 662 | read_lock_irqsave(&zfcp_data.config_lock, flags); |
677 | write_lock(&adapter->erp_lock); | 663 | write_lock(&adapter->erp_lock); |
678 | retval = zfcp_erp_unit_reopen_internal(unit, clear_mask); | 664 | retval = zfcp_erp_unit_reopen_internal(unit, clear_mask, id, ref); |
679 | write_unlock(&adapter->erp_lock); | 665 | write_unlock(&adapter->erp_lock); |
680 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); | 666 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); |
681 | 667 | ||
@@ -687,19 +673,43 @@ zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask) | |||
687 | */ | 673 | */ |
688 | static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) | 674 | static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) |
689 | { | 675 | { |
690 | debug_text_event(adapter->erp_dbf, 6, "a_bl"); | 676 | zfcp_erp_modify_adapter_status(adapter, 15, NULL, |
691 | zfcp_erp_modify_adapter_status(adapter, | ||
692 | ZFCP_STATUS_COMMON_UNBLOCKED | | 677 | ZFCP_STATUS_COMMON_UNBLOCKED | |
693 | clear_mask, ZFCP_CLEAR); | 678 | clear_mask, ZFCP_CLEAR); |
694 | } | 679 | } |
695 | 680 | ||
681 | /* FIXME: isn't really atomic */ | ||
682 | /* | ||
683 | * returns the mask which has not been set so far, i.e. | ||
684 | * 0 if no bit has been changed, !0 if some bit has been changed | ||
685 | */ | ||
686 | static int atomic_test_and_set_mask(unsigned long mask, atomic_t *v) | ||
687 | { | ||
688 | int changed_bits = (atomic_read(v) /*XOR*/^ mask) & mask; | ||
689 | atomic_set_mask(mask, v); | ||
690 | return changed_bits; | ||
691 | } | ||
692 | |||
693 | /* FIXME: isn't really atomic */ | ||
694 | /* | ||
695 | * returns the mask which has not been cleared so far, i.e. | ||
696 | * 0 if no bit has been changed, !0 if some bit has been changed | ||
697 | */ | ||
698 | static int atomic_test_and_clear_mask(unsigned long mask, atomic_t *v) | ||
699 | { | ||
700 | int changed_bits = atomic_read(v) & mask; | ||
701 | atomic_clear_mask(mask, v); | ||
702 | return changed_bits; | ||
703 | } | ||
704 | |||
696 | /** | 705 | /** |
697 | * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests | 706 | * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests |
698 | */ | 707 | */ |
699 | static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) | 708 | static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) |
700 | { | 709 | { |
701 | debug_text_event(adapter->erp_dbf, 6, "a_ubl"); | 710 | if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, |
702 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); | 711 | &adapter->status)) |
712 | zfcp_rec_dbf_event_adapter(16, NULL, adapter); | ||
703 | } | 713 | } |
704 | 714 | ||
705 | /* | 715 | /* |
@@ -714,11 +724,7 @@ static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) | |||
714 | static void | 724 | static void |
715 | zfcp_erp_port_block(struct zfcp_port *port, int clear_mask) | 725 | zfcp_erp_port_block(struct zfcp_port *port, int clear_mask) |
716 | { | 726 | { |
717 | struct zfcp_adapter *adapter = port->adapter; | 727 | zfcp_erp_modify_port_status(port, 17, NULL, |
718 | |||
719 | debug_text_event(adapter->erp_dbf, 6, "p_bl"); | ||
720 | debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t)); | ||
721 | zfcp_erp_modify_port_status(port, | ||
722 | ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, | 728 | ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, |
723 | ZFCP_CLEAR); | 729 | ZFCP_CLEAR); |
724 | } | 730 | } |
@@ -733,11 +739,9 @@ zfcp_erp_port_block(struct zfcp_port *port, int clear_mask) | |||
733 | static void | 739 | static void |
734 | zfcp_erp_port_unblock(struct zfcp_port *port) | 740 | zfcp_erp_port_unblock(struct zfcp_port *port) |
735 | { | 741 | { |
736 | struct zfcp_adapter *adapter = port->adapter; | 742 | if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, |
737 | 743 | &port->status)) | |
738 | debug_text_event(adapter->erp_dbf, 6, "p_ubl"); | 744 | zfcp_rec_dbf_event_port(18, NULL, port); |
739 | debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t)); | ||
740 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); | ||
741 | } | 745 | } |
742 | 746 | ||
743 | /* | 747 | /* |
@@ -752,11 +756,7 @@ zfcp_erp_port_unblock(struct zfcp_port *port) | |||
752 | static void | 756 | static void |
753 | zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask) | 757 | zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask) |
754 | { | 758 | { |
755 | struct zfcp_adapter *adapter = unit->port->adapter; | 759 | zfcp_erp_modify_unit_status(unit, 19, NULL, |
756 | |||
757 | debug_text_event(adapter->erp_dbf, 6, "u_bl"); | ||
758 | debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof (fcp_lun_t)); | ||
759 | zfcp_erp_modify_unit_status(unit, | ||
760 | ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, | 760 | ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, |
761 | ZFCP_CLEAR); | 761 | ZFCP_CLEAR); |
762 | } | 762 | } |
@@ -771,11 +771,9 @@ zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask) | |||
771 | static void | 771 | static void |
772 | zfcp_erp_unit_unblock(struct zfcp_unit *unit) | 772 | zfcp_erp_unit_unblock(struct zfcp_unit *unit) |
773 | { | 773 | { |
774 | struct zfcp_adapter *adapter = unit->port->adapter; | 774 | if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, |
775 | 775 | &unit->status)) | |
776 | debug_text_event(adapter->erp_dbf, 6, "u_ubl"); | 776 | zfcp_rec_dbf_event_unit(20, NULL, unit); |
777 | debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof (fcp_lun_t)); | ||
778 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status); | ||
779 | } | 777 | } |
780 | 778 | ||
781 | static void | 779 | static void |
@@ -783,11 +781,9 @@ zfcp_erp_action_ready(struct zfcp_erp_action *erp_action) | |||
783 | { | 781 | { |
784 | struct zfcp_adapter *adapter = erp_action->adapter; | 782 | struct zfcp_adapter *adapter = erp_action->adapter; |
785 | 783 | ||
786 | debug_text_event(adapter->erp_dbf, 4, "a_ar"); | ||
787 | debug_event(adapter->erp_dbf, 4, &erp_action->action, sizeof (int)); | ||
788 | |||
789 | zfcp_erp_action_to_ready(erp_action); | 784 | zfcp_erp_action_to_ready(erp_action); |
790 | up(&adapter->erp_ready_sem); | 785 | up(&adapter->erp_ready_sem); |
786 | zfcp_rec_dbf_event_thread(2, adapter, 0); | ||
791 | } | 787 | } |
792 | 788 | ||
793 | /* | 789 | /* |
@@ -849,18 +845,15 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) | |||
849 | if (zfcp_reqlist_find_safe(adapter, erp_action->fsf_req) && | 845 | if (zfcp_reqlist_find_safe(adapter, erp_action->fsf_req) && |
850 | erp_action->fsf_req->erp_action == erp_action) { | 846 | erp_action->fsf_req->erp_action == erp_action) { |
851 | /* fsf_req still exists */ | 847 | /* fsf_req still exists */ |
852 | debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); | ||
853 | debug_event(adapter->erp_dbf, 3, &erp_action->fsf_req, | ||
854 | sizeof (unsigned long)); | ||
855 | /* dismiss fsf_req of timed out/dismissed erp_action */ | 848 | /* dismiss fsf_req of timed out/dismissed erp_action */ |
856 | if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED | | 849 | if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED | |
857 | ZFCP_STATUS_ERP_TIMEDOUT)) { | 850 | ZFCP_STATUS_ERP_TIMEDOUT)) { |
858 | debug_text_event(adapter->erp_dbf, 3, | ||
859 | "a_ca_disreq"); | ||
860 | erp_action->fsf_req->status |= | 851 | erp_action->fsf_req->status |= |
861 | ZFCP_STATUS_FSFREQ_DISMISSED; | 852 | ZFCP_STATUS_FSFREQ_DISMISSED; |
853 | zfcp_rec_dbf_event_action(142, erp_action); | ||
862 | } | 854 | } |
863 | if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { | 855 | if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { |
856 | zfcp_rec_dbf_event_action(143, erp_action); | ||
864 | ZFCP_LOG_NORMAL("error: erp step timed out " | 857 | ZFCP_LOG_NORMAL("error: erp step timed out " |
865 | "(action=%d, fsf_req=%p)\n ", | 858 | "(action=%d, fsf_req=%p)\n ", |
866 | erp_action->action, | 859 | erp_action->action, |
@@ -879,7 +872,6 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) | |||
879 | erp_action->fsf_req = NULL; | 872 | erp_action->fsf_req = NULL; |
880 | } | 873 | } |
881 | } else { | 874 | } else { |
882 | debug_text_event(adapter->erp_dbf, 3, "a_ca_gonereq"); | ||
883 | /* | 875 | /* |
884 | * even if this fsf_req has gone, forget about | 876 | * even if this fsf_req has gone, forget about |
885 | * association between erp_action and fsf_req | 877 | * association between erp_action and fsf_req |
@@ -887,8 +879,7 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) | |||
887 | erp_action->fsf_req = NULL; | 879 | erp_action->fsf_req = NULL; |
888 | } | 880 | } |
889 | spin_unlock(&adapter->req_list_lock); | 881 | spin_unlock(&adapter->req_list_lock); |
890 | } else | 882 | } |
891 | debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq"); | ||
892 | } | 883 | } |
893 | 884 | ||
894 | /** | 885 | /** |
@@ -900,19 +891,11 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) | |||
900 | static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, | 891 | static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, |
901 | unsigned long set_mask) | 892 | unsigned long set_mask) |
902 | { | 893 | { |
903 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
904 | |||
905 | if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) { | 894 | if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) { |
906 | debug_text_event(adapter->erp_dbf, 2, "a_asyh_ex"); | ||
907 | debug_event(adapter->erp_dbf, 2, &erp_action->action, | ||
908 | sizeof (int)); | ||
909 | erp_action->status |= set_mask; | 895 | erp_action->status |= set_mask; |
910 | zfcp_erp_action_ready(erp_action); | 896 | zfcp_erp_action_ready(erp_action); |
911 | } else { | 897 | } else { |
912 | /* action is ready or gone - nothing to do */ | 898 | /* action is ready or gone - nothing to do */ |
913 | debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone"); | ||
914 | debug_event(adapter->erp_dbf, 3, &erp_action->action, | ||
915 | sizeof (int)); | ||
916 | } | 899 | } |
917 | } | 900 | } |
918 | 901 | ||
@@ -939,10 +922,6 @@ static void | |||
939 | zfcp_erp_memwait_handler(unsigned long data) | 922 | zfcp_erp_memwait_handler(unsigned long data) |
940 | { | 923 | { |
941 | struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data; | 924 | struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data; |
942 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
943 | |||
944 | debug_text_event(adapter->erp_dbf, 2, "a_mwh"); | ||
945 | debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int)); | ||
946 | 925 | ||
947 | zfcp_erp_async_handler(erp_action, 0); | 926 | zfcp_erp_async_handler(erp_action, 0); |
948 | } | 927 | } |
@@ -955,10 +934,6 @@ zfcp_erp_memwait_handler(unsigned long data) | |||
955 | static void zfcp_erp_timeout_handler(unsigned long data) | 934 | static void zfcp_erp_timeout_handler(unsigned long data) |
956 | { | 935 | { |
957 | struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data; | 936 | struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data; |
958 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
959 | |||
960 | debug_text_event(adapter->erp_dbf, 2, "a_th"); | ||
961 | debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int)); | ||
962 | 937 | ||
963 | zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT); | 938 | zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT); |
964 | } | 939 | } |
@@ -973,11 +948,6 @@ static void zfcp_erp_timeout_handler(unsigned long data) | |||
973 | */ | 948 | */ |
974 | static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) | 949 | static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) |
975 | { | 950 | { |
976 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
977 | |||
978 | debug_text_event(adapter->erp_dbf, 2, "a_adis"); | ||
979 | debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int)); | ||
980 | |||
981 | erp_action->status |= ZFCP_STATUS_ERP_DISMISSED; | 951 | erp_action->status |= ZFCP_STATUS_ERP_DISMISSED; |
982 | if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) | 952 | if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) |
983 | zfcp_erp_action_ready(erp_action); | 953 | zfcp_erp_action_ready(erp_action); |
@@ -995,12 +965,10 @@ zfcp_erp_thread_setup(struct zfcp_adapter *adapter) | |||
995 | ZFCP_LOG_NORMAL("error: creation of erp thread failed for " | 965 | ZFCP_LOG_NORMAL("error: creation of erp thread failed for " |
996 | "adapter %s\n", | 966 | "adapter %s\n", |
997 | zfcp_get_busid_by_adapter(adapter)); | 967 | zfcp_get_busid_by_adapter(adapter)); |
998 | debug_text_event(adapter->erp_dbf, 5, "a_thset_fail"); | ||
999 | } else { | 968 | } else { |
1000 | wait_event(adapter->erp_thread_wqh, | 969 | wait_event(adapter->erp_thread_wqh, |
1001 | atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, | 970 | atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, |
1002 | &adapter->status)); | 971 | &adapter->status)); |
1003 | debug_text_event(adapter->erp_dbf, 5, "a_thset_ok"); | ||
1004 | } | 972 | } |
1005 | 973 | ||
1006 | return (retval < 0); | 974 | return (retval < 0); |
@@ -1027,6 +995,7 @@ zfcp_erp_thread_kill(struct zfcp_adapter *adapter) | |||
1027 | 995 | ||
1028 | atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status); | 996 | atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status); |
1029 | up(&adapter->erp_ready_sem); | 997 | up(&adapter->erp_ready_sem); |
998 | zfcp_rec_dbf_event_thread(2, adapter, 1); | ||
1030 | 999 | ||
1031 | wait_event(adapter->erp_thread_wqh, | 1000 | wait_event(adapter->erp_thread_wqh, |
1032 | !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, | 1001 | !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, |
@@ -1035,8 +1004,6 @@ zfcp_erp_thread_kill(struct zfcp_adapter *adapter) | |||
1035 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, | 1004 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, |
1036 | &adapter->status); | 1005 | &adapter->status); |
1037 | 1006 | ||
1038 | debug_text_event(adapter->erp_dbf, 5, "a_thki_ok"); | ||
1039 | |||
1040 | return retval; | 1007 | return retval; |
1041 | } | 1008 | } |
1042 | 1009 | ||
@@ -1059,7 +1026,6 @@ zfcp_erp_thread(void *data) | |||
1059 | /* Block all signals */ | 1026 | /* Block all signals */ |
1060 | siginitsetinv(¤t->blocked, 0); | 1027 | siginitsetinv(¤t->blocked, 0); |
1061 | atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); | 1028 | atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); |
1062 | debug_text_event(adapter->erp_dbf, 5, "a_th_run"); | ||
1063 | wake_up(&adapter->erp_thread_wqh); | 1029 | wake_up(&adapter->erp_thread_wqh); |
1064 | 1030 | ||
1065 | while (!atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, | 1031 | while (!atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, |
@@ -1084,12 +1050,12 @@ zfcp_erp_thread(void *data) | |||
1084 | * no action in 'ready' queue to be processed and | 1050 | * no action in 'ready' queue to be processed and |
1085 | * thread is not to be killed | 1051 | * thread is not to be killed |
1086 | */ | 1052 | */ |
1053 | zfcp_rec_dbf_event_thread(4, adapter, 1); | ||
1087 | down_interruptible(&adapter->erp_ready_sem); | 1054 | down_interruptible(&adapter->erp_ready_sem); |
1088 | debug_text_event(adapter->erp_dbf, 5, "a_th_woken"); | 1055 | zfcp_rec_dbf_event_thread(5, adapter, 1); |
1089 | } | 1056 | } |
1090 | 1057 | ||
1091 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); | 1058 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); |
1092 | debug_text_event(adapter->erp_dbf, 5, "a_th_stop"); | ||
1093 | wake_up(&adapter->erp_thread_wqh); | 1059 | wake_up(&adapter->erp_thread_wqh); |
1094 | 1060 | ||
1095 | return 0; | 1061 | return 0; |
@@ -1125,7 +1091,6 @@ zfcp_erp_strategy(struct zfcp_erp_action *erp_action) | |||
1125 | /* dequeue dismissed action and leave, if required */ | 1091 | /* dequeue dismissed action and leave, if required */ |
1126 | retval = zfcp_erp_strategy_check_action(erp_action, retval); | 1092 | retval = zfcp_erp_strategy_check_action(erp_action, retval); |
1127 | if (retval == ZFCP_ERP_DISMISSED) { | 1093 | if (retval == ZFCP_ERP_DISMISSED) { |
1128 | debug_text_event(adapter->erp_dbf, 4, "a_st_dis1"); | ||
1129 | goto unlock; | 1094 | goto unlock; |
1130 | } | 1095 | } |
1131 | 1096 | ||
@@ -1176,20 +1141,17 @@ zfcp_erp_strategy(struct zfcp_erp_action *erp_action) | |||
1176 | element was timed out. | 1141 | element was timed out. |
1177 | */ | 1142 | */ |
1178 | if (adapter->erp_total_count == adapter->erp_low_mem_count) { | 1143 | if (adapter->erp_total_count == adapter->erp_low_mem_count) { |
1179 | debug_text_event(adapter->erp_dbf, 3, "a_st_lowmem"); | ||
1180 | ZFCP_LOG_NORMAL("error: no mempool elements available, " | 1144 | ZFCP_LOG_NORMAL("error: no mempool elements available, " |
1181 | "restarting I/O on adapter %s " | 1145 | "restarting I/O on adapter %s " |
1182 | "to free mempool\n", | 1146 | "to free mempool\n", |
1183 | zfcp_get_busid_by_adapter(adapter)); | 1147 | zfcp_get_busid_by_adapter(adapter)); |
1184 | zfcp_erp_adapter_reopen_internal(adapter, 0); | 1148 | zfcp_erp_adapter_reopen_internal(adapter, 0, 66, NULL); |
1185 | } else { | 1149 | } else { |
1186 | debug_text_event(adapter->erp_dbf, 2, "a_st_memw"); | ||
1187 | retval = zfcp_erp_strategy_memwait(erp_action); | 1150 | retval = zfcp_erp_strategy_memwait(erp_action); |
1188 | } | 1151 | } |
1189 | goto unlock; | 1152 | goto unlock; |
1190 | case ZFCP_ERP_CONTINUES: | 1153 | case ZFCP_ERP_CONTINUES: |
1191 | /* leave since this action runs asynchronously */ | 1154 | /* leave since this action runs asynchronously */ |
1192 | debug_text_event(adapter->erp_dbf, 6, "a_st_cont"); | ||
1193 | if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) { | 1155 | if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) { |
1194 | --adapter->erp_low_mem_count; | 1156 | --adapter->erp_low_mem_count; |
1195 | erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM; | 1157 | erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM; |
@@ -1218,7 +1180,6 @@ zfcp_erp_strategy(struct zfcp_erp_action *erp_action) | |||
1218 | * action is repeated in order to process state change | 1180 | * action is repeated in order to process state change |
1219 | */ | 1181 | */ |
1220 | if (retval == ZFCP_ERP_EXIT) { | 1182 | if (retval == ZFCP_ERP_EXIT) { |
1221 | debug_text_event(adapter->erp_dbf, 2, "a_st_exit"); | ||
1222 | goto unlock; | 1183 | goto unlock; |
1223 | } | 1184 | } |
1224 | 1185 | ||
@@ -1244,8 +1205,6 @@ zfcp_erp_strategy(struct zfcp_erp_action *erp_action) | |||
1244 | if (retval != ZFCP_ERP_DISMISSED) | 1205 | if (retval != ZFCP_ERP_DISMISSED) |
1245 | zfcp_erp_strategy_check_queues(adapter); | 1206 | zfcp_erp_strategy_check_queues(adapter); |
1246 | 1207 | ||
1247 | debug_text_event(adapter->erp_dbf, 6, "a_st_done"); | ||
1248 | |||
1249 | return retval; | 1208 | return retval; |
1250 | } | 1209 | } |
1251 | 1210 | ||
@@ -1260,17 +1219,12 @@ zfcp_erp_strategy(struct zfcp_erp_action *erp_action) | |||
1260 | static int | 1219 | static int |
1261 | zfcp_erp_strategy_check_action(struct zfcp_erp_action *erp_action, int retval) | 1220 | zfcp_erp_strategy_check_action(struct zfcp_erp_action *erp_action, int retval) |
1262 | { | 1221 | { |
1263 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
1264 | |||
1265 | zfcp_erp_strategy_check_fsfreq(erp_action); | 1222 | zfcp_erp_strategy_check_fsfreq(erp_action); |
1266 | 1223 | ||
1267 | debug_event(adapter->erp_dbf, 5, &erp_action->action, sizeof (int)); | ||
1268 | if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) { | 1224 | if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) { |
1269 | debug_text_event(adapter->erp_dbf, 3, "a_stcd_dis"); | ||
1270 | zfcp_erp_action_dequeue(erp_action); | 1225 | zfcp_erp_action_dequeue(erp_action); |
1271 | retval = ZFCP_ERP_DISMISSED; | 1226 | retval = ZFCP_ERP_DISMISSED; |
1272 | } else | 1227 | } |
1273 | debug_text_event(adapter->erp_dbf, 5, "a_stcd_nodis"); | ||
1274 | 1228 | ||
1275 | return retval; | 1229 | return retval; |
1276 | } | 1230 | } |
@@ -1279,7 +1233,6 @@ static int | |||
1279 | zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action) | 1233 | zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action) |
1280 | { | 1234 | { |
1281 | int retval = ZFCP_ERP_FAILED; | 1235 | int retval = ZFCP_ERP_FAILED; |
1282 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
1283 | 1236 | ||
1284 | /* | 1237 | /* |
1285 | * try to execute/continue action as far as possible, | 1238 | * try to execute/continue action as far as possible, |
@@ -1309,9 +1262,6 @@ zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action) | |||
1309 | break; | 1262 | break; |
1310 | 1263 | ||
1311 | default: | 1264 | default: |
1312 | debug_text_exception(adapter->erp_dbf, 1, "a_stda_bug"); | ||
1313 | debug_event(adapter->erp_dbf, 1, &erp_action->action, | ||
1314 | sizeof (int)); | ||
1315 | ZFCP_LOG_NORMAL("bug: unknown erp action requested on " | 1265 | ZFCP_LOG_NORMAL("bug: unknown erp action requested on " |
1316 | "adapter %s (action=%d)\n", | 1266 | "adapter %s (action=%d)\n", |
1317 | zfcp_get_busid_by_adapter(erp_action->adapter), | 1267 | zfcp_get_busid_by_adapter(erp_action->adapter), |
@@ -1333,10 +1283,7 @@ static int | |||
1333 | zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action) | 1283 | zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action) |
1334 | { | 1284 | { |
1335 | int retval = ZFCP_ERP_CONTINUES; | 1285 | int retval = ZFCP_ERP_CONTINUES; |
1336 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
1337 | 1286 | ||
1338 | debug_text_event(adapter->erp_dbf, 6, "a_mwinit"); | ||
1339 | debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof (int)); | ||
1340 | init_timer(&erp_action->timer); | 1287 | init_timer(&erp_action->timer); |
1341 | erp_action->timer.function = zfcp_erp_memwait_handler; | 1288 | erp_action->timer.function = zfcp_erp_memwait_handler; |
1342 | erp_action->timer.data = (unsigned long) erp_action; | 1289 | erp_action->timer.data = (unsigned long) erp_action; |
@@ -1353,13 +1300,12 @@ zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action) | |||
1353 | * | 1300 | * |
1354 | */ | 1301 | */ |
1355 | void | 1302 | void |
1356 | zfcp_erp_adapter_failed(struct zfcp_adapter *adapter) | 1303 | zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref) |
1357 | { | 1304 | { |
1358 | zfcp_erp_modify_adapter_status(adapter, | 1305 | zfcp_erp_modify_adapter_status(adapter, id, ref, |
1359 | ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); | 1306 | ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); |
1360 | ZFCP_LOG_NORMAL("adapter erp failed on adapter %s\n", | 1307 | ZFCP_LOG_NORMAL("adapter erp failed on adapter %s\n", |
1361 | zfcp_get_busid_by_adapter(adapter)); | 1308 | zfcp_get_busid_by_adapter(adapter)); |
1362 | debug_text_event(adapter->erp_dbf, 2, "a_afail"); | ||
1363 | } | 1309 | } |
1364 | 1310 | ||
1365 | /* | 1311 | /* |
@@ -1369,9 +1315,9 @@ zfcp_erp_adapter_failed(struct zfcp_adapter *adapter) | |||
1369 | * | 1315 | * |
1370 | */ | 1316 | */ |
1371 | void | 1317 | void |
1372 | zfcp_erp_port_failed(struct zfcp_port *port) | 1318 | zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref) |
1373 | { | 1319 | { |
1374 | zfcp_erp_modify_port_status(port, | 1320 | zfcp_erp_modify_port_status(port, id, ref, |
1375 | ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); | 1321 | ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); |
1376 | 1322 | ||
1377 | if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status)) | 1323 | if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status)) |
@@ -1381,9 +1327,6 @@ zfcp_erp_port_failed(struct zfcp_port *port) | |||
1381 | else | 1327 | else |
1382 | ZFCP_LOG_NORMAL("port erp failed (adapter %s, wwpn=0x%016Lx)\n", | 1328 | ZFCP_LOG_NORMAL("port erp failed (adapter %s, wwpn=0x%016Lx)\n", |
1383 | zfcp_get_busid_by_port(port), port->wwpn); | 1329 | zfcp_get_busid_by_port(port), port->wwpn); |
1384 | |||
1385 | debug_text_event(port->adapter->erp_dbf, 2, "p_pfail"); | ||
1386 | debug_event(port->adapter->erp_dbf, 2, &port->wwpn, sizeof (wwn_t)); | ||
1387 | } | 1330 | } |
1388 | 1331 | ||
1389 | /* | 1332 | /* |
@@ -1393,17 +1336,14 @@ zfcp_erp_port_failed(struct zfcp_port *port) | |||
1393 | * | 1336 | * |
1394 | */ | 1337 | */ |
1395 | void | 1338 | void |
1396 | zfcp_erp_unit_failed(struct zfcp_unit *unit) | 1339 | zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref) |
1397 | { | 1340 | { |
1398 | zfcp_erp_modify_unit_status(unit, | 1341 | zfcp_erp_modify_unit_status(unit, id, ref, |
1399 | ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); | 1342 | ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); |
1400 | 1343 | ||
1401 | ZFCP_LOG_NORMAL("unit erp failed on unit 0x%016Lx on port 0x%016Lx " | 1344 | ZFCP_LOG_NORMAL("unit erp failed on unit 0x%016Lx on port 0x%016Lx " |
1402 | " on adapter %s\n", unit->fcp_lun, | 1345 | " on adapter %s\n", unit->fcp_lun, |
1403 | unit->port->wwpn, zfcp_get_busid_by_unit(unit)); | 1346 | unit->port->wwpn, zfcp_get_busid_by_unit(unit)); |
1404 | debug_text_event(unit->port->adapter->erp_dbf, 2, "u_ufail"); | ||
1405 | debug_event(unit->port->adapter->erp_dbf, 2, | ||
1406 | &unit->fcp_lun, sizeof (fcp_lun_t)); | ||
1407 | } | 1347 | } |
1408 | 1348 | ||
1409 | /* | 1349 | /* |
@@ -1427,10 +1367,6 @@ zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action, int result) | |||
1427 | struct zfcp_port *port = erp_action->port; | 1367 | struct zfcp_port *port = erp_action->port; |
1428 | struct zfcp_unit *unit = erp_action->unit; | 1368 | struct zfcp_unit *unit = erp_action->unit; |
1429 | 1369 | ||
1430 | debug_text_event(adapter->erp_dbf, 5, "a_stct_norm"); | ||
1431 | debug_event(adapter->erp_dbf, 5, &erp_action->action, sizeof (int)); | ||
1432 | debug_event(adapter->erp_dbf, 5, &result, sizeof (int)); | ||
1433 | |||
1434 | switch (erp_action->action) { | 1370 | switch (erp_action->action) { |
1435 | 1371 | ||
1436 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 1372 | case ZFCP_ERP_ACTION_REOPEN_UNIT: |
@@ -1457,15 +1393,14 @@ zfcp_erp_strategy_statechange(int action, | |||
1457 | struct zfcp_port *port, | 1393 | struct zfcp_port *port, |
1458 | struct zfcp_unit *unit, int retval) | 1394 | struct zfcp_unit *unit, int retval) |
1459 | { | 1395 | { |
1460 | debug_text_event(adapter->erp_dbf, 3, "a_stsc"); | ||
1461 | debug_event(adapter->erp_dbf, 3, &action, sizeof (int)); | ||
1462 | |||
1463 | switch (action) { | 1396 | switch (action) { |
1464 | 1397 | ||
1465 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: | 1398 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: |
1466 | if (zfcp_erp_strategy_statechange_detected(&adapter->status, | 1399 | if (zfcp_erp_strategy_statechange_detected(&adapter->status, |
1467 | status)) { | 1400 | status)) { |
1468 | zfcp_erp_adapter_reopen_internal(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); | 1401 | zfcp_erp_adapter_reopen_internal(adapter, |
1402 | ZFCP_STATUS_COMMON_ERP_FAILED, | ||
1403 | 67, NULL); | ||
1469 | retval = ZFCP_ERP_EXIT; | 1404 | retval = ZFCP_ERP_EXIT; |
1470 | } | 1405 | } |
1471 | break; | 1406 | break; |
@@ -1474,7 +1409,9 @@ zfcp_erp_strategy_statechange(int action, | |||
1474 | case ZFCP_ERP_ACTION_REOPEN_PORT: | 1409 | case ZFCP_ERP_ACTION_REOPEN_PORT: |
1475 | if (zfcp_erp_strategy_statechange_detected(&port->status, | 1410 | if (zfcp_erp_strategy_statechange_detected(&port->status, |
1476 | status)) { | 1411 | status)) { |
1477 | zfcp_erp_port_reopen_internal(port, ZFCP_STATUS_COMMON_ERP_FAILED); | 1412 | zfcp_erp_port_reopen_internal(port, |
1413 | ZFCP_STATUS_COMMON_ERP_FAILED, | ||
1414 | 68, NULL); | ||
1478 | retval = ZFCP_ERP_EXIT; | 1415 | retval = ZFCP_ERP_EXIT; |
1479 | } | 1416 | } |
1480 | break; | 1417 | break; |
@@ -1482,7 +1419,9 @@ zfcp_erp_strategy_statechange(int action, | |||
1482 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 1419 | case ZFCP_ERP_ACTION_REOPEN_UNIT: |
1483 | if (zfcp_erp_strategy_statechange_detected(&unit->status, | 1420 | if (zfcp_erp_strategy_statechange_detected(&unit->status, |
1484 | status)) { | 1421 | status)) { |
1485 | zfcp_erp_unit_reopen_internal(unit, ZFCP_STATUS_COMMON_ERP_FAILED); | 1422 | zfcp_erp_unit_reopen_internal(unit, |
1423 | ZFCP_STATUS_COMMON_ERP_FAILED, | ||
1424 | 69, NULL); | ||
1486 | retval = ZFCP_ERP_EXIT; | 1425 | retval = ZFCP_ERP_EXIT; |
1487 | } | 1426 | } |
1488 | break; | 1427 | break; |
@@ -1506,10 +1445,6 @@ zfcp_erp_strategy_statechange_detected(atomic_t * target_status, u32 erp_status) | |||
1506 | static int | 1445 | static int |
1507 | zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result) | 1446 | zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result) |
1508 | { | 1447 | { |
1509 | debug_text_event(unit->port->adapter->erp_dbf, 5, "u_stct"); | ||
1510 | debug_event(unit->port->adapter->erp_dbf, 5, &unit->fcp_lun, | ||
1511 | sizeof (fcp_lun_t)); | ||
1512 | |||
1513 | switch (result) { | 1448 | switch (result) { |
1514 | case ZFCP_ERP_SUCCEEDED : | 1449 | case ZFCP_ERP_SUCCEEDED : |
1515 | atomic_set(&unit->erp_counter, 0); | 1450 | atomic_set(&unit->erp_counter, 0); |
@@ -1518,7 +1453,7 @@ zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result) | |||
1518 | case ZFCP_ERP_FAILED : | 1453 | case ZFCP_ERP_FAILED : |
1519 | atomic_inc(&unit->erp_counter); | 1454 | atomic_inc(&unit->erp_counter); |
1520 | if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) | 1455 | if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) |
1521 | zfcp_erp_unit_failed(unit); | 1456 | zfcp_erp_unit_failed(unit, 21, NULL); |
1522 | break; | 1457 | break; |
1523 | case ZFCP_ERP_EXIT : | 1458 | case ZFCP_ERP_EXIT : |
1524 | /* nothing */ | 1459 | /* nothing */ |
@@ -1536,9 +1471,6 @@ zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result) | |||
1536 | static int | 1471 | static int |
1537 | zfcp_erp_strategy_check_port(struct zfcp_port *port, int result) | 1472 | zfcp_erp_strategy_check_port(struct zfcp_port *port, int result) |
1538 | { | 1473 | { |
1539 | debug_text_event(port->adapter->erp_dbf, 5, "p_stct"); | ||
1540 | debug_event(port->adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t)); | ||
1541 | |||
1542 | switch (result) { | 1474 | switch (result) { |
1543 | case ZFCP_ERP_SUCCEEDED : | 1475 | case ZFCP_ERP_SUCCEEDED : |
1544 | atomic_set(&port->erp_counter, 0); | 1476 | atomic_set(&port->erp_counter, 0); |
@@ -1547,7 +1479,7 @@ zfcp_erp_strategy_check_port(struct zfcp_port *port, int result) | |||
1547 | case ZFCP_ERP_FAILED : | 1479 | case ZFCP_ERP_FAILED : |
1548 | atomic_inc(&port->erp_counter); | 1480 | atomic_inc(&port->erp_counter); |
1549 | if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS) | 1481 | if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS) |
1550 | zfcp_erp_port_failed(port); | 1482 | zfcp_erp_port_failed(port, 22, NULL); |
1551 | break; | 1483 | break; |
1552 | case ZFCP_ERP_EXIT : | 1484 | case ZFCP_ERP_EXIT : |
1553 | /* nothing */ | 1485 | /* nothing */ |
@@ -1565,8 +1497,6 @@ zfcp_erp_strategy_check_port(struct zfcp_port *port, int result) | |||
1565 | static int | 1497 | static int |
1566 | zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, int result) | 1498 | zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, int result) |
1567 | { | 1499 | { |
1568 | debug_text_event(adapter->erp_dbf, 5, "a_stct"); | ||
1569 | |||
1570 | switch (result) { | 1500 | switch (result) { |
1571 | case ZFCP_ERP_SUCCEEDED : | 1501 | case ZFCP_ERP_SUCCEEDED : |
1572 | atomic_set(&adapter->erp_counter, 0); | 1502 | atomic_set(&adapter->erp_counter, 0); |
@@ -1575,7 +1505,7 @@ zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, int result) | |||
1575 | case ZFCP_ERP_FAILED : | 1505 | case ZFCP_ERP_FAILED : |
1576 | atomic_inc(&adapter->erp_counter); | 1506 | atomic_inc(&adapter->erp_counter); |
1577 | if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS) | 1507 | if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS) |
1578 | zfcp_erp_adapter_failed(adapter); | 1508 | zfcp_erp_adapter_failed(adapter, 23, NULL); |
1579 | break; | 1509 | break; |
1580 | case ZFCP_ERP_EXIT : | 1510 | case ZFCP_ERP_EXIT : |
1581 | /* nothing */ | 1511 | /* nothing */ |
@@ -1658,37 +1588,34 @@ zfcp_erp_strategy_followup_actions(int action, | |||
1658 | struct zfcp_port *port, | 1588 | struct zfcp_port *port, |
1659 | struct zfcp_unit *unit, int status) | 1589 | struct zfcp_unit *unit, int status) |
1660 | { | 1590 | { |
1661 | debug_text_event(adapter->erp_dbf, 5, "a_stfol"); | ||
1662 | debug_event(adapter->erp_dbf, 5, &action, sizeof (int)); | ||
1663 | |||
1664 | /* initiate follow-up actions depending on success of finished action */ | 1591 | /* initiate follow-up actions depending on success of finished action */ |
1665 | switch (action) { | 1592 | switch (action) { |
1666 | 1593 | ||
1667 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: | 1594 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: |
1668 | if (status == ZFCP_ERP_SUCCEEDED) | 1595 | if (status == ZFCP_ERP_SUCCEEDED) |
1669 | zfcp_erp_port_reopen_all_internal(adapter, 0); | 1596 | zfcp_erp_port_reopen_all_internal(adapter, 0, 70, NULL); |
1670 | else | 1597 | else |
1671 | zfcp_erp_adapter_reopen_internal(adapter, 0); | 1598 | zfcp_erp_adapter_reopen_internal(adapter, 0, 71, NULL); |
1672 | break; | 1599 | break; |
1673 | 1600 | ||
1674 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: | 1601 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: |
1675 | if (status == ZFCP_ERP_SUCCEEDED) | 1602 | if (status == ZFCP_ERP_SUCCEEDED) |
1676 | zfcp_erp_port_reopen_internal(port, 0); | 1603 | zfcp_erp_port_reopen_internal(port, 0, 72, NULL); |
1677 | else | 1604 | else |
1678 | zfcp_erp_adapter_reopen_internal(adapter, 0); | 1605 | zfcp_erp_adapter_reopen_internal(adapter, 0, 73, NULL); |
1679 | break; | 1606 | break; |
1680 | 1607 | ||
1681 | case ZFCP_ERP_ACTION_REOPEN_PORT: | 1608 | case ZFCP_ERP_ACTION_REOPEN_PORT: |
1682 | if (status == ZFCP_ERP_SUCCEEDED) | 1609 | if (status == ZFCP_ERP_SUCCEEDED) |
1683 | zfcp_erp_unit_reopen_all_internal(port, 0); | 1610 | zfcp_erp_unit_reopen_all_internal(port, 0, 74, NULL); |
1684 | else | 1611 | else |
1685 | zfcp_erp_port_forced_reopen_internal(port, 0); | 1612 | zfcp_erp_port_forced_reopen_internal(port, 0, 75, NULL); |
1686 | break; | 1613 | break; |
1687 | 1614 | ||
1688 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 1615 | case ZFCP_ERP_ACTION_REOPEN_UNIT: |
1689 | /* Nothing to do if status == ZFCP_ERP_SUCCEEDED */ | 1616 | /* Nothing to do if status == ZFCP_ERP_SUCCEEDED */ |
1690 | if (status != ZFCP_ERP_SUCCEEDED) | 1617 | if (status != ZFCP_ERP_SUCCEEDED) |
1691 | zfcp_erp_port_reopen_internal(unit->port, 0); | 1618 | zfcp_erp_port_reopen_internal(unit->port, 0, 76, NULL); |
1692 | break; | 1619 | break; |
1693 | } | 1620 | } |
1694 | 1621 | ||
@@ -1704,12 +1631,10 @@ zfcp_erp_strategy_check_queues(struct zfcp_adapter *adapter) | |||
1704 | read_lock(&adapter->erp_lock); | 1631 | read_lock(&adapter->erp_lock); |
1705 | if (list_empty(&adapter->erp_ready_head) && | 1632 | if (list_empty(&adapter->erp_ready_head) && |
1706 | list_empty(&adapter->erp_running_head)) { | 1633 | list_empty(&adapter->erp_running_head)) { |
1707 | debug_text_event(adapter->erp_dbf, 4, "a_cq_wake"); | ||
1708 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, | 1634 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, |
1709 | &adapter->status); | 1635 | &adapter->status); |
1710 | wake_up(&adapter->erp_done_wqh); | 1636 | wake_up(&adapter->erp_done_wqh); |
1711 | } else | 1637 | } |
1712 | debug_text_event(adapter->erp_dbf, 5, "a_cq_notempty"); | ||
1713 | read_unlock(&adapter->erp_lock); | 1638 | read_unlock(&adapter->erp_lock); |
1714 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); | 1639 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); |
1715 | 1640 | ||
@@ -1733,29 +1658,27 @@ zfcp_erp_wait(struct zfcp_adapter *adapter) | |||
1733 | return retval; | 1658 | return retval; |
1734 | } | 1659 | } |
1735 | 1660 | ||
1736 | void | 1661 | void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, u8 id, |
1737 | zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, | 1662 | void *ref, u32 mask, int set_or_clear) |
1738 | u32 mask, int set_or_clear) | ||
1739 | { | 1663 | { |
1740 | struct zfcp_port *port; | 1664 | struct zfcp_port *port; |
1741 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; | 1665 | u32 changed, common_mask = mask & ZFCP_COMMON_FLAGS; |
1742 | 1666 | ||
1743 | if (set_or_clear == ZFCP_SET) { | 1667 | if (set_or_clear == ZFCP_SET) { |
1744 | atomic_set_mask(mask, &adapter->status); | 1668 | changed = atomic_test_and_set_mask(mask, &adapter->status); |
1745 | debug_text_event(adapter->erp_dbf, 3, "a_mod_as_s"); | ||
1746 | } else { | 1669 | } else { |
1747 | atomic_clear_mask(mask, &adapter->status); | 1670 | changed = atomic_test_and_clear_mask(mask, &adapter->status); |
1748 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) | 1671 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) |
1749 | atomic_set(&adapter->erp_counter, 0); | 1672 | atomic_set(&adapter->erp_counter, 0); |
1750 | debug_text_event(adapter->erp_dbf, 3, "a_mod_as_c"); | ||
1751 | } | 1673 | } |
1752 | debug_event(adapter->erp_dbf, 3, &mask, sizeof (u32)); | 1674 | if (changed) |
1675 | zfcp_rec_dbf_event_adapter(id, ref, adapter); | ||
1753 | 1676 | ||
1754 | /* Deal with all underlying devices, only pass common_mask */ | 1677 | /* Deal with all underlying devices, only pass common_mask */ |
1755 | if (common_mask) | 1678 | if (common_mask) |
1756 | list_for_each_entry(port, &adapter->port_list_head, list) | 1679 | list_for_each_entry(port, &adapter->port_list_head, list) |
1757 | zfcp_erp_modify_port_status(port, common_mask, | 1680 | zfcp_erp_modify_port_status(port, id, ref, common_mask, |
1758 | set_or_clear); | 1681 | set_or_clear); |
1759 | } | 1682 | } |
1760 | 1683 | ||
1761 | /* | 1684 | /* |
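
The key change in the modify_*_status hunks is that a trace record is written only when the status word actually changed. A minimal userspace sketch of that pattern with C11 atomics; the helper names and the explicit "changed" computation are mine, since the exact semantics of the kernel's atomic_test_and_{set,clear}_mask helpers are not shown in this patch:

/* Sketch of "trace only on real change", not the zfcp implementation. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_SET   1
#define DEMO_CLEAR 0

static void demo_trace(uint8_t id, uint32_t status)
{
        printf("trace id=%u new_status=0x%x\n", id, status);
}

static void demo_modify_status(_Atomic uint32_t *status, uint8_t id,
                               uint32_t mask, int set_or_clear)
{
        uint32_t old;
        bool changed;

        if (set_or_clear == DEMO_SET) {
                old = atomic_fetch_or(status, mask);
                changed = (old & mask) != mask;  /* some bit was newly set */
        } else {
                old = atomic_fetch_and(status, ~mask);
                changed = (old & mask) != 0;     /* some bit was cleared */
        }
        if (changed)
                demo_trace(id, atomic_load(status));
}

int main(void)
{
        _Atomic uint32_t status = 0;

        demo_modify_status(&status, 24, 0x2, DEMO_SET);   /* traced: bit appears */
        demo_modify_status(&status, 24, 0x2, DEMO_SET);   /* silent: no change */
        demo_modify_status(&status, 24, 0x2, DEMO_CLEAR); /* traced: bit disappears */
        return 0;
}
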
@@ -1764,29 +1687,27 @@ zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, | |||
1764 | * purpose: sets the port and all underlying devices to ERP_FAILED | 1687 | * purpose: sets the port and all underlying devices to ERP_FAILED |
1765 | * | 1688 | * |
1766 | */ | 1689 | */ |
1767 | void | 1690 | void zfcp_erp_modify_port_status(struct zfcp_port *port, u8 id, void *ref, |
1768 | zfcp_erp_modify_port_status(struct zfcp_port *port, u32 mask, int set_or_clear) | 1691 | u32 mask, int set_or_clear) |
1769 | { | 1692 | { |
1770 | struct zfcp_unit *unit; | 1693 | struct zfcp_unit *unit; |
1771 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; | 1694 | u32 changed, common_mask = mask & ZFCP_COMMON_FLAGS; |
1772 | 1695 | ||
1773 | if (set_or_clear == ZFCP_SET) { | 1696 | if (set_or_clear == ZFCP_SET) { |
1774 | atomic_set_mask(mask, &port->status); | 1697 | changed = atomic_test_and_set_mask(mask, &port->status); |
1775 | debug_text_event(port->adapter->erp_dbf, 3, "p_mod_ps_s"); | ||
1776 | } else { | 1698 | } else { |
1777 | atomic_clear_mask(mask, &port->status); | 1699 | changed = atomic_test_and_clear_mask(mask, &port->status); |
1778 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) | 1700 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) |
1779 | atomic_set(&port->erp_counter, 0); | 1701 | atomic_set(&port->erp_counter, 0); |
1780 | debug_text_event(port->adapter->erp_dbf, 3, "p_mod_ps_c"); | ||
1781 | } | 1702 | } |
1782 | debug_event(port->adapter->erp_dbf, 3, &port->wwpn, sizeof (wwn_t)); | 1703 | if (changed) |
1783 | debug_event(port->adapter->erp_dbf, 3, &mask, sizeof (u32)); | 1704 | zfcp_rec_dbf_event_port(id, ref, port); |
1784 | 1705 | ||
1785 | /* Modify status of all underlying devices, only pass common mask */ | 1706 | /* Modify status of all underlying devices, only pass common mask */ |
1786 | if (common_mask) | 1707 | if (common_mask) |
1787 | list_for_each_entry(unit, &port->unit_list_head, list) | 1708 | list_for_each_entry(unit, &port->unit_list_head, list) |
1788 | zfcp_erp_modify_unit_status(unit, common_mask, | 1709 | zfcp_erp_modify_unit_status(unit, id, ref, common_mask, |
1789 | set_or_clear); | 1710 | set_or_clear); |
1790 | } | 1711 | } |
1791 | 1712 | ||
1792 | /* | 1713 | /* |
@@ -1795,22 +1716,21 @@ zfcp_erp_modify_port_status(struct zfcp_port *port, u32 mask, int set_or_clear) | |||
1795 | * purpose: sets the unit to ERP_FAILED | 1716 | * purpose: sets the unit to ERP_FAILED |
1796 | * | 1717 | * |
1797 | */ | 1718 | */ |
1798 | void | 1719 | void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u8 id, void *ref, |
1799 | zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u32 mask, int set_or_clear) | 1720 | u32 mask, int set_or_clear) |
1800 | { | 1721 | { |
1722 | u32 changed; | ||
1723 | |||
1801 | if (set_or_clear == ZFCP_SET) { | 1724 | if (set_or_clear == ZFCP_SET) { |
1802 | atomic_set_mask(mask, &unit->status); | 1725 | changed = atomic_test_and_set_mask(mask, &unit->status); |
1803 | debug_text_event(unit->port->adapter->erp_dbf, 3, "u_mod_us_s"); | ||
1804 | } else { | 1726 | } else { |
1805 | atomic_clear_mask(mask, &unit->status); | 1727 | changed = atomic_test_and_clear_mask(mask, &unit->status); |
1806 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) { | 1728 | if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) { |
1807 | atomic_set(&unit->erp_counter, 0); | 1729 | atomic_set(&unit->erp_counter, 0); |
1808 | } | 1730 | } |
1809 | debug_text_event(unit->port->adapter->erp_dbf, 3, "u_mod_us_c"); | ||
1810 | } | 1731 | } |
1811 | debug_event(unit->port->adapter->erp_dbf, 3, &unit->fcp_lun, | 1732 | if (changed) |
1812 | sizeof (fcp_lun_t)); | 1733 | zfcp_rec_dbf_event_unit(id, ref, unit); |
1813 | debug_event(unit->port->adapter->erp_dbf, 3, &mask, sizeof (u32)); | ||
1814 | } | 1734 | } |
1815 | 1735 | ||
1816 | /* | 1736 | /* |
@@ -1822,30 +1742,32 @@ zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u32 mask, int set_or_clear) | |||
1822 | * returns: 0 - initiated action successfully | 1742 | * returns: 0 - initiated action successfully |
1823 | * <0 - failed to initiate action | 1743 | * <0 - failed to initiate action |
1824 | */ | 1744 | */ |
1825 | int | 1745 | int zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, int clear_mask, |
1826 | zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, int clear_mask) | 1746 | u8 id, void *ref) |
1827 | { | 1747 | { |
1828 | int retval; | 1748 | int retval; |
1829 | unsigned long flags; | 1749 | unsigned long flags; |
1830 | 1750 | ||
1831 | read_lock_irqsave(&zfcp_data.config_lock, flags); | 1751 | read_lock_irqsave(&zfcp_data.config_lock, flags); |
1832 | write_lock(&adapter->erp_lock); | 1752 | write_lock(&adapter->erp_lock); |
1833 | retval = zfcp_erp_port_reopen_all_internal(adapter, clear_mask); | 1753 | retval = zfcp_erp_port_reopen_all_internal(adapter, clear_mask, id, |
1754 | ref); | ||
1834 | write_unlock(&adapter->erp_lock); | 1755 | write_unlock(&adapter->erp_lock); |
1835 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); | 1756 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); |
1836 | 1757 | ||
1837 | return retval; | 1758 | return retval; |
1838 | } | 1759 | } |
1839 | 1760 | ||
1840 | static int | 1761 | static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *adapter, |
1841 | zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *adapter, int clear_mask) | 1762 | int clear_mask, u8 id, void *ref) |
1842 | { | 1763 | { |
1843 | int retval = 0; | 1764 | int retval = 0; |
1844 | struct zfcp_port *port; | 1765 | struct zfcp_port *port; |
1845 | 1766 | ||
1846 | list_for_each_entry(port, &adapter->port_list_head, list) | 1767 | list_for_each_entry(port, &adapter->port_list_head, list) |
1847 | if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status)) | 1768 | if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status)) |
1848 | zfcp_erp_port_reopen_internal(port, clear_mask); | 1769 | zfcp_erp_port_reopen_internal(port, clear_mask, id, |
1770 | ref); | ||
1849 | 1771 | ||
1850 | return retval; | 1772 | return retval; |
1851 | } | 1773 | } |
@@ -1857,14 +1779,14 @@ zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *adapter, int clear_mask) | |||
1857 | * | 1779 | * |
1858 | * returns: FIXME | 1780 | * returns: FIXME |
1859 | */ | 1781 | */ |
1860 | static int | 1782 | static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *port, |
1861 | zfcp_erp_unit_reopen_all_internal(struct zfcp_port *port, int clear_mask) | 1783 | int clear_mask, u8 id, void *ref) |
1862 | { | 1784 | { |
1863 | int retval = 0; | 1785 | int retval = 0; |
1864 | struct zfcp_unit *unit; | 1786 | struct zfcp_unit *unit; |
1865 | 1787 | ||
1866 | list_for_each_entry(unit, &port->unit_list_head, list) | 1788 | list_for_each_entry(unit, &port->unit_list_head, list) |
1867 | zfcp_erp_unit_reopen_internal(unit, clear_mask); | 1789 | zfcp_erp_unit_reopen_internal(unit, clear_mask, id, ref); |
1868 | 1790 | ||
1869 | return retval; | 1791 | return retval; |
1870 | } | 1792 | } |
@@ -1892,10 +1814,6 @@ zfcp_erp_adapter_strategy(struct zfcp_erp_action *erp_action) | |||
1892 | else | 1814 | else |
1893 | retval = zfcp_erp_adapter_strategy_open(erp_action); | 1815 | retval = zfcp_erp_adapter_strategy_open(erp_action); |
1894 | 1816 | ||
1895 | debug_text_event(adapter->erp_dbf, 3, "a_ast/ret"); | ||
1896 | debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int)); | ||
1897 | debug_event(adapter->erp_dbf, 3, &retval, sizeof (int)); | ||
1898 | |||
1899 | if (retval == ZFCP_ERP_FAILED) { | 1817 | if (retval == ZFCP_ERP_FAILED) { |
1900 | ZFCP_LOG_INFO("Waiting to allow the adapter %s " | 1818 | ZFCP_LOG_INFO("Waiting to allow the adapter %s " |
1901 | "to recover itself\n", | 1819 | "to recover itself\n", |
@@ -2021,7 +1939,6 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action) | |||
2021 | zfcp_get_busid_by_adapter(adapter)); | 1939 | zfcp_get_busid_by_adapter(adapter)); |
2022 | goto failed_qdio_establish; | 1940 | goto failed_qdio_establish; |
2023 | } | 1941 | } |
2024 | debug_text_event(adapter->erp_dbf, 3, "qdio_est"); | ||
2025 | 1942 | ||
2026 | if (qdio_activate(adapter->ccw_device, 0) != 0) { | 1943 | if (qdio_activate(adapter->ccw_device, 0) != 0) { |
2027 | ZFCP_LOG_INFO("error: activation of QDIO queues failed " | 1944 | ZFCP_LOG_INFO("error: activation of QDIO queues failed " |
@@ -2029,7 +1946,6 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action) | |||
2029 | zfcp_get_busid_by_adapter(adapter)); | 1946 | zfcp_get_busid_by_adapter(adapter)); |
2030 | goto failed_qdio_activate; | 1947 | goto failed_qdio_activate; |
2031 | } | 1948 | } |
2032 | debug_text_event(adapter->erp_dbf, 3, "qdio_act"); | ||
2033 | 1949 | ||
2034 | /* | 1950 | /* |
2035 | * put buffers into response queue, | 1951 | * put buffers into response queue, |
@@ -2077,11 +1993,9 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action) | |||
2077 | /* NOP */ | 1993 | /* NOP */ |
2078 | 1994 | ||
2079 | failed_qdio_activate: | 1995 | failed_qdio_activate: |
2080 | debug_text_event(adapter->erp_dbf, 3, "qdio_down1a"); | ||
2081 | while (qdio_shutdown(adapter->ccw_device, | 1996 | while (qdio_shutdown(adapter->ccw_device, |
2082 | QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) | 1997 | QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) |
2083 | ssleep(1); | 1998 | ssleep(1); |
2084 | debug_text_event(adapter->erp_dbf, 3, "qdio_down1b"); | ||
2085 | 1999 | ||
2086 | failed_qdio_establish: | 2000 | failed_qdio_establish: |
2087 | failed_sanity: | 2001 | failed_sanity: |
@@ -2127,14 +2041,12 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action) | |||
2127 | write_unlock_irq(&adapter->erp_lock); | 2041 | write_unlock_irq(&adapter->erp_lock); |
2128 | if (zfcp_fsf_exchange_config_data(erp_action)) { | 2042 | if (zfcp_fsf_exchange_config_data(erp_action)) { |
2129 | retval = ZFCP_ERP_FAILED; | 2043 | retval = ZFCP_ERP_FAILED; |
2130 | debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf"); | ||
2131 | ZFCP_LOG_INFO("error: initiation of exchange of " | 2044 | ZFCP_LOG_INFO("error: initiation of exchange of " |
2132 | "configuration data failed for " | 2045 | "configuration data failed for " |
2133 | "adapter %s\n", | 2046 | "adapter %s\n", |
2134 | zfcp_get_busid_by_adapter(adapter)); | 2047 | zfcp_get_busid_by_adapter(adapter)); |
2135 | break; | 2048 | break; |
2136 | } | 2049 | } |
2137 | debug_text_event(adapter->erp_dbf, 6, "a_fstx_xok"); | ||
2138 | ZFCP_LOG_DEBUG("Xchange underway\n"); | 2050 | ZFCP_LOG_DEBUG("Xchange underway\n"); |
2139 | 2051 | ||
2140 | /* | 2052 | /* |
@@ -2150,7 +2062,9 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action) | |||
2150 | * _must_ be the one belonging to the 'exchange config | 2062 | * _must_ be the one belonging to the 'exchange config |
2151 | * data' request. | 2063 | * data' request. |
2152 | */ | 2064 | */ |
2065 | zfcp_rec_dbf_event_thread(6, adapter, 1); | ||
2153 | down(&adapter->erp_ready_sem); | 2066 | down(&adapter->erp_ready_sem); |
2067 | zfcp_rec_dbf_event_thread(7, adapter, 1); | ||
2154 | if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { | 2068 | if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { |
2155 | ZFCP_LOG_INFO("error: exchange of configuration data " | 2069 | ZFCP_LOG_INFO("error: exchange of configuration data " |
2156 | "for adapter %s timed out\n", | 2070 | "for adapter %s timed out\n", |
@@ -2198,16 +2112,15 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) | |||
2198 | 2112 | ||
2199 | ret = zfcp_fsf_exchange_port_data(erp_action); | 2113 | ret = zfcp_fsf_exchange_port_data(erp_action); |
2200 | if (ret == -EOPNOTSUPP) { | 2114 | if (ret == -EOPNOTSUPP) { |
2201 | debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp"); | ||
2202 | return ZFCP_ERP_SUCCEEDED; | 2115 | return ZFCP_ERP_SUCCEEDED; |
2203 | } else if (ret) { | 2116 | } else if (ret) { |
2204 | debug_text_event(adapter->erp_dbf, 3, "a_xport_failed"); | ||
2205 | return ZFCP_ERP_FAILED; | 2117 | return ZFCP_ERP_FAILED; |
2206 | } | 2118 | } |
2207 | debug_text_event(adapter->erp_dbf, 6, "a_xport_ok"); | ||
2208 | 2119 | ||
2209 | ret = ZFCP_ERP_SUCCEEDED; | 2120 | ret = ZFCP_ERP_SUCCEEDED; |
2121 | zfcp_rec_dbf_event_thread(8, adapter, 1); | ||
2210 | down(&adapter->erp_ready_sem); | 2122 | down(&adapter->erp_ready_sem); |
2123 | zfcp_rec_dbf_event_thread(9, adapter, 1); | ||
2211 | if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { | 2124 | if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { |
2212 | ZFCP_LOG_INFO("error: exchange port data timed out (adapter " | 2125 | ZFCP_LOG_INFO("error: exchange port data timed out (adapter " |
2213 | "%s)\n", zfcp_get_busid_by_adapter(adapter)); | 2126 | "%s)\n", zfcp_get_busid_by_adapter(adapter)); |
@@ -2261,7 +2174,6 @@ zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action) | |||
2261 | { | 2174 | { |
2262 | int retval = ZFCP_ERP_FAILED; | 2175 | int retval = ZFCP_ERP_FAILED; |
2263 | struct zfcp_port *port = erp_action->port; | 2176 | struct zfcp_port *port = erp_action->port; |
2264 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
2265 | 2177 | ||
2266 | switch (erp_action->step) { | 2178 | switch (erp_action->step) { |
2267 | 2179 | ||
@@ -2298,11 +2210,6 @@ zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action) | |||
2298 | break; | 2210 | break; |
2299 | } | 2211 | } |
2300 | 2212 | ||
2301 | debug_text_event(adapter->erp_dbf, 3, "p_pfst/ret"); | ||
2302 | debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof (wwn_t)); | ||
2303 | debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int)); | ||
2304 | debug_event(adapter->erp_dbf, 3, &retval, sizeof (int)); | ||
2305 | |||
2306 | return retval; | 2213 | return retval; |
2307 | } | 2214 | } |
2308 | 2215 | ||
@@ -2320,7 +2227,6 @@ zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action) | |||
2320 | { | 2227 | { |
2321 | int retval = ZFCP_ERP_FAILED; | 2228 | int retval = ZFCP_ERP_FAILED; |
2322 | struct zfcp_port *port = erp_action->port; | 2229 | struct zfcp_port *port = erp_action->port; |
2323 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
2324 | 2230 | ||
2325 | switch (erp_action->step) { | 2231 | switch (erp_action->step) { |
2326 | 2232 | ||
@@ -2353,11 +2259,6 @@ zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action) | |||
2353 | retval = zfcp_erp_port_strategy_open(erp_action); | 2259 | retval = zfcp_erp_port_strategy_open(erp_action); |
2354 | 2260 | ||
2355 | out: | 2261 | out: |
2356 | debug_text_event(adapter->erp_dbf, 3, "p_pst/ret"); | ||
2357 | debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof (wwn_t)); | ||
2358 | debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int)); | ||
2359 | debug_event(adapter->erp_dbf, 3, &retval, sizeof (int)); | ||
2360 | |||
2361 | return retval; | 2262 | return retval; |
2362 | } | 2263 | } |
2363 | 2264 | ||
@@ -2395,7 +2296,7 @@ zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *erp_action) | |||
2395 | port->wwpn, | 2296 | port->wwpn, |
2396 | zfcp_get_busid_by_adapter(adapter), | 2297 | zfcp_get_busid_by_adapter(adapter), |
2397 | adapter->peer_wwpn); | 2298 | adapter->peer_wwpn); |
2398 | zfcp_erp_port_failed(port); | 2299 | zfcp_erp_port_failed(port, 25, NULL); |
2399 | retval = ZFCP_ERP_FAILED; | 2300 | retval = ZFCP_ERP_FAILED; |
2400 | break; | 2301 | break; |
2401 | } | 2302 | } |
@@ -2421,8 +2322,8 @@ zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *erp_action) | |||
2421 | /* nameserver port may live again */ | 2322 | /* nameserver port may live again */ |
2422 | atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, | 2323 | atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, |
2423 | &adapter->nameserver_port->status); | 2324 | &adapter->nameserver_port->status); |
2424 | if (zfcp_erp_port_reopen(adapter->nameserver_port, 0) | 2325 | if (zfcp_erp_port_reopen(adapter->nameserver_port, 0, |
2425 | >= 0) { | 2326 | 77, erp_action) >= 0) { |
2426 | erp_action->step = | 2327 | erp_action->step = |
2427 | ZFCP_ERP_STEP_NAMESERVER_OPEN; | 2328 | ZFCP_ERP_STEP_NAMESERVER_OPEN; |
2428 | retval = ZFCP_ERP_CONTINUES; | 2329 | retval = ZFCP_ERP_CONTINUES; |
@@ -2453,7 +2354,7 @@ zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *erp_action) | |||
2453 | "for port 0x%016Lx " | 2354 | "for port 0x%016Lx " |
2454 | "(misconfigured WWPN?)\n", | 2355 | "(misconfigured WWPN?)\n", |
2455 | port->wwpn); | 2356 | port->wwpn); |
2456 | zfcp_erp_port_failed(port); | 2357 | zfcp_erp_port_failed(port, 26, NULL); |
2457 | retval = ZFCP_ERP_EXIT; | 2358 | retval = ZFCP_ERP_EXIT; |
2458 | } else { | 2359 | } else { |
2459 | ZFCP_LOG_DEBUG("nameserver look-up failed for " | 2360 | ZFCP_LOG_DEBUG("nameserver look-up failed for " |
@@ -2549,17 +2450,12 @@ zfcp_erp_port_strategy_open_nameserver_wakeup(struct zfcp_erp_action | |||
2549 | read_lock_irqsave(&adapter->erp_lock, flags); | 2450 | read_lock_irqsave(&adapter->erp_lock, flags); |
2550 | list_for_each_entry_safe(erp_action, tmp, &adapter->erp_running_head, | 2451 | list_for_each_entry_safe(erp_action, tmp, &adapter->erp_running_head, |
2551 | list) { | 2452 | list) { |
2552 | debug_text_event(adapter->erp_dbf, 4, "p_pstnsw_n"); | ||
2553 | debug_event(adapter->erp_dbf, 4, &erp_action->port->wwpn, | ||
2554 | sizeof (wwn_t)); | ||
2555 | if (erp_action->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) { | 2453 | if (erp_action->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) { |
2556 | debug_text_event(adapter->erp_dbf, 3, "p_pstnsw_w"); | ||
2557 | debug_event(adapter->erp_dbf, 3, | ||
2558 | &erp_action->port->wwpn, sizeof (wwn_t)); | ||
2559 | if (atomic_test_mask( | 2454 | if (atomic_test_mask( |
2560 | ZFCP_STATUS_COMMON_ERP_FAILED, | 2455 | ZFCP_STATUS_COMMON_ERP_FAILED, |
2561 | &adapter->nameserver_port->status)) | 2456 | &adapter->nameserver_port->status)) |
2562 | zfcp_erp_port_failed(erp_action->port); | 2457 | zfcp_erp_port_failed(erp_action->port, 27, |
2458 | NULL); | ||
2563 | zfcp_erp_action_ready(erp_action); | 2459 | zfcp_erp_action_ready(erp_action); |
2564 | } | 2460 | } |
2565 | } | 2461 | } |
@@ -2580,26 +2476,18 @@ static int | |||
2580 | zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *erp_action) | 2476 | zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *erp_action) |
2581 | { | 2477 | { |
2582 | int retval; | 2478 | int retval; |
2583 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
2584 | struct zfcp_port *port = erp_action->port; | ||
2585 | 2479 | ||
2586 | retval = zfcp_fsf_close_physical_port(erp_action); | 2480 | retval = zfcp_fsf_close_physical_port(erp_action); |
2587 | if (retval == -ENOMEM) { | 2481 | if (retval == -ENOMEM) { |
2588 | debug_text_event(adapter->erp_dbf, 5, "o_pfstc_nomem"); | ||
2589 | debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t)); | ||
2590 | retval = ZFCP_ERP_NOMEM; | 2482 | retval = ZFCP_ERP_NOMEM; |
2591 | goto out; | 2483 | goto out; |
2592 | } | 2484 | } |
2593 | erp_action->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING; | 2485 | erp_action->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING; |
2594 | if (retval != 0) { | 2486 | if (retval != 0) { |
2595 | debug_text_event(adapter->erp_dbf, 5, "o_pfstc_cpf"); | ||
2596 | debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t)); | ||
2597 | /* could not send 'open', fail */ | 2487 | /* could not send 'open', fail */ |
2598 | retval = ZFCP_ERP_FAILED; | 2488 | retval = ZFCP_ERP_FAILED; |
2599 | goto out; | 2489 | goto out; |
2600 | } | 2490 | } |
2601 | debug_text_event(adapter->erp_dbf, 6, "o_pfstc_cpok"); | ||
2602 | debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t)); | ||
2603 | retval = ZFCP_ERP_CONTINUES; | 2491 | retval = ZFCP_ERP_CONTINUES; |
2604 | out: | 2492 | out: |
2605 | return retval; | 2493 | return retval; |
@@ -2609,10 +2497,6 @@ static int | |||
2609 | zfcp_erp_port_strategy_clearstati(struct zfcp_port *port) | 2497 | zfcp_erp_port_strategy_clearstati(struct zfcp_port *port) |
2610 | { | 2498 | { |
2611 | int retval = 0; | 2499 | int retval = 0; |
2612 | struct zfcp_adapter *adapter = port->adapter; | ||
2613 | |||
2614 | debug_text_event(adapter->erp_dbf, 5, "p_pstclst"); | ||
2615 | debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t)); | ||
2616 | 2500 | ||
2617 | atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING | | 2501 | atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING | |
2618 | ZFCP_STATUS_COMMON_CLOSING | | 2502 | ZFCP_STATUS_COMMON_CLOSING | |
@@ -2636,26 +2520,18 @@ static int | |||
2636 | zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action) | 2520 | zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action) |
2637 | { | 2521 | { |
2638 | int retval; | 2522 | int retval; |
2639 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
2640 | struct zfcp_port *port = erp_action->port; | ||
2641 | 2523 | ||
2642 | retval = zfcp_fsf_close_port(erp_action); | 2524 | retval = zfcp_fsf_close_port(erp_action); |
2643 | if (retval == -ENOMEM) { | 2525 | if (retval == -ENOMEM) { |
2644 | debug_text_event(adapter->erp_dbf, 5, "p_pstc_nomem"); | ||
2645 | debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t)); | ||
2646 | retval = ZFCP_ERP_NOMEM; | 2526 | retval = ZFCP_ERP_NOMEM; |
2647 | goto out; | 2527 | goto out; |
2648 | } | 2528 | } |
2649 | erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING; | 2529 | erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING; |
2650 | if (retval != 0) { | 2530 | if (retval != 0) { |
2651 | debug_text_event(adapter->erp_dbf, 5, "p_pstc_cpf"); | ||
2652 | debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t)); | ||
2653 | /* could not send 'close', fail */ | 2531 | /* could not send 'close', fail */ |
2654 | retval = ZFCP_ERP_FAILED; | 2532 | retval = ZFCP_ERP_FAILED; |
2655 | goto out; | 2533 | goto out; |
2656 | } | 2534 | } |
2657 | debug_text_event(adapter->erp_dbf, 6, "p_pstc_cpok"); | ||
2658 | debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t)); | ||
2659 | retval = ZFCP_ERP_CONTINUES; | 2535 | retval = ZFCP_ERP_CONTINUES; |
2660 | out: | 2536 | out: |
2661 | return retval; | 2537 | return retval; |
@@ -2673,26 +2549,18 @@ static int | |||
2673 | zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action) | 2549 | zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action) |
2674 | { | 2550 | { |
2675 | int retval; | 2551 | int retval; |
2676 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
2677 | struct zfcp_port *port = erp_action->port; | ||
2678 | 2552 | ||
2679 | retval = zfcp_fsf_open_port(erp_action); | 2553 | retval = zfcp_fsf_open_port(erp_action); |
2680 | if (retval == -ENOMEM) { | 2554 | if (retval == -ENOMEM) { |
2681 | debug_text_event(adapter->erp_dbf, 5, "p_psto_nomem"); | ||
2682 | debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t)); | ||
2683 | retval = ZFCP_ERP_NOMEM; | 2555 | retval = ZFCP_ERP_NOMEM; |
2684 | goto out; | 2556 | goto out; |
2685 | } | 2557 | } |
2686 | erp_action->step = ZFCP_ERP_STEP_PORT_OPENING; | 2558 | erp_action->step = ZFCP_ERP_STEP_PORT_OPENING; |
2687 | if (retval != 0) { | 2559 | if (retval != 0) { |
2688 | debug_text_event(adapter->erp_dbf, 5, "p_psto_opf"); | ||
2689 | debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t)); | ||
2690 | /* could not send 'open', fail */ | 2560 | /* could not send 'open', fail */ |
2691 | retval = ZFCP_ERP_FAILED; | 2561 | retval = ZFCP_ERP_FAILED; |
2692 | goto out; | 2562 | goto out; |
2693 | } | 2563 | } |
2694 | debug_text_event(adapter->erp_dbf, 6, "p_psto_opok"); | ||
2695 | debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t)); | ||
2696 | retval = ZFCP_ERP_CONTINUES; | 2564 | retval = ZFCP_ERP_CONTINUES; |
2697 | out: | 2565 | out: |
2698 | return retval; | 2566 | return retval; |
@@ -2710,26 +2578,18 @@ static int | |||
2710 | zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *erp_action) | 2578 | zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *erp_action) |
2711 | { | 2579 | { |
2712 | int retval; | 2580 | int retval; |
2713 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
2714 | struct zfcp_port *port = erp_action->port; | ||
2715 | 2581 | ||
2716 | retval = zfcp_ns_gid_pn_request(erp_action); | 2582 | retval = zfcp_ns_gid_pn_request(erp_action); |
2717 | if (retval == -ENOMEM) { | 2583 | if (retval == -ENOMEM) { |
2718 | debug_text_event(adapter->erp_dbf, 5, "p_pstn_nomem"); | ||
2719 | debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t)); | ||
2720 | retval = ZFCP_ERP_NOMEM; | 2584 | retval = ZFCP_ERP_NOMEM; |
2721 | goto out; | 2585 | goto out; |
2722 | } | 2586 | } |
2723 | erp_action->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; | 2587 | erp_action->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; |
2724 | if (retval != 0) { | 2588 | if (retval != 0) { |
2725 | debug_text_event(adapter->erp_dbf, 5, "p_pstn_ref"); | ||
2726 | debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t)); | ||
2727 | /* could not send nameserver request, fail */ | 2589 | /* could not send nameserver request, fail */ |
2728 | retval = ZFCP_ERP_FAILED; | 2590 | retval = ZFCP_ERP_FAILED; |
2729 | goto out; | 2591 | goto out; |
2730 | } | 2592 | } |
2731 | debug_text_event(adapter->erp_dbf, 6, "p_pstn_reok"); | ||
2732 | debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof (wwn_t)); | ||
2733 | retval = ZFCP_ERP_CONTINUES; | 2593 | retval = ZFCP_ERP_CONTINUES; |
2734 | out: | 2594 | out: |
2735 | return retval; | 2595 | return retval; |
@@ -2750,7 +2610,6 @@ zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action) | |||
2750 | { | 2610 | { |
2751 | int retval = ZFCP_ERP_FAILED; | 2611 | int retval = ZFCP_ERP_FAILED; |
2752 | struct zfcp_unit *unit = erp_action->unit; | 2612 | struct zfcp_unit *unit = erp_action->unit; |
2753 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
2754 | 2613 | ||
2755 | switch (erp_action->step) { | 2614 | switch (erp_action->step) { |
2756 | 2615 | ||
@@ -2797,10 +2656,6 @@ zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action) | |||
2797 | break; | 2656 | break; |
2798 | } | 2657 | } |
2799 | 2658 | ||
2800 | debug_text_event(adapter->erp_dbf, 3, "u_ust/ret"); | ||
2801 | debug_event(adapter->erp_dbf, 3, &unit->fcp_lun, sizeof (fcp_lun_t)); | ||
2802 | debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int)); | ||
2803 | debug_event(adapter->erp_dbf, 3, &retval, sizeof (int)); | ||
2804 | return retval; | 2659 | return retval; |
2805 | } | 2660 | } |
2806 | 2661 | ||
@@ -2808,10 +2663,6 @@ static int | |||
2808 | zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit) | 2663 | zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit) |
2809 | { | 2664 | { |
2810 | int retval = 0; | 2665 | int retval = 0; |
2811 | struct zfcp_adapter *adapter = unit->port->adapter; | ||
2812 | |||
2813 | debug_text_event(adapter->erp_dbf, 5, "u_ustclst"); | ||
2814 | debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t)); | ||
2815 | 2666 | ||
2816 | atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING | | 2667 | atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING | |
2817 | ZFCP_STATUS_COMMON_CLOSING | | 2668 | ZFCP_STATUS_COMMON_CLOSING | |
@@ -2835,28 +2686,18 @@ static int | |||
2835 | zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action) | 2686 | zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action) |
2836 | { | 2687 | { |
2837 | int retval; | 2688 | int retval; |
2838 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
2839 | struct zfcp_unit *unit = erp_action->unit; | ||
2840 | 2689 | ||
2841 | retval = zfcp_fsf_close_unit(erp_action); | 2690 | retval = zfcp_fsf_close_unit(erp_action); |
2842 | if (retval == -ENOMEM) { | 2691 | if (retval == -ENOMEM) { |
2843 | debug_text_event(adapter->erp_dbf, 5, "u_ustc_nomem"); | ||
2844 | debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, | ||
2845 | sizeof (fcp_lun_t)); | ||
2846 | retval = ZFCP_ERP_NOMEM; | 2692 | retval = ZFCP_ERP_NOMEM; |
2847 | goto out; | 2693 | goto out; |
2848 | } | 2694 | } |
2849 | erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING; | 2695 | erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING; |
2850 | if (retval != 0) { | 2696 | if (retval != 0) { |
2851 | debug_text_event(adapter->erp_dbf, 5, "u_ustc_cuf"); | ||
2852 | debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, | ||
2853 | sizeof (fcp_lun_t)); | ||
2854 | /* could not send 'close', fail */ | 2697 | /* could not send 'close', fail */ |
2855 | retval = ZFCP_ERP_FAILED; | 2698 | retval = ZFCP_ERP_FAILED; |
2856 | goto out; | 2699 | goto out; |
2857 | } | 2700 | } |
2858 | debug_text_event(adapter->erp_dbf, 6, "u_ustc_cuok"); | ||
2859 | debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof (fcp_lun_t)); | ||
2860 | retval = ZFCP_ERP_CONTINUES; | 2701 | retval = ZFCP_ERP_CONTINUES; |
2861 | 2702 | ||
2862 | out: | 2703 | out: |
@@ -2875,28 +2716,18 @@ static int | |||
2875 | zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action) | 2716 | zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action) |
2876 | { | 2717 | { |
2877 | int retval; | 2718 | int retval; |
2878 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
2879 | struct zfcp_unit *unit = erp_action->unit; | ||
2880 | 2719 | ||
2881 | retval = zfcp_fsf_open_unit(erp_action); | 2720 | retval = zfcp_fsf_open_unit(erp_action); |
2882 | if (retval == -ENOMEM) { | 2721 | if (retval == -ENOMEM) { |
2883 | debug_text_event(adapter->erp_dbf, 5, "u_usto_nomem"); | ||
2884 | debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, | ||
2885 | sizeof (fcp_lun_t)); | ||
2886 | retval = ZFCP_ERP_NOMEM; | 2722 | retval = ZFCP_ERP_NOMEM; |
2887 | goto out; | 2723 | goto out; |
2888 | } | 2724 | } |
2889 | erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING; | 2725 | erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING; |
2890 | if (retval != 0) { | 2726 | if (retval != 0) { |
2891 | debug_text_event(adapter->erp_dbf, 5, "u_usto_ouf"); | ||
2892 | debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, | ||
2893 | sizeof (fcp_lun_t)); | ||
2894 | /* could not send 'open', fail */ | 2727 | /* could not send 'open', fail */ |
2895 | retval = ZFCP_ERP_FAILED; | 2728 | retval = ZFCP_ERP_FAILED; |
2896 | goto out; | 2729 | goto out; |
2897 | } | 2730 | } |
2898 | debug_text_event(adapter->erp_dbf, 6, "u_usto_ouok"); | ||
2899 | debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof (fcp_lun_t)); | ||
2900 | retval = ZFCP_ERP_CONTINUES; | 2731 | retval = ZFCP_ERP_CONTINUES; |
2901 | out: | 2732 | out: |
2902 | return retval; | 2733 | return retval; |
@@ -2918,14 +2749,12 @@ void zfcp_erp_start_timer(struct zfcp_fsf_req *fsf_req) | |||
2918 | * | 2749 | * |
2919 | * returns: | 2750 | * returns: |
2920 | */ | 2751 | */ |
2921 | static int | 2752 | static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, |
2922 | zfcp_erp_action_enqueue(int action, | 2753 | struct zfcp_port *port, |
2923 | struct zfcp_adapter *adapter, | 2754 | struct zfcp_unit *unit, u8 id, void *ref) |
2924 | struct zfcp_port *port, struct zfcp_unit *unit) | ||
2925 | { | 2755 | { |
2926 | int retval = 1; | 2756 | int retval = 1, need = want; |
2927 | struct zfcp_erp_action *erp_action = NULL; | 2757 | struct zfcp_erp_action *erp_action = NULL; |
2928 | int stronger_action = 0; | ||
2929 | u32 status = 0; | 2758 | u32 status = 0; |
2930 | 2759 | ||
2931 | /* | 2760 | /* |
@@ -2944,17 +2773,11 @@ zfcp_erp_action_enqueue(int action, | |||
2944 | &adapter->status)) | 2773 | &adapter->status)) |
2945 | return -EIO; | 2774 | return -EIO; |
2946 | 2775 | ||
2947 | debug_event(adapter->erp_dbf, 4, &action, sizeof (int)); | ||
2948 | /* check whether we really need this */ | 2776 | /* check whether we really need this */ |
2949 | switch (action) { | 2777 | switch (want) { |
2950 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 2778 | case ZFCP_ERP_ACTION_REOPEN_UNIT: |
2951 | if (atomic_test_mask | 2779 | if (atomic_test_mask |
2952 | (ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) { | 2780 | (ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) { |
2953 | debug_text_event(adapter->erp_dbf, 4, "u_actenq_drp"); | ||
2954 | debug_event(adapter->erp_dbf, 4, &port->wwpn, | ||
2955 | sizeof (wwn_t)); | ||
2956 | debug_event(adapter->erp_dbf, 4, &unit->fcp_lun, | ||
2957 | sizeof (fcp_lun_t)); | ||
2958 | goto out; | 2781 | goto out; |
2959 | } | 2782 | } |
2960 | if (!atomic_test_mask | 2783 | if (!atomic_test_mask |
@@ -2964,18 +2787,13 @@ zfcp_erp_action_enqueue(int action, | |||
2964 | goto out; | 2787 | goto out; |
2965 | } | 2788 | } |
2966 | if (!atomic_test_mask | 2789 | if (!atomic_test_mask |
2967 | (ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) { | 2790 | (ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) |
2968 | stronger_action = ZFCP_ERP_ACTION_REOPEN_PORT; | 2791 | need = ZFCP_ERP_ACTION_REOPEN_PORT; |
2969 | unit = NULL; | ||
2970 | } | ||
2971 | /* fall through !!! */ | 2792 | /* fall through !!! */ |
2972 | 2793 | ||
2973 | case ZFCP_ERP_ACTION_REOPEN_PORT: | 2794 | case ZFCP_ERP_ACTION_REOPEN_PORT: |
2974 | if (atomic_test_mask | 2795 | if (atomic_test_mask |
2975 | (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) { | 2796 | (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) { |
2976 | debug_text_event(adapter->erp_dbf, 4, "p_actenq_drp"); | ||
2977 | debug_event(adapter->erp_dbf, 4, &port->wwpn, | ||
2978 | sizeof (wwn_t)); | ||
2979 | goto out; | 2797 | goto out; |
2980 | } | 2798 | } |
2981 | /* fall through !!! */ | 2799 | /* fall through !!! */ |
@@ -2987,15 +2805,9 @@ zfcp_erp_action_enqueue(int action, | |||
2987 | ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) { | 2805 | ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) { |
2988 | ZFCP_LOG_INFO("dropped erp action %i (port " | 2806 | ZFCP_LOG_INFO("dropped erp action %i (port " |
2989 | "0x%016Lx, action in use: %i)\n", | 2807 | "0x%016Lx, action in use: %i)\n", |
2990 | action, port->wwpn, | 2808 | want, port->wwpn, |
2991 | port->erp_action.action); | 2809 | port->erp_action.action); |
2992 | debug_text_event(adapter->erp_dbf, 4, | 2810 | } |
2993 | "pf_actenq_drp"); | ||
2994 | } else | ||
2995 | debug_text_event(adapter->erp_dbf, 4, | ||
2996 | "pf_actenq_drpcp"); | ||
2997 | debug_event(adapter->erp_dbf, 4, &port->wwpn, | ||
2998 | sizeof (wwn_t)); | ||
2999 | goto out; | 2811 | goto out; |
3000 | } | 2812 | } |
3001 | if (!atomic_test_mask | 2813 | if (!atomic_test_mask |
@@ -3005,46 +2817,36 @@ zfcp_erp_action_enqueue(int action, | |||
3005 | goto out; | 2817 | goto out; |
3006 | } | 2818 | } |
3007 | if (!atomic_test_mask | 2819 | if (!atomic_test_mask |
3008 | (ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) { | 2820 | (ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) |
3009 | stronger_action = ZFCP_ERP_ACTION_REOPEN_ADAPTER; | 2821 | need = ZFCP_ERP_ACTION_REOPEN_ADAPTER; |
3010 | port = NULL; | ||
3011 | } | ||
3012 | /* fall through !!! */ | 2822 | /* fall through !!! */ |
3013 | 2823 | ||
3014 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: | 2824 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: |
3015 | if (atomic_test_mask | 2825 | if (atomic_test_mask |
3016 | (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)) { | 2826 | (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)) { |
3017 | debug_text_event(adapter->erp_dbf, 4, "a_actenq_drp"); | ||
3018 | goto out; | 2827 | goto out; |
3019 | } | 2828 | } |
3020 | break; | 2829 | break; |
3021 | 2830 | ||
3022 | default: | 2831 | default: |
3023 | debug_text_exception(adapter->erp_dbf, 1, "a_actenq_bug"); | ||
3024 | debug_event(adapter->erp_dbf, 1, &action, sizeof (int)); | ||
3025 | ZFCP_LOG_NORMAL("bug: unknown erp action requested " | 2832 | ZFCP_LOG_NORMAL("bug: unknown erp action requested " |
3026 | "on adapter %s (action=%d)\n", | 2833 | "on adapter %s (action=%d)\n", |
3027 | zfcp_get_busid_by_adapter(adapter), action); | 2834 | zfcp_get_busid_by_adapter(adapter), want); |
3028 | goto out; | 2835 | goto out; |
3029 | } | 2836 | } |
3030 | 2837 | ||
3031 | /* check whether we need something stronger first */ | 2838 | /* check whether we need something stronger first */ |
3032 | if (stronger_action) { | 2839 | if (need) { |
3033 | debug_text_event(adapter->erp_dbf, 4, "a_actenq_str"); | ||
3034 | debug_event(adapter->erp_dbf, 4, &stronger_action, | ||
3035 | sizeof (int)); | ||
3036 | ZFCP_LOG_DEBUG("stronger erp action %d needed before " | 2840 | ZFCP_LOG_DEBUG("stronger erp action %d needed before " |
3037 | "erp action %d on adapter %s\n", | 2841 | "erp action %d on adapter %s\n", |
3038 | stronger_action, action, | 2842 | need, want, zfcp_get_busid_by_adapter(adapter)); |
3039 | zfcp_get_busid_by_adapter(adapter)); | ||
3040 | action = stronger_action; | ||
3041 | } | 2843 | } |
3042 | 2844 | ||
3043 | /* mark adapter to have some error recovery pending */ | 2845 | /* mark adapter to have some error recovery pending */ |
3044 | atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); | 2846 | atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); |
3045 | 2847 | ||
3046 | /* setup error recovery action */ | 2848 | /* setup error recovery action */ |
3047 | switch (action) { | 2849 | switch (need) { |
3048 | 2850 | ||
3049 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 2851 | case ZFCP_ERP_ACTION_REOPEN_UNIT: |
3050 | zfcp_unit_get(unit); | 2852 | zfcp_unit_get(unit); |
@@ -3077,13 +2879,11 @@ zfcp_erp_action_enqueue(int action, | |||
3077 | break; | 2879 | break; |
3078 | } | 2880 | } |
3079 | 2881 | ||
3080 | debug_text_event(adapter->erp_dbf, 4, "a_actenq"); | ||
3081 | |||
3082 | memset(erp_action, 0, sizeof (struct zfcp_erp_action)); | 2882 | memset(erp_action, 0, sizeof (struct zfcp_erp_action)); |
3083 | erp_action->adapter = adapter; | 2883 | erp_action->adapter = adapter; |
3084 | erp_action->port = port; | 2884 | erp_action->port = port; |
3085 | erp_action->unit = unit; | 2885 | erp_action->unit = unit; |
3086 | erp_action->action = action; | 2886 | erp_action->action = need; |
3087 | erp_action->status = status; | 2887 | erp_action->status = status; |
3088 | 2888 | ||
3089 | ++adapter->erp_total_count; | 2889 | ++adapter->erp_total_count; |
@@ -3091,8 +2891,11 @@ zfcp_erp_action_enqueue(int action, | |||
3091 | /* finally put it into 'ready' queue and kick erp thread */ | 2891 | /* finally put it into 'ready' queue and kick erp thread */ |
3092 | list_add_tail(&erp_action->list, &adapter->erp_ready_head); | 2892 | list_add_tail(&erp_action->list, &adapter->erp_ready_head); |
3093 | up(&adapter->erp_ready_sem); | 2893 | up(&adapter->erp_ready_sem); |
2894 | zfcp_rec_dbf_event_thread(1, adapter, 0); | ||
3094 | retval = 0; | 2895 | retval = 0; |
3095 | out: | 2896 | out: |
2897 | zfcp_rec_dbf_event_trigger(id, ref, want, need, erp_action, | ||
2898 | adapter, port, unit); | ||
3096 | return retval; | 2899 | return retval; |
3097 | } | 2900 | } |
3098 | 2901 | ||
@@ -3108,9 +2911,9 @@ zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) | |||
3108 | erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM; | 2911 | erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM; |
3109 | } | 2912 | } |
3110 | 2913 | ||
3111 | debug_text_event(adapter->erp_dbf, 4, "a_actdeq"); | ||
3112 | debug_event(adapter->erp_dbf, 4, &erp_action->action, sizeof (int)); | ||
3113 | list_del(&erp_action->list); | 2914 | list_del(&erp_action->list); |
2915 | zfcp_rec_dbf_event_action(144, erp_action); | ||
2916 | |||
3114 | switch (erp_action->action) { | 2917 | switch (erp_action->action) { |
3115 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 2918 | case ZFCP_ERP_ACTION_REOPEN_UNIT: |
3116 | atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, | 2919 | atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, |
@@ -3215,7 +3018,6 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) | |||
3215 | { | 3018 | { |
3216 | struct zfcp_port *port; | 3019 | struct zfcp_port *port; |
3217 | 3020 | ||
3218 | debug_text_event(adapter->erp_dbf, 5, "a_actab"); | ||
3219 | if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)) | 3021 | if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)) |
3220 | zfcp_erp_action_dismiss(&adapter->erp_action); | 3022 | zfcp_erp_action_dismiss(&adapter->erp_action); |
3221 | else | 3023 | else |
@@ -3226,10 +3028,7 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) | |||
3226 | static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) | 3028 | static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) |
3227 | { | 3029 | { |
3228 | struct zfcp_unit *unit; | 3030 | struct zfcp_unit *unit; |
3229 | struct zfcp_adapter *adapter = port->adapter; | ||
3230 | 3031 | ||
3231 | debug_text_event(adapter->erp_dbf, 5, "p_actab"); | ||
3232 | debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof (wwn_t)); | ||
3233 | if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) | 3032 | if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) |
3234 | zfcp_erp_action_dismiss(&port->erp_action); | 3033 | zfcp_erp_action_dismiss(&port->erp_action); |
3235 | else | 3034 | else |
@@ -3239,92 +3038,60 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) | |||
3239 | 3038 | ||
3240 | static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) | 3039 | static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) |
3241 | { | 3040 | { |
3242 | struct zfcp_adapter *adapter = unit->port->adapter; | ||
3243 | |||
3244 | debug_text_event(adapter->erp_dbf, 5, "u_actab"); | ||
3245 | debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t)); | ||
3246 | if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) | 3041 | if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) |
3247 | zfcp_erp_action_dismiss(&unit->erp_action); | 3042 | zfcp_erp_action_dismiss(&unit->erp_action); |
3248 | } | 3043 | } |
3249 | 3044 | ||
3250 | static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) | 3045 | static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) |
3251 | { | 3046 | { |
3252 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
3253 | |||
3254 | debug_text_event(adapter->erp_dbf, 6, "a_toru"); | ||
3255 | debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof (int)); | ||
3256 | list_move(&erp_action->list, &erp_action->adapter->erp_running_head); | 3047 | list_move(&erp_action->list, &erp_action->adapter->erp_running_head); |
3048 | zfcp_rec_dbf_event_action(145, erp_action); | ||
3257 | } | 3049 | } |
3258 | 3050 | ||
3259 | static void zfcp_erp_action_to_ready(struct zfcp_erp_action *erp_action) | 3051 | static void zfcp_erp_action_to_ready(struct zfcp_erp_action *erp_action) |
3260 | { | 3052 | { |
3261 | struct zfcp_adapter *adapter = erp_action->adapter; | ||
3262 | |||
3263 | debug_text_event(adapter->erp_dbf, 6, "a_tore"); | ||
3264 | debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof (int)); | ||
3265 | list_move(&erp_action->list, &erp_action->adapter->erp_ready_head); | 3053 | list_move(&erp_action->list, &erp_action->adapter->erp_ready_head); |
3054 | zfcp_rec_dbf_event_action(146, erp_action); | ||
3266 | } | 3055 | } |
3267 | 3056 | ||
3268 | void | 3057 | void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref) |
3269 | zfcp_erp_port_boxed(struct zfcp_port *port) | ||
3270 | { | 3058 | { |
3271 | struct zfcp_adapter *adapter = port->adapter; | ||
3272 | unsigned long flags; | 3059 | unsigned long flags; |
3273 | 3060 | ||
3274 | debug_text_event(adapter->erp_dbf, 3, "p_access_boxed"); | ||
3275 | debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof(wwn_t)); | ||
3276 | read_lock_irqsave(&zfcp_data.config_lock, flags); | 3061 | read_lock_irqsave(&zfcp_data.config_lock, flags); |
3277 | zfcp_erp_modify_port_status(port, | 3062 | zfcp_erp_modify_port_status(port, id, ref, |
3278 | ZFCP_STATUS_COMMON_ACCESS_BOXED, | 3063 | ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); |
3279 | ZFCP_SET); | ||
3280 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); | 3064 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); |
3281 | zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED); | 3065 | zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); |
3282 | } | 3066 | } |
3283 | 3067 | ||
3284 | void | 3068 | void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref) |
3285 | zfcp_erp_unit_boxed(struct zfcp_unit *unit) | ||
3286 | { | 3069 | { |
3287 | struct zfcp_adapter *adapter = unit->port->adapter; | 3070 | zfcp_erp_modify_unit_status(unit, id, ref, |
3288 | 3071 | ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); | |
3289 | debug_text_event(adapter->erp_dbf, 3, "u_access_boxed"); | 3072 | zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); |
3290 | debug_event(adapter->erp_dbf, 3, &unit->fcp_lun, sizeof(fcp_lun_t)); | ||
3291 | zfcp_erp_modify_unit_status(unit, | ||
3292 | ZFCP_STATUS_COMMON_ACCESS_BOXED, | ||
3293 | ZFCP_SET); | ||
3294 | zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED); | ||
3295 | } | 3073 | } |
3296 | 3074 | ||
3297 | void | 3075 | void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref) |
3298 | zfcp_erp_port_access_denied(struct zfcp_port *port) | ||
3299 | { | 3076 | { |
3300 | struct zfcp_adapter *adapter = port->adapter; | ||
3301 | unsigned long flags; | 3077 | unsigned long flags; |
3302 | 3078 | ||
3303 | debug_text_event(adapter->erp_dbf, 3, "p_access_denied"); | ||
3304 | debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof(wwn_t)); | ||
3305 | read_lock_irqsave(&zfcp_data.config_lock, flags); | 3079 | read_lock_irqsave(&zfcp_data.config_lock, flags); |
3306 | zfcp_erp_modify_port_status(port, | 3080 | zfcp_erp_modify_port_status(port, id, ref, |
3307 | ZFCP_STATUS_COMMON_ERP_FAILED | | 3081 | ZFCP_STATUS_COMMON_ERP_FAILED | |
3308 | ZFCP_STATUS_COMMON_ACCESS_DENIED, | 3082 | ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); |
3309 | ZFCP_SET); | ||
3310 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); | 3083 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); |
3311 | } | 3084 | } |
3312 | 3085 | ||
3313 | void | 3086 | void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, u8 id, void *ref) |
3314 | zfcp_erp_unit_access_denied(struct zfcp_unit *unit) | ||
3315 | { | 3087 | { |
3316 | struct zfcp_adapter *adapter = unit->port->adapter; | 3088 | zfcp_erp_modify_unit_status(unit, id, ref, |
3317 | 3089 | ZFCP_STATUS_COMMON_ERP_FAILED | | |
3318 | debug_text_event(adapter->erp_dbf, 3, "u_access_denied"); | 3090 | ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); |
3319 | debug_event(adapter->erp_dbf, 3, &unit->fcp_lun, sizeof(fcp_lun_t)); | ||
3320 | zfcp_erp_modify_unit_status(unit, | ||
3321 | ZFCP_STATUS_COMMON_ERP_FAILED | | ||
3322 | ZFCP_STATUS_COMMON_ACCESS_DENIED, | ||
3323 | ZFCP_SET); | ||
3324 | } | 3091 | } |
3325 | 3092 | ||
3326 | void | 3093 | void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id, |
3327 | zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter) | 3094 | void *ref) |
3328 | { | 3095 | { |
3329 | struct zfcp_port *port; | 3096 | struct zfcp_port *port; |
3330 | unsigned long flags; | 3097 | unsigned long flags; |
@@ -3332,54 +3099,43 @@ zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter) | |||
3332 | if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) | 3099 | if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) |
3333 | return; | 3100 | return; |
3334 | 3101 | ||
3335 | debug_text_event(adapter->erp_dbf, 3, "a_access_recover"); | ||
3336 | debug_event(adapter->erp_dbf, 3, zfcp_get_busid_by_adapter(adapter), 8); | ||
3337 | |||
3338 | read_lock_irqsave(&zfcp_data.config_lock, flags); | 3102 | read_lock_irqsave(&zfcp_data.config_lock, flags); |
3339 | if (adapter->nameserver_port) | 3103 | if (adapter->nameserver_port) |
3340 | zfcp_erp_port_access_changed(adapter->nameserver_port); | 3104 | zfcp_erp_port_access_changed(adapter->nameserver_port, id, ref); |
3341 | list_for_each_entry(port, &adapter->port_list_head, list) | 3105 | list_for_each_entry(port, &adapter->port_list_head, list) |
3342 | if (port != adapter->nameserver_port) | 3106 | if (port != adapter->nameserver_port) |
3343 | zfcp_erp_port_access_changed(port); | 3107 | zfcp_erp_port_access_changed(port, id, ref); |
3344 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); | 3108 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); |
3345 | } | 3109 | } |
3346 | 3110 | ||
3347 | void | 3111 | void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id, void *ref) |
3348 | zfcp_erp_port_access_changed(struct zfcp_port *port) | ||
3349 | { | 3112 | { |
3350 | struct zfcp_adapter *adapter = port->adapter; | 3113 | struct zfcp_adapter *adapter = port->adapter; |
3351 | struct zfcp_unit *unit; | 3114 | struct zfcp_unit *unit; |
3352 | 3115 | ||
3353 | debug_text_event(adapter->erp_dbf, 3, "p_access_recover"); | ||
3354 | debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof(wwn_t)); | ||
3355 | |||
3356 | if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, | 3116 | if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, |
3357 | &port->status) && | 3117 | &port->status) && |
3358 | !atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED, | 3118 | !atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED, |
3359 | &port->status)) { | 3119 | &port->status)) { |
3360 | if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status)) | 3120 | if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status)) |
3361 | list_for_each_entry(unit, &port->unit_list_head, list) | 3121 | list_for_each_entry(unit, &port->unit_list_head, list) |
3362 | zfcp_erp_unit_access_changed(unit); | 3122 | zfcp_erp_unit_access_changed(unit, id, ref); |
3363 | return; | 3123 | return; |
3364 | } | 3124 | } |
3365 | 3125 | ||
3366 | ZFCP_LOG_NORMAL("reopen of port 0x%016Lx on adapter %s " | 3126 | ZFCP_LOG_NORMAL("reopen of port 0x%016Lx on adapter %s " |
3367 | "(due to ACT update)\n", | 3127 | "(due to ACT update)\n", |
3368 | port->wwpn, zfcp_get_busid_by_adapter(adapter)); | 3128 | port->wwpn, zfcp_get_busid_by_adapter(adapter)); |
3369 | if (zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED) != 0) | 3129 | if (zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref)) |
3370 | ZFCP_LOG_NORMAL("failed reopen of port" | 3130 | ZFCP_LOG_NORMAL("failed reopen of port" |
3371 | "(adapter %s, wwpn=0x%016Lx)\n", | 3131 | "(adapter %s, wwpn=0x%016Lx)\n", |
3372 | zfcp_get_busid_by_adapter(adapter), port->wwpn); | 3132 | zfcp_get_busid_by_adapter(adapter), port->wwpn); |
3373 | } | 3133 | } |
3374 | 3134 | ||
3375 | void | 3135 | void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, u8 id, void *ref) |
3376 | zfcp_erp_unit_access_changed(struct zfcp_unit *unit) | ||
3377 | { | 3136 | { |
3378 | struct zfcp_adapter *adapter = unit->port->adapter; | 3137 | struct zfcp_adapter *adapter = unit->port->adapter; |
3379 | 3138 | ||
3380 | debug_text_event(adapter->erp_dbf, 3, "u_access_recover"); | ||
3381 | debug_event(adapter->erp_dbf, 3, &unit->fcp_lun, sizeof(fcp_lun_t)); | ||
3382 | |||
3383 | if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, | 3139 | if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, |
3384 | &unit->status) && | 3140 | &unit->status) && |
3385 | !atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED, | 3141 | !atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED, |
@@ -3390,7 +3146,7 @@ zfcp_erp_unit_access_changed(struct zfcp_unit *unit) | |||
3390 | " on adapter %s (due to ACT update)\n", | 3146 | " on adapter %s (due to ACT update)\n", |
3391 | unit->fcp_lun, unit->port->wwpn, | 3147 | unit->fcp_lun, unit->port->wwpn, |
3392 | zfcp_get_busid_by_adapter(adapter)); | 3148 | zfcp_get_busid_by_adapter(adapter)); |
3393 | if (zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED) != 0) | 3149 | if (zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref)) |
3394 | ZFCP_LOG_NORMAL("failed reopen of unit (adapter %s, " | 3150 | ZFCP_LOG_NORMAL("failed reopen of unit (adapter %s, " |
3395 | "wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n", | 3151 | "wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n", |
3396 | zfcp_get_busid_by_adapter(adapter), | 3152 | zfcp_get_busid_by_adapter(adapter), |
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 06b1079b7f3d..6abf178fda5d 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h | |||
@@ -131,22 +131,25 @@ extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *, int); | |||
131 | extern struct fc_function_template zfcp_transport_functions; | 131 | extern struct fc_function_template zfcp_transport_functions; |
132 | 132 | ||
133 | /******************************** ERP ****************************************/ | 133 | /******************************** ERP ****************************************/ |
134 | extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int); | 134 | extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u8, void *, |
135 | extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int); | 135 | u32, int); |
136 | extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int); | 136 | extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, u8, void *); |
137 | extern void zfcp_erp_adapter_failed(struct zfcp_adapter *); | 137 | extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, u8, void *); |
138 | 138 | extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, u8, void *); | |
139 | extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int); | 139 | |
140 | extern int zfcp_erp_port_reopen(struct zfcp_port *, int); | 140 | extern void zfcp_erp_modify_port_status(struct zfcp_port *, u8, void *, u32, |
141 | extern int zfcp_erp_port_shutdown(struct zfcp_port *, int); | 141 | int); |
142 | extern int zfcp_erp_port_forced_reopen(struct zfcp_port *, int); | 142 | extern int zfcp_erp_port_reopen(struct zfcp_port *, int, u8, void *); |
143 | extern void zfcp_erp_port_failed(struct zfcp_port *); | 143 | extern int zfcp_erp_port_shutdown(struct zfcp_port *, int, u8, void *); |
144 | extern int zfcp_erp_port_reopen_all(struct zfcp_adapter *, int); | 144 | extern int zfcp_erp_port_forced_reopen(struct zfcp_port *, int, u8, void *); |
145 | 145 | extern void zfcp_erp_port_failed(struct zfcp_port *, u8, void *); | |
146 | extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u32, int); | 146 | extern int zfcp_erp_port_reopen_all(struct zfcp_adapter *, int, u8, void *); |
147 | extern int zfcp_erp_unit_reopen(struct zfcp_unit *, int); | 147 | |
148 | extern int zfcp_erp_unit_shutdown(struct zfcp_unit *, int); | 148 | extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u8, void *, u32, |
149 | extern void zfcp_erp_unit_failed(struct zfcp_unit *); | 149 | int); |
150 | extern int zfcp_erp_unit_reopen(struct zfcp_unit *, int, u8, void *); | ||
151 | extern int zfcp_erp_unit_shutdown(struct zfcp_unit *, int, u8, void *); | ||
152 | extern void zfcp_erp_unit_failed(struct zfcp_unit *, u8, void *); | ||
150 | 153 | ||
151 | extern int zfcp_erp_thread_setup(struct zfcp_adapter *); | 154 | extern int zfcp_erp_thread_setup(struct zfcp_adapter *); |
152 | extern int zfcp_erp_thread_kill(struct zfcp_adapter *); | 155 | extern int zfcp_erp_thread_kill(struct zfcp_adapter *); |
@@ -155,15 +158,25 @@ extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); | |||
155 | 158 | ||
156 | extern int zfcp_test_link(struct zfcp_port *); | 159 | extern int zfcp_test_link(struct zfcp_port *); |
157 | 160 | ||
158 | extern void zfcp_erp_port_boxed(struct zfcp_port *); | 161 | extern void zfcp_erp_port_boxed(struct zfcp_port *, u8 id, void *ref); |
159 | extern void zfcp_erp_unit_boxed(struct zfcp_unit *); | 162 | extern void zfcp_erp_unit_boxed(struct zfcp_unit *, u8 id, void *ref); |
160 | extern void zfcp_erp_port_access_denied(struct zfcp_port *); | 163 | extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8 id, void *ref); |
161 | extern void zfcp_erp_unit_access_denied(struct zfcp_unit *); | 164 | extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8 id, void *ref); |
162 | extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *); | 165 | extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *); |
163 | extern void zfcp_erp_port_access_changed(struct zfcp_port *); | 166 | extern void zfcp_erp_port_access_changed(struct zfcp_port *, u8, void *); |
164 | extern void zfcp_erp_unit_access_changed(struct zfcp_unit *); | 167 | extern void zfcp_erp_unit_access_changed(struct zfcp_unit *, u8, void *); |
165 | 168 | ||
166 | /******************************** AUX ****************************************/ | 169 | /******************************** AUX ****************************************/ |
170 | extern void zfcp_rec_dbf_event_thread(u8 id, struct zfcp_adapter *adapter, | ||
171 | int lock); | ||
172 | extern void zfcp_rec_dbf_event_adapter(u8 id, void *ref, struct zfcp_adapter *); | ||
173 | extern void zfcp_rec_dbf_event_port(u8 id, void *ref, struct zfcp_port *port); | ||
174 | extern void zfcp_rec_dbf_event_unit(u8 id, void *ref, struct zfcp_unit *unit); | ||
175 | extern void zfcp_rec_dbf_event_trigger(u8 id, void *ref, u8 want, u8 need, | ||
176 | void *action, struct zfcp_adapter *, | ||
177 | struct zfcp_port *, struct zfcp_unit *); | ||
178 | extern void zfcp_rec_dbf_event_action(u8 id, struct zfcp_erp_action *); | ||
179 | |||
167 | extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *); | 180 | extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *); |
168 | extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *, | 181 | extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *, |
169 | struct fsf_status_read_buffer *); | 182 | struct fsf_status_read_buffer *); |
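
A similar sketch, assuming the zfcp driver context, of how the zfcp_rec_dbf_event_* hooks declared above are driven when an ERP action changes state. The wrapper name is hypothetical; the body and the trace id 145 mirror zfcp_erp_action_to_running() as changed in this patch.

    static void example_action_to_running(struct zfcp_erp_action *erp_action)
    {
            /* move the action onto the adapter's running queue ... */
            list_move(&erp_action->list,
                      &erp_action->adapter->erp_running_head);
            /* ... and record the transition in the recovery trace */
            zfcp_rec_dbf_event_action(145, erp_action);
    }
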
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 0dff05840ee2..7c3f02816e95 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -46,7 +46,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *); | |||
46 | static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *); | 46 | static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *); |
47 | static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *); | 47 | static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *); |
48 | static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *); | 48 | static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *); |
49 | static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *, | 49 | static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *, u8, |
50 | struct fsf_link_down_info *); | 50 | struct fsf_link_down_info *); |
51 | static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); | 51 | static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); |
52 | 52 | ||
@@ -284,37 +284,6 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req) | |||
284 | goto skip_protstatus; | 284 | goto skip_protstatus; |
285 | } | 285 | } |
286 | 286 | ||
287 | /* log additional information provided by FSF (if any) */ | ||
288 | if (likely(qtcb->header.log_length)) { | ||
289 | /* do not trust them ;-) */ | ||
290 | if (unlikely(qtcb->header.log_start > | ||
291 | sizeof(struct fsf_qtcb))) { | ||
292 | ZFCP_LOG_NORMAL | ||
293 | ("bug: ULP (FSF logging) log data starts " | ||
294 | "beyond end of packet header. Ignored. " | ||
295 | "(start=%i, size=%li)\n", | ||
296 | qtcb->header.log_start, | ||
297 | sizeof(struct fsf_qtcb)); | ||
298 | goto forget_log; | ||
299 | } | ||
300 | if (unlikely((size_t) (qtcb->header.log_start + | ||
301 | qtcb->header.log_length) > | ||
302 | sizeof(struct fsf_qtcb))) { | ||
303 | ZFCP_LOG_NORMAL("bug: ULP (FSF logging) log data ends " | ||
304 | "beyond end of packet header. Ignored. " | ||
305 | "(start=%i, length=%i, size=%li)\n", | ||
306 | qtcb->header.log_start, | ||
307 | qtcb->header.log_length, | ||
308 | sizeof(struct fsf_qtcb)); | ||
309 | goto forget_log; | ||
310 | } | ||
311 | ZFCP_LOG_TRACE("ULP log data: \n"); | ||
312 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, | ||
313 | (char *) qtcb + qtcb->header.log_start, | ||
314 | qtcb->header.log_length); | ||
315 | } | ||
316 | forget_log: | ||
317 | |||
318 | /* evaluate FSF Protocol Status */ | 287 | /* evaluate FSF Protocol Status */ |
319 | switch (qtcb->prefix.prot_status) { | 288 | switch (qtcb->prefix.prot_status) { |
320 | 289 | ||
@@ -329,7 +298,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req) | |||
329 | zfcp_get_busid_by_adapter(adapter), | 298 | zfcp_get_busid_by_adapter(adapter), |
330 | prot_status_qual->version_error.fsf_version, | 299 | prot_status_qual->version_error.fsf_version, |
331 | ZFCP_QTCB_VERSION); | 300 | ZFCP_QTCB_VERSION); |
332 | zfcp_erp_adapter_shutdown(adapter, 0); | 301 | zfcp_erp_adapter_shutdown(adapter, 0, 117, fsf_req); |
333 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 302 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
334 | break; | 303 | break; |
335 | 304 | ||
@@ -340,7 +309,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req) | |||
340 | qtcb->prefix.req_seq_no, | 309 | qtcb->prefix.req_seq_no, |
341 | zfcp_get_busid_by_adapter(adapter), | 310 | zfcp_get_busid_by_adapter(adapter), |
342 | prot_status_qual->sequence_error.exp_req_seq_no); | 311 | prot_status_qual->sequence_error.exp_req_seq_no); |
343 | zfcp_erp_adapter_reopen(adapter, 0); | 312 | zfcp_erp_adapter_reopen(adapter, 0, 98, fsf_req); |
344 | fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY; | 313 | fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY; |
345 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 314 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
346 | break; | 315 | break; |
@@ -351,7 +320,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req) | |||
351 | "that used on adapter %s. " | 320 | "that used on adapter %s. " |
352 | "Stopping all operations on this adapter.\n", | 321 | "Stopping all operations on this adapter.\n", |
353 | zfcp_get_busid_by_adapter(adapter)); | 322 | zfcp_get_busid_by_adapter(adapter)); |
354 | zfcp_erp_adapter_shutdown(adapter, 0); | 323 | zfcp_erp_adapter_shutdown(adapter, 0, 118, fsf_req); |
355 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 324 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
356 | break; | 325 | break; |
357 | 326 | ||
@@ -368,14 +337,15 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req) | |||
368 | *(unsigned long long*) | 337 | *(unsigned long long*) |
369 | (&qtcb->bottom.support.req_handle), | 338 | (&qtcb->bottom.support.req_handle), |
370 | zfcp_get_busid_by_adapter(adapter)); | 339 | zfcp_get_busid_by_adapter(adapter)); |
371 | zfcp_erp_adapter_shutdown(adapter, 0); | 340 | zfcp_erp_adapter_shutdown(adapter, 0, 78, fsf_req); |
372 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 341 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
373 | break; | 342 | break; |
374 | 343 | ||
375 | case FSF_PROT_LINK_DOWN: | 344 | case FSF_PROT_LINK_DOWN: |
376 | zfcp_fsf_link_down_info_eval(adapter, | 345 | zfcp_fsf_link_down_info_eval(fsf_req, 37, |
377 | &prot_status_qual->link_down_info); | 346 | &prot_status_qual->link_down_info); |
378 | zfcp_erp_adapter_reopen(adapter, 0); | 347 | /* FIXME: reopening adapter now? better wait for link up */ |
348 | zfcp_erp_adapter_reopen(adapter, 0, 79, fsf_req); | ||
379 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 349 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
380 | break; | 350 | break; |
381 | 351 | ||
@@ -385,12 +355,13 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req) | |||
385 | "Re-starting operations on this adapter.\n", | 355 | "Re-starting operations on this adapter.\n", |
386 | zfcp_get_busid_by_adapter(adapter)); | 356 | zfcp_get_busid_by_adapter(adapter)); |
387 | /* All ports should be marked as ready to run again */ | 357 | /* All ports should be marked as ready to run again */ |
388 | zfcp_erp_modify_adapter_status(adapter, | 358 | zfcp_erp_modify_adapter_status(adapter, 28, NULL, |
389 | ZFCP_STATUS_COMMON_RUNNING, | 359 | ZFCP_STATUS_COMMON_RUNNING, |
390 | ZFCP_SET); | 360 | ZFCP_SET); |
391 | zfcp_erp_adapter_reopen(adapter, | 361 | zfcp_erp_adapter_reopen(adapter, |
392 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 362 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
393 | | ZFCP_STATUS_COMMON_ERP_FAILED); | 363 | | ZFCP_STATUS_COMMON_ERP_FAILED, |
364 | 99, fsf_req); | ||
394 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 365 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
395 | break; | 366 | break; |
396 | 367 | ||
@@ -400,7 +371,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req) | |||
400 | "Restarting all operations on this " | 371 | "Restarting all operations on this " |
401 | "adapter.\n", | 372 | "adapter.\n", |
402 | zfcp_get_busid_by_adapter(adapter)); | 373 | zfcp_get_busid_by_adapter(adapter)); |
403 | zfcp_erp_adapter_reopen(adapter, 0); | 374 | zfcp_erp_adapter_reopen(adapter, 0, 100, fsf_req); |
404 | fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY; | 375 | fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY; |
405 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 376 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
406 | break; | 377 | break; |
@@ -413,7 +384,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req) | |||
413 | "(debug info 0x%x).\n", | 384 | "(debug info 0x%x).\n", |
414 | zfcp_get_busid_by_adapter(adapter), | 385 | zfcp_get_busid_by_adapter(adapter), |
415 | qtcb->prefix.prot_status); | 386 | qtcb->prefix.prot_status); |
416 | zfcp_erp_adapter_shutdown(adapter, 0); | 387 | zfcp_erp_adapter_shutdown(adapter, 0, 119, fsf_req); |
417 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 388 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
418 | } | 389 | } |
419 | 390 | ||
@@ -452,7 +423,7 @@ zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *fsf_req) | |||
452 | "(debug info 0x%x).\n", | 423 | "(debug info 0x%x).\n", |
453 | zfcp_get_busid_by_adapter(fsf_req->adapter), | 424 | zfcp_get_busid_by_adapter(fsf_req->adapter), |
454 | fsf_req->qtcb->header.fsf_command); | 425 | fsf_req->qtcb->header.fsf_command); |
455 | zfcp_erp_adapter_shutdown(fsf_req->adapter, 0); | 426 | zfcp_erp_adapter_shutdown(fsf_req->adapter, 0, 120, fsf_req); |
456 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 427 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
457 | break; | 428 | break; |
458 | 429 | ||
@@ -506,7 +477,7 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req) | |||
506 | "problem on the adapter %s " | 477 | "problem on the adapter %s " |
507 | "Stopping all operations on this adapter. ", | 478 | "Stopping all operations on this adapter. ", |
508 | zfcp_get_busid_by_adapter(fsf_req->adapter)); | 479 | zfcp_get_busid_by_adapter(fsf_req->adapter)); |
509 | zfcp_erp_adapter_shutdown(fsf_req->adapter, 0); | 480 | zfcp_erp_adapter_shutdown(fsf_req->adapter, 0, 121, fsf_req); |
510 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 481 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
511 | break; | 482 | break; |
512 | case FSF_SQ_ULP_PROGRAMMING_ERROR: | 483 | case FSF_SQ_ULP_PROGRAMMING_ERROR: |
@@ -537,9 +508,11 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req) | |||
537 | * zfcp_fsf_link_down_info_eval - evaluate link down information block | 508 | * zfcp_fsf_link_down_info_eval - evaluate link down information block |
538 | */ | 509 | */ |
539 | static void | 510 | static void |
540 | zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter, | 511 | zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *fsf_req, u8 id, |
541 | struct fsf_link_down_info *link_down) | 512 | struct fsf_link_down_info *link_down) |
542 | { | 513 | { |
514 | struct zfcp_adapter *adapter = fsf_req->adapter; | ||
515 | |||
543 | if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, | 516 | if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, |
544 | &adapter->status)) | 517 | &adapter->status)) |
545 | return; | 518 | return; |
@@ -630,7 +603,7 @@ zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter, | |||
630 | link_down->vendor_specific_code); | 603 | link_down->vendor_specific_code); |
631 | 604 | ||
632 | out: | 605 | out: |
633 | zfcp_erp_adapter_failed(adapter); | 606 | zfcp_erp_adapter_failed(adapter, id, fsf_req); |
634 | } | 607 | } |
635 | 608 | ||
636 | /* | 609 | /* |
@@ -824,19 +797,14 @@ zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req) | |||
824 | switch (status_buffer->status_subtype) { | 797 | switch (status_buffer->status_subtype) { |
825 | 798 | ||
826 | case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT: | 799 | case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT: |
827 | debug_text_event(adapter->erp_dbf, 3, "unsol_pc_phys:"); | 800 | zfcp_erp_port_reopen(port, 0, 101, fsf_req); |
828 | zfcp_erp_port_reopen(port, 0); | ||
829 | break; | 801 | break; |
830 | 802 | ||
831 | case FSF_STATUS_READ_SUB_ERROR_PORT: | 803 | case FSF_STATUS_READ_SUB_ERROR_PORT: |
832 | debug_text_event(adapter->erp_dbf, 1, "unsol_pc_err:"); | 804 | zfcp_erp_port_shutdown(port, 0, 122, fsf_req); |
833 | zfcp_erp_port_shutdown(port, 0); | ||
834 | break; | 805 | break; |
835 | 806 | ||
836 | default: | 807 | default: |
837 | debug_text_event(adapter->erp_dbf, 0, "unsol_unk_sub:"); | ||
838 | debug_exception(adapter->erp_dbf, 0, | ||
839 | &status_buffer->status_subtype, sizeof (u32)); | ||
840 | ZFCP_LOG_NORMAL("bug: Undefined status subtype received " | 808 | ZFCP_LOG_NORMAL("bug: Undefined status subtype received " |
841 | "for a reopen indication on port with " | 809 | "for a reopen indication on port with " |
842 | "d_id 0x%06x on the adapter %s. " | 810 | "d_id 0x%06x on the adapter %s. " |
@@ -928,7 +896,7 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req) | |||
928 | case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: | 896 | case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: |
929 | ZFCP_LOG_INFO("Physical link to adapter %s is down\n", | 897 | ZFCP_LOG_INFO("Physical link to adapter %s is down\n", |
930 | zfcp_get_busid_by_adapter(adapter)); | 898 | zfcp_get_busid_by_adapter(adapter)); |
931 | zfcp_fsf_link_down_info_eval(adapter, | 899 | zfcp_fsf_link_down_info_eval(fsf_req, 38, |
932 | (struct fsf_link_down_info *) | 900 | (struct fsf_link_down_info *) |
933 | &status_buffer->payload); | 901 | &status_buffer->payload); |
934 | break; | 902 | break; |
@@ -936,7 +904,7 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req) | |||
936 | ZFCP_LOG_INFO("Local link to adapter %s is down " | 904 | ZFCP_LOG_INFO("Local link to adapter %s is down " |
937 | "due to failed FDISC login\n", | 905 | "due to failed FDISC login\n", |
938 | zfcp_get_busid_by_adapter(adapter)); | 906 | zfcp_get_busid_by_adapter(adapter)); |
939 | zfcp_fsf_link_down_info_eval(adapter, | 907 | zfcp_fsf_link_down_info_eval(fsf_req, 39, |
940 | (struct fsf_link_down_info *) | 908 | (struct fsf_link_down_info *) |
941 | &status_buffer->payload); | 909 | &status_buffer->payload); |
942 | break; | 910 | break; |
@@ -944,13 +912,13 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req) | |||
944 | ZFCP_LOG_INFO("Local link to adapter %s is down " | 912 | ZFCP_LOG_INFO("Local link to adapter %s is down " |
945 | "due to firmware update on adapter\n", | 913 | "due to firmware update on adapter\n", |
946 | zfcp_get_busid_by_adapter(adapter)); | 914 | zfcp_get_busid_by_adapter(adapter)); |
947 | zfcp_fsf_link_down_info_eval(adapter, NULL); | 915 | zfcp_fsf_link_down_info_eval(fsf_req, 40, NULL); |
948 | break; | 916 | break; |
949 | default: | 917 | default: |
950 | ZFCP_LOG_INFO("Local link to adapter %s is down " | 918 | ZFCP_LOG_INFO("Local link to adapter %s is down " |
951 | "due to unknown reason\n", | 919 | "due to unknown reason\n", |
952 | zfcp_get_busid_by_adapter(adapter)); | 920 | zfcp_get_busid_by_adapter(adapter)); |
953 | zfcp_fsf_link_down_info_eval(adapter, NULL); | 921 | zfcp_fsf_link_down_info_eval(fsf_req, 41, NULL); |
954 | }; | 922 | }; |
955 | break; | 923 | break; |
956 | 924 | ||
@@ -959,12 +927,13 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req) | |||
959 | "Restarting operations on this adapter\n", | 927 | "Restarting operations on this adapter\n", |
960 | zfcp_get_busid_by_adapter(adapter)); | 928 | zfcp_get_busid_by_adapter(adapter)); |
961 | /* All ports should be marked as ready to run again */ | 929 | /* All ports should be marked as ready to run again */ |
962 | zfcp_erp_modify_adapter_status(adapter, | 930 | zfcp_erp_modify_adapter_status(adapter, 30, NULL, |
963 | ZFCP_STATUS_COMMON_RUNNING, | 931 | ZFCP_STATUS_COMMON_RUNNING, |
964 | ZFCP_SET); | 932 | ZFCP_SET); |
965 | zfcp_erp_adapter_reopen(adapter, | 933 | zfcp_erp_adapter_reopen(adapter, |
966 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 934 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
967 | | ZFCP_STATUS_COMMON_ERP_FAILED); | 935 | | ZFCP_STATUS_COMMON_ERP_FAILED, |
936 | 102, fsf_req); | ||
968 | break; | 937 | break; |
969 | 938 | ||
970 | case FSF_STATUS_READ_NOTIFICATION_LOST: | 939 | case FSF_STATUS_READ_NOTIFICATION_LOST: |
@@ -998,13 +967,13 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req) | |||
998 | 967 | ||
999 | if (status_buffer->status_subtype & | 968 | if (status_buffer->status_subtype & |
1000 | FSF_STATUS_READ_SUB_ACT_UPDATED) | 969 | FSF_STATUS_READ_SUB_ACT_UPDATED) |
1001 | zfcp_erp_adapter_access_changed(adapter); | 970 | zfcp_erp_adapter_access_changed(adapter, 135, fsf_req); |
1002 | break; | 971 | break; |
1003 | 972 | ||
1004 | case FSF_STATUS_READ_CFDC_UPDATED: | 973 | case FSF_STATUS_READ_CFDC_UPDATED: |
1005 | ZFCP_LOG_NORMAL("CFDC has been updated on the adapter %s\n", | 974 | ZFCP_LOG_NORMAL("CFDC has been updated on the adapter %s\n", |
1006 | zfcp_get_busid_by_adapter(adapter)); | 975 | zfcp_get_busid_by_adapter(adapter)); |
1007 | zfcp_erp_adapter_access_changed(adapter); | 976 | zfcp_erp_adapter_access_changed(adapter, 136, fsf_req); |
1008 | break; | 977 | break; |
1009 | 978 | ||
1010 | case FSF_STATUS_READ_CFDC_HARDENED: | 979 | case FSF_STATUS_READ_CFDC_HARDENED: |
@@ -1025,7 +994,6 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req) | |||
1025 | break; | 994 | break; |
1026 | 995 | ||
1027 | case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: | 996 | case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: |
1028 | debug_text_event(adapter->erp_dbf, 2, "unsol_features:"); | ||
1029 | ZFCP_LOG_INFO("List of supported features on adapter %s has " | 997 | ZFCP_LOG_INFO("List of supported features on adapter %s has " |
1030 | "been changed from 0x%08X to 0x%08X\n", | 998 | "been changed from 0x%08X to 0x%08X\n", |
1031 | zfcp_get_busid_by_adapter(adapter), | 999 | zfcp_get_busid_by_adapter(adapter), |
@@ -1073,7 +1041,7 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req) | |||
1073 | ZFCP_LOG_INFO("restart adapter %s due to status read " | 1041 | ZFCP_LOG_INFO("restart adapter %s due to status read " |
1074 | "buffer shortage\n", | 1042 | "buffer shortage\n", |
1075 | zfcp_get_busid_by_adapter(adapter)); | 1043 | zfcp_get_busid_by_adapter(adapter)); |
1076 | zfcp_erp_adapter_reopen(adapter, 0); | 1044 | zfcp_erp_adapter_reopen(adapter, 0, 103, fsf_req); |
1077 | } | 1045 | } |
1078 | } | 1046 | } |
1079 | out: | 1047 | out: |
@@ -1174,8 +1142,6 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req) | |||
1174 | 1142 | ||
1175 | case FSF_PORT_HANDLE_NOT_VALID: | 1143 | case FSF_PORT_HANDLE_NOT_VALID: |
1176 | if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) { | 1144 | if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) { |
1177 | debug_text_event(new_fsf_req->adapter->erp_dbf, 3, | ||
1178 | "fsf_s_phand_nv0"); | ||
1179 | /* | 1145 | /* |
1180 | * In this case a command that was sent prior to a port | 1146 | * In this case a command that was sent prior to a port |
1181 | * reopen was aborted (handles are different). This is | 1147 | * reopen was aborted (handles are different). This is |
@@ -1194,17 +1160,14 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req) | |||
1194 | fsf_status_qual, | 1160 | fsf_status_qual, |
1195 | sizeof (union fsf_status_qual)); | 1161 | sizeof (union fsf_status_qual)); |
1196 | /* Let's hope this sorts out the mess */ | 1162 | /* Let's hope this sorts out the mess */ |
1197 | debug_text_event(new_fsf_req->adapter->erp_dbf, 1, | 1163 | zfcp_erp_adapter_reopen(unit->port->adapter, 0, 104, |
1198 | "fsf_s_phand_nv1"); | 1164 | new_fsf_req); |
1199 | zfcp_erp_adapter_reopen(unit->port->adapter, 0); | ||
1200 | new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1165 | new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1201 | } | 1166 | } |
1202 | break; | 1167 | break; |
1203 | 1168 | ||
1204 | case FSF_LUN_HANDLE_NOT_VALID: | 1169 | case FSF_LUN_HANDLE_NOT_VALID: |
1205 | if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) { | 1170 | if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) { |
1206 | debug_text_event(new_fsf_req->adapter->erp_dbf, 3, | ||
1207 | "fsf_s_lhand_nv0"); | ||
1208 | /* | 1171 | /* |
1209 | * In this case a command that was sent prior to a unit | 1172 | * In this case a command that was sent prior to a unit |
1210 | * reopen was aborted (handles are different). | 1173 | * reopen was aborted (handles are different). |
@@ -1226,17 +1189,13 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req) | |||
1226 | fsf_status_qual, | 1189 | fsf_status_qual, |
1227 | sizeof (union fsf_status_qual)); | 1190 | sizeof (union fsf_status_qual)); |
1228 | /* Let's hope this sorts out the mess */ | 1191 | /* Let's hope this sorts out the mess */ |
1229 | debug_text_event(new_fsf_req->adapter->erp_dbf, 1, | 1192 | zfcp_erp_port_reopen(unit->port, 0, 105, new_fsf_req); |
1230 | "fsf_s_lhand_nv1"); | ||
1231 | zfcp_erp_port_reopen(unit->port, 0); | ||
1232 | new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1193 | new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1233 | } | 1194 | } |
1234 | break; | 1195 | break; |
1235 | 1196 | ||
1236 | case FSF_FCP_COMMAND_DOES_NOT_EXIST: | 1197 | case FSF_FCP_COMMAND_DOES_NOT_EXIST: |
1237 | retval = 0; | 1198 | retval = 0; |
1238 | debug_text_event(new_fsf_req->adapter->erp_dbf, 3, | ||
1239 | "fsf_s_no_exist"); | ||
1240 | new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; | 1199 | new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; |
1241 | break; | 1200 | break; |
1242 | 1201 | ||
@@ -1244,9 +1203,7 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req) | |||
1244 | ZFCP_LOG_INFO("Remote port 0x%016Lx on adapter %s needs to " | 1203 | ZFCP_LOG_INFO("Remote port 0x%016Lx on adapter %s needs to " |
1245 | "be reopened\n", unit->port->wwpn, | 1204 | "be reopened\n", unit->port->wwpn, |
1246 | zfcp_get_busid_by_unit(unit)); | 1205 | zfcp_get_busid_by_unit(unit)); |
1247 | debug_text_event(new_fsf_req->adapter->erp_dbf, 2, | 1206 | zfcp_erp_port_boxed(unit->port, 47, new_fsf_req); |
1248 | "fsf_s_pboxed"); | ||
1249 | zfcp_erp_port_boxed(unit->port); | ||
1250 | new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | 1207 | new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1251 | | ZFCP_STATUS_FSFREQ_RETRY; | 1208 | | ZFCP_STATUS_FSFREQ_RETRY; |
1252 | break; | 1209 | break; |
@@ -1257,8 +1214,7 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req) | |||
1257 | "to be reopened\n", | 1214 | "to be reopened\n", |
1258 | unit->fcp_lun, unit->port->wwpn, | 1215 | unit->fcp_lun, unit->port->wwpn, |
1259 | zfcp_get_busid_by_unit(unit)); | 1216 | zfcp_get_busid_by_unit(unit)); |
1260 | debug_text_event(new_fsf_req->adapter->erp_dbf, 1, "fsf_s_lboxed"); | 1217 | zfcp_erp_unit_boxed(unit, 48, new_fsf_req); |
1261 | zfcp_erp_unit_boxed(unit); | ||
1262 | new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | 1218 | new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1263 | | ZFCP_STATUS_FSFREQ_RETRY; | 1219 | | ZFCP_STATUS_FSFREQ_RETRY; |
1264 | break; | 1220 | break; |
@@ -1266,26 +1222,17 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req) | |||
1266 | case FSF_ADAPTER_STATUS_AVAILABLE: | 1222 | case FSF_ADAPTER_STATUS_AVAILABLE: |
1267 | switch (new_fsf_req->qtcb->header.fsf_status_qual.word[0]) { | 1223 | switch (new_fsf_req->qtcb->header.fsf_status_qual.word[0]) { |
1268 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: | 1224 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: |
1269 | debug_text_event(new_fsf_req->adapter->erp_dbf, 1, | ||
1270 | "fsf_sq_ltest"); | ||
1271 | zfcp_test_link(unit->port); | 1225 | zfcp_test_link(unit->port); |
1272 | new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1226 | new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1273 | break; | 1227 | break; |
1274 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: | 1228 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: |
1275 | /* SCSI stack will escalate */ | 1229 | /* SCSI stack will escalate */ |
1276 | debug_text_event(new_fsf_req->adapter->erp_dbf, 1, | ||
1277 | "fsf_sq_ulp"); | ||
1278 | new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1230 | new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1279 | break; | 1231 | break; |
1280 | default: | 1232 | default: |
1281 | ZFCP_LOG_NORMAL | 1233 | ZFCP_LOG_NORMAL |
1282 | ("bug: Wrong status qualifier 0x%x arrived.\n", | 1234 | ("bug: Wrong status qualifier 0x%x arrived.\n", |
1283 | new_fsf_req->qtcb->header.fsf_status_qual.word[0]); | 1235 | new_fsf_req->qtcb->header.fsf_status_qual.word[0]); |
1284 | debug_text_event(new_fsf_req->adapter->erp_dbf, 0, | ||
1285 | "fsf_sq_inval:"); | ||
1286 | debug_exception(new_fsf_req->adapter->erp_dbf, 0, | ||
1287 | &new_fsf_req->qtcb->header. | ||
1288 | fsf_status_qual.word[0], sizeof (u32)); | ||
1289 | break; | 1236 | break; |
1290 | } | 1237 | } |
1291 | break; | 1238 | break; |
@@ -1299,11 +1246,6 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req) | |||
1299 | ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " | 1246 | ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " |
1300 | "(debug info 0x%x)\n", | 1247 | "(debug info 0x%x)\n", |
1301 | new_fsf_req->qtcb->header.fsf_status); | 1248 | new_fsf_req->qtcb->header.fsf_status); |
1302 | debug_text_event(new_fsf_req->adapter->erp_dbf, 0, | ||
1303 | "fsf_s_inval:"); | ||
1304 | debug_exception(new_fsf_req->adapter->erp_dbf, 0, | ||
1305 | &new_fsf_req->qtcb->header.fsf_status, | ||
1306 | sizeof (u32)); | ||
1307 | break; | 1249 | break; |
1308 | } | 1250 | } |
1309 | skip_fsfstatus: | 1251 | skip_fsfstatus: |
@@ -1506,8 +1448,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req) | |||
1506 | zfcp_get_busid_by_port(port), | 1448 | zfcp_get_busid_by_port(port), |
1507 | ZFCP_FC_SERVICE_CLASS_DEFAULT); | 1449 | ZFCP_FC_SERVICE_CLASS_DEFAULT); |
1508 | /* stop operation for this adapter */ | 1450 | /* stop operation for this adapter */ |
1509 | debug_text_exception(adapter->erp_dbf, 0, "fsf_s_class_nsup"); | 1451 | zfcp_erp_adapter_shutdown(adapter, 0, 123, fsf_req); |
1510 | zfcp_erp_adapter_shutdown(adapter, 0); | ||
1511 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1452 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1512 | break; | 1453 | break; |
1513 | 1454 | ||
@@ -1515,13 +1456,11 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req) | |||
1515 | switch (header->fsf_status_qual.word[0]){ | 1456 | switch (header->fsf_status_qual.word[0]){ |
1516 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: | 1457 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: |
1517 | /* reopening link to port */ | 1458 | /* reopening link to port */ |
1518 | debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ltest"); | ||
1519 | zfcp_test_link(port); | 1459 | zfcp_test_link(port); |
1520 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1460 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1521 | break; | 1461 | break; |
1522 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: | 1462 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: |
1523 | /* ERP strategy will escalate */ | 1463 | /* ERP strategy will escalate */ |
1524 | debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ulp"); | ||
1525 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1464 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1526 | break; | 1465 | break; |
1527 | default: | 1466 | default: |
@@ -1549,8 +1488,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req) | |||
1549 | break; | 1488 | break; |
1550 | } | 1489 | } |
1551 | } | 1490 | } |
1552 | debug_text_event(adapter->erp_dbf, 1, "fsf_s_access"); | 1491 | zfcp_erp_port_access_denied(port, 55, fsf_req); |
1553 | zfcp_erp_port_access_denied(port); | ||
1554 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1492 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1555 | break; | 1493 | break; |
1556 | 1494 | ||
@@ -1562,7 +1500,6 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req) | |||
1562 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO, | 1500 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO, |
1563 | (char *) &header->fsf_status_qual, | 1501 | (char *) &header->fsf_status_qual, |
1564 | sizeof (union fsf_status_qual)); | 1502 | sizeof (union fsf_status_qual)); |
1565 | debug_text_event(adapter->erp_dbf, 1, "fsf_s_gcom_rej"); | ||
1566 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1503 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1567 | break; | 1504 | break; |
1568 | 1505 | ||
@@ -1575,8 +1512,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req) | |||
1575 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO, | 1512 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO, |
1576 | (char *) &header->fsf_status_qual, | 1513 | (char *) &header->fsf_status_qual, |
1577 | sizeof (union fsf_status_qual)); | 1514 | sizeof (union fsf_status_qual)); |
1578 | debug_text_event(adapter->erp_dbf, 1, "fsf_s_phandle_nv"); | 1515 | zfcp_erp_adapter_reopen(adapter, 0, 106, fsf_req); |
1579 | zfcp_erp_adapter_reopen(adapter, 0); | ||
1580 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1516 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1581 | break; | 1517 | break; |
1582 | 1518 | ||
@@ -1584,8 +1520,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req) | |||
1584 | ZFCP_LOG_INFO("port needs to be reopened " | 1520 | ZFCP_LOG_INFO("port needs to be reopened " |
1585 | "(adapter %s, port d_id=0x%06x)\n", | 1521 | "(adapter %s, port d_id=0x%06x)\n", |
1586 | zfcp_get_busid_by_port(port), port->d_id); | 1522 | zfcp_get_busid_by_port(port), port->d_id); |
1587 | debug_text_event(adapter->erp_dbf, 2, "fsf_s_pboxed"); | 1523 | zfcp_erp_port_boxed(port, 49, fsf_req); |
1588 | zfcp_erp_port_boxed(port); | ||
1589 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | 1524 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1590 | | ZFCP_STATUS_FSFREQ_RETRY; | 1525 | | ZFCP_STATUS_FSFREQ_RETRY; |
1591 | break; | 1526 | break; |
@@ -1624,9 +1559,6 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req) | |||
1624 | default: | 1559 | default: |
1625 | ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " | 1560 | ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " |
1626 | "(debug info 0x%x)\n", header->fsf_status); | 1561 | "(debug info 0x%x)\n", header->fsf_status); |
1627 | debug_text_event(adapter->erp_dbf, 0, "fsf_sq_inval:"); | ||
1628 | debug_exception(adapter->erp_dbf, 0, | ||
1629 | &header->fsf_status_qual.word[0], sizeof (u32)); | ||
1630 | break; | 1562 | break; |
1631 | } | 1563 | } |
1632 | 1564 | ||
@@ -1810,21 +1742,18 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req) | |||
1810 | zfcp_get_busid_by_adapter(adapter), | 1742 | zfcp_get_busid_by_adapter(adapter), |
1811 | ZFCP_FC_SERVICE_CLASS_DEFAULT); | 1743 | ZFCP_FC_SERVICE_CLASS_DEFAULT); |
1812 | /* stop operation for this adapter */ | 1744 | /* stop operation for this adapter */ |
1813 | debug_text_exception(adapter->erp_dbf, 0, "fsf_s_class_nsup"); | 1745 | zfcp_erp_adapter_shutdown(adapter, 0, 124, fsf_req); |
1814 | zfcp_erp_adapter_shutdown(adapter, 0); | ||
1815 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1746 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1816 | break; | 1747 | break; |
1817 | 1748 | ||
1818 | case FSF_ADAPTER_STATUS_AVAILABLE: | 1749 | case FSF_ADAPTER_STATUS_AVAILABLE: |
1819 | switch (header->fsf_status_qual.word[0]){ | 1750 | switch (header->fsf_status_qual.word[0]){ |
1820 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: | 1751 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: |
1821 | debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ltest"); | ||
1822 | if (port && (send_els->ls_code != ZFCP_LS_ADISC)) | 1752 | if (port && (send_els->ls_code != ZFCP_LS_ADISC)) |
1823 | zfcp_test_link(port); | 1753 | zfcp_test_link(port); |
1824 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1754 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1825 | break; | 1755 | break; |
1826 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: | 1756 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: |
1827 | debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ulp"); | ||
1828 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1757 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1829 | retval = | 1758 | retval = |
1830 | zfcp_handle_els_rjt(header->fsf_status_qual.word[1], | 1759 | zfcp_handle_els_rjt(header->fsf_status_qual.word[1], |
@@ -1832,7 +1761,6 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req) | |||
1832 | &header->fsf_status_qual.word[2]); | 1761 | &header->fsf_status_qual.word[2]); |
1833 | break; | 1762 | break; |
1834 | case FSF_SQ_RETRY_IF_POSSIBLE: | 1763 | case FSF_SQ_RETRY_IF_POSSIBLE: |
1835 | debug_text_event(adapter->erp_dbf, 1, "fsf_sq_retry"); | ||
1836 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1764 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1837 | break; | 1765 | break; |
1838 | default: | 1766 | default: |
@@ -1909,9 +1837,8 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req) | |||
1909 | break; | 1837 | break; |
1910 | } | 1838 | } |
1911 | } | 1839 | } |
1912 | debug_text_event(adapter->erp_dbf, 1, "fsf_s_access"); | ||
1913 | if (port != NULL) | 1840 | if (port != NULL) |
1914 | zfcp_erp_port_access_denied(port); | 1841 | zfcp_erp_port_access_denied(port, 56, fsf_req); |
1915 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1842 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1916 | break; | 1843 | break; |
1917 | 1844 | ||
@@ -1921,9 +1848,6 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req) | |||
1921 | "(adapter: %s, fsf_status=0x%08x)\n", | 1848 | "(adapter: %s, fsf_status=0x%08x)\n", |
1922 | zfcp_get_busid_by_adapter(adapter), | 1849 | zfcp_get_busid_by_adapter(adapter), |
1923 | header->fsf_status); | 1850 | header->fsf_status); |
1924 | debug_text_event(adapter->erp_dbf, 0, "fsf_sq_inval"); | ||
1925 | debug_exception(adapter->erp_dbf, 0, | ||
1926 | &header->fsf_status_qual.word[0], sizeof(u32)); | ||
1927 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1851 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1928 | break; | 1852 | break; |
1929 | } | 1853 | } |
@@ -2132,8 +2056,7 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok) | |||
2132 | "versions in comparison to this device " | 2056 | "versions in comparison to this device " |
2133 | "driver (try updated device driver)\n", | 2057 | "driver (try updated device driver)\n", |
2134 | zfcp_get_busid_by_adapter(adapter)); | 2058 | zfcp_get_busid_by_adapter(adapter)); |
2135 | debug_text_event(adapter->erp_dbf, 0, "low_qtcb_ver"); | 2059 | zfcp_erp_adapter_shutdown(adapter, 0, 125, fsf_req); |
2136 | zfcp_erp_adapter_shutdown(adapter, 0); | ||
2137 | return -EIO; | 2060 | return -EIO; |
2138 | } | 2061 | } |
2139 | if (ZFCP_QTCB_VERSION > bottom->high_qtcb_version) { | 2062 | if (ZFCP_QTCB_VERSION > bottom->high_qtcb_version) { |
@@ -2142,8 +2065,7 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok) | |||
2142 | "versions than this device driver uses" | 2065 | "versions than this device driver uses" |
2143 | "(consider a microcode upgrade)\n", | 2066 | "(consider a microcode upgrade)\n", |
2144 | zfcp_get_busid_by_adapter(adapter)); | 2067 | zfcp_get_busid_by_adapter(adapter)); |
2145 | debug_text_event(adapter->erp_dbf, 0, "high_qtcb_ver"); | 2068 | zfcp_erp_adapter_shutdown(adapter, 0, 126, fsf_req); |
2146 | zfcp_erp_adapter_shutdown(adapter, 0); | ||
2147 | return -EIO; | 2069 | return -EIO; |
2148 | } | 2070 | } |
2149 | return 0; | 2071 | return 0; |
@@ -2183,17 +2105,13 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req) | |||
2183 | adapter->peer_wwnn, | 2105 | adapter->peer_wwnn, |
2184 | adapter->peer_wwpn, | 2106 | adapter->peer_wwpn, |
2185 | adapter->peer_d_id); | 2107 | adapter->peer_d_id); |
2186 | debug_text_event(fsf_req->adapter->erp_dbf, 0, | ||
2187 | "top-p-to-p"); | ||
2188 | break; | 2108 | break; |
2189 | case FC_PORTTYPE_NLPORT: | 2109 | case FC_PORTTYPE_NLPORT: |
2190 | ZFCP_LOG_NORMAL("error: Arbitrated loop fibrechannel " | 2110 | ZFCP_LOG_NORMAL("error: Arbitrated loop fibrechannel " |
2191 | "topology detected at adapter %s " | 2111 | "topology detected at adapter %s " |
2192 | "unsupported, shutting down adapter\n", | 2112 | "unsupported, shutting down adapter\n", |
2193 | zfcp_get_busid_by_adapter(adapter)); | 2113 | zfcp_get_busid_by_adapter(adapter)); |
2194 | debug_text_event(fsf_req->adapter->erp_dbf, 0, | 2114 | zfcp_erp_adapter_shutdown(adapter, 0, 127, fsf_req); |
2195 | "top-al"); | ||
2196 | zfcp_erp_adapter_shutdown(adapter, 0); | ||
2197 | return -EIO; | 2115 | return -EIO; |
2198 | case FC_PORTTYPE_NPORT: | 2116 | case FC_PORTTYPE_NPORT: |
2199 | ZFCP_LOG_NORMAL("Switched fabric fibrechannel " | 2117 | ZFCP_LOG_NORMAL("Switched fabric fibrechannel " |
@@ -2208,9 +2126,7 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req) | |||
2208 | "of a type known to the zfcp " | 2126 | "of a type known to the zfcp " |
2209 | "driver, shutting down adapter\n", | 2127 | "driver, shutting down adapter\n", |
2210 | zfcp_get_busid_by_adapter(adapter)); | 2128 | zfcp_get_busid_by_adapter(adapter)); |
2211 | debug_text_exception(fsf_req->adapter->erp_dbf, 0, | 2129 | zfcp_erp_adapter_shutdown(adapter, 0, 128, fsf_req); |
2212 | "unknown-topo"); | ||
2213 | zfcp_erp_adapter_shutdown(adapter, 0); | ||
2214 | return -EIO; | 2130 | return -EIO; |
2215 | } | 2131 | } |
2216 | bottom = &qtcb->bottom.config; | 2132 | bottom = &qtcb->bottom.config; |
@@ -2222,33 +2138,24 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req) | |||
2222 | bottom->max_qtcb_size, | 2138 | bottom->max_qtcb_size, |
2223 | zfcp_get_busid_by_adapter(adapter), | 2139 | zfcp_get_busid_by_adapter(adapter), |
2224 | sizeof(struct fsf_qtcb)); | 2140 | sizeof(struct fsf_qtcb)); |
2225 | debug_text_event(fsf_req->adapter->erp_dbf, 0, | 2141 | zfcp_erp_adapter_shutdown(adapter, 0, 129, fsf_req); |
2226 | "qtcb-size"); | ||
2227 | debug_event(fsf_req->adapter->erp_dbf, 0, | ||
2228 | &bottom->max_qtcb_size, sizeof (u32)); | ||
2229 | zfcp_erp_adapter_shutdown(adapter, 0); | ||
2230 | return -EIO; | 2142 | return -EIO; |
2231 | } | 2143 | } |
2232 | atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, | 2144 | atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, |
2233 | &adapter->status); | 2145 | &adapter->status); |
2234 | break; | 2146 | break; |
2235 | case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: | 2147 | case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: |
2236 | debug_text_event(adapter->erp_dbf, 0, "xchg-inco"); | ||
2237 | |||
2238 | if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0)) | 2148 | if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0)) |
2239 | return -EIO; | 2149 | return -EIO; |
2240 | 2150 | ||
2241 | atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, | 2151 | atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, |
2242 | &adapter->status); | 2152 | &adapter->status); |
2243 | 2153 | ||
2244 | zfcp_fsf_link_down_info_eval(adapter, | 2154 | zfcp_fsf_link_down_info_eval(fsf_req, 42, |
2245 | &qtcb->header.fsf_status_qual.link_down_info); | 2155 | &qtcb->header.fsf_status_qual.link_down_info); |
2246 | break; | 2156 | break; |
2247 | default: | 2157 | default: |
2248 | debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf-stat-ng"); | 2158 | zfcp_erp_adapter_shutdown(adapter, 0, 130, fsf_req); |
2249 | debug_event(fsf_req->adapter->erp_dbf, 0, | ||
2250 | &fsf_req->qtcb->header.fsf_status, sizeof(u32)); | ||
2251 | zfcp_erp_adapter_shutdown(adapter, 0); | ||
2252 | return -EIO; | 2159 | return -EIO; |
2253 | } | 2160 | } |
2254 | return 0; | 2161 | return 0; |
@@ -2424,13 +2331,9 @@ zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req) | |||
2424 | case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: | 2331 | case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: |
2425 | zfcp_fsf_exchange_port_evaluate(fsf_req, 0); | 2332 | zfcp_fsf_exchange_port_evaluate(fsf_req, 0); |
2426 | atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); | 2333 | atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); |
2427 | zfcp_fsf_link_down_info_eval(adapter, | 2334 | zfcp_fsf_link_down_info_eval(fsf_req, 43, |
2428 | &qtcb->header.fsf_status_qual.link_down_info); | 2335 | &qtcb->header.fsf_status_qual.link_down_info); |
2429 | break; | 2336 | break; |
2430 | default: | ||
2431 | debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng"); | ||
2432 | debug_event(adapter->erp_dbf, 0, | ||
2433 | &fsf_req->qtcb->header.fsf_status, sizeof(u32)); | ||
2434 | } | 2337 | } |
2435 | } | 2338 | } |
2436 | 2339 | ||
@@ -2528,8 +2431,6 @@ zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req) | |||
2528 | ZFCP_LOG_NORMAL("bug: remote port 0x%016Lx on adapter %s " | 2431 | ZFCP_LOG_NORMAL("bug: remote port 0x%016Lx on adapter %s " |
2529 | "is already open.\n", | 2432 | "is already open.\n", |
2530 | port->wwpn, zfcp_get_busid_by_port(port)); | 2433 | port->wwpn, zfcp_get_busid_by_port(port)); |
2531 | debug_text_exception(fsf_req->adapter->erp_dbf, 0, | ||
2532 | "fsf_s_popen"); | ||
2533 | /* | 2434 | /* |
2534 | * This is a bug, however operation should continue normally | 2435 | * This is a bug, however operation should continue normally |
2535 | * if it is simply ignored | 2436 | * if it is simply ignored |
@@ -2553,8 +2454,7 @@ zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req) | |||
2553 | break; | 2454 | break; |
2554 | } | 2455 | } |
2555 | } | 2456 | } |
2556 | debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access"); | 2457 | zfcp_erp_port_access_denied(port, 57, fsf_req); |
2557 | zfcp_erp_port_access_denied(port); | ||
2558 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2458 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2559 | break; | 2459 | break; |
2560 | 2460 | ||
@@ -2563,24 +2463,18 @@ zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req) | |||
2563 | "The remote port 0x%016Lx on adapter %s " | 2463 | "The remote port 0x%016Lx on adapter %s " |
2564 | "could not be opened. Disabling it.\n", | 2464 | "could not be opened. Disabling it.\n", |
2565 | port->wwpn, zfcp_get_busid_by_port(port)); | 2465 | port->wwpn, zfcp_get_busid_by_port(port)); |
2566 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | 2466 | zfcp_erp_port_failed(port, 31, fsf_req); |
2567 | "fsf_s_max_ports"); | ||
2568 | zfcp_erp_port_failed(port); | ||
2569 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2467 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2570 | break; | 2468 | break; |
2571 | 2469 | ||
2572 | case FSF_ADAPTER_STATUS_AVAILABLE: | 2470 | case FSF_ADAPTER_STATUS_AVAILABLE: |
2573 | switch (header->fsf_status_qual.word[0]) { | 2471 | switch (header->fsf_status_qual.word[0]) { |
2574 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: | 2472 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: |
2575 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | ||
2576 | "fsf_sq_ltest"); | ||
2577 | /* ERP strategy will escalate */ | 2473 | /* ERP strategy will escalate */ |
2578 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2474 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2579 | break; | 2475 | break; |
2580 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: | 2476 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: |
2581 | /* ERP strategy will escalate */ | 2477 | /* ERP strategy will escalate */ |
2582 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | ||
2583 | "fsf_sq_ulp"); | ||
2584 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2478 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2585 | break; | 2479 | break; |
2586 | case FSF_SQ_NO_RETRY_POSSIBLE: | 2480 | case FSF_SQ_NO_RETRY_POSSIBLE: |
@@ -2589,21 +2483,13 @@ zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req) | |||
2589 | "Disabling it.\n", | 2483 | "Disabling it.\n", |
2590 | port->wwpn, | 2484 | port->wwpn, |
2591 | zfcp_get_busid_by_port(port)); | 2485 | zfcp_get_busid_by_port(port)); |
2592 | debug_text_exception(fsf_req->adapter->erp_dbf, 0, | 2486 | zfcp_erp_port_failed(port, 32, fsf_req); |
2593 | "fsf_sq_no_retry"); | ||
2594 | zfcp_erp_port_failed(port); | ||
2595 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2487 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2596 | break; | 2488 | break; |
2597 | default: | 2489 | default: |
2598 | ZFCP_LOG_NORMAL | 2490 | ZFCP_LOG_NORMAL |
2599 | ("bug: Wrong status qualifier 0x%x arrived.\n", | 2491 | ("bug: Wrong status qualifier 0x%x arrived.\n", |
2600 | header->fsf_status_qual.word[0]); | 2492 | header->fsf_status_qual.word[0]); |
2601 | debug_text_event(fsf_req->adapter->erp_dbf, 0, | ||
2602 | "fsf_sq_inval:"); | ||
2603 | debug_exception( | ||
2604 | fsf_req->adapter->erp_dbf, 0, | ||
2605 | &header->fsf_status_qual.word[0], | ||
2606 | sizeof (u32)); | ||
2607 | break; | 2493 | break; |
2608 | } | 2494 | } |
2609 | break; | 2495 | break; |
@@ -2646,17 +2532,12 @@ zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req) | |||
2646 | "warning: insufficient length of " | 2532 | "warning: insufficient length of " |
2647 | "PLOGI payload (%i)\n", | 2533 | "PLOGI payload (%i)\n", |
2648 | fsf_req->qtcb->bottom.support.els1_length); | 2534 | fsf_req->qtcb->bottom.support.els1_length); |
2649 | debug_text_event(fsf_req->adapter->erp_dbf, 0, | ||
2650 | "fsf_s_short_plogi:"); | ||
2651 | /* skip sanity check and assume wwpn is ok */ | 2535 | /* skip sanity check and assume wwpn is ok */ |
2652 | } else { | 2536 | } else { |
2653 | if (plogi->serv_param.wwpn != port->wwpn) { | 2537 | if (plogi->serv_param.wwpn != port->wwpn) { |
2654 | ZFCP_LOG_INFO("warning: d_id of port " | 2538 | ZFCP_LOG_INFO("warning: d_id of port " |
2655 | "0x%016Lx changed during " | 2539 | "0x%016Lx changed during " |
2656 | "open\n", port->wwpn); | 2540 | "open\n", port->wwpn); |
2657 | debug_text_event( | ||
2658 | fsf_req->adapter->erp_dbf, 0, | ||
2659 | "fsf_s_did_change:"); | ||
2660 | atomic_clear_mask( | 2541 | atomic_clear_mask( |
2661 | ZFCP_STATUS_PORT_DID_DID, | 2542 | ZFCP_STATUS_PORT_DID_DID, |
2662 | &port->status); | 2543 | &port->status); |
@@ -2681,9 +2562,6 @@ zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req) | |||
2681 | ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " | 2562 | ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " |
2682 | "(debug info 0x%x)\n", | 2563 | "(debug info 0x%x)\n", |
2683 | header->fsf_status); | 2564 | header->fsf_status); |
2684 | debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:"); | ||
2685 | debug_exception(fsf_req->adapter->erp_dbf, 0, | ||
2686 | &header->fsf_status, sizeof (u32)); | ||
2687 | break; | 2565 | break; |
2688 | } | 2566 | } |
2689 | 2567 | ||
@@ -2787,9 +2665,7 @@ zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req) | |||
2787 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, | 2665 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, |
2788 | (char *) &fsf_req->qtcb->header.fsf_status_qual, | 2666 | (char *) &fsf_req->qtcb->header.fsf_status_qual, |
2789 | sizeof (union fsf_status_qual)); | 2667 | sizeof (union fsf_status_qual)); |
2790 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | 2668 | zfcp_erp_adapter_reopen(port->adapter, 0, 107, fsf_req); |
2791 | "fsf_s_phand_nv"); | ||
2792 | zfcp_erp_adapter_reopen(port->adapter, 0); | ||
2793 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2669 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2794 | break; | 2670 | break; |
2795 | 2671 | ||
@@ -2804,7 +2680,7 @@ zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req) | |||
2804 | ZFCP_LOG_TRACE("remote port 0x016%Lx on adapter %s closed, " | 2680 | ZFCP_LOG_TRACE("remote port 0x016%Lx on adapter %s closed, " |
2805 | "port handle 0x%x\n", port->wwpn, | 2681 | "port handle 0x%x\n", port->wwpn, |
2806 | zfcp_get_busid_by_port(port), port->handle); | 2682 | zfcp_get_busid_by_port(port), port->handle); |
2807 | zfcp_erp_modify_port_status(port, | 2683 | zfcp_erp_modify_port_status(port, 33, fsf_req, |
2808 | ZFCP_STATUS_COMMON_OPEN, | 2684 | ZFCP_STATUS_COMMON_OPEN, |
2809 | ZFCP_CLEAR); | 2685 | ZFCP_CLEAR); |
2810 | retval = 0; | 2686 | retval = 0; |
@@ -2814,10 +2690,6 @@ zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req) | |||
2814 | ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " | 2690 | ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " |
2815 | "(debug info 0x%x)\n", | 2691 | "(debug info 0x%x)\n", |
2816 | fsf_req->qtcb->header.fsf_status); | 2692 | fsf_req->qtcb->header.fsf_status); |
2817 | debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:"); | ||
2818 | debug_exception(fsf_req->adapter->erp_dbf, 0, | ||
2819 | &fsf_req->qtcb->header.fsf_status, | ||
2820 | sizeof (u32)); | ||
2821 | break; | 2693 | break; |
2822 | } | 2694 | } |
2823 | 2695 | ||
@@ -2930,9 +2802,7 @@ zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req) | |||
2930 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, | 2802 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, |
2931 | (char *) &header->fsf_status_qual, | 2803 | (char *) &header->fsf_status_qual, |
2932 | sizeof (union fsf_status_qual)); | 2804 | sizeof (union fsf_status_qual)); |
2933 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | 2805 | zfcp_erp_adapter_reopen(port->adapter, 0, 108, fsf_req); |
2934 | "fsf_s_phand_nv"); | ||
2935 | zfcp_erp_adapter_reopen(port->adapter, 0); | ||
2936 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2806 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2937 | break; | 2807 | break; |
2938 | 2808 | ||
@@ -2953,8 +2823,7 @@ zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req) | |||
2953 | break; | 2823 | break; |
2954 | } | 2824 | } |
2955 | } | 2825 | } |
2956 | debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access"); | 2826 | zfcp_erp_port_access_denied(port, 58, fsf_req); |
2957 | zfcp_erp_port_access_denied(port); | ||
2958 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2827 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2959 | break; | 2828 | break; |
2960 | 2829 | ||
@@ -2964,35 +2833,32 @@ zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req) | |||
2964 | "to close it physically.\n", | 2833 | "to close it physically.\n", |
2965 | port->wwpn, | 2834 | port->wwpn, |
2966 | zfcp_get_busid_by_port(port)); | 2835 | zfcp_get_busid_by_port(port)); |
2967 | debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_pboxed"); | 2836 | zfcp_erp_port_boxed(port, 50, fsf_req); |
2968 | zfcp_erp_port_boxed(port); | ||
2969 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | | 2837 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | |
2970 | ZFCP_STATUS_FSFREQ_RETRY; | 2838 | ZFCP_STATUS_FSFREQ_RETRY; |
2839 | |||
2840 | /* can't use generic zfcp_erp_modify_port_status because | ||
2841 | * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ | ||
2842 | atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); | ||
2843 | list_for_each_entry(unit, &port->unit_list_head, list) | ||
2844 | atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, | ||
2845 | &unit->status); | ||
2971 | break; | 2846 | break; |
2972 | 2847 | ||
2973 | case FSF_ADAPTER_STATUS_AVAILABLE: | 2848 | case FSF_ADAPTER_STATUS_AVAILABLE: |
2974 | switch (header->fsf_status_qual.word[0]) { | 2849 | switch (header->fsf_status_qual.word[0]) { |
2975 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: | 2850 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: |
2976 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | ||
2977 | "fsf_sq_ltest"); | ||
2978 | /* This will now be escalated by ERP */ | 2851 | /* This will now be escalated by ERP */ |
2979 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2852 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2980 | break; | 2853 | break; |
2981 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: | 2854 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: |
2982 | /* ERP strategy will escalate */ | 2855 | /* ERP strategy will escalate */ |
2983 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | ||
2984 | "fsf_sq_ulp"); | ||
2985 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2856 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2986 | break; | 2857 | break; |
2987 | default: | 2858 | default: |
2988 | ZFCP_LOG_NORMAL | 2859 | ZFCP_LOG_NORMAL |
2989 | ("bug: Wrong status qualifier 0x%x arrived.\n", | 2860 | ("bug: Wrong status qualifier 0x%x arrived.\n", |
2990 | header->fsf_status_qual.word[0]); | 2861 | header->fsf_status_qual.word[0]); |
2991 | debug_text_event(fsf_req->adapter->erp_dbf, 0, | ||
2992 | "fsf_sq_inval:"); | ||
2993 | debug_exception( | ||
2994 | fsf_req->adapter->erp_dbf, 0, | ||
2995 | &header->fsf_status_qual.word[0], sizeof (u32)); | ||
2996 | break; | 2862 | break; |
2997 | } | 2863 | } |
2998 | break; | 2864 | break; |
@@ -3015,9 +2881,6 @@ zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req) | |||
3015 | ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " | 2881 | ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " |
3016 | "(debug info 0x%x)\n", | 2882 | "(debug info 0x%x)\n", |
3017 | header->fsf_status); | 2883 | header->fsf_status); |
3018 | debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:"); | ||
3019 | debug_exception(fsf_req->adapter->erp_dbf, 0, | ||
3020 | &header->fsf_status, sizeof (u32)); | ||
3021 | break; | 2884 | break; |
3022 | } | 2885 | } |
3023 | 2886 | ||
@@ -3149,8 +3012,7 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req) | |||
3149 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, | 3012 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, |
3150 | (char *) &header->fsf_status_qual, | 3013 | (char *) &header->fsf_status_qual, |
3151 | sizeof (union fsf_status_qual)); | 3014 | sizeof (union fsf_status_qual)); |
3152 | debug_text_event(adapter->erp_dbf, 1, "fsf_s_ph_nv"); | 3015 | zfcp_erp_adapter_reopen(unit->port->adapter, 0, 109, fsf_req); |
3153 | zfcp_erp_adapter_reopen(unit->port->adapter, 0); | ||
3154 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3016 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3155 | break; | 3017 | break; |
3156 | 3018 | ||
@@ -3159,8 +3021,6 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req) | |||
3159 | "remote port 0x%016Lx on adapter %s twice.\n", | 3021 | "remote port 0x%016Lx on adapter %s twice.\n", |
3160 | unit->fcp_lun, | 3022 | unit->fcp_lun, |
3161 | unit->port->wwpn, zfcp_get_busid_by_unit(unit)); | 3023 | unit->port->wwpn, zfcp_get_busid_by_unit(unit)); |
3162 | debug_text_exception(adapter->erp_dbf, 0, | ||
3163 | "fsf_s_uopen"); | ||
3164 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3024 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3165 | break; | 3025 | break; |
3166 | 3026 | ||
@@ -3182,8 +3042,7 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req) | |||
3182 | break; | 3042 | break; |
3183 | } | 3043 | } |
3184 | } | 3044 | } |
3185 | debug_text_event(adapter->erp_dbf, 1, "fsf_s_access"); | 3045 | zfcp_erp_unit_access_denied(unit, 59, fsf_req); |
3186 | zfcp_erp_unit_access_denied(unit); | ||
3187 | atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); | 3046 | atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); |
3188 | atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); | 3047 | atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); |
3189 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3048 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
@@ -3193,8 +3052,7 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req) | |||
3193 | ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s " | 3052 | ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s " |
3194 | "needs to be reopened\n", | 3053 | "needs to be reopened\n", |
3195 | unit->port->wwpn, zfcp_get_busid_by_unit(unit)); | 3054 | unit->port->wwpn, zfcp_get_busid_by_unit(unit)); |
3196 | debug_text_event(adapter->erp_dbf, 2, "fsf_s_pboxed"); | 3055 | zfcp_erp_port_boxed(unit->port, 51, fsf_req); |
3197 | zfcp_erp_port_boxed(unit->port); | ||
3198 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | | 3056 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | |
3199 | ZFCP_STATUS_FSFREQ_RETRY; | 3057 | ZFCP_STATUS_FSFREQ_RETRY; |
3200 | break; | 3058 | break; |
@@ -3234,9 +3092,7 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req) | |||
3234 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, | 3092 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, |
3235 | (char *) &header->fsf_status_qual, | 3093 | (char *) &header->fsf_status_qual, |
3236 | sizeof (union fsf_status_qual)); | 3094 | sizeof (union fsf_status_qual)); |
3237 | debug_text_event(adapter->erp_dbf, 2, | 3095 | zfcp_erp_unit_access_denied(unit, 60, fsf_req); |
3238 | "fsf_s_l_sh_vio"); | ||
3239 | zfcp_erp_unit_access_denied(unit); | ||
3240 | atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); | 3096 | atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); |
3241 | atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); | 3097 | atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); |
3242 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3098 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
@@ -3250,9 +3106,7 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req) | |||
3250 | unit->fcp_lun, | 3106 | unit->fcp_lun, |
3251 | unit->port->wwpn, | 3107 | unit->port->wwpn, |
3252 | zfcp_get_busid_by_unit(unit)); | 3108 | zfcp_get_busid_by_unit(unit)); |
3253 | debug_text_event(adapter->erp_dbf, 1, | 3109 | zfcp_erp_unit_failed(unit, 34, fsf_req); |
3254 | "fsf_s_max_units"); | ||
3255 | zfcp_erp_unit_failed(unit); | ||
3256 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3110 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3257 | break; | 3111 | break; |
3258 | 3112 | ||
@@ -3260,26 +3114,17 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req) | |||
3260 | switch (header->fsf_status_qual.word[0]) { | 3114 | switch (header->fsf_status_qual.word[0]) { |
3261 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: | 3115 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: |
3262 | /* Re-establish link to port */ | 3116 | /* Re-establish link to port */ |
3263 | debug_text_event(adapter->erp_dbf, 1, | ||
3264 | "fsf_sq_ltest"); | ||
3265 | zfcp_test_link(unit->port); | 3117 | zfcp_test_link(unit->port); |
3266 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3118 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3267 | break; | 3119 | break; |
3268 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: | 3120 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: |
3269 | /* ERP strategy will escalate */ | 3121 | /* ERP strategy will escalate */ |
3270 | debug_text_event(adapter->erp_dbf, 1, | ||
3271 | "fsf_sq_ulp"); | ||
3272 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3122 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3273 | break; | 3123 | break; |
3274 | default: | 3124 | default: |
3275 | ZFCP_LOG_NORMAL | 3125 | ZFCP_LOG_NORMAL |
3276 | ("bug: Wrong status qualifier 0x%x arrived.\n", | 3126 | ("bug: Wrong status qualifier 0x%x arrived.\n", |
3277 | header->fsf_status_qual.word[0]); | 3127 | header->fsf_status_qual.word[0]); |
3278 | debug_text_event(adapter->erp_dbf, 0, | ||
3279 | "fsf_sq_inval:"); | ||
3280 | debug_exception(adapter->erp_dbf, 0, | ||
3281 | &header->fsf_status_qual.word[0], | ||
3282 | sizeof (u32)); | ||
3283 | } | 3128 | } |
3284 | break; | 3129 | break; |
3285 | 3130 | ||
@@ -3331,15 +3176,15 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req) | |||
3331 | if (exclusive && !readwrite) { | 3176 | if (exclusive && !readwrite) { |
3332 | ZFCP_LOG_NORMAL("exclusive access of read-only " | 3177 | ZFCP_LOG_NORMAL("exclusive access of read-only " |
3333 | "unit not supported\n"); | 3178 | "unit not supported\n"); |
3334 | zfcp_erp_unit_failed(unit); | 3179 | zfcp_erp_unit_failed(unit, 35, fsf_req); |
3335 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3180 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3336 | zfcp_erp_unit_shutdown(unit, 0); | 3181 | zfcp_erp_unit_shutdown(unit, 0, 80, fsf_req); |
3337 | } else if (!exclusive && readwrite) { | 3182 | } else if (!exclusive && readwrite) { |
3338 | ZFCP_LOG_NORMAL("shared access of read-write " | 3183 | ZFCP_LOG_NORMAL("shared access of read-write " |
3339 | "unit not supported\n"); | 3184 | "unit not supported\n"); |
3340 | zfcp_erp_unit_failed(unit); | 3185 | zfcp_erp_unit_failed(unit, 36, fsf_req); |
3341 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3186 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3342 | zfcp_erp_unit_shutdown(unit, 0); | 3187 | zfcp_erp_unit_shutdown(unit, 0, 81, fsf_req); |
3343 | } | 3188 | } |
3344 | } | 3189 | } |
3345 | 3190 | ||
@@ -3350,9 +3195,6 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req) | |||
3350 | ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " | 3195 | ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " |
3351 | "(debug info 0x%x)\n", | 3196 | "(debug info 0x%x)\n", |
3352 | header->fsf_status); | 3197 | header->fsf_status); |
3353 | debug_text_event(adapter->erp_dbf, 0, "fsf_s_inval:"); | ||
3354 | debug_exception(adapter->erp_dbf, 0, | ||
3355 | &header->fsf_status, sizeof (u32)); | ||
3356 | break; | 3198 | break; |
3357 | } | 3199 | } |
3358 | 3200 | ||
@@ -3465,9 +3307,7 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req) | |||
3465 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, | 3307 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, |
3466 | (char *) &fsf_req->qtcb->header.fsf_status_qual, | 3308 | (char *) &fsf_req->qtcb->header.fsf_status_qual, |
3467 | sizeof (union fsf_status_qual)); | 3309 | sizeof (union fsf_status_qual)); |
3468 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | 3310 | zfcp_erp_adapter_reopen(unit->port->adapter, 0, 110, fsf_req); |
3469 | "fsf_s_phand_nv"); | ||
3470 | zfcp_erp_adapter_reopen(unit->port->adapter, 0); | ||
3471 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3311 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3472 | break; | 3312 | break; |
3473 | 3313 | ||
@@ -3483,9 +3323,7 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req) | |||
3483 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, | 3323 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, |
3484 | (char *) &fsf_req->qtcb->header.fsf_status_qual, | 3324 | (char *) &fsf_req->qtcb->header.fsf_status_qual, |
3485 | sizeof (union fsf_status_qual)); | 3325 | sizeof (union fsf_status_qual)); |
3486 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | 3326 | zfcp_erp_port_reopen(unit->port, 0, 111, fsf_req); |
3487 | "fsf_s_lhand_nv"); | ||
3488 | zfcp_erp_port_reopen(unit->port, 0); | ||
3489 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3327 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3490 | break; | 3328 | break; |
3491 | 3329 | ||
@@ -3494,8 +3332,7 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req) | |||
3494 | "needs to be reopened\n", | 3332 | "needs to be reopened\n", |
3495 | unit->port->wwpn, | 3333 | unit->port->wwpn, |
3496 | zfcp_get_busid_by_unit(unit)); | 3334 | zfcp_get_busid_by_unit(unit)); |
3497 | debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_s_pboxed"); | 3335 | zfcp_erp_port_boxed(unit->port, 52, fsf_req); |
3498 | zfcp_erp_port_boxed(unit->port); | ||
3499 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | | 3336 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | |
3500 | ZFCP_STATUS_FSFREQ_RETRY; | 3337 | ZFCP_STATUS_FSFREQ_RETRY; |
3501 | break; | 3338 | break; |
@@ -3504,27 +3341,17 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req) | |||
3504 | switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) { | 3341 | switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) { |
3505 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: | 3342 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: |
3506 | /* re-establish link to port */ | 3343 | /* re-establish link to port */ |
3507 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | ||
3508 | "fsf_sq_ltest"); | ||
3509 | zfcp_test_link(unit->port); | 3344 | zfcp_test_link(unit->port); |
3510 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3345 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3511 | break; | 3346 | break; |
3512 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: | 3347 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: |
3513 | /* ERP strategy will escalate */ | 3348 | /* ERP strategy will escalate */ |
3514 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | ||
3515 | "fsf_sq_ulp"); | ||
3516 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3349 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3517 | break; | 3350 | break; |
3518 | default: | 3351 | default: |
3519 | ZFCP_LOG_NORMAL | 3352 | ZFCP_LOG_NORMAL |
3520 | ("bug: Wrong status qualifier 0x%x arrived.\n", | 3353 | ("bug: Wrong status qualifier 0x%x arrived.\n", |
3521 | fsf_req->qtcb->header.fsf_status_qual.word[0]); | 3354 | fsf_req->qtcb->header.fsf_status_qual.word[0]); |
3522 | debug_text_event(fsf_req->adapter->erp_dbf, 0, | ||
3523 | "fsf_sq_inval:"); | ||
3524 | debug_exception( | ||
3525 | fsf_req->adapter->erp_dbf, 0, | ||
3526 | &fsf_req->qtcb->header.fsf_status_qual.word[0], | ||
3527 | sizeof (u32)); | ||
3528 | break; | 3355 | break; |
3529 | } | 3356 | } |
3530 | break; | 3357 | break; |
@@ -3545,10 +3372,6 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req) | |||
3545 | ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " | 3372 | ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " |
3546 | "(debug info 0x%x)\n", | 3373 | "(debug info 0x%x)\n", |
3547 | fsf_req->qtcb->header.fsf_status); | 3374 | fsf_req->qtcb->header.fsf_status); |
3548 | debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:"); | ||
3549 | debug_exception(fsf_req->adapter->erp_dbf, 0, | ||
3550 | &fsf_req->qtcb->header.fsf_status, | ||
3551 | sizeof (u32)); | ||
3552 | break; | 3375 | break; |
3553 | } | 3376 | } |
3554 | 3377 | ||
@@ -3703,7 +3526,7 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter, | |||
3703 | zfcp_get_busid_by_unit(unit), | 3526 | zfcp_get_busid_by_unit(unit), |
3704 | unit->port->wwpn, | 3527 | unit->port->wwpn, |
3705 | unit->fcp_lun); | 3528 | unit->fcp_lun); |
3706 | zfcp_erp_unit_shutdown(unit, 0); | 3529 | zfcp_erp_unit_shutdown(unit, 0, 131, fsf_req); |
3707 | retval = -EINVAL; | 3530 | retval = -EINVAL; |
3708 | } | 3531 | } |
3709 | goto no_fit; | 3532 | goto no_fit; |
@@ -3739,8 +3562,8 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter, | |||
3739 | send_failed: | 3562 | send_failed: |
3740 | no_fit: | 3563 | no_fit: |
3741 | failed_scsi_cmnd: | 3564 | failed_scsi_cmnd: |
3742 | unit_blocked: | ||
3743 | zfcp_unit_put(unit); | 3565 | zfcp_unit_put(unit); |
3566 | unit_blocked: | ||
3744 | zfcp_fsf_req_free(fsf_req); | 3567 | zfcp_fsf_req_free(fsf_req); |
3745 | fsf_req = NULL; | 3568 | fsf_req = NULL; |
3746 | scsi_cmnd->host_scribble = NULL; | 3569 | scsi_cmnd->host_scribble = NULL; |
@@ -3861,9 +3684,7 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req) | |||
3861 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, | 3684 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, |
3862 | (char *) &header->fsf_status_qual, | 3685 | (char *) &header->fsf_status_qual, |
3863 | sizeof (union fsf_status_qual)); | 3686 | sizeof (union fsf_status_qual)); |
3864 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | 3687 | zfcp_erp_adapter_reopen(unit->port->adapter, 0, 112, fsf_req); |
3865 | "fsf_s_phand_nv"); | ||
3866 | zfcp_erp_adapter_reopen(unit->port->adapter, 0); | ||
3867 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3688 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3868 | break; | 3689 | break; |
3869 | 3690 | ||
@@ -3879,9 +3700,7 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req) | |||
3879 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, | 3700 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, |
3880 | (char *) &header->fsf_status_qual, | 3701 | (char *) &header->fsf_status_qual, |
3881 | sizeof (union fsf_status_qual)); | 3702 | sizeof (union fsf_status_qual)); |
3882 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | 3703 | zfcp_erp_port_reopen(unit->port, 0, 113, fsf_req); |
3883 | "fsf_s_uhand_nv"); | ||
3884 | zfcp_erp_port_reopen(unit->port, 0); | ||
3885 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3704 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3886 | break; | 3705 | break; |
3887 | 3706 | ||
@@ -3897,9 +3716,7 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req) | |||
3897 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, | 3716 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, |
3898 | (char *) &header->fsf_status_qual, | 3717 | (char *) &header->fsf_status_qual, |
3899 | sizeof (union fsf_status_qual)); | 3718 | sizeof (union fsf_status_qual)); |
3900 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | 3719 | zfcp_erp_adapter_reopen(unit->port->adapter, 0, 114, fsf_req); |
3901 | "fsf_s_hand_mis"); | ||
3902 | zfcp_erp_adapter_reopen(unit->port->adapter, 0); | ||
3903 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3720 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3904 | break; | 3721 | break; |
3905 | 3722 | ||
@@ -3909,9 +3726,7 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req) | |||
3909 | zfcp_get_busid_by_unit(unit), | 3726 | zfcp_get_busid_by_unit(unit), |
3910 | ZFCP_FC_SERVICE_CLASS_DEFAULT); | 3727 | ZFCP_FC_SERVICE_CLASS_DEFAULT); |
3911 | /* stop operation for this adapter */ | 3728 | /* stop operation for this adapter */ |
3912 | debug_text_exception(fsf_req->adapter->erp_dbf, 0, | 3729 | zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 132, fsf_req); |
3913 | "fsf_s_class_nsup"); | ||
3914 | zfcp_erp_adapter_shutdown(unit->port->adapter, 0); | ||
3915 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3730 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3916 | break; | 3731 | break; |
3917 | 3732 | ||
@@ -3927,9 +3742,7 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req) | |||
3927 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, | 3742 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, |
3928 | (char *) &header->fsf_status_qual, | 3743 | (char *) &header->fsf_status_qual, |
3929 | sizeof (union fsf_status_qual)); | 3744 | sizeof (union fsf_status_qual)); |
3930 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | 3745 | zfcp_erp_port_reopen(unit->port, 0, 115, fsf_req); |
3931 | "fsf_s_fcp_lun_nv"); | ||
3932 | zfcp_erp_port_reopen(unit->port, 0); | ||
3933 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3746 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3934 | break; | 3747 | break; |
3935 | 3748 | ||
@@ -3951,8 +3764,7 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req) | |||
3951 | break; | 3764 | break; |
3952 | } | 3765 | } |
3953 | } | 3766 | } |
3954 | debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access"); | 3767 | zfcp_erp_unit_access_denied(unit, 61, fsf_req); |
3955 | zfcp_erp_unit_access_denied(unit); | ||
3956 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3768 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3957 | break; | 3769 | break; |
3958 | 3770 | ||
@@ -3965,9 +3777,7 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req) | |||
3965 | zfcp_get_busid_by_unit(unit), | 3777 | zfcp_get_busid_by_unit(unit), |
3966 | fsf_req->qtcb->bottom.io.data_direction); | 3778 | fsf_req->qtcb->bottom.io.data_direction); |
3967 | /* stop operation for this adapter */ | 3779 | /* stop operation for this adapter */ |
3968 | debug_text_event(fsf_req->adapter->erp_dbf, 0, | 3780 | zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, fsf_req); |
3969 | "fsf_s_dir_ind_nv"); | ||
3970 | zfcp_erp_adapter_shutdown(unit->port->adapter, 0); | ||
3971 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3781 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3972 | break; | 3782 | break; |
3973 | 3783 | ||
@@ -3980,9 +3790,7 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req) | |||
3980 | zfcp_get_busid_by_unit(unit), | 3790 | zfcp_get_busid_by_unit(unit), |
3981 | fsf_req->qtcb->bottom.io.fcp_cmnd_length); | 3791 | fsf_req->qtcb->bottom.io.fcp_cmnd_length); |
3982 | /* stop operation for this adapter */ | 3792 | /* stop operation for this adapter */ |
3983 | debug_text_event(fsf_req->adapter->erp_dbf, 0, | 3793 | zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, fsf_req); |
3984 | "fsf_s_cmd_len_nv"); | ||
3985 | zfcp_erp_adapter_shutdown(unit->port->adapter, 0); | ||
3986 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3794 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
3987 | break; | 3795 | break; |
3988 | 3796 | ||
@@ -3990,8 +3798,7 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req) | |||
3990 | ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s " | 3798 | ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s " |
3991 | "needs to be reopened\n", | 3799 | "needs to be reopened\n", |
3992 | unit->port->wwpn, zfcp_get_busid_by_unit(unit)); | 3800 | unit->port->wwpn, zfcp_get_busid_by_unit(unit)); |
3993 | debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_s_pboxed"); | 3801 | zfcp_erp_port_boxed(unit->port, 53, fsf_req); |
3994 | zfcp_erp_port_boxed(unit->port); | ||
3995 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | | 3802 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | |
3996 | ZFCP_STATUS_FSFREQ_RETRY; | 3803 | ZFCP_STATUS_FSFREQ_RETRY; |
3997 | break; | 3804 | break; |
@@ -4001,8 +3808,7 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req) | |||
4001 | "wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n", | 3808 | "wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n", |
4002 | zfcp_get_busid_by_unit(unit), | 3809 | zfcp_get_busid_by_unit(unit), |
4003 | unit->port->wwpn, unit->fcp_lun); | 3810 | unit->port->wwpn, unit->fcp_lun); |
4004 | debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_lboxed"); | 3811 | zfcp_erp_unit_boxed(unit, 54, fsf_req); |
4005 | zfcp_erp_unit_boxed(unit); | ||
4006 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | 3812 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
4007 | | ZFCP_STATUS_FSFREQ_RETRY; | 3813 | | ZFCP_STATUS_FSFREQ_RETRY; |
4008 | break; | 3814 | break; |
@@ -4011,25 +3817,16 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req) | |||
4011 | switch (header->fsf_status_qual.word[0]) { | 3817 | switch (header->fsf_status_qual.word[0]) { |
4012 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: | 3818 | case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: |
4013 | /* re-establish link to port */ | 3819 | /* re-establish link to port */ |
4014 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | ||
4015 | "fsf_sq_ltest"); | ||
4016 | zfcp_test_link(unit->port); | 3820 | zfcp_test_link(unit->port); |
4017 | break; | 3821 | break; |
4018 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: | 3822 | case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: |
4019 | /* FIXME(hw) need proper specs for proper action */ | 3823 | /* FIXME(hw) need proper specs for proper action */ |
4020 | /* let scsi stack deal with retries and escalation */ | 3824 | /* let scsi stack deal with retries and escalation */ |
4021 | debug_text_event(fsf_req->adapter->erp_dbf, 1, | ||
4022 | "fsf_sq_ulp"); | ||
4023 | break; | 3825 | break; |
4024 | default: | 3826 | default: |
4025 | ZFCP_LOG_NORMAL | 3827 | ZFCP_LOG_NORMAL |
4026 | ("Unknown status qualifier 0x%x arrived.\n", | 3828 | ("Unknown status qualifier 0x%x arrived.\n", |
4027 | header->fsf_status_qual.word[0]); | 3829 | header->fsf_status_qual.word[0]); |
4028 | debug_text_event(fsf_req->adapter->erp_dbf, 0, | ||
4029 | "fsf_sq_inval:"); | ||
4030 | debug_exception(fsf_req->adapter->erp_dbf, 0, | ||
4031 | &header->fsf_status_qual.word[0], | ||
4032 | sizeof(u32)); | ||
4033 | break; | 3830 | break; |
4034 | } | 3831 | } |
4035 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 3832 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
@@ -4040,12 +3837,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req) | |||
4040 | 3837 | ||
4041 | case FSF_FCP_RSP_AVAILABLE: | 3838 | case FSF_FCP_RSP_AVAILABLE: |
4042 | break; | 3839 | break; |
4043 | |||
4044 | default: | ||
4045 | debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_inval:"); | ||
4046 | debug_exception(fsf_req->adapter->erp_dbf, 0, | ||
4047 | &header->fsf_status, sizeof(u32)); | ||
4048 | break; | ||
4049 | } | 3840 | } |
4050 | 3841 | ||
4051 | skip_fsfstatus: | 3842 | skip_fsfstatus: |
@@ -4625,9 +4416,6 @@ zfcp_fsf_control_file_handler(struct zfcp_fsf_req *fsf_req) | |||
4625 | "was presented on the adapter %s\n", | 4416 | "was presented on the adapter %s\n", |
4626 | header->fsf_status, | 4417 | header->fsf_status, |
4627 | zfcp_get_busid_by_adapter(adapter)); | 4418 | zfcp_get_busid_by_adapter(adapter)); |
4628 | debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_sq_inval"); | ||
4629 | debug_exception(fsf_req->adapter->erp_dbf, 0, | ||
4630 | &header->fsf_status_qual.word[0], sizeof(u32)); | ||
4631 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 4419 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
4632 | retval = -EINVAL; | 4420 | retval = -EINVAL; |
4633 | break; | 4421 | break; |
@@ -4817,7 +4605,6 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req) | |||
4817 | volatile struct qdio_buffer_element *sbale; | 4605 | volatile struct qdio_buffer_element *sbale; |
4818 | int inc_seq_no; | 4606 | int inc_seq_no; |
4819 | int new_distance_from_int; | 4607 | int new_distance_from_int; |
4820 | u64 dbg_tmp[2]; | ||
4821 | int retval = 0; | 4608 | int retval = 0; |
4822 | 4609 | ||
4823 | adapter = fsf_req->adapter; | 4610 | adapter = fsf_req->adapter; |
@@ -4867,10 +4654,6 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req) | |||
4867 | QDIO_FLAG_SYNC_OUTPUT, | 4654 | QDIO_FLAG_SYNC_OUTPUT, |
4868 | 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); | 4655 | 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); |
4869 | 4656 | ||
4870 | dbg_tmp[0] = (unsigned long) sbale[0].addr; | ||
4871 | dbg_tmp[1] = (u64) retval; | ||
4872 | debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); | ||
4873 | |||
4874 | if (unlikely(retval)) { | 4657 | if (unlikely(retval)) { |
4875 | /* Queues are down..... */ | 4658 | /* Queues are down..... */ |
4876 | retval = -EIO; | 4659 | retval = -EIO; |
@@ -4885,7 +4668,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req) | |||
4885 | req_queue->free_index -= fsf_req->sbal_number; | 4668 | req_queue->free_index -= fsf_req->sbal_number; |
4886 | req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; | 4669 | req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; |
4887 | req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ | 4670 | req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ |
4888 | zfcp_erp_adapter_reopen(adapter, 0); | 4671 | zfcp_erp_adapter_reopen(adapter, 0, 116, fsf_req); |
4889 | } else { | 4672 | } else { |
4890 | req_queue->distance_from_int = new_distance_from_int; | 4673 | req_queue->distance_from_int = new_distance_from_int; |
4891 | /* | 4674 | /* |
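Editorial note on the zfcp_fsf.c hunks above: they all follow one conversion. The free-text debug_text_event()/debug_exception() records written to adapter->erp_dbf are dropped, and each error-recovery trigger instead carries a numeric trace id plus a pointer to the object that caused it (the fsf_req here, or NULL where no request context exists, as in the zfcp_qdio.c hunk below). The distinct numbers per call site suggest the id identifies where recovery was started. A condensed sketch of a handler case after the conversion, using only prototypes that appear in this diff:

    case FSF_PORT_BOXED:
            ZFCP_LOG_DEBUG("port 0x%016Lx needs to be reopened\n",
                           port->wwpn);
            /* trace id marks this call site; fsf_req is the trigger */
            zfcp_erp_port_boxed(port, 52, fsf_req);
            fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
                               ZFCP_STATUS_FSFREQ_RETRY;
            break;
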
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 22fdc17e0d0e..8ca5f074c687 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -175,8 +175,9 @@ zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, | |||
175 | * which is set again in case we have missed by a mile. | 175 | * which is set again in case we have missed by a mile. |
176 | */ | 176 | */ |
177 | zfcp_erp_adapter_reopen(adapter, | 177 | zfcp_erp_adapter_reopen(adapter, |
178 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | | 178 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | |
179 | ZFCP_STATUS_COMMON_ERP_FAILED); | 179 | ZFCP_STATUS_COMMON_ERP_FAILED, 140, |
180 | NULL); | ||
180 | } | 181 | } |
181 | return retval; | 182 | return retval; |
182 | } | 183 | } |
@@ -239,8 +240,6 @@ static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, | |||
239 | struct zfcp_fsf_req *fsf_req; | 240 | struct zfcp_fsf_req *fsf_req; |
240 | unsigned long flags; | 241 | unsigned long flags; |
241 | 242 | ||
242 | debug_long_event(adapter->erp_dbf, 4, req_id); | ||
243 | |||
244 | spin_lock_irqsave(&adapter->req_list_lock, flags); | 243 | spin_lock_irqsave(&adapter->req_list_lock, flags); |
245 | fsf_req = zfcp_reqlist_find(adapter, req_id); | 244 | fsf_req = zfcp_reqlist_find(adapter, req_id); |
246 | 245 | ||
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index b9daf5c05862..f81850624eed 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -31,6 +31,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *, | |||
31 | void (*done) (struct scsi_cmnd *)); | 31 | void (*done) (struct scsi_cmnd *)); |
32 | static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *); | 32 | static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *); |
33 | static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); | 33 | static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); |
34 | static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *); | ||
34 | static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); | 35 | static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); |
35 | static int zfcp_task_management_function(struct zfcp_unit *, u8, | 36 | static int zfcp_task_management_function(struct zfcp_unit *, u8, |
36 | struct scsi_cmnd *); | 37 | struct scsi_cmnd *); |
@@ -51,6 +52,7 @@ struct zfcp_data zfcp_data = { | |||
51 | .queuecommand = zfcp_scsi_queuecommand, | 52 | .queuecommand = zfcp_scsi_queuecommand, |
52 | .eh_abort_handler = zfcp_scsi_eh_abort_handler, | 53 | .eh_abort_handler = zfcp_scsi_eh_abort_handler, |
53 | .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, | 54 | .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, |
55 | .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler, | ||
54 | .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, | 56 | .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, |
55 | .can_queue = 4096, | 57 | .can_queue = 4096, |
56 | .this_id = -1, | 58 | .this_id = -1, |
@@ -179,11 +181,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) | |||
179 | struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; | 181 | struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; |
180 | 182 | ||
181 | if (unit) { | 183 | if (unit) { |
182 | zfcp_erp_wait(unit->port->adapter); | ||
183 | atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); | 184 | atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); |
184 | sdpnt->hostdata = NULL; | 185 | sdpnt->hostdata = NULL; |
185 | unit->device = NULL; | 186 | unit->device = NULL; |
186 | zfcp_erp_unit_failed(unit); | 187 | zfcp_erp_unit_failed(unit, 12, NULL); |
187 | zfcp_unit_put(unit); | 188 | zfcp_unit_put(unit); |
188 | } else | 189 | } else |
189 | ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at " | 190 | ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at " |
@@ -442,58 +443,32 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
442 | return retval; | 443 | return retval; |
443 | } | 444 | } |
444 | 445 | ||
445 | static int | 446 | static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) |
446 | zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) | ||
447 | { | 447 | { |
448 | int retval; | 448 | int retval; |
449 | struct zfcp_unit *unit = (struct zfcp_unit *) scpnt->device->hostdata; | 449 | struct zfcp_unit *unit = scpnt->device->hostdata; |
450 | 450 | ||
451 | if (!unit) { | 451 | if (!unit) { |
452 | ZFCP_LOG_NORMAL("bug: Tried reset for nonexistent unit\n"); | 452 | WARN_ON(1); |
453 | retval = SUCCESS; | 453 | return SUCCESS; |
454 | goto out; | ||
455 | } | 454 | } |
456 | ZFCP_LOG_NORMAL("resetting unit 0x%016Lx on port 0x%016Lx, adapter %s\n", | 455 | retval = zfcp_task_management_function(unit, |
457 | unit->fcp_lun, unit->port->wwpn, | 456 | FCP_LOGICAL_UNIT_RESET, |
458 | zfcp_get_busid_by_adapter(unit->port->adapter)); | 457 | scpnt); |
458 | return retval ? FAILED : SUCCESS; | ||
459 | } | ||
459 | 460 | ||
460 | /* | 461 | static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt) |
461 | * If we do not know whether the unit supports 'logical unit reset' | 462 | { |
462 | * then try 'logical unit reset' and proceed with 'target reset' | 463 | int retval; |
463 | * if 'logical unit reset' fails. | 464 | struct zfcp_unit *unit = scpnt->device->hostdata; |
464 | * If the unit is known not to support 'logical unit reset' then | 465 | |
465 | * skip 'logical unit reset' and try 'target reset' immediately. | 466 | if (!unit) { |
466 | */ | 467 | WARN_ON(1); |
467 | if (!atomic_test_mask(ZFCP_STATUS_UNIT_NOTSUPPUNITRESET, | 468 | return SUCCESS; |
468 | &unit->status)) { | ||
469 | retval = zfcp_task_management_function(unit, | ||
470 | FCP_LOGICAL_UNIT_RESET, | ||
471 | scpnt); | ||
472 | if (retval) { | ||
473 | ZFCP_LOG_DEBUG("unit reset failed (unit=%p)\n", unit); | ||
474 | if (retval == -ENOTSUPP) | ||
475 | atomic_set_mask | ||
476 | (ZFCP_STATUS_UNIT_NOTSUPPUNITRESET, | ||
477 | &unit->status); | ||
478 | /* fall through and try 'target reset' next */ | ||
479 | } else { | ||
480 | ZFCP_LOG_DEBUG("unit reset succeeded (unit=%p)\n", | ||
481 | unit); | ||
482 | /* avoid 'target reset' */ | ||
483 | retval = SUCCESS; | ||
484 | goto out; | ||
485 | } | ||
486 | } | 469 | } |
487 | retval = zfcp_task_management_function(unit, FCP_TARGET_RESET, scpnt); | 470 | retval = zfcp_task_management_function(unit, FCP_TARGET_RESET, scpnt); |
488 | if (retval) { | 471 | return retval ? FAILED : SUCCESS; |
489 | ZFCP_LOG_DEBUG("target reset failed (unit=%p)\n", unit); | ||
490 | retval = FAILED; | ||
491 | } else { | ||
492 | ZFCP_LOG_DEBUG("target reset succeeded (unit=%p)\n", unit); | ||
493 | retval = SUCCESS; | ||
494 | } | ||
495 | out: | ||
496 | return retval; | ||
497 | } | 472 | } |
498 | 473 | ||
499 | static int | 474 | static int |
@@ -553,7 +528,7 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) | |||
553 | unit->fcp_lun, unit->port->wwpn, | 528 | unit->fcp_lun, unit->port->wwpn, |
554 | zfcp_get_busid_by_adapter(unit->port->adapter)); | 529 | zfcp_get_busid_by_adapter(unit->port->adapter)); |
555 | 530 | ||
556 | zfcp_erp_adapter_reopen(adapter, 0); | 531 | zfcp_erp_adapter_reopen(adapter, 0, 141, scpnt); |
557 | zfcp_erp_wait(adapter); | 532 | zfcp_erp_wait(adapter); |
558 | 533 | ||
559 | return SUCCESS; | 534 | return SUCCESS; |
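Editorial note on the zfcp_scsi.c hunk above: the old combined reset handler is split in two. eh_device_reset_handler now issues only an FCP logical unit reset and the new eh_target_reset_handler issues the target reset, so the ZFCP_STATUS_UNIT_NOTSUPPUNITRESET bookkeeping can go; the SCSI midlayer escalates from device reset to target reset itself when the first attempt fails. A minimal sketch of the resulting handler shape, reusing the helper name and FCP_* codes shown in the diff:

    static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
    {
            struct zfcp_unit *unit = scpnt->device->hostdata;

            if (!unit) {
                    WARN_ON(1);     /* no zfcp unit behind this device */
                    return SUCCESS;
            }
            /* LUN reset only; target reset is a separate callback now */
            return zfcp_task_management_function(unit,
                                                 FCP_LOGICAL_UNIT_RESET,
                                                 scpnt) ? FAILED : SUCCESS;
    }
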
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c index 705c6d4428f3..ccbba4dd3a77 100644 --- a/drivers/s390/scsi/zfcp_sysfs_adapter.c +++ b/drivers/s390/scsi/zfcp_sysfs_adapter.c | |||
@@ -89,7 +89,7 @@ zfcp_sysfs_port_add_store(struct device *dev, struct device_attribute *attr, con | |||
89 | 89 | ||
90 | retval = 0; | 90 | retval = 0; |
91 | 91 | ||
92 | zfcp_erp_port_reopen(port, 0); | 92 | zfcp_erp_port_reopen(port, 0, 91, NULL); |
93 | zfcp_erp_wait(port->adapter); | 93 | zfcp_erp_wait(port->adapter); |
94 | zfcp_port_put(port); | 94 | zfcp_port_put(port); |
95 | out: | 95 | out: |
@@ -147,7 +147,7 @@ zfcp_sysfs_port_remove_store(struct device *dev, struct device_attribute *attr, | |||
147 | goto out; | 147 | goto out; |
148 | } | 148 | } |
149 | 149 | ||
150 | zfcp_erp_port_shutdown(port, 0); | 150 | zfcp_erp_port_shutdown(port, 0, 92, NULL); |
151 | zfcp_erp_wait(adapter); | 151 | zfcp_erp_wait(adapter); |
152 | zfcp_port_put(port); | 152 | zfcp_port_put(port); |
153 | zfcp_port_dequeue(port); | 153 | zfcp_port_dequeue(port); |
@@ -191,9 +191,10 @@ zfcp_sysfs_adapter_failed_store(struct device *dev, struct device_attribute *att | |||
191 | goto out; | 191 | goto out; |
192 | } | 192 | } |
193 | 193 | ||
194 | zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING, | 194 | zfcp_erp_modify_adapter_status(adapter, 44, NULL, |
195 | ZFCP_SET); | 195 | ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); |
196 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); | 196 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 93, |
197 | NULL); | ||
197 | zfcp_erp_wait(adapter); | 198 | zfcp_erp_wait(adapter); |
198 | out: | 199 | out: |
199 | up(&zfcp_data.config_sema); | 200 | up(&zfcp_data.config_sema); |
diff --git a/drivers/s390/scsi/zfcp_sysfs_port.c b/drivers/s390/scsi/zfcp_sysfs_port.c index 1320c0591431..703c1b5cb602 100644 --- a/drivers/s390/scsi/zfcp_sysfs_port.c +++ b/drivers/s390/scsi/zfcp_sysfs_port.c | |||
@@ -94,7 +94,7 @@ zfcp_sysfs_unit_add_store(struct device *dev, struct device_attribute *attr, con | |||
94 | 94 | ||
95 | retval = 0; | 95 | retval = 0; |
96 | 96 | ||
97 | zfcp_erp_unit_reopen(unit, 0); | 97 | zfcp_erp_unit_reopen(unit, 0, 94, NULL); |
98 | zfcp_erp_wait(unit->port->adapter); | 98 | zfcp_erp_wait(unit->port->adapter); |
99 | zfcp_unit_put(unit); | 99 | zfcp_unit_put(unit); |
100 | out: | 100 | out: |
@@ -150,7 +150,7 @@ zfcp_sysfs_unit_remove_store(struct device *dev, struct device_attribute *attr, | |||
150 | goto out; | 150 | goto out; |
151 | } | 151 | } |
152 | 152 | ||
153 | zfcp_erp_unit_shutdown(unit, 0); | 153 | zfcp_erp_unit_shutdown(unit, 0, 95, NULL); |
154 | zfcp_erp_wait(unit->port->adapter); | 154 | zfcp_erp_wait(unit->port->adapter); |
155 | zfcp_unit_put(unit); | 155 | zfcp_unit_put(unit); |
156 | zfcp_unit_dequeue(unit); | 156 | zfcp_unit_dequeue(unit); |
@@ -193,8 +193,9 @@ zfcp_sysfs_port_failed_store(struct device *dev, struct device_attribute *attr, | |||
193 | goto out; | 193 | goto out; |
194 | } | 194 | } |
195 | 195 | ||
196 | zfcp_erp_modify_port_status(port, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); | 196 | zfcp_erp_modify_port_status(port, 45, NULL, |
197 | zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED); | 197 | ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); |
198 | zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 96, NULL); | ||
198 | zfcp_erp_wait(port->adapter); | 199 | zfcp_erp_wait(port->adapter); |
199 | out: | 200 | out: |
200 | up(&zfcp_data.config_sema); | 201 | up(&zfcp_data.config_sema); |
diff --git a/drivers/s390/scsi/zfcp_sysfs_unit.c b/drivers/s390/scsi/zfcp_sysfs_unit.c index 63f75ee95c33..80fb2c2cf48a 100644 --- a/drivers/s390/scsi/zfcp_sysfs_unit.c +++ b/drivers/s390/scsi/zfcp_sysfs_unit.c | |||
@@ -94,8 +94,9 @@ zfcp_sysfs_unit_failed_store(struct device *dev, struct device_attribute *attr, | |||
94 | goto out; | 94 | goto out; |
95 | } | 95 | } |
96 | 96 | ||
97 | zfcp_erp_modify_unit_status(unit, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); | 97 | zfcp_erp_modify_unit_status(unit, 46, NULL, |
98 | zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED); | 98 | ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); |
99 | zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, 97, NULL); | ||
99 | zfcp_erp_wait(unit->port->adapter); | 100 | zfcp_erp_wait(unit->port->adapter); |
100 | out: | 101 | out: |
101 | up(&zfcp_data.config_sema); | 102 | up(&zfcp_data.config_sema); |
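Editorial note on the three zfcp sysfs hunks: the recovery flow of the 'failed' attribute store is unchanged. It still marks the object as runnable, reopens it while passing ZFCP_STATUS_COMMON_ERP_FAILED, and waits for recovery; the only difference is the added trace id and a NULL reference, since a sysfs write has no request context to point at. The port case condenses to the lines already visible in the diff:

    zfcp_erp_modify_port_status(port, 45, NULL,
                                ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
    zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 96, NULL);
    zfcp_erp_wait(port->adapter);
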
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index b4912d1cee2a..51c3ebf1c7d1 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c | |||
@@ -1838,12 +1838,11 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, | |||
1838 | if (scsi_sg_count(srb)) { | 1838 | if (scsi_sg_count(srb)) { |
1839 | if ((scsi_sg_count(srb) == 1) && | 1839 | if ((scsi_sg_count(srb) == 1) && |
1840 | (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) { | 1840 | (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) { |
1841 | if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) { | 1841 | if (srb->sc_data_direction == DMA_TO_DEVICE || |
1842 | struct scatterlist *sg = scsi_sglist(srb); | 1842 | srb->sc_data_direction == DMA_BIDIRECTIONAL) |
1843 | char *buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; | 1843 | scsi_sg_copy_to_buffer(srb, |
1844 | memcpy(tw_dev->generic_buffer_virt[request_id], buf, sg->length); | 1844 | tw_dev->generic_buffer_virt[request_id], |
1845 | kunmap_atomic(buf - sg->offset, KM_IRQ0); | 1845 | TW_SECTOR_SIZE); |
1846 | } | ||
1847 | command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); | 1846 | command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); |
1848 | command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH); | 1847 | command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH); |
1849 | } else { | 1848 | } else { |
@@ -1915,13 +1914,11 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re | |||
1915 | (cmd->sc_data_direction == DMA_FROM_DEVICE || | 1914 | (cmd->sc_data_direction == DMA_FROM_DEVICE || |
1916 | cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { | 1915 | cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { |
1917 | if (scsi_sg_count(cmd) == 1) { | 1916 | if (scsi_sg_count(cmd) == 1) { |
1918 | struct scatterlist *sg = scsi_sglist(tw_dev->srb[request_id]); | 1917 | unsigned long flags; |
1919 | char *buf; | 1918 | void *buf = tw_dev->generic_buffer_virt[request_id]; |
1920 | unsigned long flags = 0; | 1919 | |
1921 | local_irq_save(flags); | 1920 | local_irq_save(flags); |
1922 | buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; | 1921 | scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE); |
1923 | memcpy(buf, tw_dev->generic_buffer_virt[request_id], sg->length); | ||
1924 | kunmap_atomic(buf - sg->offset, KM_IRQ0); | ||
1925 | local_irq_restore(flags); | 1922 | local_irq_restore(flags); |
1926 | } | 1923 | } |
1927 | } | 1924 | } |
@@ -2028,8 +2025,6 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id | |||
2028 | } | 2025 | } |
2029 | tw_dev = (TW_Device_Extension *)host->hostdata; | 2026 | tw_dev = (TW_Device_Extension *)host->hostdata; |
2030 | 2027 | ||
2031 | memset(tw_dev, 0, sizeof(TW_Device_Extension)); | ||
2032 | |||
2033 | /* Save values to device extension */ | 2028 | /* Save values to device extension */ |
2034 | tw_dev->host = host; | 2029 | tw_dev->host = host; |
2035 | tw_dev->tw_pci_dev = pdev; | 2030 | tw_dev->tw_pci_dev = pdev; |
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index d09532162217..adb98a297210 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c | |||
@@ -1463,18 +1463,10 @@ static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id, | |||
1463 | void *data, unsigned int len) | 1463 | void *data, unsigned int len) |
1464 | { | 1464 | { |
1465 | struct scsi_cmnd *cmd = tw_dev->srb[request_id]; | 1465 | struct scsi_cmnd *cmd = tw_dev->srb[request_id]; |
1466 | void *buf; | 1466 | unsigned long flags; |
1467 | unsigned int transfer_len; | ||
1468 | unsigned long flags = 0; | ||
1469 | struct scatterlist *sg = scsi_sglist(cmd); | ||
1470 | 1467 | ||
1471 | local_irq_save(flags); | 1468 | local_irq_save(flags); |
1472 | buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; | 1469 | scsi_sg_copy_from_buffer(cmd, data, len); |
1473 | transfer_len = min(sg->length, len); | ||
1474 | |||
1475 | memcpy(buf, data, transfer_len); | ||
1476 | |||
1477 | kunmap_atomic(buf - sg->offset, KM_IRQ0); | ||
1478 | local_irq_restore(flags); | 1470 | local_irq_restore(flags); |
1479 | } | 1471 | } |
1480 | 1472 | ||
@@ -2294,8 +2286,6 @@ static int __devinit tw_probe(struct pci_dev *pdev, const struct pci_device_id * | |||
2294 | } | 2286 | } |
2295 | tw_dev = (TW_Device_Extension *)host->hostdata; | 2287 | tw_dev = (TW_Device_Extension *)host->hostdata; |
2296 | 2288 | ||
2297 | memset(tw_dev, 0, sizeof(TW_Device_Extension)); | ||
2298 | |||
2299 | /* Save values to device extension */ | 2289 | /* Save values to device extension */ |
2300 | tw_dev->host = host; | 2290 | tw_dev->host = host; |
2301 | tw_dev->tw_pci_dev = pdev; | 2291 | tw_dev->tw_pci_dev = pdev; |
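Note: the 3w-9xxx and 3w-xxxx hunks above replace open-coded kmap_atomic()/memcpy() loops with the midlayer helpers scsi_sg_copy_to_buffer() and scsi_sg_copy_from_buffer(), which handle the page mapping internally. A minimal sketch of the pattern, outside any real driver context (the names example_bounce_io, bounce_buf and bounce_len are illustrative only, not part of either driver):

	#include <linux/dma-mapping.h>
	#include <scsi/scsi_cmnd.h>

	/* Illustrative only: move a command's scatter-gather data through a
	 * driver-private linear bounce buffer using the SCSI helpers. */
	static void example_bounce_io(struct scsi_cmnd *cmd, void *bounce_buf,
				      int bounce_len)
	{
		/* write path: gather the sg list into the bounce buffer */
		if (cmd->sc_data_direction == DMA_TO_DEVICE ||
		    cmd->sc_data_direction == DMA_BIDIRECTIONAL)
			scsi_sg_copy_to_buffer(cmd, bounce_buf, bounce_len);

		/* ... hand bounce_buf to the hardware ... */

		/* read path: scatter the bounce buffer back into the sg list */
		if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
		    cmd->sc_data_direction == DMA_BIDIRECTIONAL)
			scsi_sg_copy_from_buffer(cmd, bounce_buf, bounce_len);
	}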
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index 4d3ebb1af490..2d689af24664 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c | |||
@@ -896,7 +896,7 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda | |||
896 | IRQ_Channel = PCI_Device->irq; | 896 | IRQ_Channel = PCI_Device->irq; |
897 | IO_Address = BaseAddress0 = pci_resource_start(PCI_Device, 0); | 897 | IO_Address = BaseAddress0 = pci_resource_start(PCI_Device, 0); |
898 | PCI_Address = BaseAddress1 = pci_resource_start(PCI_Device, 1); | 898 | PCI_Address = BaseAddress1 = pci_resource_start(PCI_Device, 1); |
899 | #ifndef CONFIG_SCSI_OMIT_FLASHPOINT | 899 | #ifdef CONFIG_SCSI_FLASHPOINT |
900 | if (pci_resource_flags(PCI_Device, 0) & IORESOURCE_MEM) { | 900 | if (pci_resource_flags(PCI_Device, 0) & IORESOURCE_MEM) { |
901 | BusLogic_Error("BusLogic: Base Address0 0x%X not I/O for " "FlashPoint Host Adapter\n", NULL, BaseAddress0); | 901 | BusLogic_Error("BusLogic: Base Address0 0x%X not I/O for " "FlashPoint Host Adapter\n", NULL, BaseAddress0); |
902 | BusLogic_Error("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, Bus, Device, IO_Address); | 902 | BusLogic_Error("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, Bus, Device, IO_Address); |
@@ -1006,6 +1006,9 @@ static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter | |||
1006 | } | 1006 | } |
1007 | 1007 | ||
1008 | 1008 | ||
1009 | #else | ||
1010 | #define BusLogic_InitializeProbeInfoList(adapter) \ | ||
1011 | BusLogic_InitializeProbeInfoListISA(adapter) | ||
1009 | #endif /* CONFIG_PCI */ | 1012 | #endif /* CONFIG_PCI */ |
1010 | 1013 | ||
1011 | 1014 | ||
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h index bfbfb5c3a8f6..73f237a1ed94 100644 --- a/drivers/scsi/BusLogic.h +++ b/drivers/scsi/BusLogic.h | |||
@@ -34,23 +34,6 @@ | |||
34 | #endif | 34 | #endif |
35 | 35 | ||
36 | /* | 36 | /* |
37 | FlashPoint support is only available for the Intel x86 Architecture with | ||
38 | CONFIG_PCI set. | ||
39 | */ | ||
40 | |||
41 | #ifndef __i386__ | ||
42 | #undef CONFIG_SCSI_OMIT_FLASHPOINT | ||
43 | #define CONFIG_SCSI_OMIT_FLASHPOINT | ||
44 | #endif | ||
45 | |||
46 | #ifndef CONFIG_PCI | ||
47 | #undef CONFIG_SCSI_OMIT_FLASHPOINT | ||
48 | #define CONFIG_SCSI_OMIT_FLASHPOINT | ||
49 | #define BusLogic_InitializeProbeInfoListISA BusLogic_InitializeProbeInfoList | ||
50 | #endif | ||
51 | |||
52 | |||
53 | /* | ||
54 | Define the maximum number of BusLogic Host Adapters supported by this driver. | 37 | Define the maximum number of BusLogic Host Adapters supported by this driver. |
55 | */ | 38 | */ |
56 | 39 | ||
@@ -178,7 +161,7 @@ static int BusLogic_HostAdapterAddressCount[3] = { 0, BusLogic_MultiMasterAddres | |||
178 | Define macros for testing the Host Adapter Type. | 161 | Define macros for testing the Host Adapter Type. |
179 | */ | 162 | */ |
180 | 163 | ||
181 | #ifndef CONFIG_SCSI_OMIT_FLASHPOINT | 164 | #ifdef CONFIG_SCSI_FLASHPOINT |
182 | 165 | ||
183 | #define BusLogic_MultiMasterHostAdapterP(HostAdapter) \ | 166 | #define BusLogic_MultiMasterHostAdapterP(HostAdapter) \ |
184 | (HostAdapter->HostAdapterType == BusLogic_MultiMaster) | 167 | (HostAdapter->HostAdapterType == BusLogic_MultiMaster) |
@@ -871,7 +854,7 @@ struct BusLogic_CCB { | |||
871 | void (*CallbackFunction) (struct BusLogic_CCB *); /* Bytes 40-43 */ | 854 | void (*CallbackFunction) (struct BusLogic_CCB *); /* Bytes 40-43 */ |
872 | u32 BaseAddress; /* Bytes 44-47 */ | 855 | u32 BaseAddress; /* Bytes 44-47 */ |
873 | enum BusLogic_CompletionCode CompletionCode; /* Byte 48 */ | 856 | enum BusLogic_CompletionCode CompletionCode; /* Byte 48 */ |
874 | #ifndef CONFIG_SCSI_OMIT_FLASHPOINT | 857 | #ifdef CONFIG_SCSI_FLASHPOINT |
875 | unsigned char:8; /* Byte 49 */ | 858 | unsigned char:8; /* Byte 49 */ |
876 | unsigned short OS_Flags; /* Bytes 50-51 */ | 859 | unsigned short OS_Flags; /* Bytes 50-51 */ |
877 | unsigned char Private[48]; /* Bytes 52-99 */ | 860 | unsigned char Private[48]; /* Bytes 52-99 */ |
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c index 1c9078191d9e..b374e457e5e2 100644 --- a/drivers/scsi/FlashPoint.c +++ b/drivers/scsi/FlashPoint.c | |||
@@ -16,7 +16,7 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | 18 | ||
19 | #ifndef CONFIG_SCSI_OMIT_FLASHPOINT | 19 | #ifdef CONFIG_SCSI_FLASHPOINT |
20 | 20 | ||
21 | #define MAX_CARDS 8 | 21 | #define MAX_CARDS 8 |
22 | #undef BUSTYPE_PCI | 22 | #undef BUSTYPE_PCI |
@@ -7626,7 +7626,7 @@ FlashPoint__HandleInterrupt(FlashPoint_CardHandle_T CardHandle) | |||
7626 | #define FlashPoint_InterruptPending FlashPoint__InterruptPending | 7626 | #define FlashPoint_InterruptPending FlashPoint__InterruptPending |
7627 | #define FlashPoint_HandleInterrupt FlashPoint__HandleInterrupt | 7627 | #define FlashPoint_HandleInterrupt FlashPoint__HandleInterrupt |
7628 | 7628 | ||
7629 | #else /* CONFIG_SCSI_OMIT_FLASHPOINT */ | 7629 | #else /* !CONFIG_SCSI_FLASHPOINT */ |
7630 | 7630 | ||
7631 | /* | 7631 | /* |
7632 | Define prototypes for the FlashPoint SCCB Manager Functions. | 7632 | Define prototypes for the FlashPoint SCCB Manager Functions. |
@@ -7641,4 +7641,4 @@ extern bool FlashPoint_InterruptPending(FlashPoint_CardHandle_T); | |||
7641 | extern int FlashPoint_HandleInterrupt(FlashPoint_CardHandle_T); | 7641 | extern int FlashPoint_HandleInterrupt(FlashPoint_CardHandle_T); |
7642 | extern void FlashPoint_ReleaseHostAdapter(FlashPoint_CardHandle_T); | 7642 | extern void FlashPoint_ReleaseHostAdapter(FlashPoint_CardHandle_T); |
7643 | 7643 | ||
7644 | #endif /* CONFIG_SCSI_OMIT_FLASHPOINT */ | 7644 | #endif /* CONFIG_SCSI_FLASHPOINT */ |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index b9d374082b65..7f78e3ea517d 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -588,18 +588,20 @@ config SCSI_BUSLOGIC | |||
588 | <http://www.tldp.org/docs.html#howto>, and the files | 588 | <http://www.tldp.org/docs.html#howto>, and the files |
589 | <file:Documentation/scsi/BusLogic.txt> and | 589 | <file:Documentation/scsi/BusLogic.txt> and |
590 | <file:Documentation/scsi/FlashPoint.txt> for more information. | 590 | <file:Documentation/scsi/FlashPoint.txt> for more information. |
591 | Note that support for FlashPoint is only available for 32-bit | ||
592 | x86 configurations. | ||
591 | 593 | ||
592 | To compile this driver as a module, choose M here: the | 594 | To compile this driver as a module, choose M here: the |
593 | module will be called BusLogic. | 595 | module will be called BusLogic. |
594 | 596 | ||
595 | config SCSI_OMIT_FLASHPOINT | 597 | config SCSI_FLASHPOINT |
596 | bool "Omit FlashPoint support" | 598 | bool "FlashPoint support" |
597 | depends on SCSI_BUSLOGIC | 599 | depends on SCSI_BUSLOGIC && PCI && X86_32 |
598 | help | 600 | help |
599 | This option allows you to omit the FlashPoint support from the | 601 | This option allows you to add FlashPoint support to the |
600 | BusLogic SCSI driver. The FlashPoint SCCB Manager code is | 602 | BusLogic SCSI driver. The FlashPoint SCCB Manager code is |
601 | substantial, so users of MultiMaster Host Adapters may wish to omit | 603 | substantial, so users of MultiMaster Host Adapters may not |
602 | it. | 604 | wish to include it. |
603 | 605 | ||
604 | config SCSI_DMX3191D | 606 | config SCSI_DMX3191D |
605 | tristate "DMX3191D SCSI support" | 607 | tristate "DMX3191D SCSI support" |
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c index 5ac3a3e8dfaf..07d572feceed 100644 --- a/drivers/scsi/a2091.c +++ b/drivers/scsi/a2091.c | |||
@@ -179,6 +179,9 @@ int __init a2091_detect(struct scsi_host_template *tpnt) | |||
179 | DMA(instance)->DAWR = DAWR_A2091; | 179 | DMA(instance)->DAWR = DAWR_A2091; |
180 | regs.SASR = &(DMA(instance)->SASR); | 180 | regs.SASR = &(DMA(instance)->SASR); |
181 | regs.SCMD = &(DMA(instance)->SCMD); | 181 | regs.SCMD = &(DMA(instance)->SCMD); |
182 | HDATA(instance)->no_sync = 0xff; | ||
183 | HDATA(instance)->fast = 0; | ||
184 | HDATA(instance)->dma_mode = CTRL_DMA; | ||
182 | wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10); | 185 | wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10); |
183 | request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI", | 186 | request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI", |
184 | instance); | 187 | instance); |
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c index 3aeec963940b..8b449d8acacd 100644 --- a/drivers/scsi/a3000.c +++ b/drivers/scsi/a3000.c | |||
@@ -178,6 +178,9 @@ int __init a3000_detect(struct scsi_host_template *tpnt) | |||
178 | DMA(a3000_host)->DAWR = DAWR_A3000; | 178 | DMA(a3000_host)->DAWR = DAWR_A3000; |
179 | regs.SASR = &(DMA(a3000_host)->SASR); | 179 | regs.SASR = &(DMA(a3000_host)->SASR); |
180 | regs.SCMD = &(DMA(a3000_host)->SCMD); | 180 | regs.SCMD = &(DMA(a3000_host)->SCMD); |
181 | HDATA(a3000_host)->no_sync = 0xff; | ||
182 | HDATA(a3000_host)->fast = 0; | ||
183 | HDATA(a3000_host)->dma_mode = CTRL_DMA; | ||
181 | wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15); | 184 | wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15); |
182 | if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI", | 185 | if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI", |
183 | a3000_intr)) | 186 | a3000_intr)) |
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index c05092fd3a9d..369fcf78f396 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c | |||
@@ -205,7 +205,7 @@ MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health" | |||
205 | 205 | ||
206 | int aac_check_reset = 1; | 206 | int aac_check_reset = 1; |
207 | module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR); | 207 | module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR); |
208 | MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the" | 208 | MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the" |
209 | " adapter. a value of -1 forces the reset to adapters programmed to" | 209 | " adapter. a value of -1 forces the reset to adapters programmed to" |
210 | " ignore it."); | 210 | " ignore it."); |
211 | 211 | ||
@@ -379,24 +379,6 @@ int aac_get_containers(struct aac_dev *dev) | |||
379 | return status; | 379 | return status; |
380 | } | 380 | } |
381 | 381 | ||
382 | static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigned int offset, unsigned int len) | ||
383 | { | ||
384 | void *buf; | ||
385 | int transfer_len; | ||
386 | struct scatterlist *sg = scsi_sglist(scsicmd); | ||
387 | |||
388 | buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; | ||
389 | transfer_len = min(sg->length, len + offset); | ||
390 | |||
391 | transfer_len -= offset; | ||
392 | if (buf && transfer_len > 0) | ||
393 | memcpy(buf + offset, data, transfer_len); | ||
394 | |||
395 | flush_kernel_dcache_page(kmap_atomic_to_page(buf - sg->offset)); | ||
396 | kunmap_atomic(buf - sg->offset, KM_IRQ0); | ||
397 | |||
398 | } | ||
399 | |||
400 | static void get_container_name_callback(void *context, struct fib * fibptr) | 382 | static void get_container_name_callback(void *context, struct fib * fibptr) |
401 | { | 383 | { |
402 | struct aac_get_name_resp * get_name_reply; | 384 | struct aac_get_name_resp * get_name_reply; |
@@ -419,14 +401,17 @@ static void get_container_name_callback(void *context, struct fib * fibptr) | |||
419 | while (*sp == ' ') | 401 | while (*sp == ' ') |
420 | ++sp; | 402 | ++sp; |
421 | if (*sp) { | 403 | if (*sp) { |
404 | struct inquiry_data inq; | ||
422 | char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)]; | 405 | char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)]; |
423 | int count = sizeof(d); | 406 | int count = sizeof(d); |
424 | char *dp = d; | 407 | char *dp = d; |
425 | do { | 408 | do { |
426 | *dp++ = (*sp) ? *sp++ : ' '; | 409 | *dp++ = (*sp) ? *sp++ : ' '; |
427 | } while (--count > 0); | 410 | } while (--count > 0); |
428 | aac_internal_transfer(scsicmd, d, | 411 | |
429 | offsetof(struct inquiry_data, inqd_pid), sizeof(d)); | 412 | scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq)); |
413 | memcpy(inq.inqd_pid, d, sizeof(d)); | ||
414 | scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq)); | ||
430 | } | 415 | } |
431 | } | 416 | } |
432 | 417 | ||
@@ -811,7 +796,7 @@ static void get_container_serial_callback(void *context, struct fib * fibptr) | |||
811 | sp[2] = 0; | 796 | sp[2] = 0; |
812 | sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X", | 797 | sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X", |
813 | le32_to_cpu(get_serial_reply->uid)); | 798 | le32_to_cpu(get_serial_reply->uid)); |
814 | aac_internal_transfer(scsicmd, sp, 0, sizeof(sp)); | 799 | scsi_sg_copy_from_buffer(scsicmd, sp, sizeof(sp)); |
815 | } | 800 | } |
816 | 801 | ||
817 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; | 802 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; |
@@ -1986,8 +1971,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
1986 | arr[4] = 0x0; | 1971 | arr[4] = 0x0; |
1987 | arr[5] = 0x80; | 1972 | arr[5] = 0x80; |
1988 | arr[1] = scsicmd->cmnd[2]; | 1973 | arr[1] = scsicmd->cmnd[2]; |
1989 | aac_internal_transfer(scsicmd, &inq_data, 0, | 1974 | scsi_sg_copy_from_buffer(scsicmd, &inq_data, |
1990 | sizeof(inq_data)); | 1975 | sizeof(inq_data)); |
1991 | scsicmd->result = DID_OK << 16 | | 1976 | scsicmd->result = DID_OK << 16 | |
1992 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; | 1977 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; |
1993 | } else if (scsicmd->cmnd[2] == 0x80) { | 1978 | } else if (scsicmd->cmnd[2] == 0x80) { |
@@ -1995,8 +1980,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
1995 | arr[3] = setinqserial(dev, &arr[4], | 1980 | arr[3] = setinqserial(dev, &arr[4], |
1996 | scmd_id(scsicmd)); | 1981 | scmd_id(scsicmd)); |
1997 | arr[1] = scsicmd->cmnd[2]; | 1982 | arr[1] = scsicmd->cmnd[2]; |
1998 | aac_internal_transfer(scsicmd, &inq_data, 0, | 1983 | scsi_sg_copy_from_buffer(scsicmd, &inq_data, |
1999 | sizeof(inq_data)); | 1984 | sizeof(inq_data)); |
2000 | return aac_get_container_serial(scsicmd); | 1985 | return aac_get_container_serial(scsicmd); |
2001 | } else { | 1986 | } else { |
2002 | /* vpd page not implemented */ | 1987 | /* vpd page not implemented */ |
@@ -2027,7 +2012,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
2027 | if (cid == host->this_id) { | 2012 | if (cid == host->this_id) { |
2028 | setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types)); | 2013 | setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types)); |
2029 | inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */ | 2014 | inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */ |
2030 | aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data)); | 2015 | scsi_sg_copy_from_buffer(scsicmd, &inq_data, |
2016 | sizeof(inq_data)); | ||
2031 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; | 2017 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; |
2032 | scsicmd->scsi_done(scsicmd); | 2018 | scsicmd->scsi_done(scsicmd); |
2033 | return 0; | 2019 | return 0; |
@@ -2036,7 +2022,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
2036 | return -1; | 2022 | return -1; |
2037 | setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type); | 2023 | setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type); |
2038 | inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */ | 2024 | inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */ |
2039 | aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data)); | 2025 | scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data)); |
2040 | return aac_get_container_name(scsicmd); | 2026 | return aac_get_container_name(scsicmd); |
2041 | } | 2027 | } |
2042 | case SERVICE_ACTION_IN: | 2028 | case SERVICE_ACTION_IN: |
@@ -2047,6 +2033,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
2047 | { | 2033 | { |
2048 | u64 capacity; | 2034 | u64 capacity; |
2049 | char cp[13]; | 2035 | char cp[13]; |
2036 | unsigned int alloc_len; | ||
2050 | 2037 | ||
2051 | dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n")); | 2038 | dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n")); |
2052 | capacity = fsa_dev_ptr[cid].size - 1; | 2039 | capacity = fsa_dev_ptr[cid].size - 1; |
@@ -2063,18 +2050,16 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
2063 | cp[10] = 2; | 2050 | cp[10] = 2; |
2064 | cp[11] = 0; | 2051 | cp[11] = 0; |
2065 | cp[12] = 0; | 2052 | cp[12] = 0; |
2066 | aac_internal_transfer(scsicmd, cp, 0, | ||
2067 | min_t(size_t, scsicmd->cmnd[13], sizeof(cp))); | ||
2068 | if (sizeof(cp) < scsicmd->cmnd[13]) { | ||
2069 | unsigned int len, offset = sizeof(cp); | ||
2070 | 2053 | ||
2071 | memset(cp, 0, offset); | 2054 | alloc_len = ((scsicmd->cmnd[10] << 24) |
2072 | do { | 2055 | + (scsicmd->cmnd[11] << 16) |
2073 | len = min_t(size_t, scsicmd->cmnd[13] - offset, | 2056 | + (scsicmd->cmnd[12] << 8) + scsicmd->cmnd[13]); |
2074 | sizeof(cp)); | 2057 | |
2075 | aac_internal_transfer(scsicmd, cp, offset, len); | 2058 | alloc_len = min_t(size_t, alloc_len, sizeof(cp)); |
2076 | } while ((offset += len) < scsicmd->cmnd[13]); | 2059 | scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len); |
2077 | } | 2060 | if (alloc_len < scsi_bufflen(scsicmd)) |
2061 | scsi_set_resid(scsicmd, | ||
2062 | scsi_bufflen(scsicmd) - alloc_len); | ||
2078 | 2063 | ||
2079 | /* Do not cache partition table for arrays */ | 2064 | /* Do not cache partition table for arrays */ |
2080 | scsicmd->device->removable = 1; | 2065 | scsicmd->device->removable = 1; |
@@ -2104,7 +2089,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
2104 | cp[5] = 0; | 2089 | cp[5] = 0; |
2105 | cp[6] = 2; | 2090 | cp[6] = 2; |
2106 | cp[7] = 0; | 2091 | cp[7] = 0; |
2107 | aac_internal_transfer(scsicmd, cp, 0, sizeof(cp)); | 2092 | scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp)); |
2108 | /* Do not cache partition table for arrays */ | 2093 | /* Do not cache partition table for arrays */ |
2109 | scsicmd->device->removable = 1; | 2094 | scsicmd->device->removable = 1; |
2110 | 2095 | ||
@@ -2139,7 +2124,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
2139 | if (mode_buf_length > scsicmd->cmnd[4]) | 2124 | if (mode_buf_length > scsicmd->cmnd[4]) |
2140 | mode_buf_length = scsicmd->cmnd[4]; | 2125 | mode_buf_length = scsicmd->cmnd[4]; |
2141 | } | 2126 | } |
2142 | aac_internal_transfer(scsicmd, mode_buf, 0, mode_buf_length); | 2127 | scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length); |
2143 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; | 2128 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; |
2144 | scsicmd->scsi_done(scsicmd); | 2129 | scsicmd->scsi_done(scsicmd); |
2145 | 2130 | ||
@@ -2174,7 +2159,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
2174 | if (mode_buf_length > scsicmd->cmnd[8]) | 2159 | if (mode_buf_length > scsicmd->cmnd[8]) |
2175 | mode_buf_length = scsicmd->cmnd[8]; | 2160 | mode_buf_length = scsicmd->cmnd[8]; |
2176 | } | 2161 | } |
2177 | aac_internal_transfer(scsicmd, mode_buf, 0, mode_buf_length); | 2162 | scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length); |
2178 | 2163 | ||
2179 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; | 2164 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; |
2180 | scsicmd->scsi_done(scsicmd); | 2165 | scsicmd->scsi_done(scsicmd); |
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 47434499e82b..23a8e9f8dcb4 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c | |||
@@ -515,10 +515,12 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, | |||
515 | } | 515 | } |
516 | udelay(5); | 516 | udelay(5); |
517 | } | 517 | } |
518 | } else | 518 | } else if (down_interruptible(&fibptr->event_wait) == 0) { |
519 | (void)down_interruptible(&fibptr->event_wait); | 519 | fibptr->done = 2; |
520 | up(&fibptr->event_wait); | ||
521 | } | ||
520 | spin_lock_irqsave(&fibptr->event_lock, flags); | 522 | spin_lock_irqsave(&fibptr->event_lock, flags); |
521 | if (fibptr->done == 0) { | 523 | if ((fibptr->done == 0) || (fibptr->done == 2)) { |
522 | fibptr->done = 2; /* Tell interrupt we aborted */ | 524 | fibptr->done = 2; /* Tell interrupt we aborted */ |
523 | spin_unlock_irqrestore(&fibptr->event_lock, flags); | 525 | spin_unlock_irqrestore(&fibptr->event_lock, flags); |
524 | return -EINTR; | 526 | return -EINTR; |
@@ -594,7 +596,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid) | |||
594 | if (le32_to_cpu(*q->headers.consumer) >= q->entries) | 596 | if (le32_to_cpu(*q->headers.consumer) >= q->entries) |
595 | *q->headers.consumer = cpu_to_le32(1); | 597 | *q->headers.consumer = cpu_to_le32(1); |
596 | else | 598 | else |
597 | *q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1); | 599 | le32_add_cpu(q->headers.consumer, 1); |
598 | 600 | ||
599 | if (wasfull) { | 601 | if (wasfull) { |
600 | switch (qid) { | 602 | switch (qid) { |
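Note: le32_add_cpu(), used in the commsup.c hunk above, is the byteorder helper that advances a little-endian field in place. A tiny sketch showing that it is just shorthand for the convert-add-convert sequence the old code spelled out by hand (both statements below are equivalent; each bumps the counter by one):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static void example_le32_increment(__le32 *consumer)
	{
		/* open-coded form */
		*consumer = cpu_to_le32(le32_to_cpu(*consumer) + 1);
		/* equivalent helper for exactly this pattern */
		le32_add_cpu(consumer, 1);
	}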
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index 72fccd9f40df..0081aa357c8b 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c | |||
@@ -1413,6 +1413,10 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev, | |||
1413 | unsigned long flags; | 1413 | unsigned long flags; |
1414 | int nseg; | 1414 | int nseg; |
1415 | 1415 | ||
1416 | nseg = scsi_dma_map(cmd); | ||
1417 | if (nseg < 0) | ||
1418 | return SCSI_MLQUEUE_HOST_BUSY; | ||
1419 | |||
1416 | ahd_lock(ahd, &flags); | 1420 | ahd_lock(ahd, &flags); |
1417 | 1421 | ||
1418 | /* | 1422 | /* |
@@ -1430,6 +1434,7 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev, | |||
1430 | if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) { | 1434 | if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) { |
1431 | ahd->flags |= AHD_RESOURCE_SHORTAGE; | 1435 | ahd->flags |= AHD_RESOURCE_SHORTAGE; |
1432 | ahd_unlock(ahd, &flags); | 1436 | ahd_unlock(ahd, &flags); |
1437 | scsi_dma_unmap(cmd); | ||
1433 | return SCSI_MLQUEUE_HOST_BUSY; | 1438 | return SCSI_MLQUEUE_HOST_BUSY; |
1434 | } | 1439 | } |
1435 | 1440 | ||
@@ -1485,8 +1490,6 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev, | |||
1485 | ahd_set_sense_residual(scb, 0); | 1490 | ahd_set_sense_residual(scb, 0); |
1486 | scb->sg_count = 0; | 1491 | scb->sg_count = 0; |
1487 | 1492 | ||
1488 | nseg = scsi_dma_map(cmd); | ||
1489 | BUG_ON(nseg < 0); | ||
1490 | if (nseg > 0) { | 1493 | if (nseg > 0) { |
1491 | void *sg = scb->sg_list; | 1494 | void *sg = scb->sg_list; |
1492 | struct scatterlist *cur_seg; | 1495 | struct scatterlist *cur_seg; |
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index 282aff6f852e..42ad48e09f02 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c | |||
@@ -1398,12 +1398,18 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev, | |||
1398 | return SCSI_MLQUEUE_DEVICE_BUSY; | 1398 | return SCSI_MLQUEUE_DEVICE_BUSY; |
1399 | } | 1399 | } |
1400 | 1400 | ||
1401 | nseg = scsi_dma_map(cmd); | ||
1402 | if (nseg < 0) | ||
1403 | return SCSI_MLQUEUE_HOST_BUSY; | ||
1404 | |||
1401 | /* | 1405 | /* |
1402 | * Get an scb to use. | 1406 | * Get an scb to use. |
1403 | */ | 1407 | */ |
1404 | scb = ahc_get_scb(ahc); | 1408 | scb = ahc_get_scb(ahc); |
1405 | if (!scb) | 1409 | if (!scb) { |
1410 | scsi_dma_unmap(cmd); | ||
1406 | return SCSI_MLQUEUE_HOST_BUSY; | 1411 | return SCSI_MLQUEUE_HOST_BUSY; |
1412 | } | ||
1407 | 1413 | ||
1408 | scb->io_ctx = cmd; | 1414 | scb->io_ctx = cmd; |
1409 | scb->platform_data->dev = dev; | 1415 | scb->platform_data->dev = dev; |
@@ -1464,8 +1470,6 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev, | |||
1464 | ahc_set_sense_residual(scb, 0); | 1470 | ahc_set_sense_residual(scb, 0); |
1465 | scb->sg_count = 0; | 1471 | scb->sg_count = 0; |
1466 | 1472 | ||
1467 | nseg = scsi_dma_map(cmd); | ||
1468 | BUG_ON(nseg < 0); | ||
1469 | if (nseg > 0) { | 1473 | if (nseg > 0) { |
1470 | struct ahc_dma_seg *sg; | 1474 | struct ahc_dma_seg *sg; |
1471 | struct scatterlist *cur_seg; | 1475 | struct scatterlist *cur_seg; |
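Note: the aic79xx/aic7xxx hunks above move scsi_dma_map() ahead of SCB allocation and drop the BUG_ON(), so a mapping failure now returns SCSI_MLQUEUE_HOST_BUSY and the midlayer retries. A stripped-down sketch of that queuecommand shape under those assumptions; example_get_scb() is a hypothetical stand-in for ahd_get_scb()/ahc_get_scb() and the SCB details are omitted:

	#include <scsi/scsi.h>
	#include <scsi/scsi_cmnd.h>

	/* hypothetical allocator standing in for the driver's SCB pool */
	extern void *example_get_scb(void);

	static int example_queuecommand_body(struct scsi_cmnd *cmd)
	{
		void *scb;
		int nseg;

		/* Map first; a negative return means the DMA mapping failed,
		 * so ask the midlayer to retry rather than BUG(). */
		nseg = scsi_dma_map(cmd);
		if (nseg < 0)
			return SCSI_MLQUEUE_HOST_BUSY;

		scb = example_get_scb();
		if (!scb) {
			scsi_dma_unmap(cmd);	/* undo the mapping on every error path */
			return SCSI_MLQUEUE_HOST_BUSY;
		}

		if (nseg > 0) {
			/* ... walk scsi_sglist(cmd) and build the controller's
			 * S/G list from the nseg mapped entries ... */
		}

		/* ... queue the SCB to the hardware ... */
		return 0;
	}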
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y index 6066998ed562..702e2dbd11fb 100644 --- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y | |||
@@ -1837,7 +1837,7 @@ type_check(symbol_t *symbol, expression_t *expression, int opcode) | |||
1837 | int and_op; | 1837 | int and_op; |
1838 | 1838 | ||
1839 | and_op = FALSE; | 1839 | and_op = FALSE; |
1840 | if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ || AIC_OP_JZ) | 1840 | if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ || opcode == AIC_OP_JZ) |
1841 | and_op = TRUE; | 1841 | and_op = TRUE; |
1842 | 1842 | ||
1843 | /* | 1843 | /* |
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h index eb8efdcefe48..2ef459e9cda1 100644 --- a/drivers/scsi/aic94xx/aic94xx.h +++ b/drivers/scsi/aic94xx/aic94xx.h | |||
@@ -58,7 +58,6 @@ | |||
58 | 58 | ||
59 | extern struct kmem_cache *asd_dma_token_cache; | 59 | extern struct kmem_cache *asd_dma_token_cache; |
60 | extern struct kmem_cache *asd_ascb_cache; | 60 | extern struct kmem_cache *asd_ascb_cache; |
61 | extern char sas_addr_str[2*SAS_ADDR_SIZE + 1]; | ||
62 | 61 | ||
63 | static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr) | 62 | static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr) |
64 | { | 63 | { |
@@ -68,21 +67,6 @@ static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr) | |||
68 | *p = '\0'; | 67 | *p = '\0'; |
69 | } | 68 | } |
70 | 69 | ||
71 | static inline void asd_destringify_sas_addr(u8 *sas_addr, const char *p) | ||
72 | { | ||
73 | int i; | ||
74 | for (i = 0; i < SAS_ADDR_SIZE; i++) { | ||
75 | u8 h, l; | ||
76 | if (!*p) | ||
77 | break; | ||
78 | h = isdigit(*p) ? *p-'0' : *p-'A'+10; | ||
79 | p++; | ||
80 | l = isdigit(*p) ? *p-'0' : *p-'A'+10; | ||
81 | p++; | ||
82 | sas_addr[i] = (h<<4) | l; | ||
83 | } | ||
84 | } | ||
85 | |||
86 | struct asd_ha_struct; | 70 | struct asd_ha_struct; |
87 | struct asd_ascb; | 71 | struct asd_ascb; |
88 | 72 | ||
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c index 72042cae7768..2e2ddec9c0b6 100644 --- a/drivers/scsi/aic94xx/aic94xx_dev.c +++ b/drivers/scsi/aic94xx/aic94xx_dev.c | |||
@@ -35,7 +35,7 @@ | |||
35 | #define SET_DDB(_ddb, _ha) set_bit(_ddb, (_ha)->hw_prof.ddb_bitmap) | 35 | #define SET_DDB(_ddb, _ha) set_bit(_ddb, (_ha)->hw_prof.ddb_bitmap) |
36 | #define CLEAR_DDB(_ddb, _ha) clear_bit(_ddb, (_ha)->hw_prof.ddb_bitmap) | 36 | #define CLEAR_DDB(_ddb, _ha) clear_bit(_ddb, (_ha)->hw_prof.ddb_bitmap) |
37 | 37 | ||
38 | static inline int asd_get_ddb(struct asd_ha_struct *asd_ha) | 38 | static int asd_get_ddb(struct asd_ha_struct *asd_ha) |
39 | { | 39 | { |
40 | int ddb, i; | 40 | int ddb, i; |
41 | 41 | ||
@@ -71,7 +71,7 @@ out: | |||
71 | #define NCQ_DATA_SCB_PTR offsetof(struct asd_ddb_stp_sata_target_port, ncq_data_scb_ptr) | 71 | #define NCQ_DATA_SCB_PTR offsetof(struct asd_ddb_stp_sata_target_port, ncq_data_scb_ptr) |
72 | #define ITNL_TIMEOUT offsetof(struct asd_ddb_ssp_smp_target_port, itnl_timeout) | 72 | #define ITNL_TIMEOUT offsetof(struct asd_ddb_ssp_smp_target_port, itnl_timeout) |
73 | 73 | ||
74 | static inline void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb) | 74 | static void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb) |
75 | { | 75 | { |
76 | if (!ddb || ddb >= 0xFFFF) | 76 | if (!ddb || ddb >= 0xFFFF) |
77 | return; | 77 | return; |
@@ -79,7 +79,7 @@ static inline void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb) | |||
79 | CLEAR_DDB(ddb, asd_ha); | 79 | CLEAR_DDB(ddb, asd_ha); |
80 | } | 80 | } |
81 | 81 | ||
82 | static inline void asd_set_ddb_type(struct domain_device *dev) | 82 | static void asd_set_ddb_type(struct domain_device *dev) |
83 | { | 83 | { |
84 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; | 84 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; |
85 | int ddb = (int) (unsigned long) dev->lldd_dev; | 85 | int ddb = (int) (unsigned long) dev->lldd_dev; |
@@ -109,7 +109,7 @@ static int asd_init_sata_tag_ddb(struct domain_device *dev) | |||
109 | return 0; | 109 | return 0; |
110 | } | 110 | } |
111 | 111 | ||
112 | static inline int asd_init_sata(struct domain_device *dev) | 112 | static int asd_init_sata(struct domain_device *dev) |
113 | { | 113 | { |
114 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; | 114 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; |
115 | int ddb = (int) (unsigned long) dev->lldd_dev; | 115 | int ddb = (int) (unsigned long) dev->lldd_dev; |
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c index 3d8c4ff1f2ef..67eeba3bdb06 100644 --- a/drivers/scsi/aic94xx/aic94xx_dump.c +++ b/drivers/scsi/aic94xx/aic94xx_dump.c | |||
@@ -738,6 +738,8 @@ static void asd_dump_lseq_state(struct asd_ha_struct *asd_ha, int lseq) | |||
738 | PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TIMER_TERM_TS); | 738 | PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TIMER_TERM_TS); |
739 | } | 739 | } |
740 | 740 | ||
741 | #if 0 | ||
742 | |||
741 | /** | 743 | /** |
742 | * asd_dump_ddb_site -- dump a CSEQ DDB site | 744 | * asd_dump_ddb_site -- dump a CSEQ DDB site |
743 | * @asd_ha: pointer to host adapter structure | 745 | * @asd_ha: pointer to host adapter structure |
@@ -880,6 +882,8 @@ void asd_dump_scb_sites(struct asd_ha_struct *asd_ha) | |||
880 | } | 882 | } |
881 | } | 883 | } |
882 | 884 | ||
885 | #endif /* 0 */ | ||
886 | |||
883 | /** | 887 | /** |
884 | * ads_dump_seq_state -- dump CSEQ and LSEQ states | 888 | * ads_dump_seq_state -- dump CSEQ and LSEQ states |
885 | * @asd_ha: pointer to host adapter structure | 889 | * @asd_ha: pointer to host adapter structure |
@@ -922,7 +926,9 @@ void asd_dump_frame_rcvd(struct asd_phy *phy, | |||
922 | spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); | 926 | spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); |
923 | } | 927 | } |
924 | 928 | ||
925 | static inline void asd_dump_scb(struct asd_ascb *ascb, int ind) | 929 | #if 0 |
930 | |||
931 | static void asd_dump_scb(struct asd_ascb *ascb, int ind) | ||
926 | { | 932 | { |
927 | asd_printk("scb%d: vaddr: 0x%p, dma_handle: 0x%llx, next: 0x%llx, " | 933 | asd_printk("scb%d: vaddr: 0x%p, dma_handle: 0x%llx, next: 0x%llx, " |
928 | "index:%d, opcode:0x%02x\n", | 934 | "index:%d, opcode:0x%02x\n", |
@@ -956,4 +962,6 @@ void asd_dump_scb_list(struct asd_ascb *ascb, int num) | |||
956 | } | 962 | } |
957 | } | 963 | } |
958 | 964 | ||
965 | #endif /* 0 */ | ||
966 | |||
959 | #endif /* ASD_DEBUG */ | 967 | #endif /* ASD_DEBUG */ |
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.h b/drivers/scsi/aic94xx/aic94xx_dump.h index 0c388e7da6bb..191a753d42a7 100644 --- a/drivers/scsi/aic94xx/aic94xx_dump.h +++ b/drivers/scsi/aic94xx/aic94xx_dump.h | |||
@@ -29,24 +29,15 @@ | |||
29 | 29 | ||
30 | #ifdef ASD_DEBUG | 30 | #ifdef ASD_DEBUG |
31 | 31 | ||
32 | void asd_dump_ddb_0(struct asd_ha_struct *asd_ha); | ||
33 | void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no); | ||
34 | void asd_dump_scb_sites(struct asd_ha_struct *asd_ha); | ||
35 | void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask); | 32 | void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask); |
36 | void asd_dump_frame_rcvd(struct asd_phy *phy, | 33 | void asd_dump_frame_rcvd(struct asd_phy *phy, |
37 | struct done_list_struct *dl); | 34 | struct done_list_struct *dl); |
38 | void asd_dump_scb_list(struct asd_ascb *ascb, int num); | ||
39 | #else /* ASD_DEBUG */ | 35 | #else /* ASD_DEBUG */ |
40 | 36 | ||
41 | static inline void asd_dump_ddb_0(struct asd_ha_struct *asd_ha) { } | ||
42 | static inline void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, | ||
43 | u16 site_no) { } | ||
44 | static inline void asd_dump_scb_sites(struct asd_ha_struct *asd_ha) { } | ||
45 | static inline void asd_dump_seq_state(struct asd_ha_struct *asd_ha, | 37 | static inline void asd_dump_seq_state(struct asd_ha_struct *asd_ha, |
46 | u8 lseq_mask) { } | 38 | u8 lseq_mask) { } |
47 | static inline void asd_dump_frame_rcvd(struct asd_phy *phy, | 39 | static inline void asd_dump_frame_rcvd(struct asd_phy *phy, |
48 | struct done_list_struct *dl) { } | 40 | struct done_list_struct *dl) { } |
49 | static inline void asd_dump_scb_list(struct asd_ascb *ascb, int num) { } | ||
50 | #endif /* ASD_DEBUG */ | 41 | #endif /* ASD_DEBUG */ |
51 | 42 | ||
52 | #endif /* _AIC94XX_DUMP_H_ */ | 43 | #endif /* _AIC94XX_DUMP_H_ */ |
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c index 098b5f39cd31..83a78222896d 100644 --- a/drivers/scsi/aic94xx/aic94xx_hwi.c +++ b/drivers/scsi/aic94xx/aic94xx_hwi.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/pci.h> | 27 | #include <linux/pci.h> |
28 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/firmware.h> | ||
30 | 31 | ||
31 | #include "aic94xx.h" | 32 | #include "aic94xx.h" |
32 | #include "aic94xx_reg.h" | 33 | #include "aic94xx_reg.h" |
@@ -38,16 +39,14 @@ u32 MBAR0_SWB_SIZE; | |||
38 | 39 | ||
39 | /* ---------- Initialization ---------- */ | 40 | /* ---------- Initialization ---------- */ |
40 | 41 | ||
41 | static void asd_get_user_sas_addr(struct asd_ha_struct *asd_ha) | 42 | static int asd_get_user_sas_addr(struct asd_ha_struct *asd_ha) |
42 | { | 43 | { |
43 | extern char sas_addr_str[]; | 44 | /* adapter came with a sas address */ |
44 | /* If the user has specified a WWN it overrides other settings | 45 | if (asd_ha->hw_prof.sas_addr[0]) |
45 | */ | 46 | return 0; |
46 | if (sas_addr_str[0] != '\0') | 47 | |
47 | asd_destringify_sas_addr(asd_ha->hw_prof.sas_addr, | 48 | return sas_request_addr(asd_ha->sas_ha.core.shost, |
48 | sas_addr_str); | 49 | asd_ha->hw_prof.sas_addr); |
49 | else if (asd_ha->hw_prof.sas_addr[0] != 0) | ||
50 | asd_stringify_sas_addr(sas_addr_str, asd_ha->hw_prof.sas_addr); | ||
51 | } | 50 | } |
52 | 51 | ||
53 | static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha) | 52 | static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha) |
@@ -251,7 +250,7 @@ static int asd_init_scbs(struct asd_ha_struct *asd_ha) | |||
251 | return 0; | 250 | return 0; |
252 | } | 251 | } |
253 | 252 | ||
254 | static inline void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha) | 253 | static void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha) |
255 | { | 254 | { |
256 | asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE; | 255 | asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE; |
257 | asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE; | 256 | asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE; |
@@ -657,8 +656,7 @@ int asd_init_hw(struct asd_ha_struct *asd_ha) | |||
657 | 656 | ||
658 | asd_init_ctxmem(asd_ha); | 657 | asd_init_ctxmem(asd_ha); |
659 | 658 | ||
660 | asd_get_user_sas_addr(asd_ha); | 659 | if (asd_get_user_sas_addr(asd_ha)) { |
661 | if (!asd_ha->hw_prof.sas_addr[0]) { | ||
662 | asd_printk("No SAS Address provided for %s\n", | 660 | asd_printk("No SAS Address provided for %s\n", |
663 | pci_name(asd_ha->pcidev)); | 661 | pci_name(asd_ha->pcidev)); |
664 | err = -ENODEV; | 662 | err = -ENODEV; |
@@ -773,7 +771,7 @@ static void asd_dl_tasklet_handler(unsigned long data) | |||
773 | * asd_process_donelist_isr -- schedule processing of done list entries | 771 | * asd_process_donelist_isr -- schedule processing of done list entries |
774 | * @asd_ha: pointer to host adapter structure | 772 | * @asd_ha: pointer to host adapter structure |
775 | */ | 773 | */ |
776 | static inline void asd_process_donelist_isr(struct asd_ha_struct *asd_ha) | 774 | static void asd_process_donelist_isr(struct asd_ha_struct *asd_ha) |
777 | { | 775 | { |
778 | tasklet_schedule(&asd_ha->seq.dl_tasklet); | 776 | tasklet_schedule(&asd_ha->seq.dl_tasklet); |
779 | } | 777 | } |
@@ -782,7 +780,7 @@ static inline void asd_process_donelist_isr(struct asd_ha_struct *asd_ha) | |||
782 | * asd_com_sas_isr -- process device communication interrupt (COMINT) | 780 | * asd_com_sas_isr -- process device communication interrupt (COMINT) |
783 | * @asd_ha: pointer to host adapter structure | 781 | * @asd_ha: pointer to host adapter structure |
784 | */ | 782 | */ |
785 | static inline void asd_com_sas_isr(struct asd_ha_struct *asd_ha) | 783 | static void asd_com_sas_isr(struct asd_ha_struct *asd_ha) |
786 | { | 784 | { |
787 | u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT); | 785 | u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT); |
788 | 786 | ||
@@ -821,7 +819,7 @@ static inline void asd_com_sas_isr(struct asd_ha_struct *asd_ha) | |||
821 | asd_chip_reset(asd_ha); | 819 | asd_chip_reset(asd_ha); |
822 | } | 820 | } |
823 | 821 | ||
824 | static inline void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus) | 822 | static void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus) |
825 | { | 823 | { |
826 | static const char *halt_code[256] = { | 824 | static const char *halt_code[256] = { |
827 | "UNEXPECTED_INTERRUPT0", | 825 | "UNEXPECTED_INTERRUPT0", |
@@ -908,7 +906,7 @@ static inline void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus) | |||
908 | * asd_dch_sas_isr -- process device channel interrupt (DEVINT) | 906 | * asd_dch_sas_isr -- process device channel interrupt (DEVINT) |
909 | * @asd_ha: pointer to host adapter structure | 907 | * @asd_ha: pointer to host adapter structure |
910 | */ | 908 | */ |
911 | static inline void asd_dch_sas_isr(struct asd_ha_struct *asd_ha) | 909 | static void asd_dch_sas_isr(struct asd_ha_struct *asd_ha) |
912 | { | 910 | { |
913 | u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS); | 911 | u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS); |
914 | 912 | ||
@@ -923,7 +921,7 @@ static inline void asd_dch_sas_isr(struct asd_ha_struct *asd_ha) | |||
923 | * ads_rbi_exsi_isr -- process external system interface interrupt (INITERR) | 921 | * ads_rbi_exsi_isr -- process external system interface interrupt (INITERR) |
924 | * @asd_ha: pointer to host adapter structure | 922 | * @asd_ha: pointer to host adapter structure |
925 | */ | 923 | */ |
926 | static inline void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha) | 924 | static void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha) |
927 | { | 925 | { |
928 | u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R); | 926 | u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R); |
929 | 927 | ||
@@ -971,7 +969,7 @@ static inline void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha) | |||
971 | * | 969 | * |
972 | * Asserted on PCIX errors: target abort, etc. | 970 | * Asserted on PCIX errors: target abort, etc. |
973 | */ | 971 | */ |
974 | static inline void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha) | 972 | static void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha) |
975 | { | 973 | { |
976 | u16 status; | 974 | u16 status; |
977 | u32 pcix_status; | 975 | u32 pcix_status; |
@@ -1044,8 +1042,8 @@ irqreturn_t asd_hw_isr(int irq, void *dev_id) | |||
1044 | 1042 | ||
1045 | /* ---------- SCB handling ---------- */ | 1043 | /* ---------- SCB handling ---------- */ |
1046 | 1044 | ||
1047 | static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha, | 1045 | static struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha, |
1048 | gfp_t gfp_flags) | 1046 | gfp_t gfp_flags) |
1049 | { | 1047 | { |
1050 | extern struct kmem_cache *asd_ascb_cache; | 1048 | extern struct kmem_cache *asd_ascb_cache; |
1051 | struct asd_seq_data *seq = &asd_ha->seq; | 1049 | struct asd_seq_data *seq = &asd_ha->seq; |
@@ -1144,8 +1142,8 @@ struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct | |||
1144 | * | 1142 | * |
1145 | * LOCKING: called with the pending list lock held. | 1143 | * LOCKING: called with the pending list lock held. |
1146 | */ | 1144 | */ |
1147 | static inline void asd_swap_head_scb(struct asd_ha_struct *asd_ha, | 1145 | static void asd_swap_head_scb(struct asd_ha_struct *asd_ha, |
1148 | struct asd_ascb *ascb) | 1146 | struct asd_ascb *ascb) |
1149 | { | 1147 | { |
1150 | struct asd_seq_data *seq = &asd_ha->seq; | 1148 | struct asd_seq_data *seq = &asd_ha->seq; |
1151 | struct asd_ascb *last = list_entry(ascb->list.prev, | 1149 | struct asd_ascb *last = list_entry(ascb->list.prev, |
@@ -1171,7 +1169,7 @@ static inline void asd_swap_head_scb(struct asd_ha_struct *asd_ha, | |||
1171 | * intended to be called from asd_post_ascb_list(), just prior to | 1169 | * intended to be called from asd_post_ascb_list(), just prior to |
1172 | * posting the SCBs to the sequencer. | 1170 | * posting the SCBs to the sequencer. |
1173 | */ | 1171 | */ |
1174 | static inline void asd_start_scb_timers(struct list_head *list) | 1172 | static void asd_start_scb_timers(struct list_head *list) |
1175 | { | 1173 | { |
1176 | struct asd_ascb *ascb; | 1174 | struct asd_ascb *ascb; |
1177 | list_for_each_entry(ascb, list, list) { | 1175 | list_for_each_entry(ascb, list, list) { |
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h index abc757559c1a..8c1c28239e93 100644 --- a/drivers/scsi/aic94xx/aic94xx_hwi.h +++ b/drivers/scsi/aic94xx/aic94xx_hwi.h | |||
@@ -391,8 +391,6 @@ void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc); | |||
391 | void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op); | 391 | void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op); |
392 | void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op); | 392 | void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op); |
393 | int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask); | 393 | int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask); |
394 | void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id, | ||
395 | u8 subfunc); | ||
396 | 394 | ||
397 | void asd_ascb_timedout(unsigned long data); | 395 | void asd_ascb_timedout(unsigned long data); |
398 | int asd_chip_hardrst(struct asd_ha_struct *asd_ha); | 396 | int asd_chip_hardrst(struct asd_ha_struct *asd_ha); |
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index 88d1e731b65e..90f5e0a6f2e3 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c | |||
@@ -56,8 +56,6 @@ MODULE_PARM_DESC(collector, "\n" | |||
56 | "\tThe aic94xx SAS LLDD supports both modes.\n" | 56 | "\tThe aic94xx SAS LLDD supports both modes.\n" |
57 | "\tDefault: 0 (Direct Mode).\n"); | 57 | "\tDefault: 0 (Direct Mode).\n"); |
58 | 58 | ||
59 | char sas_addr_str[2*SAS_ADDR_SIZE + 1] = ""; | ||
60 | |||
61 | static struct scsi_transport_template *aic94xx_transport_template; | 59 | static struct scsi_transport_template *aic94xx_transport_template; |
62 | static int asd_scan_finished(struct Scsi_Host *, unsigned long); | 60 | static int asd_scan_finished(struct Scsi_Host *, unsigned long); |
63 | static void asd_scan_start(struct Scsi_Host *); | 61 | static void asd_scan_start(struct Scsi_Host *); |
@@ -547,7 +545,7 @@ static struct asd_pcidev_struct { | |||
547 | }, | 545 | }, |
548 | }; | 546 | }; |
549 | 547 | ||
550 | static inline int asd_create_ha_caches(struct asd_ha_struct *asd_ha) | 548 | static int asd_create_ha_caches(struct asd_ha_struct *asd_ha) |
551 | { | 549 | { |
552 | asd_ha->scb_pool = dma_pool_create(ASD_DRIVER_NAME "_scb_pool", | 550 | asd_ha->scb_pool = dma_pool_create(ASD_DRIVER_NAME "_scb_pool", |
553 | &asd_ha->pcidev->dev, | 551 | &asd_ha->pcidev->dev, |
@@ -565,7 +563,7 @@ static inline int asd_create_ha_caches(struct asd_ha_struct *asd_ha) | |||
565 | * asd_free_edbs -- free empty data buffers | 563 | * asd_free_edbs -- free empty data buffers |
566 | * asd_ha: pointer to host adapter structure | 564 | * asd_ha: pointer to host adapter structure |
567 | */ | 565 | */ |
568 | static inline void asd_free_edbs(struct asd_ha_struct *asd_ha) | 566 | static void asd_free_edbs(struct asd_ha_struct *asd_ha) |
569 | { | 567 | { |
570 | struct asd_seq_data *seq = &asd_ha->seq; | 568 | struct asd_seq_data *seq = &asd_ha->seq; |
571 | int i; | 569 | int i; |
@@ -576,7 +574,7 @@ static inline void asd_free_edbs(struct asd_ha_struct *asd_ha) | |||
576 | seq->edb_arr = NULL; | 574 | seq->edb_arr = NULL; |
577 | } | 575 | } |
578 | 576 | ||
579 | static inline void asd_free_escbs(struct asd_ha_struct *asd_ha) | 577 | static void asd_free_escbs(struct asd_ha_struct *asd_ha) |
580 | { | 578 | { |
581 | struct asd_seq_data *seq = &asd_ha->seq; | 579 | struct asd_seq_data *seq = &asd_ha->seq; |
582 | int i; | 580 | int i; |
@@ -591,7 +589,7 @@ static inline void asd_free_escbs(struct asd_ha_struct *asd_ha) | |||
591 | seq->escb_arr = NULL; | 589 | seq->escb_arr = NULL; |
592 | } | 590 | } |
593 | 591 | ||
594 | static inline void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha) | 592 | static void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha) |
595 | { | 593 | { |
596 | int i; | 594 | int i; |
597 | 595 | ||
diff --git a/drivers/scsi/aic94xx/aic94xx_reg.c b/drivers/scsi/aic94xx/aic94xx_reg.c index f210dac3203d..56b17c22526e 100644 --- a/drivers/scsi/aic94xx/aic94xx_reg.c +++ b/drivers/scsi/aic94xx/aic94xx_reg.c | |||
@@ -32,8 +32,8 @@ | |||
32 | * Offset comes before value to remind that the operation of | 32 | * Offset comes before value to remind that the operation of |
33 | * this function is *offs = val. | 33 | * this function is *offs = val. |
34 | */ | 34 | */ |
35 | static inline void asd_write_byte(struct asd_ha_struct *asd_ha, | 35 | static void asd_write_byte(struct asd_ha_struct *asd_ha, |
36 | unsigned long offs, u8 val) | 36 | unsigned long offs, u8 val) |
37 | { | 37 | { |
38 | if (unlikely(asd_ha->iospace)) | 38 | if (unlikely(asd_ha->iospace)) |
39 | outb(val, | 39 | outb(val, |
@@ -43,8 +43,8 @@ static inline void asd_write_byte(struct asd_ha_struct *asd_ha, | |||
43 | wmb(); | 43 | wmb(); |
44 | } | 44 | } |
45 | 45 | ||
46 | static inline void asd_write_word(struct asd_ha_struct *asd_ha, | 46 | static void asd_write_word(struct asd_ha_struct *asd_ha, |
47 | unsigned long offs, u16 val) | 47 | unsigned long offs, u16 val) |
48 | { | 48 | { |
49 | if (unlikely(asd_ha->iospace)) | 49 | if (unlikely(asd_ha->iospace)) |
50 | outw(val, | 50 | outw(val, |
@@ -54,8 +54,8 @@ static inline void asd_write_word(struct asd_ha_struct *asd_ha, | |||
54 | wmb(); | 54 | wmb(); |
55 | } | 55 | } |
56 | 56 | ||
57 | static inline void asd_write_dword(struct asd_ha_struct *asd_ha, | 57 | static void asd_write_dword(struct asd_ha_struct *asd_ha, |
58 | unsigned long offs, u32 val) | 58 | unsigned long offs, u32 val) |
59 | { | 59 | { |
60 | if (unlikely(asd_ha->iospace)) | 60 | if (unlikely(asd_ha->iospace)) |
61 | outl(val, | 61 | outl(val, |
@@ -67,8 +67,7 @@ static inline void asd_write_dword(struct asd_ha_struct *asd_ha, | |||
67 | 67 | ||
68 | /* Reading from device address space. | 68 | /* Reading from device address space. |
69 | */ | 69 | */ |
70 | static inline u8 asd_read_byte(struct asd_ha_struct *asd_ha, | 70 | static u8 asd_read_byte(struct asd_ha_struct *asd_ha, unsigned long offs) |
71 | unsigned long offs) | ||
72 | { | 71 | { |
73 | u8 val; | 72 | u8 val; |
74 | if (unlikely(asd_ha->iospace)) | 73 | if (unlikely(asd_ha->iospace)) |
@@ -80,8 +79,8 @@ static inline u8 asd_read_byte(struct asd_ha_struct *asd_ha, | |||
80 | return val; | 79 | return val; |
81 | } | 80 | } |
82 | 81 | ||
83 | static inline u16 asd_read_word(struct asd_ha_struct *asd_ha, | 82 | static u16 asd_read_word(struct asd_ha_struct *asd_ha, |
84 | unsigned long offs) | 83 | unsigned long offs) |
85 | { | 84 | { |
86 | u16 val; | 85 | u16 val; |
87 | if (unlikely(asd_ha->iospace)) | 86 | if (unlikely(asd_ha->iospace)) |
@@ -93,8 +92,8 @@ static inline u16 asd_read_word(struct asd_ha_struct *asd_ha, | |||
93 | return val; | 92 | return val; |
94 | } | 93 | } |
95 | 94 | ||
96 | static inline u32 asd_read_dword(struct asd_ha_struct *asd_ha, | 95 | static u32 asd_read_dword(struct asd_ha_struct *asd_ha, |
97 | unsigned long offs) | 96 | unsigned long offs) |
98 | { | 97 | { |
99 | u32 val; | 98 | u32 val; |
100 | if (unlikely(asd_ha->iospace)) | 99 | if (unlikely(asd_ha->iospace)) |
@@ -124,22 +123,22 @@ static inline u32 asd_mem_offs_swb(void) | |||
124 | /* We know that the register wanted is in the range | 123 | /* We know that the register wanted is in the range |
125 | * of the sliding window. | 124 | * of the sliding window. |
126 | */ | 125 | */ |
127 | #define ASD_READ_SW(ww, type, ord) \ | 126 | #define ASD_READ_SW(ww, type, ord) \ |
128 | static inline type asd_read_##ww##_##ord (struct asd_ha_struct *asd_ha,\ | 127 | static type asd_read_##ww##_##ord(struct asd_ha_struct *asd_ha, \ |
129 | u32 reg) \ | 128 | u32 reg) \ |
130 | { \ | 129 | { \ |
131 | struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \ | 130 | struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \ |
132 | u32 map_offs=(reg - io_handle-> ww##_base )+asd_mem_offs_##ww ();\ | 131 | u32 map_offs = (reg - io_handle->ww##_base) + asd_mem_offs_##ww();\ |
133 | return asd_read_##ord (asd_ha, (unsigned long) map_offs); \ | 132 | return asd_read_##ord(asd_ha, (unsigned long)map_offs); \ |
134 | } | 133 | } |
135 | 134 | ||
136 | #define ASD_WRITE_SW(ww, type, ord) \ | 135 | #define ASD_WRITE_SW(ww, type, ord) \ |
137 | static inline void asd_write_##ww##_##ord (struct asd_ha_struct *asd_ha,\ | 136 | static void asd_write_##ww##_##ord(struct asd_ha_struct *asd_ha, \ |
138 | u32 reg, type val) \ | 137 | u32 reg, type val) \ |
139 | { \ | 138 | { \ |
140 | struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \ | 139 | struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \ |
141 | u32 map_offs=(reg - io_handle-> ww##_base )+asd_mem_offs_##ww ();\ | 140 | u32 map_offs = (reg - io_handle->ww##_base) + asd_mem_offs_##ww();\ |
142 | asd_write_##ord (asd_ha, (unsigned long) map_offs, val); \ | 141 | asd_write_##ord(asd_ha, (unsigned long)map_offs, val); \ |
143 | } | 142 | } |
144 | 143 | ||
145 | ASD_READ_SW(swa, u8, byte); | 144 | ASD_READ_SW(swa, u8, byte); |
@@ -186,7 +185,7 @@ ASD_WRITE_SW(swc, u32, dword); | |||
186 | * @asd_ha: pointer to host adapter structure | 185 | * @asd_ha: pointer to host adapter structure |
187 | * @reg: register desired to be within range of the new window | 186 | * @reg: register desired to be within range of the new window |
188 | */ | 187 | */ |
189 | static inline void asd_move_swb(struct asd_ha_struct *asd_ha, u32 reg) | 188 | static void asd_move_swb(struct asd_ha_struct *asd_ha, u32 reg) |
190 | { | 189 | { |
191 | u32 base = reg & ~(MBAR0_SWB_SIZE-1); | 190 | u32 base = reg & ~(MBAR0_SWB_SIZE-1); |
192 | pci_write_config_dword(asd_ha->pcidev, PCI_CONF_MBAR0_SWB, base); | 191 | pci_write_config_dword(asd_ha->pcidev, PCI_CONF_MBAR0_SWB, base); |
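Note: the ASD_READ_SW/ASD_WRITE_SW change in aic94xx_reg.c above only reindents the macro bodies and drops the explicit 'inline'. For readers following the sliding-window accessors, this is roughly what one instantiation, ASD_READ_SW(swa, u8, byte), expands to after the change (paraphrased from the new macro text, not copied from a build):

	static u8 asd_read_swa_byte(struct asd_ha_struct *asd_ha, u32 reg)
	{
		struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0];
		/* translate the register address into an offset inside sliding
		 * window A, then perform the width-specific device read */
		u32 map_offs = (reg - io_handle->swa_base) + asd_mem_offs_swa();
		return asd_read_byte(asd_ha, (unsigned long)map_offs);
	}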
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c index ab350504ca5a..46643319c520 100644 --- a/drivers/scsi/aic94xx/aic94xx_scb.c +++ b/drivers/scsi/aic94xx/aic94xx_scb.c | |||
@@ -50,7 +50,7 @@ | |||
50 | | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \ | 50 | | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \ |
51 | | CURRENT_OOB_ERROR) | 51 | | CURRENT_OOB_ERROR) |
52 | 52 | ||
53 | static inline void get_lrate_mode(struct asd_phy *phy, u8 oob_mode) | 53 | static void get_lrate_mode(struct asd_phy *phy, u8 oob_mode) |
54 | { | 54 | { |
55 | struct sas_phy *sas_phy = phy->sas_phy.phy; | 55 | struct sas_phy *sas_phy = phy->sas_phy.phy; |
56 | 56 | ||
@@ -81,7 +81,7 @@ static inline void get_lrate_mode(struct asd_phy *phy, u8 oob_mode) | |||
81 | phy->sas_phy.oob_mode = SATA_OOB_MODE; | 81 | phy->sas_phy.oob_mode = SATA_OOB_MODE; |
82 | } | 82 | } |
83 | 83 | ||
84 | static inline void asd_phy_event_tasklet(struct asd_ascb *ascb, | 84 | static void asd_phy_event_tasklet(struct asd_ascb *ascb, |
85 | struct done_list_struct *dl) | 85 | struct done_list_struct *dl) |
86 | { | 86 | { |
87 | struct asd_ha_struct *asd_ha = ascb->ha; | 87 | struct asd_ha_struct *asd_ha = ascb->ha; |
@@ -125,8 +125,7 @@ static inline void asd_phy_event_tasklet(struct asd_ascb *ascb, | |||
125 | } | 125 | } |
126 | 126 | ||
127 | /* If phys are enabled sparsely, this will do the right thing. */ | 127 | /* If phys are enabled sparsely, this will do the right thing. */ |
128 | static inline unsigned ord_phy(struct asd_ha_struct *asd_ha, | 128 | static unsigned ord_phy(struct asd_ha_struct *asd_ha, struct asd_phy *phy) |
129 | struct asd_phy *phy) | ||
130 | { | 129 | { |
131 | u8 enabled_mask = asd_ha->hw_prof.enabled_phys; | 130 | u8 enabled_mask = asd_ha->hw_prof.enabled_phys; |
132 | int i, k = 0; | 131 | int i, k = 0; |
@@ -151,7 +150,7 @@ static inline unsigned ord_phy(struct asd_ha_struct *asd_ha, | |||
151 | * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame | 150 | * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame |
152 | * buffer. | 151 | * buffer. |
153 | */ | 152 | */ |
154 | static inline void asd_get_attached_sas_addr(struct asd_phy *phy, u8 *sas_addr) | 153 | static void asd_get_attached_sas_addr(struct asd_phy *phy, u8 *sas_addr) |
155 | { | 154 | { |
156 | if (phy->sas_phy.frame_rcvd[0] == 0x34 | 155 | if (phy->sas_phy.frame_rcvd[0] == 0x34 |
157 | && phy->sas_phy.oob_mode == SATA_OOB_MODE) { | 156 | && phy->sas_phy.oob_mode == SATA_OOB_MODE) { |
@@ -232,9 +231,9 @@ static void asd_deform_port(struct asd_ha_struct *asd_ha, struct asd_phy *phy) | |||
232 | spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags); | 231 | spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags); |
233 | } | 232 | } |
234 | 233 | ||
235 | static inline void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb, | 234 | static void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb, |
236 | struct done_list_struct *dl, | 235 | struct done_list_struct *dl, |
237 | int edb_id, int phy_id) | 236 | int edb_id, int phy_id) |
238 | { | 237 | { |
239 | unsigned long flags; | 238 | unsigned long flags; |
240 | int edb_el = edb_id + ascb->edb_index; | 239 | int edb_el = edb_id + ascb->edb_index; |
@@ -255,9 +254,9 @@ static inline void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb, | |||
255 | sas_ha->notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED); | 254 | sas_ha->notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED); |
256 | } | 255 | } |
257 | 256 | ||
258 | static inline void asd_link_reset_err_tasklet(struct asd_ascb *ascb, | 257 | static void asd_link_reset_err_tasklet(struct asd_ascb *ascb, |
259 | struct done_list_struct *dl, | 258 | struct done_list_struct *dl, |
260 | int phy_id) | 259 | int phy_id) |
261 | { | 260 | { |
262 | struct asd_ha_struct *asd_ha = ascb->ha; | 261 | struct asd_ha_struct *asd_ha = ascb->ha; |
263 | struct sas_ha_struct *sas_ha = &asd_ha->sas_ha; | 262 | struct sas_ha_struct *sas_ha = &asd_ha->sas_ha; |
@@ -308,9 +307,9 @@ out: | |||
308 | ; | 307 | ; |
309 | } | 308 | } |
310 | 309 | ||
311 | static inline void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb, | 310 | static void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb, |
312 | struct done_list_struct *dl, | 311 | struct done_list_struct *dl, |
313 | int phy_id) | 312 | int phy_id) |
314 | { | 313 | { |
315 | unsigned long flags; | 314 | unsigned long flags; |
316 | struct sas_ha_struct *sas_ha = &ascb->ha->sas_ha; | 315 | struct sas_ha_struct *sas_ha = &ascb->ha->sas_ha; |
@@ -715,7 +714,7 @@ out: | |||
715 | asd_ascb_free(ascb); | 714 | asd_ascb_free(ascb); |
716 | } | 715 | } |
717 | 716 | ||
718 | static inline void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd) | 717 | static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd) |
719 | { | 718 | { |
720 | /* disable all speeds, then enable defaults */ | 719 | /* disable all speeds, then enable defaults */ |
721 | *speed_mask = SAS_SPEED_60_DIS | SAS_SPEED_30_DIS | SAS_SPEED_15_DIS | 720 | *speed_mask = SAS_SPEED_60_DIS | SAS_SPEED_30_DIS | SAS_SPEED_15_DIS |
@@ -820,6 +819,8 @@ void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc) | |||
820 | 819 | ||
821 | /* ---------- INITIATE LINK ADM TASK ---------- */ | 820 | /* ---------- INITIATE LINK ADM TASK ---------- */ |
822 | 821 | ||
822 | #if 0 | ||
823 | |||
823 | static void link_adm_tasklet_complete(struct asd_ascb *ascb, | 824 | static void link_adm_tasklet_complete(struct asd_ascb *ascb, |
824 | struct done_list_struct *dl) | 825 | struct done_list_struct *dl) |
825 | { | 826 | { |
@@ -852,6 +853,8 @@ void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id, | |||
852 | ascb->tasklet_complete = link_adm_tasklet_complete; | 853 | ascb->tasklet_complete = link_adm_tasklet_complete; |
853 | } | 854 | } |
854 | 855 | ||
856 | #endif /* 0 */ | ||
857 | |||
855 | /* ---------- SCB timer ---------- */ | 858 | /* ---------- SCB timer ---------- */ |
856 | 859 | ||
857 | /** | 860 | /** |
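[Editor's note] The scb.c hunk above keeps the unused link ADM task helpers in the tree but compiles them out with an #if 0 ... #endif pair. A trivial standalone illustration of that idiom (fake_helper() is made up):

#include <stdio.h>

#if 0
/* Everything in this block is skipped by the preprocessor, so no object
 * code is generated and no "defined but not used" warning appears. */
static void fake_helper(void)
{
        printf("never compiled\n");
}
#endif /* 0 */

int main(void)
{
        printf("only the code outside the #if 0 block is built\n");
        return 0;
}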
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c index 2a4c933eb89c..4446e3d584dc 100644 --- a/drivers/scsi/aic94xx/aic94xx_sds.c +++ b/drivers/scsi/aic94xx/aic94xx_sds.c | |||
@@ -590,8 +590,8 @@ static int asd_reset_flash(struct asd_ha_struct *asd_ha) | |||
590 | return err; | 590 | return err; |
591 | } | 591 | } |
592 | 592 | ||
593 | static inline int asd_read_flash_seg(struct asd_ha_struct *asd_ha, | 593 | static int asd_read_flash_seg(struct asd_ha_struct *asd_ha, |
594 | void *buffer, u32 offs, int size) | 594 | void *buffer, u32 offs, int size) |
595 | { | 595 | { |
596 | asd_read_reg_string(asd_ha, buffer, asd_ha->hw_prof.flash.bar+offs, | 596 | asd_read_reg_string(asd_ha, buffer, asd_ha->hw_prof.flash.bar+offs, |
597 | size); | 597 | size); |
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c index c750fbf7013b..f4272ac4c685 100644 --- a/drivers/scsi/aic94xx/aic94xx_seq.c +++ b/drivers/scsi/aic94xx/aic94xx_seq.c | |||
@@ -60,7 +60,7 @@ static u16 last_scb_site_no; | |||
60 | * | 60 | * |
61 | * Return 0 on success, negative on failure. | 61 | * Return 0 on success, negative on failure. |
62 | */ | 62 | */ |
63 | int asd_pause_cseq(struct asd_ha_struct *asd_ha) | 63 | static int asd_pause_cseq(struct asd_ha_struct *asd_ha) |
64 | { | 64 | { |
65 | int count = PAUSE_TRIES; | 65 | int count = PAUSE_TRIES; |
66 | u32 arp2ctl; | 66 | u32 arp2ctl; |
@@ -87,7 +87,7 @@ int asd_pause_cseq(struct asd_ha_struct *asd_ha) | |||
87 | * | 87 | * |
88 | * Return 0 on success, negative on error. | 88 | * Return 0 on success, negative on error. |
89 | */ | 89 | */ |
90 | int asd_unpause_cseq(struct asd_ha_struct *asd_ha) | 90 | static int asd_unpause_cseq(struct asd_ha_struct *asd_ha) |
91 | { | 91 | { |
92 | u32 arp2ctl; | 92 | u32 arp2ctl; |
93 | int count = PAUSE_TRIES; | 93 | int count = PAUSE_TRIES; |
@@ -115,7 +115,7 @@ int asd_unpause_cseq(struct asd_ha_struct *asd_ha) | |||
115 | * | 115 | * |
116 | * Return 0 on success, negative on error. | 116 | * Return 0 on success, negative on error. |
117 | */ | 117 | */ |
118 | static inline int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq) | 118 | static int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq) |
119 | { | 119 | { |
120 | u32 arp2ctl; | 120 | u32 arp2ctl; |
121 | int count = PAUSE_TRIES; | 121 | int count = PAUSE_TRIES; |
@@ -143,7 +143,7 @@ static inline int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq) | |||
143 | * | 143 | * |
144 | * Return 0 on success, negative on failure. | 144 | * Return 0 on success, negative on failure. |
145 | */ | 145 | */ |
146 | int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask) | 146 | static int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask) |
147 | { | 147 | { |
148 | int lseq; | 148 | int lseq; |
149 | int err = 0; | 149 | int err = 0; |
@@ -164,7 +164,7 @@ int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask) | |||
164 | * | 164 | * |
165 | * Return 0 on success, negative on error. | 165 | * Return 0 on success, negative on error. |
166 | */ | 166 | */ |
167 | static inline int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq) | 167 | static int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq) |
168 | { | 168 | { |
169 | u32 arp2ctl; | 169 | u32 arp2ctl; |
170 | int count = PAUSE_TRIES; | 170 | int count = PAUSE_TRIES; |
@@ -186,27 +186,6 @@ static inline int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq) | |||
186 | } | 186 | } |
187 | 187 | ||
188 | 188 | ||
189 | /** | ||
190 | * asd_unpause_lseq - unpause the link sequencer(s) | ||
191 | * @asd_ha: pointer to host adapter structure | ||
192 | * @lseq_mask: mask of link sequencers of interest | ||
193 | * | ||
194 | * Return 0 on success, negative on failure. | ||
195 | */ | ||
196 | int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask) | ||
197 | { | ||
198 | int lseq; | ||
199 | int err = 0; | ||
200 | |||
201 | for_each_sequencer(lseq_mask, lseq_mask, lseq) { | ||
202 | err = asd_seq_unpause_lseq(asd_ha, lseq); | ||
203 | if (err) | ||
204 | return err; | ||
205 | } | ||
206 | |||
207 | return err; | ||
208 | } | ||
209 | |||
210 | /* ---------- Downloading CSEQ/LSEQ microcode ---------- */ | 189 | /* ---------- Downloading CSEQ/LSEQ microcode ---------- */ |
211 | 190 | ||
212 | static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog, | 191 | static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog, |
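[Editor's note] The deleted asd_unpause_lseq() above walked a bitmask of link sequencers with for_each_sequencer(). A rough standalone sketch of that kind of bitmask iterator; FAKE_FOR_EACH_BIT is illustrative, not the driver's macro.

#include <stdio.h>

/* Visit every set bit of 'mask'; 'work' and 'bit' are scratch variables. */
#define FAKE_FOR_EACH_BIT(mask, work, bit)                                  \
        for ((work) = (mask), (bit) = 0; (work); (work) >>= 1, (bit)++)     \
                if ((work) & 1)

int main(void)
{
        unsigned int mask = 0x29;       /* bits 0, 3 and 5 set */
        unsigned int work, bit;

        FAKE_FOR_EACH_BIT(mask, work, bit)
                printf("sequencer %u selected\n", bit);
        return 0;
}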
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.h b/drivers/scsi/aic94xx/aic94xx_seq.h index 2ea6a0d52208..ad787c55525f 100644 --- a/drivers/scsi/aic94xx/aic94xx_seq.h +++ b/drivers/scsi/aic94xx/aic94xx_seq.h | |||
@@ -58,10 +58,6 @@ struct sequencer_file_header { | |||
58 | } __attribute__((packed)); | 58 | } __attribute__((packed)); |
59 | 59 | ||
60 | #ifdef __KERNEL__ | 60 | #ifdef __KERNEL__ |
61 | int asd_pause_cseq(struct asd_ha_struct *asd_ha); | ||
62 | int asd_unpause_cseq(struct asd_ha_struct *asd_ha); | ||
63 | int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask); | ||
64 | int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask); | ||
65 | int asd_init_seqs(struct asd_ha_struct *asd_ha); | 61 | int asd_init_seqs(struct asd_ha_struct *asd_ha); |
66 | int asd_start_seqs(struct asd_ha_struct *asd_ha); | 62 | int asd_start_seqs(struct asd_ha_struct *asd_ha); |
67 | int asd_release_firmware(void); | 63 | int asd_release_firmware(void); |
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c index 008df9ab92a5..326765c9caf8 100644 --- a/drivers/scsi/aic94xx/aic94xx_task.c +++ b/drivers/scsi/aic94xx/aic94xx_task.c | |||
@@ -33,7 +33,7 @@ static void asd_unbuild_ata_ascb(struct asd_ascb *a); | |||
33 | static void asd_unbuild_smp_ascb(struct asd_ascb *a); | 33 | static void asd_unbuild_smp_ascb(struct asd_ascb *a); |
34 | static void asd_unbuild_ssp_ascb(struct asd_ascb *a); | 34 | static void asd_unbuild_ssp_ascb(struct asd_ascb *a); |
35 | 35 | ||
36 | static inline void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num) | 36 | static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num) |
37 | { | 37 | { |
38 | unsigned long flags; | 38 | unsigned long flags; |
39 | 39 | ||
@@ -51,9 +51,9 @@ static const u8 data_dir_flags[] = { | |||
51 | [PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */ | 51 | [PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */ |
52 | }; | 52 | }; |
53 | 53 | ||
54 | static inline int asd_map_scatterlist(struct sas_task *task, | 54 | static int asd_map_scatterlist(struct sas_task *task, |
55 | struct sg_el *sg_arr, | 55 | struct sg_el *sg_arr, |
56 | gfp_t gfp_flags) | 56 | gfp_t gfp_flags) |
57 | { | 57 | { |
58 | struct asd_ascb *ascb = task->lldd_task; | 58 | struct asd_ascb *ascb = task->lldd_task; |
59 | struct asd_ha_struct *asd_ha = ascb->ha; | 59 | struct asd_ha_struct *asd_ha = ascb->ha; |
@@ -131,7 +131,7 @@ err_unmap: | |||
131 | return res; | 131 | return res; |
132 | } | 132 | } |
133 | 133 | ||
134 | static inline void asd_unmap_scatterlist(struct asd_ascb *ascb) | 134 | static void asd_unmap_scatterlist(struct asd_ascb *ascb) |
135 | { | 135 | { |
136 | struct asd_ha_struct *asd_ha = ascb->ha; | 136 | struct asd_ha_struct *asd_ha = ascb->ha; |
137 | struct sas_task *task = ascb->uldd_task; | 137 | struct sas_task *task = ascb->uldd_task; |
@@ -527,7 +527,7 @@ static void asd_unbuild_ssp_ascb(struct asd_ascb *a) | |||
527 | 527 | ||
528 | /* ---------- Execute Task ---------- */ | 528 | /* ---------- Execute Task ---------- */ |
529 | 529 | ||
530 | static inline int asd_can_queue(struct asd_ha_struct *asd_ha, int num) | 530 | static int asd_can_queue(struct asd_ha_struct *asd_ha, int num) |
531 | { | 531 | { |
532 | int res = 0; | 532 | int res = 0; |
533 | unsigned long flags; | 533 | unsigned long flags; |
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c index b9ac8f703a1d..633ff40c736a 100644 --- a/drivers/scsi/aic94xx/aic94xx_tmf.c +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c | |||
@@ -336,7 +336,7 @@ static void asd_tmf_tasklet_complete(struct asd_ascb *ascb, | |||
336 | asd_ascb_free(ascb); | 336 | asd_ascb_free(ascb); |
337 | } | 337 | } |
338 | 338 | ||
339 | static inline int asd_clear_nexus(struct sas_task *task) | 339 | static int asd_clear_nexus(struct sas_task *task) |
340 | { | 340 | { |
341 | int res = TMF_RESP_FUNC_FAILED; | 341 | int res = TMF_RESP_FUNC_FAILED; |
342 | int leftover; | 342 | int leftover; |
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index 3bedf2466bd1..8e53f02cc311 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c | |||
@@ -2983,7 +2983,6 @@ static struct scsi_host_template acornscsi_template = { | |||
2983 | .this_id = 7, | 2983 | .this_id = 7, |
2984 | .sg_tablesize = SG_ALL, | 2984 | .sg_tablesize = SG_ALL, |
2985 | .cmd_per_lun = 2, | 2985 | .cmd_per_lun = 2, |
2986 | .unchecked_isa_dma = 0, | ||
2987 | .use_clustering = DISABLE_CLUSTERING, | 2986 | .use_clustering = DISABLE_CLUSTERING, |
2988 | .proc_name = "acornscsi", | 2987 | .proc_name = "acornscsi", |
2989 | }; | 2988 | }; |
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c index 49d838e90a24..a3398fe70a9c 100644 --- a/drivers/scsi/arm/cumana_1.c +++ b/drivers/scsi/arm/cumana_1.c | |||
@@ -222,7 +222,6 @@ static struct scsi_host_template cumanascsi_template = { | |||
222 | .this_id = 7, | 222 | .this_id = 7, |
223 | .sg_tablesize = SG_ALL, | 223 | .sg_tablesize = SG_ALL, |
224 | .cmd_per_lun = 2, | 224 | .cmd_per_lun = 2, |
225 | .unchecked_isa_dma = 0, | ||
226 | .use_clustering = DISABLE_CLUSTERING, | 225 | .use_clustering = DISABLE_CLUSTERING, |
227 | .proc_name = "CumanaSCSI-1", | 226 | .proc_name = "CumanaSCSI-1", |
228 | }; | 227 | }; |
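[Editor's note] The acornscsi and Cumana template hunks above delete ".unchecked_isa_dma = 0" lines; with designated initializers, members that are not named are implicitly zero, so the removal does not change behaviour. A standalone sketch, with struct fake_template standing in for scsi_host_template:

#include <assert.h>

struct fake_template {
        int this_id;
        int sg_tablesize;
        int cmd_per_lun;
        int unchecked_isa_dma;
};

static struct fake_template tmpl = {
        .this_id      = 7,
        .sg_tablesize = 128,
        .cmd_per_lun  = 2,
        /* .unchecked_isa_dma is not named, so it is implicitly 0 */
};

int main(void)
{
        assert(tmpl.unchecked_isa_dma == 0);
        return 0;
}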
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index 7aad15436d24..92d1cb1b21cb 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c | |||
@@ -113,7 +113,7 @@ static const struct { | |||
113 | unsigned char asc; | 113 | unsigned char asc; |
114 | unsigned char ascq; | 114 | unsigned char ascq; |
115 | int errno; | 115 | int errno; |
116 | } err[] = { | 116 | } ch_err[] = { |
117 | /* Just filled in what looks right. Haven't checked any standard paper for | 117 | /* Just filled in what looks right. Haven't checked any standard paper for |
118 | these errno assignments, so they may be wrong... */ | 118 | these errno assignments, so they may be wrong... */ |
119 | { | 119 | { |
@@ -155,11 +155,11 @@ static int ch_find_errno(struct scsi_sense_hdr *sshdr) | |||
155 | /* Check to see if additional sense information is available */ | 155 | /* Check to see if additional sense information is available */ |
156 | if (scsi_sense_valid(sshdr) && | 156 | if (scsi_sense_valid(sshdr) && |
157 | sshdr->asc != 0) { | 157 | sshdr->asc != 0) { |
158 | for (i = 0; err[i].errno != 0; i++) { | 158 | for (i = 0; ch_err[i].errno != 0; i++) { |
159 | if (err[i].sense == sshdr->sense_key && | 159 | if (ch_err[i].sense == sshdr->sense_key && |
160 | err[i].asc == sshdr->asc && | 160 | ch_err[i].asc == sshdr->asc && |
161 | err[i].ascq == sshdr->ascq) { | 161 | ch_err[i].ascq == sshdr->ascq) { |
162 | errno = -err[i].errno; | 162 | errno = -ch_err[i].errno; |
163 | break; | 163 | break; |
164 | } | 164 | } |
165 | } | 165 | } |
@@ -721,8 +721,8 @@ static long ch_ioctl(struct file *file, | |||
721 | case CHIOGELEM: | 721 | case CHIOGELEM: |
722 | { | 722 | { |
723 | struct changer_get_element cge; | 723 | struct changer_get_element cge; |
724 | u_char cmd[12]; | 724 | u_char ch_cmd[12]; |
725 | u_char *buffer; | 725 | u_char *buffer; |
726 | unsigned int elem; | 726 | unsigned int elem; |
727 | int result,i; | 727 | int result,i; |
728 | 728 | ||
@@ -739,17 +739,18 @@ static long ch_ioctl(struct file *file, | |||
739 | mutex_lock(&ch->lock); | 739 | mutex_lock(&ch->lock); |
740 | 740 | ||
741 | voltag_retry: | 741 | voltag_retry: |
742 | memset(cmd,0,sizeof(cmd)); | 742 | memset(ch_cmd, 0, sizeof(ch_cmd)); |
743 | cmd[0] = READ_ELEMENT_STATUS; | 743 | ch_cmd[0] = READ_ELEMENT_STATUS; |
744 | cmd[1] = (ch->device->lun << 5) | | 744 | ch_cmd[1] = (ch->device->lun << 5) | |
745 | (ch->voltags ? 0x10 : 0) | | 745 | (ch->voltags ? 0x10 : 0) | |
746 | ch_elem_to_typecode(ch,elem); | 746 | ch_elem_to_typecode(ch,elem); |
747 | cmd[2] = (elem >> 8) & 0xff; | 747 | ch_cmd[2] = (elem >> 8) & 0xff; |
748 | cmd[3] = elem & 0xff; | 748 | ch_cmd[3] = elem & 0xff; |
749 | cmd[5] = 1; | 749 | ch_cmd[5] = 1; |
750 | cmd[9] = 255; | 750 | ch_cmd[9] = 255; |
751 | 751 | ||
752 | if (0 == (result = ch_do_scsi(ch, cmd, buffer, 256, DMA_FROM_DEVICE))) { | 752 | result = ch_do_scsi(ch, ch_cmd, buffer, 256, DMA_FROM_DEVICE); |
753 | if (!result) { | ||
753 | cge.cge_status = buffer[18]; | 754 | cge.cge_status = buffer[18]; |
754 | cge.cge_flags = 0; | 755 | cge.cge_flags = 0; |
755 | if (buffer[18] & CESTATUS_EXCEPT) { | 756 | if (buffer[18] & CESTATUS_EXCEPT) { |
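[Editor's note] The ch.c hunks above rename the file-scope err[] table and the local cmd[] buffer to ch_err[] and ch_cmd[], presumably to avoid clashing with more widely used identifiers, and pull the ch_do_scsi() call out of the if () condition. The sentinel-terminated lookup that ch_find_errno() performs over the renamed table looks roughly like this standalone sketch; table contents and names are made up.

#include <stdio.h>

struct fake_err {
        unsigned char sense, asc, ascq;
        int err;                        /* 0 terminates the table */
};

static const struct fake_err fake_tbl[] = {
        { 0x02, 0x04, 0x01, 16 },       /* made-up mapping: not ready -> EBUSY-like */
        { 0x05, 0x21, 0x01, 22 },       /* made-up mapping: bad address -> EINVAL-like */
        { 0, 0, 0, 0 },                 /* sentinel */
};

static int fake_find_errno(unsigned char sense, unsigned char asc,
                           unsigned char ascq)
{
        int i;

        for (i = 0; fake_tbl[i].err != 0; i++)
                if (fake_tbl[i].sense == sense &&
                    fake_tbl[i].asc == asc &&
                    fake_tbl[i].ascq == ascq)
                        return -fake_tbl[i].err;
        return -5;                      /* fall back to a generic I/O error */
}

int main(void)
{
        printf("%d\n", fake_find_errno(0x02, 0x04, 0x01));   /* -16 */
        printf("%d\n", fake_find_errno(0x07, 0x00, 0x00));   /* -5, no match */
        return 0;
}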
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index e351db6c0077..075e2397273c 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c | |||
@@ -4761,7 +4761,6 @@ static struct scsi_host_template dc395x_driver_template = { | |||
4761 | .cmd_per_lun = DC395x_MAX_CMD_PER_LUN, | 4761 | .cmd_per_lun = DC395x_MAX_CMD_PER_LUN, |
4762 | .eh_abort_handler = dc395x_eh_abort, | 4762 | .eh_abort_handler = dc395x_eh_abort, |
4763 | .eh_bus_reset_handler = dc395x_eh_bus_reset, | 4763 | .eh_bus_reset_handler = dc395x_eh_bus_reset, |
4764 | .unchecked_isa_dma = 0, | ||
4765 | .use_clustering = DISABLE_CLUSTERING, | 4764 | .use_clustering = DISABLE_CLUSTERING, |
4766 | }; | 4765 | }; |
4767 | 4766 | ||
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c index b5a60926e556..952505c006df 100644 --- a/drivers/scsi/eata_pio.c +++ b/drivers/scsi/eata_pio.c | |||
@@ -815,8 +815,6 @@ static int register_pio_HBA(long base, struct get_conf *gc, struct pci_dev *pdev | |||
815 | else | 815 | else |
816 | hd->primary = 1; | 816 | hd->primary = 1; |
817 | 817 | ||
818 | sh->unchecked_isa_dma = 0; /* We can only do PIO */ | ||
819 | |||
820 | hd->next = NULL; /* build a linked list of all HBAs */ | 818 | hd->next = NULL; /* build a linked list of all HBAs */ |
821 | hd->prev = last_HBA; | 819 | hd->prev = last_HBA; |
822 | if (hd->prev != NULL) | 820 | if (hd->prev != NULL) |
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 0b2080d33575..c6d6e7c6559a 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c | |||
@@ -85,10 +85,10 @@ | |||
85 | 85 | ||
86 | /* The meaning of the Scsi_Pointer members in this driver is as follows: | 86 | /* The meaning of the Scsi_Pointer members in this driver is as follows: |
87 | * ptr: Chaining | 87 | * ptr: Chaining |
88 | * this_residual: gdth_bufflen | 88 | * this_residual: unused |
89 | * buffer: gdth_sglist | 89 | * buffer: unused |
90 | * dma_handle: unused | 90 | * dma_handle: unused |
91 | * buffers_residual: gdth_sg_count | 91 | * buffers_residual: unused |
92 | * Status: unused | 92 | * Status: unused |
93 | * Message: unused | 93 | * Message: unused |
94 | * have_data_in: unused | 94 | * have_data_in: unused |
@@ -372,47 +372,6 @@ static const struct file_operations gdth_fops = { | |||
372 | .release = gdth_close, | 372 | .release = gdth_close, |
373 | }; | 373 | }; |
374 | 374 | ||
375 | /* | ||
376 | * gdth scsi_command access wrappers. | ||
377 | * below 6 functions are used throughout the driver to access scsi_command's | ||
378 | * io parameters. The reason we do not use the regular accessors from | ||
379 | * scsi_cmnd.h is because of gdth_execute(). Since it is unrecommended for | ||
380 | * llds to directly set scsi_cmnd's IO members. This driver will use SCp | ||
381 | * members for IO parameters, and will copy scsi_cmnd's members to Scp | ||
382 | * members in queuecommand. For internal commands through gdth_execute() | ||
383 | * SCp's members will be set directly. | ||
384 | */ | ||
385 | static inline unsigned gdth_bufflen(struct scsi_cmnd *cmd) | ||
386 | { | ||
387 | return (unsigned)cmd->SCp.this_residual; | ||
388 | } | ||
389 | |||
390 | static inline void gdth_set_bufflen(struct scsi_cmnd *cmd, unsigned bufflen) | ||
391 | { | ||
392 | cmd->SCp.this_residual = bufflen; | ||
393 | } | ||
394 | |||
395 | static inline unsigned gdth_sg_count(struct scsi_cmnd *cmd) | ||
396 | { | ||
397 | return (unsigned)cmd->SCp.buffers_residual; | ||
398 | } | ||
399 | |||
400 | static inline void gdth_set_sg_count(struct scsi_cmnd *cmd, unsigned sg_count) | ||
401 | { | ||
402 | cmd->SCp.buffers_residual = sg_count; | ||
403 | } | ||
404 | |||
405 | static inline struct scatterlist *gdth_sglist(struct scsi_cmnd *cmd) | ||
406 | { | ||
407 | return cmd->SCp.buffer; | ||
408 | } | ||
409 | |||
410 | static inline void gdth_set_sglist(struct scsi_cmnd *cmd, | ||
411 | struct scatterlist *sglist) | ||
412 | { | ||
413 | cmd->SCp.buffer = sglist; | ||
414 | } | ||
415 | |||
416 | #include "gdth_proc.h" | 375 | #include "gdth_proc.h" |
417 | #include "gdth_proc.c" | 376 | #include "gdth_proc.c" |
418 | 377 | ||
@@ -591,125 +550,111 @@ static int __init gdth_search_isa(ulong32 bios_adr) | |||
591 | #endif /* CONFIG_ISA */ | 550 | #endif /* CONFIG_ISA */ |
592 | 551 | ||
593 | #ifdef CONFIG_PCI | 552 | #ifdef CONFIG_PCI |
594 | static void gdth_search_dev(gdth_pci_str *pcistr, ushort *cnt, | 553 | static bool gdth_pci_registered; |
595 | ushort vendor, ushort dev); | ||
596 | 554 | ||
597 | static int __init gdth_search_pci(gdth_pci_str *pcistr) | 555 | static bool gdth_search_vortex(ushort device) |
598 | { | 556 | { |
599 | ushort device, cnt; | 557 | if (device <= PCI_DEVICE_ID_VORTEX_GDT6555) |
600 | 558 | return true; | |
601 | TRACE(("gdth_search_pci()\n")); | 559 | if (device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP && |
602 | 560 | device <= PCI_DEVICE_ID_VORTEX_GDTMAXRP) | |
603 | cnt = 0; | 561 | return true; |
604 | for (device = 0; device <= PCI_DEVICE_ID_VORTEX_GDT6555; ++device) | 562 | if (device == PCI_DEVICE_ID_VORTEX_GDTNEWRX || |
605 | gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX, device); | 563 | device == PCI_DEVICE_ID_VORTEX_GDTNEWRX2) |
606 | for (device = PCI_DEVICE_ID_VORTEX_GDT6x17RP; | 564 | return true; |
607 | device <= PCI_DEVICE_ID_VORTEX_GDTMAXRP; ++device) | 565 | return false; |
608 | gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX, device); | ||
609 | gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX, | ||
610 | PCI_DEVICE_ID_VORTEX_GDTNEWRX); | ||
611 | gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX, | ||
612 | PCI_DEVICE_ID_VORTEX_GDTNEWRX2); | ||
613 | gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_INTEL, | ||
614 | PCI_DEVICE_ID_INTEL_SRC); | ||
615 | gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_INTEL, | ||
616 | PCI_DEVICE_ID_INTEL_SRC_XSCALE); | ||
617 | return cnt; | ||
618 | } | 566 | } |
619 | 567 | ||
568 | static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out); | ||
569 | static int gdth_pci_init_one(struct pci_dev *pdev, | ||
570 | const struct pci_device_id *ent); | ||
571 | static void gdth_pci_remove_one(struct pci_dev *pdev); | ||
572 | static void gdth_remove_one(gdth_ha_str *ha); | ||
573 | |||
620 | /* Vortex only makes RAID controllers. | 574 | /* Vortex only makes RAID controllers. |
621 | * We do not really want to specify all 550 ids here, so wildcard match. | 575 | * We do not really want to specify all 550 ids here, so wildcard match. |
622 | */ | 576 | */ |
623 | static struct pci_device_id gdthtable[] __maybe_unused = { | 577 | static const struct pci_device_id gdthtable[] = { |
624 | {PCI_VENDOR_ID_VORTEX,PCI_ANY_ID,PCI_ANY_ID, PCI_ANY_ID}, | 578 | { PCI_VDEVICE(VORTEX, PCI_ANY_ID) }, |
625 | {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC,PCI_ANY_ID,PCI_ANY_ID}, | 579 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC) }, |
626 | {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC_XSCALE,PCI_ANY_ID,PCI_ANY_ID}, | 580 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC_XSCALE) }, |
627 | {0} | 581 | { } /* terminate list */ |
582 | }; | ||
583 | MODULE_DEVICE_TABLE(pci, gdthtable); | ||
584 | |||
585 | static struct pci_driver gdth_pci_driver = { | ||
586 | .name = "gdth", | ||
587 | .id_table = gdthtable, | ||
588 | .probe = gdth_pci_init_one, | ||
589 | .remove = gdth_pci_remove_one, | ||
628 | }; | 590 | }; |
629 | MODULE_DEVICE_TABLE(pci,gdthtable); | ||
630 | 591 | ||
631 | static void __init gdth_search_dev(gdth_pci_str *pcistr, ushort *cnt, | 592 | static void gdth_pci_remove_one(struct pci_dev *pdev) |
632 | ushort vendor, ushort device) | ||
633 | { | 593 | { |
634 | ulong base0, base1, base2; | 594 | gdth_ha_str *ha = pci_get_drvdata(pdev); |
635 | struct pci_dev *pdev; | 595 | |
596 | pci_set_drvdata(pdev, NULL); | ||
597 | |||
598 | list_del(&ha->list); | ||
599 | gdth_remove_one(ha); | ||
600 | |||
601 | pci_disable_device(pdev); | ||
602 | } | ||
603 | |||
604 | static int gdth_pci_init_one(struct pci_dev *pdev, | ||
605 | const struct pci_device_id *ent) | ||
606 | { | ||
607 | ushort vendor = pdev->vendor; | ||
608 | ushort device = pdev->device; | ||
609 | ulong base0, base1, base2; | ||
610 | int rc; | ||
611 | gdth_pci_str gdth_pcistr; | ||
612 | gdth_ha_str *ha = NULL; | ||
636 | 613 | ||
637 | TRACE(("gdth_search_dev() cnt %d vendor %x device %x\n", | 614 | TRACE(("gdth_search_dev() cnt %d vendor %x device %x\n", |
638 | *cnt, vendor, device)); | 615 | gdth_ctr_count, vendor, device)); |
639 | 616 | ||
640 | pdev = NULL; | 617 | memset(&gdth_pcistr, 0, sizeof(gdth_pcistr)); |
641 | while ((pdev = pci_get_device(vendor, device, pdev)) | 618 | |
642 | != NULL) { | 619 | if (vendor == PCI_VENDOR_ID_VORTEX && !gdth_search_vortex(device)) |
643 | if (pci_enable_device(pdev)) | 620 | return -ENODEV; |
644 | continue; | 621 | |
645 | if (*cnt >= MAXHA) { | 622 | rc = pci_enable_device(pdev); |
646 | pci_dev_put(pdev); | 623 | if (rc) |
647 | return; | 624 | return rc; |
648 | } | 625 | |
626 | if (gdth_ctr_count >= MAXHA) | ||
627 | return -EBUSY; | ||
649 | 628 | ||
650 | /* GDT PCI controller found, resources are already in pdev */ | 629 | /* GDT PCI controller found, resources are already in pdev */ |
651 | pcistr[*cnt].pdev = pdev; | 630 | gdth_pcistr.pdev = pdev; |
652 | pcistr[*cnt].irq = pdev->irq; | ||
653 | base0 = pci_resource_flags(pdev, 0); | 631 | base0 = pci_resource_flags(pdev, 0); |
654 | base1 = pci_resource_flags(pdev, 1); | 632 | base1 = pci_resource_flags(pdev, 1); |
655 | base2 = pci_resource_flags(pdev, 2); | 633 | base2 = pci_resource_flags(pdev, 2); |
656 | if (device <= PCI_DEVICE_ID_VORTEX_GDT6000B || /* GDT6000/B */ | 634 | if (device <= PCI_DEVICE_ID_VORTEX_GDT6000B || /* GDT6000/B */ |
657 | device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP) { /* MPR */ | 635 | device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP) { /* MPR */ |
658 | if (!(base0 & IORESOURCE_MEM)) | 636 | if (!(base0 & IORESOURCE_MEM)) |
659 | continue; | 637 | return -ENODEV; |
660 | pcistr[*cnt].dpmem = pci_resource_start(pdev, 0); | 638 | gdth_pcistr.dpmem = pci_resource_start(pdev, 0); |
661 | } else { /* GDT6110, GDT6120, .. */ | 639 | } else { /* GDT6110, GDT6120, .. */ |
662 | if (!(base0 & IORESOURCE_MEM) || | 640 | if (!(base0 & IORESOURCE_MEM) || |
663 | !(base2 & IORESOURCE_MEM) || | 641 | !(base2 & IORESOURCE_MEM) || |
664 | !(base1 & IORESOURCE_IO)) | 642 | !(base1 & IORESOURCE_IO)) |
665 | continue; | 643 | return -ENODEV; |
666 | pcistr[*cnt].dpmem = pci_resource_start(pdev, 2); | 644 | gdth_pcistr.dpmem = pci_resource_start(pdev, 2); |
667 | pcistr[*cnt].io_mm = pci_resource_start(pdev, 0); | 645 | gdth_pcistr.io = pci_resource_start(pdev, 1); |
668 | pcistr[*cnt].io = pci_resource_start(pdev, 1); | ||
669 | } | 646 | } |
670 | TRACE2(("Controller found at %d/%d, irq %d, dpmem 0x%lx\n", | 647 | TRACE2(("Controller found at %d/%d, irq %d, dpmem 0x%lx\n", |
671 | pcistr[*cnt].pdev->bus->number, | 648 | gdth_pcistr.pdev->bus->number, |
672 | PCI_SLOT(pcistr[*cnt].pdev->devfn), | 649 | PCI_SLOT(gdth_pcistr.pdev->devfn), |
673 | pcistr[*cnt].irq, pcistr[*cnt].dpmem)); | 650 | gdth_pcistr.irq, |
674 | (*cnt)++; | 651 | gdth_pcistr.dpmem)); |
675 | } | ||
676 | } | ||
677 | 652 | ||
678 | static void __init gdth_sort_pci(gdth_pci_str *pcistr, int cnt) | 653 | rc = gdth_pci_probe_one(&gdth_pcistr, &ha); |
679 | { | 654 | if (rc) |
680 | gdth_pci_str temp; | 655 | return rc; |
681 | int i, changed; | ||
682 | |||
683 | TRACE(("gdth_sort_pci() cnt %d\n",cnt)); | ||
684 | if (cnt == 0) | ||
685 | return; | ||
686 | 656 | ||
687 | do { | 657 | return 0; |
688 | changed = FALSE; | ||
689 | for (i = 0; i < cnt-1; ++i) { | ||
690 | if (!reverse_scan) { | ||
691 | if ((pcistr[i].pdev->bus->number > pcistr[i+1].pdev->bus->number) || | ||
692 | (pcistr[i].pdev->bus->number == pcistr[i+1].pdev->bus->number && | ||
693 | PCI_SLOT(pcistr[i].pdev->devfn) > | ||
694 | PCI_SLOT(pcistr[i+1].pdev->devfn))) { | ||
695 | temp = pcistr[i]; | ||
696 | pcistr[i] = pcistr[i+1]; | ||
697 | pcistr[i+1] = temp; | ||
698 | changed = TRUE; | ||
699 | } | ||
700 | } else { | ||
701 | if ((pcistr[i].pdev->bus->number < pcistr[i+1].pdev->bus->number) || | ||
702 | (pcistr[i].pdev->bus->number == pcistr[i+1].pdev->bus->number && | ||
703 | PCI_SLOT(pcistr[i].pdev->devfn) < | ||
704 | PCI_SLOT(pcistr[i+1].pdev->devfn))) { | ||
705 | temp = pcistr[i]; | ||
706 | pcistr[i] = pcistr[i+1]; | ||
707 | pcistr[i+1] = temp; | ||
708 | changed = TRUE; | ||
709 | } | ||
710 | } | ||
711 | } | ||
712 | } while (changed); | ||
713 | } | 658 | } |
714 | #endif /* CONFIG_PCI */ | 659 | #endif /* CONFIG_PCI */ |
715 | 660 | ||
@@ -909,7 +854,8 @@ static int __init gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha) | |||
909 | #endif /* CONFIG_ISA */ | 854 | #endif /* CONFIG_ISA */ |
910 | 855 | ||
911 | #ifdef CONFIG_PCI | 856 | #ifdef CONFIG_PCI |
912 | static int __init gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha) | 857 | static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr, |
858 | gdth_ha_str *ha) | ||
913 | { | 859 | { |
914 | register gdt6_dpram_str __iomem *dp6_ptr; | 860 | register gdt6_dpram_str __iomem *dp6_ptr; |
915 | register gdt6c_dpram_str __iomem *dp6c_ptr; | 861 | register gdt6c_dpram_str __iomem *dp6c_ptr; |
@@ -921,14 +867,14 @@ static int __init gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha) | |||
921 | 867 | ||
922 | TRACE(("gdth_init_pci()\n")); | 868 | TRACE(("gdth_init_pci()\n")); |
923 | 869 | ||
924 | if (pcistr->pdev->vendor == PCI_VENDOR_ID_INTEL) | 870 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) |
925 | ha->oem_id = OEM_ID_INTEL; | 871 | ha->oem_id = OEM_ID_INTEL; |
926 | else | 872 | else |
927 | ha->oem_id = OEM_ID_ICP; | 873 | ha->oem_id = OEM_ID_ICP; |
928 | ha->brd_phys = (pcistr->pdev->bus->number << 8) | (pcistr->pdev->devfn & 0xf8); | 874 | ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8); |
929 | ha->stype = (ulong32)pcistr->pdev->device; | 875 | ha->stype = (ulong32)pdev->device; |
930 | ha->irq = pcistr->irq; | 876 | ha->irq = pdev->irq; |
931 | ha->pdev = pcistr->pdev; | 877 | ha->pdev = pdev; |
932 | 878 | ||
933 | if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6000B) { /* GDT6000/B */ | 879 | if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6000B) { /* GDT6000/B */ |
934 | TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq)); | 880 | TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq)); |
@@ -956,8 +902,7 @@ static int __init gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha) | |||
956 | continue; | 902 | continue; |
957 | } | 903 | } |
958 | iounmap(ha->brd); | 904 | iounmap(ha->brd); |
959 | pci_write_config_dword(pcistr->pdev, | 905 | pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i); |
960 | PCI_BASE_ADDRESS_0, i); | ||
961 | ha->brd = ioremap(i, sizeof(gdt6_dpram_str)); | 906 | ha->brd = ioremap(i, sizeof(gdt6_dpram_str)); |
962 | if (ha->brd == NULL) { | 907 | if (ha->brd == NULL) { |
963 | printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); | 908 | printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); |
@@ -1066,8 +1011,7 @@ static int __init gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha) | |||
1066 | continue; | 1011 | continue; |
1067 | } | 1012 | } |
1068 | iounmap(ha->brd); | 1013 | iounmap(ha->brd); |
1069 | pci_write_config_dword(pcistr->pdev, | 1014 | pci_write_config_dword(pdev, PCI_BASE_ADDRESS_2, i); |
1070 | PCI_BASE_ADDRESS_2, i); | ||
1071 | ha->brd = ioremap(i, sizeof(gdt6c_dpram_str)); | 1015 | ha->brd = ioremap(i, sizeof(gdt6c_dpram_str)); |
1072 | if (ha->brd == NULL) { | 1016 | if (ha->brd == NULL) { |
1073 | printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); | 1017 | printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); |
@@ -1159,16 +1103,16 @@ static int __init gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha) | |||
1159 | } | 1103 | } |
1160 | 1104 | ||
1161 | /* manipulate config. space to enable DPMEM, start RP controller */ | 1105 | /* manipulate config. space to enable DPMEM, start RP controller */ |
1162 | pci_read_config_word(pcistr->pdev, PCI_COMMAND, &command); | 1106 | pci_read_config_word(pdev, PCI_COMMAND, &command); |
1163 | command |= 6; | 1107 | command |= 6; |
1164 | pci_write_config_word(pcistr->pdev, PCI_COMMAND, command); | 1108 | pci_write_config_word(pdev, PCI_COMMAND, command); |
1165 | if (pci_resource_start(pcistr->pdev, 8) == 1UL) | 1109 | if (pci_resource_start(pdev, 8) == 1UL) |
1166 | pci_resource_start(pcistr->pdev, 8) = 0UL; | 1110 | pci_resource_start(pdev, 8) = 0UL; |
1167 | i = 0xFEFF0001UL; | 1111 | i = 0xFEFF0001UL; |
1168 | pci_write_config_dword(pcistr->pdev, PCI_ROM_ADDRESS, i); | 1112 | pci_write_config_dword(pdev, PCI_ROM_ADDRESS, i); |
1169 | gdth_delay(1); | 1113 | gdth_delay(1); |
1170 | pci_write_config_dword(pcistr->pdev, PCI_ROM_ADDRESS, | 1114 | pci_write_config_dword(pdev, PCI_ROM_ADDRESS, |
1171 | pci_resource_start(pcistr->pdev, 8)); | 1115 | pci_resource_start(pdev, 8)); |
1172 | 1116 | ||
1173 | dp6m_ptr = ha->brd; | 1117 | dp6m_ptr = ha->brd; |
1174 | 1118 | ||
@@ -1195,8 +1139,7 @@ static int __init gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha) | |||
1195 | continue; | 1139 | continue; |
1196 | } | 1140 | } |
1197 | iounmap(ha->brd); | 1141 | iounmap(ha->brd); |
1198 | pci_write_config_dword(pcistr->pdev, | 1142 | pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i); |
1199 | PCI_BASE_ADDRESS_0, i); | ||
1200 | ha->brd = ioremap(i, sizeof(gdt6m_dpram_str)); | 1143 | ha->brd = ioremap(i, sizeof(gdt6m_dpram_str)); |
1201 | if (ha->brd == NULL) { | 1144 | if (ha->brd == NULL) { |
1202 | printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); | 1145 | printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); |
@@ -2353,12 +2296,12 @@ static void gdth_next(gdth_ha_str *ha) | |||
2353 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | 2296 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, |
2354 | char *buffer, ushort count) | 2297 | char *buffer, ushort count) |
2355 | { | 2298 | { |
2356 | ushort cpcount,i, max_sg = gdth_sg_count(scp); | 2299 | ushort cpcount,i, max_sg = scsi_sg_count(scp); |
2357 | ushort cpsum,cpnow; | 2300 | ushort cpsum,cpnow; |
2358 | struct scatterlist *sl; | 2301 | struct scatterlist *sl; |
2359 | char *address; | 2302 | char *address; |
2360 | 2303 | ||
2361 | cpcount = min_t(ushort, count, gdth_bufflen(scp)); | 2304 | cpcount = min_t(ushort, count, scsi_bufflen(scp)); |
2362 | 2305 | ||
2363 | if (cpcount) { | 2306 | if (cpcount) { |
2364 | cpsum=0; | 2307 | cpsum=0; |
@@ -2366,7 +2309,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | |||
2366 | unsigned long flags; | 2309 | unsigned long flags; |
2367 | cpnow = (ushort)sl->length; | 2310 | cpnow = (ushort)sl->length; |
2368 | TRACE(("copy_internal() now %d sum %d count %d %d\n", | 2311 | TRACE(("copy_internal() now %d sum %d count %d %d\n", |
2369 | cpnow, cpsum, cpcount, gdth_bufflen(scp))); | 2312 | cpnow, cpsum, cpcount, scsi_bufflen(scp))); |
2370 | if (cpsum+cpnow > cpcount) | 2313 | if (cpsum+cpnow > cpcount) |
2371 | cpnow = cpcount - cpsum; | 2314 | cpnow = cpcount - cpsum; |
2372 | cpsum += cpnow; | 2315 | cpsum += cpnow; |
@@ -2589,10 +2532,10 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive) | |||
2589 | cmdp->u.cache.BlockCnt = blockcnt; | 2532 | cmdp->u.cache.BlockCnt = blockcnt; |
2590 | } | 2533 | } |
2591 | 2534 | ||
2592 | if (gdth_bufflen(scp)) { | 2535 | if (scsi_bufflen(scp)) { |
2593 | cmndinfo->dma_dir = (read_write == 1 ? | 2536 | cmndinfo->dma_dir = (read_write == 1 ? |
2594 | PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); | 2537 | PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); |
2595 | sgcnt = pci_map_sg(ha->pdev, gdth_sglist(scp), gdth_sg_count(scp), | 2538 | sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp), |
2596 | cmndinfo->dma_dir); | 2539 | cmndinfo->dma_dir); |
2597 | if (mode64) { | 2540 | if (mode64) { |
2598 | struct scatterlist *sl; | 2541 | struct scatterlist *sl; |
@@ -2739,7 +2682,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b) | |||
2739 | cmdp->u.raw64.lun = l; | 2682 | cmdp->u.raw64.lun = l; |
2740 | cmdp->u.raw64.bus = b; | 2683 | cmdp->u.raw64.bus = b; |
2741 | cmdp->u.raw64.priority = 0; | 2684 | cmdp->u.raw64.priority = 0; |
2742 | cmdp->u.raw64.sdlen = gdth_bufflen(scp); | 2685 | cmdp->u.raw64.sdlen = scsi_bufflen(scp); |
2743 | cmdp->u.raw64.sense_len = 16; | 2686 | cmdp->u.raw64.sense_len = 16; |
2744 | cmdp->u.raw64.sense_data = sense_paddr; | 2687 | cmdp->u.raw64.sense_data = sense_paddr; |
2745 | cmdp->u.raw64.direction = | 2688 | cmdp->u.raw64.direction = |
@@ -2756,7 +2699,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b) | |||
2756 | cmdp->u.raw.bus = b; | 2699 | cmdp->u.raw.bus = b; |
2757 | cmdp->u.raw.priority = 0; | 2700 | cmdp->u.raw.priority = 0; |
2758 | cmdp->u.raw.link_p = 0; | 2701 | cmdp->u.raw.link_p = 0; |
2759 | cmdp->u.raw.sdlen = gdth_bufflen(scp); | 2702 | cmdp->u.raw.sdlen = scsi_bufflen(scp); |
2760 | cmdp->u.raw.sense_len = 16; | 2703 | cmdp->u.raw.sense_len = 16; |
2761 | cmdp->u.raw.sense_data = sense_paddr; | 2704 | cmdp->u.raw.sense_data = sense_paddr; |
2762 | cmdp->u.raw.direction = | 2705 | cmdp->u.raw.direction = |
@@ -2765,9 +2708,9 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b) | |||
2765 | cmdp->u.raw.sg_ranz = 0; | 2708 | cmdp->u.raw.sg_ranz = 0; |
2766 | } | 2709 | } |
2767 | 2710 | ||
2768 | if (gdth_bufflen(scp)) { | 2711 | if (scsi_bufflen(scp)) { |
2769 | cmndinfo->dma_dir = PCI_DMA_BIDIRECTIONAL; | 2712 | cmndinfo->dma_dir = PCI_DMA_BIDIRECTIONAL; |
2770 | sgcnt = pci_map_sg(ha->pdev, gdth_sglist(scp), gdth_sg_count(scp), | 2713 | sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp), |
2771 | cmndinfo->dma_dir); | 2714 | cmndinfo->dma_dir); |
2772 | if (mode64) { | 2715 | if (mode64) { |
2773 | struct scatterlist *sl; | 2716 | struct scatterlist *sl; |
@@ -3388,8 +3331,8 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index, | |||
3388 | /* retry */ | 3331 | /* retry */ |
3389 | return 2; | 3332 | return 2; |
3390 | } | 3333 | } |
3391 | if (gdth_bufflen(scp)) | 3334 | if (scsi_bufflen(scp)) |
3392 | pci_unmap_sg(ha->pdev, gdth_sglist(scp), gdth_sg_count(scp), | 3335 | pci_unmap_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp), |
3393 | cmndinfo->dma_dir); | 3336 | cmndinfo->dma_dir); |
3394 | 3337 | ||
3395 | if (cmndinfo->sense_paddr) | 3338 | if (cmndinfo->sense_paddr) |
@@ -4031,10 +3974,6 @@ static int gdth_queuecommand(struct scsi_cmnd *scp, | |||
4031 | gdth_update_timeout(scp, scp->timeout_per_command * 6); | 3974 | gdth_update_timeout(scp, scp->timeout_per_command * 6); |
4032 | cmndinfo->priority = DEFAULT_PRI; | 3975 | cmndinfo->priority = DEFAULT_PRI; |
4033 | 3976 | ||
4034 | gdth_set_bufflen(scp, scsi_bufflen(scp)); | ||
4035 | gdth_set_sg_count(scp, scsi_sg_count(scp)); | ||
4036 | gdth_set_sglist(scp, scsi_sglist(scp)); | ||
4037 | |||
4038 | return __gdth_queuecommand(ha, scp, cmndinfo); | 3977 | return __gdth_queuecommand(ha, scp, cmndinfo); |
4039 | } | 3978 | } |
4040 | 3979 | ||
@@ -4955,12 +4894,16 @@ static int __init gdth_eisa_probe_one(ushort eisa_slot) | |||
4955 | #endif /* CONFIG_EISA */ | 4894 | #endif /* CONFIG_EISA */ |
4956 | 4895 | ||
4957 | #ifdef CONFIG_PCI | 4896 | #ifdef CONFIG_PCI |
4958 | static int __init gdth_pci_probe_one(gdth_pci_str *pcistr, int ctr) | 4897 | static int gdth_pci_probe_one(gdth_pci_str *pcistr, |
4898 | gdth_ha_str **ha_out) | ||
4959 | { | 4899 | { |
4960 | struct Scsi_Host *shp; | 4900 | struct Scsi_Host *shp; |
4961 | gdth_ha_str *ha; | 4901 | gdth_ha_str *ha; |
4962 | dma_addr_t scratch_dma_handle = 0; | 4902 | dma_addr_t scratch_dma_handle = 0; |
4963 | int error, i; | 4903 | int error, i; |
4904 | struct pci_dev *pdev = pcistr->pdev; | ||
4905 | |||
4906 | *ha_out = NULL; | ||
4964 | 4907 | ||
4965 | shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str)); | 4908 | shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str)); |
4966 | if (!shp) | 4909 | if (!shp) |
@@ -4968,13 +4911,13 @@ static int __init gdth_pci_probe_one(gdth_pci_str *pcistr, int ctr) | |||
4968 | ha = shost_priv(shp); | 4911 | ha = shost_priv(shp); |
4969 | 4912 | ||
4970 | error = -ENODEV; | 4913 | error = -ENODEV; |
4971 | if (!gdth_init_pci(&pcistr[ctr],ha)) | 4914 | if (!gdth_init_pci(pdev, pcistr, ha)) |
4972 | goto out_host_put; | 4915 | goto out_host_put; |
4973 | 4916 | ||
4974 | /* controller found and initialized */ | 4917 | /* controller found and initialized */ |
4975 | printk("Configuring GDT-PCI HA at %d/%d IRQ %u\n", | 4918 | printk("Configuring GDT-PCI HA at %d/%d IRQ %u\n", |
4976 | pcistr[ctr].pdev->bus->number, | 4919 | pdev->bus->number, |
4977 | PCI_SLOT(pcistr[ctr].pdev->devfn), | 4920 | PCI_SLOT(pdev->devfn), |
4978 | ha->irq); | 4921 | ha->irq); |
4979 | 4922 | ||
4980 | error = request_irq(ha->irq, gdth_interrupt, | 4923 | error = request_irq(ha->irq, gdth_interrupt, |
@@ -5019,7 +4962,7 @@ static int __init gdth_pci_probe_one(gdth_pci_str *pcistr, int ctr) | |||
5019 | 4962 | ||
5020 | ha->scratch_busy = FALSE; | 4963 | ha->scratch_busy = FALSE; |
5021 | ha->req_first = NULL; | 4964 | ha->req_first = NULL; |
5022 | ha->tid_cnt = pcistr[ctr].pdev->device >= 0x200 ? MAXID : MAX_HDRIVES; | 4965 | ha->tid_cnt = pdev->device >= 0x200 ? MAXID : MAX_HDRIVES; |
5023 | if (max_ids > 0 && max_ids < ha->tid_cnt) | 4966 | if (max_ids > 0 && max_ids < ha->tid_cnt) |
5024 | ha->tid_cnt = max_ids; | 4967 | ha->tid_cnt = max_ids; |
5025 | for (i = 0; i < GDTH_MAXCMDS; ++i) | 4968 | for (i = 0; i < GDTH_MAXCMDS; ++i) |
@@ -5039,16 +4982,16 @@ static int __init gdth_pci_probe_one(gdth_pci_str *pcistr, int ctr) | |||
5039 | /* 64-bit DMA only supported from FW >= x.43 */ | 4982 | /* 64-bit DMA only supported from FW >= x.43 */ |
5040 | if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) || | 4983 | if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) || |
5041 | !ha->dma64_support) { | 4984 | !ha->dma64_support) { |
5042 | if (pci_set_dma_mask(pcistr[ctr].pdev, DMA_32BIT_MASK)) { | 4985 | if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { |
5043 | printk(KERN_WARNING "GDT-PCI %d: " | 4986 | printk(KERN_WARNING "GDT-PCI %d: " |
5044 | "Unable to set 32-bit DMA\n", ha->hanum); | 4987 | "Unable to set 32-bit DMA\n", ha->hanum); |
5045 | goto out_free_coal_stat; | 4988 | goto out_free_coal_stat; |
5046 | } | 4989 | } |
5047 | } else { | 4990 | } else { |
5048 | shp->max_cmd_len = 16; | 4991 | shp->max_cmd_len = 16; |
5049 | if (!pci_set_dma_mask(pcistr[ctr].pdev, DMA_64BIT_MASK)) { | 4992 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { |
5050 | printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum); | 4993 | printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum); |
5051 | } else if (pci_set_dma_mask(pcistr[ctr].pdev, DMA_32BIT_MASK)) { | 4994 | } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { |
5052 | printk(KERN_WARNING "GDT-PCI %d: " | 4995 | printk(KERN_WARNING "GDT-PCI %d: " |
5053 | "Unable to set 64/32-bit DMA\n", ha->hanum); | 4996 | "Unable to set 64/32-bit DMA\n", ha->hanum); |
5054 | goto out_free_coal_stat; | 4997 | goto out_free_coal_stat; |
@@ -5062,13 +5005,17 @@ static int __init gdth_pci_probe_one(gdth_pci_str *pcistr, int ctr) | |||
5062 | spin_lock_init(&ha->smp_lock); | 5005 | spin_lock_init(&ha->smp_lock); |
5063 | gdth_enable_int(ha); | 5006 | gdth_enable_int(ha); |
5064 | 5007 | ||
5065 | error = scsi_add_host(shp, &pcistr[ctr].pdev->dev); | 5008 | error = scsi_add_host(shp, &pdev->dev); |
5066 | if (error) | 5009 | if (error) |
5067 | goto out_free_coal_stat; | 5010 | goto out_free_coal_stat; |
5068 | list_add_tail(&ha->list, &gdth_instances); | 5011 | list_add_tail(&ha->list, &gdth_instances); |
5069 | 5012 | ||
5013 | pci_set_drvdata(ha->pdev, ha); | ||
5014 | |||
5070 | scsi_scan_host(shp); | 5015 | scsi_scan_host(shp); |
5071 | 5016 | ||
5017 | *ha_out = ha; | ||
5018 | |||
5072 | return 0; | 5019 | return 0; |
5073 | 5020 | ||
5074 | out_free_coal_stat: | 5021 | out_free_coal_stat: |
@@ -5185,16 +5132,8 @@ static int __init gdth_init(void) | |||
5185 | 5132 | ||
5186 | #ifdef CONFIG_PCI | 5133 | #ifdef CONFIG_PCI |
5187 | /* scanning for PCI controllers */ | 5134 | /* scanning for PCI controllers */ |
5188 | { | 5135 | if (pci_register_driver(&gdth_pci_driver) == 0) |
5189 | gdth_pci_str pcistr[MAXHA]; | 5136 | gdth_pci_registered = true; |
5190 | int cnt,ctr; | ||
5191 | |||
5192 | cnt = gdth_search_pci(pcistr); | ||
5193 | printk("GDT-HA: Found %d PCI Storage RAID Controllers\n", cnt); | ||
5194 | gdth_sort_pci(pcistr,cnt); | ||
5195 | for (ctr = 0; ctr < cnt; ++ctr) | ||
5196 | gdth_pci_probe_one(pcistr, ctr); | ||
5197 | } | ||
5198 | #endif /* CONFIG_PCI */ | 5137 | #endif /* CONFIG_PCI */ |
5199 | 5138 | ||
5200 | TRACE2(("gdth_detect() %d controller detected\n", gdth_ctr_count)); | 5139 | TRACE2(("gdth_detect() %d controller detected\n", gdth_ctr_count)); |
@@ -5227,6 +5166,11 @@ static void __exit gdth_exit(void) | |||
5227 | del_timer_sync(&gdth_timer); | 5166 | del_timer_sync(&gdth_timer); |
5228 | #endif | 5167 | #endif |
5229 | 5168 | ||
5169 | #ifdef CONFIG_PCI | ||
5170 | if (gdth_pci_registered) | ||
5171 | pci_unregister_driver(&gdth_pci_driver); | ||
5172 | #endif | ||
5173 | |||
5230 | list_for_each_entry(ha, &gdth_instances, list) | 5174 | list_for_each_entry(ha, &gdth_instances, list) |
5231 | gdth_remove_one(ha); | 5175 | gdth_remove_one(ha); |
5232 | } | 5176 | } |
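[Editor's note] The gdth changes above replace the hand-rolled pci_get_device() scan (gdth_search_pci/gdth_search_dev/gdth_sort_pci) with a struct pci_driver registered through pci_register_driver(), so the PCI core invokes the probe and remove hooks per device. A minimal sketch of that hotplug-style pattern, with made-up IDs and a made-up private structure, not the gdth code itself:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

struct fake_ha {
        void __iomem *regs;             /* per-device state would live here */
};

static const struct pci_device_id fake_ids[] = {
        { PCI_VDEVICE(INTEL, 0x1234) }, /* illustrative ID only */
        { }                             /* terminate list */
};
MODULE_DEVICE_TABLE(pci, fake_ids);

static int fake_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct fake_ha *ha;
        int rc;

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;

        ha = kzalloc(sizeof(*ha), GFP_KERNEL);
        if (!ha) {
                pci_disable_device(pdev);
                return -ENOMEM;
        }

        pci_set_drvdata(pdev, ha);      /* handed back to us in remove() */
        return 0;
}

static void fake_remove(struct pci_dev *pdev)
{
        struct fake_ha *ha = pci_get_drvdata(pdev);

        kfree(ha);
        pci_disable_device(pdev);
}

static struct pci_driver fake_pci_driver = {
        .name     = "fake",
        .id_table = fake_ids,
        .probe    = fake_probe,
        .remove   = fake_remove,
};

static int __init fake_init(void)
{
        return pci_register_driver(&fake_pci_driver);
}

static void __exit fake_exit(void)
{
        pci_unregister_driver(&fake_pci_driver);
}

module_init(fake_init);
module_exit(fake_exit);
MODULE_LICENSE("GPL");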
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h index 26e4e92515e0..ca92476727cf 100644 --- a/drivers/scsi/gdth.h +++ b/drivers/scsi/gdth.h | |||
@@ -839,8 +839,6 @@ typedef struct { | |||
839 | struct pci_dev *pdev; | 839 | struct pci_dev *pdev; |
840 | ulong dpmem; /* DPRAM address */ | 840 | ulong dpmem; /* DPRAM address */ |
841 | ulong io; /* IO address */ | 841 | ulong io; /* IO address */ |
842 | ulong io_mm; /* IO address mem. mapped */ | ||
843 | unchar irq; /* IRQ */ | ||
844 | } gdth_pci_str; | 842 | } gdth_pci_str; |
845 | 843 | ||
846 | 844 | ||
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c index 91f85226d08f..ca7363752401 100644 --- a/drivers/scsi/gvp11.c +++ b/drivers/scsi/gvp11.c | |||
@@ -322,6 +322,9 @@ int __init gvp11_detect(struct scsi_host_template *tpnt) | |||
322 | */ | 322 | */ |
323 | regs.SASR = &(DMA(instance)->SASR); | 323 | regs.SASR = &(DMA(instance)->SASR); |
324 | regs.SCMD = &(DMA(instance)->SCMD); | 324 | regs.SCMD = &(DMA(instance)->SCMD); |
325 | HDATA(instance)->no_sync = 0xff; | ||
326 | HDATA(instance)->fast = 0; | ||
327 | HDATA(instance)->dma_mode = CTRL_DMA; | ||
325 | wd33c93_init(instance, regs, dma_setup, dma_stop, | 328 | wd33c93_init(instance, regs, dma_setup, dma_stop, |
326 | (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10 | 329 | (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10 |
327 | : WD33C93_FS_12_15); | 330 | : WD33C93_FS_12_15); |
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index ed7e0a1fc34d..1592640a87b5 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c | |||
@@ -347,7 +347,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) | |||
347 | shost->unchecked_isa_dma = sht->unchecked_isa_dma; | 347 | shost->unchecked_isa_dma = sht->unchecked_isa_dma; |
348 | shost->use_clustering = sht->use_clustering; | 348 | shost->use_clustering = sht->use_clustering; |
349 | shost->ordered_tag = sht->ordered_tag; | 349 | shost->ordered_tag = sht->ordered_tag; |
350 | shost->active_mode = sht->supported_mode; | ||
351 | 350 | ||
352 | if (sht->supported_mode == MODE_UNKNOWN) | 351 | if (sht->supported_mode == MODE_UNKNOWN) |
353 | /* means we didn't set it ... default to INITIATOR */ | 352 | /* means we didn't set it ... default to INITIATOR */ |
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index ff149ad6bc4e..beecda991682 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c | |||
@@ -338,7 +338,8 @@ static int iop_get_config_mv(struct hptiop_hba *hba, | |||
338 | req->header.size = | 338 | req->header.size = |
339 | cpu_to_le32(sizeof(struct hpt_iop_request_get_config)); | 339 | cpu_to_le32(sizeof(struct hpt_iop_request_get_config)); |
340 | req->header.result = cpu_to_le32(IOP_RESULT_PENDING); | 340 | req->header.result = cpu_to_le32(IOP_RESULT_PENDING); |
341 | req->header.context = cpu_to_le64(IOP_REQUEST_TYPE_GET_CONFIG<<5); | 341 | req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5); |
342 | req->header.context_hi32 = 0; | ||
342 | 343 | ||
343 | if (iop_send_sync_request_mv(hba, 0, 20000)) { | 344 | if (iop_send_sync_request_mv(hba, 0, 20000)) { |
344 | dprintk("Get config send cmd failed\n"); | 345 | dprintk("Get config send cmd failed\n"); |
@@ -392,7 +393,8 @@ static int iop_set_config_mv(struct hptiop_hba *hba, | |||
392 | req->header.size = | 393 | req->header.size = |
393 | cpu_to_le32(sizeof(struct hpt_iop_request_set_config)); | 394 | cpu_to_le32(sizeof(struct hpt_iop_request_set_config)); |
394 | req->header.result = cpu_to_le32(IOP_RESULT_PENDING); | 395 | req->header.result = cpu_to_le32(IOP_RESULT_PENDING); |
395 | req->header.context = cpu_to_le64(IOP_REQUEST_TYPE_SET_CONFIG<<5); | 396 | req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5); |
397 | req->header.context_hi32 = 0; | ||
396 | 398 | ||
397 | if (iop_send_sync_request_mv(hba, 0, 20000)) { | 399 | if (iop_send_sync_request_mv(hba, 0, 20000)) { |
398 | dprintk("Set config send cmd failed\n"); | 400 | dprintk("Set config send cmd failed\n"); |
@@ -903,7 +905,6 @@ static struct scsi_host_template driver_template = { | |||
903 | .eh_device_reset_handler = hptiop_reset, | 905 | .eh_device_reset_handler = hptiop_reset, |
904 | .eh_bus_reset_handler = hptiop_reset, | 906 | .eh_bus_reset_handler = hptiop_reset, |
905 | .info = hptiop_info, | 907 | .info = hptiop_info, |
906 | .unchecked_isa_dma = 0, | ||
907 | .emulated = 0, | 908 | .emulated = 0, |
908 | .use_clustering = ENABLE_CLUSTERING, | 909 | .use_clustering = ENABLE_CLUSTERING, |
909 | .proc_name = driver_name, | 910 | .proc_name = driver_name, |
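[Editor's note] The hptiop hunks above fix the request context setup: header.context is a 32-bit field, so it must be filled with cpu_to_le32(), and the high half goes into context_hi32 explicitly. A standalone sketch of why the 32/64-bit split matters; htole32() stands in for the kernel helper, and the struct and tag value are made up.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct fake_req_header {
        uint32_t context;        /* low 32 bits, little-endian on the wire */
        uint32_t context_hi32;   /* high 32 bits, little-endian on the wire */
};

int main(void)
{
        struct fake_req_header h;
        uint64_t tag = 0x20;     /* e.g. a request type shifted left by 5 */

        /* Correct: convert each 32-bit half separately.  Storing a
         * htole64() result in the 32-bit field truncates the swapped
         * 64-bit value to its low 32 bits, which on big-endian hosts no
         * longer contain the tag. */
        h.context      = htole32((uint32_t)tag);
        h.context_hi32 = htole32((uint32_t)(tag >> 32));

        printf("context=%#x hi32=%#x\n",
               (unsigned)le32toh(h.context),
               (unsigned)le32toh(h.context_hi32));
        return 0;
}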
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index 0cc8868ea35d..dbae3fdb8506 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c | |||
@@ -2581,8 +2581,8 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c | |||
2581 | /* Map the sense buffer into bus memory */ | 2581 | /* Map the sense buffer into bus memory */ |
2582 | dma_addr = dma_map_single(&host->pci_dev->dev, cmnd->sense_buffer, | 2582 | dma_addr = dma_map_single(&host->pci_dev->dev, cmnd->sense_buffer, |
2583 | SENSE_SIZE, DMA_FROM_DEVICE); | 2583 | SENSE_SIZE, DMA_FROM_DEVICE); |
2584 | cblk->senseptr = cpu_to_le32((u32)dma_addr); | 2584 | cblk->senseptr = (u32)dma_addr; |
2585 | cblk->senselen = cpu_to_le32(SENSE_SIZE); | 2585 | cblk->senselen = SENSE_SIZE; |
2586 | cmnd->SCp.ptr = (char *)(unsigned long)dma_addr; | 2586 | cmnd->SCp.ptr = (char *)(unsigned long)dma_addr; |
2587 | cblk->cdblen = cmnd->cmd_len; | 2587 | cblk->cdblen = cmnd->cmd_len; |
2588 | 2588 | ||
@@ -2606,7 +2606,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c | |||
2606 | dma_addr = dma_map_single(&host->pci_dev->dev, &cblk->sglist[0], | 2606 | dma_addr = dma_map_single(&host->pci_dev->dev, &cblk->sglist[0], |
2607 | sizeof(struct sg_entry) * TOTAL_SG_ENTRY, | 2607 | sizeof(struct sg_entry) * TOTAL_SG_ENTRY, |
2608 | DMA_BIDIRECTIONAL); | 2608 | DMA_BIDIRECTIONAL); |
2609 | cblk->bufptr = cpu_to_le32((u32)dma_addr); | 2609 | cblk->bufptr = (u32)dma_addr; |
2610 | cmnd->SCp.dma_handle = dma_addr; | 2610 | cmnd->SCp.dma_handle = dma_addr; |
2611 | 2611 | ||
2612 | cblk->sglen = nseg; | 2612 | cblk->sglen = nseg; |
@@ -2616,7 +2616,8 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c | |||
2616 | sg = &cblk->sglist[0]; | 2616 | sg = &cblk->sglist[0]; |
2617 | scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) { | 2617 | scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) { |
2618 | sg->data = cpu_to_le32((u32)sg_dma_address(sglist)); | 2618 | sg->data = cpu_to_le32((u32)sg_dma_address(sglist)); |
2619 | total_len += sg->len = cpu_to_le32((u32)sg_dma_len(sglist)); | 2619 | sg->len = cpu_to_le32((u32)sg_dma_len(sglist)); |
2620 | total_len += sg_dma_len(sglist); | ||
2620 | ++sg; | 2621 | ++sg; |
2621 | } | 2622 | } |
2622 | 2623 | ||
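[Editor's note] The second initio hunk above also untangles "total_len += sg->len = cpu_to_le32(...)": the running total must be kept in CPU byte order while only the descriptor field is byte-swapped. A standalone sketch of that separation; htole32() stands in for cpu_to_le32(), and the lengths are made up.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t lengths[] = { 512, 1024, 8 };  /* per-segment DMA lengths */
        uint32_t wire_len[3];                   /* device-visible, little-endian */
        uint32_t total_len = 0;
        unsigned int i;

        for (i = 0; i < 3; i++) {
                wire_len[i] = htole32(lengths[i]);  /* descriptor field */
                total_len  += lengths[i];           /* keep the total in CPU order */
        }

        printf("total_len = %u bytes\n", (unsigned)total_len);               /* 1544 */
        printf("first wire length decodes back to %u\n",
               (unsigned)le32toh(wire_len[0]));                              /* 512 */
        return 0;
}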
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 7ed568f180ae..7c615c70ec5c 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c | |||
@@ -2377,7 +2377,7 @@ ips_get_bios_version(ips_ha_t * ha, int intr) | |||
2377 | if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) | 2377 | if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) |
2378 | return; | 2378 | return; |
2379 | 2379 | ||
2380 | outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP); | 2380 | outl(1, ha->io_addr + IPS_REG_FLAP); |
2381 | if (ha->pcidev->revision == IPS_REVID_TROMBONE64) | 2381 | if (ha->pcidev->revision == IPS_REVID_TROMBONE64) |
2382 | udelay(25); /* 25 us */ | 2382 | udelay(25); /* 25 us */ |
2383 | 2383 | ||
@@ -2385,21 +2385,21 @@ ips_get_bios_version(ips_ha_t * ha, int intr) | |||
2385 | return; | 2385 | return; |
2386 | 2386 | ||
2387 | /* Get Major version */ | 2387 | /* Get Major version */ |
2388 | outl(cpu_to_le32(0x1FF), ha->io_addr + IPS_REG_FLAP); | 2388 | outl(0x1FF, ha->io_addr + IPS_REG_FLAP); |
2389 | if (ha->pcidev->revision == IPS_REVID_TROMBONE64) | 2389 | if (ha->pcidev->revision == IPS_REVID_TROMBONE64) |
2390 | udelay(25); /* 25 us */ | 2390 | udelay(25); /* 25 us */ |
2391 | 2391 | ||
2392 | major = inb(ha->io_addr + IPS_REG_FLDP); | 2392 | major = inb(ha->io_addr + IPS_REG_FLDP); |
2393 | 2393 | ||
2394 | /* Get Minor version */ | 2394 | /* Get Minor version */ |
2395 | outl(cpu_to_le32(0x1FE), ha->io_addr + IPS_REG_FLAP); | 2395 | outl(0x1FE, ha->io_addr + IPS_REG_FLAP); |
2396 | if (ha->pcidev->revision == IPS_REVID_TROMBONE64) | 2396 | if (ha->pcidev->revision == IPS_REVID_TROMBONE64) |
2397 | udelay(25); /* 25 us */ | 2397 | udelay(25); /* 25 us */ |
2398 | 2398 | ||
2399 | minor = inb(ha->io_addr + IPS_REG_FLDP); | 2399 | minor = inb(ha->io_addr + IPS_REG_FLDP); |
2400 | 2400 | ||
2401 | /* Get SubMinor version */ | 2401 | /* Get SubMinor version */ |
2402 | outl(cpu_to_le32(0x1FD), ha->io_addr + IPS_REG_FLAP); | 2402 | outl(0x1FD, ha->io_addr + IPS_REG_FLAP); |
2403 | if (ha->pcidev->revision == IPS_REVID_TROMBONE64) | 2403 | if (ha->pcidev->revision == IPS_REVID_TROMBONE64) |
2404 | udelay(25); /* 25 us */ | 2404 | udelay(25); /* 25 us */ |
2405 | 2405 | ||
@@ -3502,27 +3502,11 @@ ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr) | |||
3502 | static void | 3502 | static void |
3503 | ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count) | 3503 | ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count) |
3504 | { | 3504 | { |
3505 | int i; | 3505 | unsigned long flags; |
3506 | unsigned int min_cnt, xfer_cnt; | ||
3507 | char *cdata = (char *) data; | ||
3508 | unsigned char *buffer; | ||
3509 | unsigned long flags; | ||
3510 | struct scatterlist *sg = scsi_sglist(scmd); | ||
3511 | |||
3512 | for (i = 0, xfer_cnt = 0; | ||
3513 | (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) { | ||
3514 | min_cnt = min(count - xfer_cnt, sg[i].length); | ||
3515 | |||
3516 | /* kmap_atomic() ensures addressability of the data buffer.*/ | ||
3517 | /* local_irq_save() protects the KM_IRQ0 address slot. */ | ||
3518 | local_irq_save(flags); | ||
3519 | buffer = kmap_atomic(sg_page(&sg[i]), KM_IRQ0) + sg[i].offset; | ||
3520 | memcpy(buffer, &cdata[xfer_cnt], min_cnt); | ||
3521 | kunmap_atomic(buffer - sg[i].offset, KM_IRQ0); | ||
3522 | local_irq_restore(flags); | ||
3523 | 3506 | ||
3524 | xfer_cnt += min_cnt; | 3507 | local_irq_save(flags); |
3525 | } | 3508 | scsi_sg_copy_from_buffer(scmd, data, count); |
3509 | local_irq_restore(flags); | ||
3526 | } | 3510 | } |
3527 | 3511 | ||
3528 | /****************************************************************************/ | 3512 | /****************************************************************************/ |
@@ -3535,27 +3519,11 @@ ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count) | |||
3535 | static void | 3519 | static void |
3536 | ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count) | 3520 | ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count) |
3537 | { | 3521 | { |
3538 | int i; | 3522 | unsigned long flags; |
3539 | unsigned int min_cnt, xfer_cnt; | ||
3540 | char *cdata = (char *) data; | ||
3541 | unsigned char *buffer; | ||
3542 | unsigned long flags; | ||
3543 | struct scatterlist *sg = scsi_sglist(scmd); | ||
3544 | |||
3545 | for (i = 0, xfer_cnt = 0; | ||
3546 | (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) { | ||
3547 | min_cnt = min(count - xfer_cnt, sg[i].length); | ||
3548 | |||
3549 | /* kmap_atomic() ensures addressability of the data buffer.*/ | ||
3550 | /* local_irq_save() protects the KM_IRQ0 address slot. */ | ||
3551 | local_irq_save(flags); | ||
3552 | buffer = kmap_atomic(sg_page(&sg[i]), KM_IRQ0) + sg[i].offset; | ||
3553 | memcpy(&cdata[xfer_cnt], buffer, min_cnt); | ||
3554 | kunmap_atomic(buffer - sg[i].offset, KM_IRQ0); | ||
3555 | local_irq_restore(flags); | ||
3556 | 3523 | ||
3557 | xfer_cnt += min_cnt; | 3524 | local_irq_save(flags); |
3558 | } | 3525 | scsi_sg_copy_to_buffer(scmd, data, count); |
3526 | local_irq_restore(flags); | ||
3559 | } | 3527 | } |
3560 | 3528 | ||
3561 | /****************************************************************************/ | 3529 | /****************************************************************************/ |
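The two ips hunks above drop the open-coded kmap_atomic()/memcpy() scatterlist walk in favour of the scsi_sg_copy_from_buffer()/scsi_sg_copy_to_buffer() helpers, which traverse the command's SG list internally. A hedged usage sketch (example_buf_write/read are illustrative stand-ins for the driver's functions; the IRQ save/restore is kept around the call just as the hunks do):

#include <linux/irqflags.h>
#include <scsi/scsi_cmnd.h>

/* Copy a linear driver buffer into the command's scatter-gather data. */
static void example_buf_write(struct scsi_cmnd *scmd, void *data,
			      unsigned int count)
{
	unsigned long flags;

	/* The helper walks the SG list (kmapping highmem pages as it goes);
	 * the IRQ protection around it is retained as in the hunks above.
	 */
	local_irq_save(flags);
	scsi_sg_copy_from_buffer(scmd, data, count);
	local_irq_restore(flags);
}

/* And the reverse direction, e.g. for returning sense or inquiry data. */
static void example_buf_read(struct scsi_cmnd *scmd, void *data,
			     unsigned int count)
{
	unsigned long flags;

	local_irq_save(flags);
	scsi_sg_copy_to_buffer(scmd, data, count);
	local_irq_restore(flags);
}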
@@ -3696,9 +3664,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb) | |||
3696 | scb->cmd.basic_io.sg_count = scb->sg_len; | 3664 | scb->cmd.basic_io.sg_count = scb->sg_len; |
3697 | 3665 | ||
3698 | if (scb->cmd.basic_io.lba) | 3666 | if (scb->cmd.basic_io.lba) |
3699 | scb->cmd.basic_io.lba = | 3667 | le32_add_cpu(&scb->cmd.basic_io.lba, |
3700 | cpu_to_le32(le32_to_cpu | ||
3701 | (scb->cmd.basic_io.lba) + | ||
3702 | le16_to_cpu(scb->cmd.basic_io. | 3668 | le16_to_cpu(scb->cmd.basic_io. |
3703 | sector_count)); | 3669 | sector_count)); |
3704 | else | 3670 | else |
@@ -3744,9 +3710,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb) | |||
3744 | scb->cmd.basic_io.sg_count = scb->sg_len; | 3710 | scb->cmd.basic_io.sg_count = scb->sg_len; |
3745 | 3711 | ||
3746 | if (scb->cmd.basic_io.lba) | 3712 | if (scb->cmd.basic_io.lba) |
3747 | scb->cmd.basic_io.lba = | 3713 | le32_add_cpu(&scb->cmd.basic_io.lba, |
3748 | cpu_to_le32(le32_to_cpu | ||
3749 | (scb->cmd.basic_io.lba) + | ||
3750 | le16_to_cpu(scb->cmd.basic_io. | 3714 | le16_to_cpu(scb->cmd.basic_io. |
3751 | sector_count)); | 3715 | sector_count)); |
3752 | else | 3716 | else |
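Both basic_io hunks above collapse the cpu_to_le32(le32_to_cpu(x) + y) round trip into le32_add_cpu(), which adds a CPU-order value to a little-endian field in place. A small sketch with an illustrative command layout (struct basic_io here only mirrors the fields the hunk touches):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Only the two fields the hunk touches; the real command block has more. */
struct basic_io {
	__le32 lba;
	__le16 sector_count;
};

static void advance_lba(struct basic_io *io)
{
	/* Open-coded form being removed:
	 *   io->lba = cpu_to_le32(le32_to_cpu(io->lba) +
	 *                         le16_to_cpu(io->sector_count));
	 * Helper form adopted by the patch:
	 */
	le32_add_cpu(&io->lba, le16_to_cpu(io->sector_count));
}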
@@ -4888,7 +4852,7 @@ ips_init_copperhead(ips_ha_t * ha) | |||
4888 | return (0); | 4852 | return (0); |
4889 | 4853 | ||
4890 | /* setup CCCR */ | 4854 | /* setup CCCR */ |
4891 | outl(cpu_to_le32(0x1010), ha->io_addr + IPS_REG_CCCR); | 4855 | outl(0x1010, ha->io_addr + IPS_REG_CCCR); |
4892 | 4856 | ||
4893 | /* Enable busmastering */ | 4857 | /* Enable busmastering */ |
4894 | outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR); | 4858 | outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR); |
@@ -5270,12 +5234,12 @@ ips_statinit(ips_ha_t * ha) | |||
5270 | ha->adapt->p_status_tail = ha->adapt->status; | 5234 | ha->adapt->p_status_tail = ha->adapt->status; |
5271 | 5235 | ||
5272 | phys_status_start = ha->adapt->hw_status_start; | 5236 | phys_status_start = ha->adapt->hw_status_start; |
5273 | outl(cpu_to_le32(phys_status_start), ha->io_addr + IPS_REG_SQSR); | 5237 | outl(phys_status_start, ha->io_addr + IPS_REG_SQSR); |
5274 | outl(cpu_to_le32(phys_status_start + IPS_STATUS_Q_SIZE), | 5238 | outl(phys_status_start + IPS_STATUS_Q_SIZE, |
5275 | ha->io_addr + IPS_REG_SQER); | 5239 | ha->io_addr + IPS_REG_SQER); |
5276 | outl(cpu_to_le32(phys_status_start + IPS_STATUS_SIZE), | 5240 | outl(phys_status_start + IPS_STATUS_SIZE, |
5277 | ha->io_addr + IPS_REG_SQHR); | 5241 | ha->io_addr + IPS_REG_SQHR); |
5278 | outl(cpu_to_le32(phys_status_start), ha->io_addr + IPS_REG_SQTR); | 5242 | outl(phys_status_start, ha->io_addr + IPS_REG_SQTR); |
5279 | 5243 | ||
5280 | ha->adapt->hw_status_tail = phys_status_start; | 5244 | ha->adapt->hw_status_tail = phys_status_start; |
5281 | } | 5245 | } |
@@ -5332,7 +5296,7 @@ ips_statupd_copperhead(ips_ha_t * ha) | |||
5332 | ha->adapt->hw_status_tail = ha->adapt->hw_status_start; | 5296 | ha->adapt->hw_status_tail = ha->adapt->hw_status_start; |
5333 | } | 5297 | } |
5334 | 5298 | ||
5335 | outl(cpu_to_le32(ha->adapt->hw_status_tail), | 5299 | outl(ha->adapt->hw_status_tail, |
5336 | ha->io_addr + IPS_REG_SQTR); | 5300 | ha->io_addr + IPS_REG_SQTR); |
5337 | 5301 | ||
5338 | return (ha->adapt->p_status_tail->value); | 5302 | return (ha->adapt->p_status_tail->value); |
@@ -5434,8 +5398,8 @@ ips_issue_copperhead(ips_ha_t * ha, ips_scb_t * scb) | |||
5434 | } /* end if */ | 5398 | } /* end if */ |
5435 | } /* end while */ | 5399 | } /* end while */ |
5436 | 5400 | ||
5437 | outl(cpu_to_le32(scb->scb_busaddr), ha->io_addr + IPS_REG_CCSAR); | 5401 | outl(scb->scb_busaddr, ha->io_addr + IPS_REG_CCSAR); |
5438 | outw(cpu_to_le32(IPS_BIT_START_CMD), ha->io_addr + IPS_REG_CCCR); | 5402 | outw(IPS_BIT_START_CMD, ha->io_addr + IPS_REG_CCCR); |
5439 | 5403 | ||
5440 | return (IPS_SUCCESS); | 5404 | return (IPS_SUCCESS); |
5441 | } | 5405 | } |
@@ -5520,7 +5484,7 @@ ips_issue_i2o(ips_ha_t * ha, ips_scb_t * scb) | |||
5520 | ips_name, ha->host_num, scb->cmd.basic_io.command_id); | 5484 | ips_name, ha->host_num, scb->cmd.basic_io.command_id); |
5521 | } | 5485 | } |
5522 | 5486 | ||
5523 | outl(cpu_to_le32(scb->scb_busaddr), ha->io_addr + IPS_REG_I2O_INMSGQ); | 5487 | outl(scb->scb_busaddr, ha->io_addr + IPS_REG_I2O_INMSGQ); |
5524 | 5488 | ||
5525 | return (IPS_SUCCESS); | 5489 | return (IPS_SUCCESS); |
5526 | } | 5490 | } |
@@ -6412,7 +6376,7 @@ ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize, | |||
6412 | 6376 | ||
6413 | for (i = 0; i < buffersize; i++) { | 6377 | for (i = 0; i < buffersize; i++) { |
6414 | /* write a byte */ | 6378 | /* write a byte */ |
6415 | outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP); | 6379 | outl(i + offset, ha->io_addr + IPS_REG_FLAP); |
6416 | if (ha->pcidev->revision == IPS_REVID_TROMBONE64) | 6380 | if (ha->pcidev->revision == IPS_REVID_TROMBONE64) |
6417 | udelay(25); /* 25 us */ | 6381 | udelay(25); /* 25 us */ |
6418 | 6382 | ||
@@ -6597,7 +6561,7 @@ ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize, | |||
6597 | if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) | 6561 | if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55) |
6598 | return (1); | 6562 | return (1); |
6599 | 6563 | ||
6600 | outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP); | 6564 | outl(1, ha->io_addr + IPS_REG_FLAP); |
6601 | if (ha->pcidev->revision == IPS_REVID_TROMBONE64) | 6565 | if (ha->pcidev->revision == IPS_REVID_TROMBONE64) |
6602 | udelay(25); /* 25 us */ | 6566 | udelay(25); /* 25 us */ |
6603 | if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA) | 6567 | if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA) |
@@ -6606,7 +6570,7 @@ ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize, | |||
6606 | checksum = 0xff; | 6570 | checksum = 0xff; |
6607 | for (i = 2; i < buffersize; i++) { | 6571 | for (i = 2; i < buffersize; i++) { |
6608 | 6572 | ||
6609 | outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP); | 6573 | outl(i + offset, ha->io_addr + IPS_REG_FLAP); |
6610 | if (ha->pcidev->revision == IPS_REVID_TROMBONE64) | 6574 | if (ha->pcidev->revision == IPS_REVID_TROMBONE64) |
6611 | udelay(25); /* 25 us */ | 6575 | udelay(25); /* 25 us */ |
6612 | 6576 | ||
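The outl() changes scattered through ips.c (the FLAP, CCCR, status-queue and I2O register writes above) all drop cpu_to_le32() from the value argument: the port accessors take CPU-order values and perform any byte swapping themselves, so pre-swapping would double-swap on big-endian hosts. A minimal sketch of the corrected register write (post_reg() is illustrative; the register offset is passed in rather than guessed):

#include <linux/io.h>
#include <linux/types.h>

/* Post a 32-bit value to an I/O-port register: hand outl() the CPU-order
 * value and let the accessor deal with the bus byte order.
 */
static void post_reg(unsigned long io_addr, unsigned long reg, u32 value)
{
	outl(value, io_addr + reg);	/* no cpu_to_le32() here */
}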
@@ -6842,7 +6806,6 @@ ips_register_scsi(int index) | |||
6842 | sh->sg_tablesize = sh->hostt->sg_tablesize; | 6806 | sh->sg_tablesize = sh->hostt->sg_tablesize; |
6843 | sh->can_queue = sh->hostt->can_queue; | 6807 | sh->can_queue = sh->hostt->can_queue; |
6844 | sh->cmd_per_lun = sh->hostt->cmd_per_lun; | 6808 | sh->cmd_per_lun = sh->hostt->cmd_per_lun; |
6845 | sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma; | ||
6846 | sh->use_clustering = sh->hostt->use_clustering; | 6809 | sh->use_clustering = sh->hostt->use_clustering; |
6847 | sh->max_sectors = 128; | 6810 | sh->max_sectors = 128; |
6848 | 6811 | ||
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 8a178674cb18..72b9b2a0eba3 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
@@ -528,6 +528,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
528 | struct iscsi_session *session = conn->session; | 528 | struct iscsi_session *session = conn->session; |
529 | struct scsi_cmnd *sc = ctask->sc; | 529 | struct scsi_cmnd *sc = ctask->sc; |
530 | int datasn = be32_to_cpu(rhdr->datasn); | 530 | int datasn = be32_to_cpu(rhdr->datasn); |
531 | unsigned total_in_length = scsi_in(sc)->length; | ||
531 | 532 | ||
532 | iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); | 533 | iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); |
533 | if (tcp_conn->in.datalen == 0) | 534 | if (tcp_conn->in.datalen == 0) |
@@ -542,10 +543,10 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
542 | tcp_ctask->exp_datasn++; | 543 | tcp_ctask->exp_datasn++; |
543 | 544 | ||
544 | tcp_ctask->data_offset = be32_to_cpu(rhdr->offset); | 545 | tcp_ctask->data_offset = be32_to_cpu(rhdr->offset); |
545 | if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) { | 546 | if (tcp_ctask->data_offset + tcp_conn->in.datalen > total_in_length) { |
546 | debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n", | 547 | debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n", |
547 | __FUNCTION__, tcp_ctask->data_offset, | 548 | __FUNCTION__, tcp_ctask->data_offset, |
548 | tcp_conn->in.datalen, scsi_bufflen(sc)); | 549 | tcp_conn->in.datalen, total_in_length); |
549 | return ISCSI_ERR_DATA_OFFSET; | 550 | return ISCSI_ERR_DATA_OFFSET; |
550 | } | 551 | } |
551 | 552 | ||
@@ -558,8 +559,8 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
558 | 559 | ||
559 | if (res_count > 0 && | 560 | if (res_count > 0 && |
560 | (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW || | 561 | (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW || |
561 | res_count <= scsi_bufflen(sc))) | 562 | res_count <= total_in_length)) |
562 | scsi_set_resid(sc, res_count); | 563 | scsi_in(sc)->resid = res_count; |
563 | else | 564 | else |
564 | sc->result = (DID_BAD_TARGET << 16) | | 565 | sc->result = (DID_BAD_TARGET << 16) | |
565 | rhdr->cmd_status; | 566 | rhdr->cmd_status; |
@@ -670,11 +671,11 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
670 | r2t->data_length, session->max_burst); | 671 | r2t->data_length, session->max_burst); |
671 | 672 | ||
672 | r2t->data_offset = be32_to_cpu(rhdr->data_offset); | 673 | r2t->data_offset = be32_to_cpu(rhdr->data_offset); |
673 | if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) { | 674 | if (r2t->data_offset + r2t->data_length > scsi_out(ctask->sc)->length) { |
674 | iscsi_conn_printk(KERN_ERR, conn, | 675 | iscsi_conn_printk(KERN_ERR, conn, |
675 | "invalid R2T with data len %u at offset %u " | 676 | "invalid R2T with data len %u at offset %u " |
676 | "and total length %d\n", r2t->data_length, | 677 | "and total length %d\n", r2t->data_length, |
677 | r2t->data_offset, scsi_bufflen(ctask->sc)); | 678 | r2t->data_offset, scsi_out(ctask->sc)->length); |
678 | __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, | 679 | __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, |
679 | sizeof(void*)); | 680 | sizeof(void*)); |
680 | return ISCSI_ERR_DATALEN; | 681 | return ISCSI_ERR_DATALEN; |
@@ -771,6 +772,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) | |||
771 | if (tcp_conn->in.datalen) { | 772 | if (tcp_conn->in.datalen) { |
772 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 773 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
773 | struct hash_desc *rx_hash = NULL; | 774 | struct hash_desc *rx_hash = NULL; |
775 | struct scsi_data_buffer *sdb = scsi_in(ctask->sc); | ||
774 | 776 | ||
775 | /* | 777 | /* |
776 | * Setup copy of Data-In into the Scsi_Cmnd | 778 | * Setup copy of Data-In into the Scsi_Cmnd |
@@ -788,8 +790,8 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) | |||
788 | tcp_ctask->data_offset, | 790 | tcp_ctask->data_offset, |
789 | tcp_conn->in.datalen); | 791 | tcp_conn->in.datalen); |
790 | return iscsi_segment_seek_sg(&tcp_conn->in.segment, | 792 | return iscsi_segment_seek_sg(&tcp_conn->in.segment, |
791 | scsi_sglist(ctask->sc), | 793 | sdb->table.sgl, |
792 | scsi_sg_count(ctask->sc), | 794 | sdb->table.nents, |
793 | tcp_ctask->data_offset, | 795 | tcp_ctask->data_offset, |
794 | tcp_conn->in.datalen, | 796 | tcp_conn->in.datalen, |
795 | iscsi_tcp_process_data_in, | 797 | iscsi_tcp_process_data_in, |
@@ -1332,7 +1334,8 @@ iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask) | |||
1332 | return 0; | 1334 | return 0; |
1333 | 1335 | ||
1334 | /* If we have immediate data, attach a payload */ | 1336 | /* If we have immediate data, attach a payload */ |
1335 | err = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), scsi_sg_count(sc), | 1337 | err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl, |
1338 | scsi_out(sc)->table.nents, | ||
1336 | 0, ctask->imm_count); | 1339 | 0, ctask->imm_count); |
1337 | if (err) | 1340 | if (err) |
1338 | return err; | 1341 | return err; |
@@ -1386,6 +1389,7 @@ iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
1386 | { | 1389 | { |
1387 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 1390 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
1388 | struct scsi_cmnd *sc = ctask->sc; | 1391 | struct scsi_cmnd *sc = ctask->sc; |
1392 | struct scsi_data_buffer *sdb = scsi_out(sc); | ||
1389 | int rc = 0; | 1393 | int rc = 0; |
1390 | 1394 | ||
1391 | flush: | 1395 | flush: |
@@ -1412,9 +1416,8 @@ flush: | |||
1412 | ctask->itt, tcp_ctask->sent, ctask->data_count); | 1416 | ctask->itt, tcp_ctask->sent, ctask->data_count); |
1413 | 1417 | ||
1414 | iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr)); | 1418 | iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr)); |
1415 | rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), | 1419 | rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl, |
1416 | scsi_sg_count(sc), | 1420 | sdb->table.nents, tcp_ctask->sent, |
1417 | tcp_ctask->sent, | ||
1418 | ctask->data_count); | 1421 | ctask->data_count); |
1419 | if (rc) | 1422 | if (rc) |
1420 | goto fail; | 1423 | goto fail; |
@@ -1460,8 +1463,8 @@ flush: | |||
1460 | iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr, | 1463 | iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr, |
1461 | sizeof(struct iscsi_hdr)); | 1464 | sizeof(struct iscsi_hdr)); |
1462 | 1465 | ||
1463 | rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), | 1466 | rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl, |
1464 | scsi_sg_count(sc), | 1467 | sdb->table.nents, |
1465 | r2t->data_offset + r2t->sent, | 1468 | r2t->data_offset + r2t->sent, |
1466 | r2t->data_count); | 1469 | r2t->data_count); |
1467 | if (rc) | 1470 | if (rc) |
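The iscsi_tcp hunks above replace the single-buffer scsi_sglist()/scsi_bufflen()/scsi_set_resid() accessors with the per-direction scsi_in()/scsi_out() data buffers used for bidirectional commands; the Data-In path reads scsi_in(sc) and the solicited/unsolicited write paths read scsi_out(sc). A hedged sketch of picking the buffer for the direction being transferred (cmd_data_sg() is an illustrative helper, not part of the patch):

#include <scsi/scsi_cmnd.h>

/* Return the scatterlist, element count and total length for the direction
 * data is moving in.  The hunks above use scsi_in() even for ordinary
 * (non-bidi) reads, so this is assumed to work for uni-directional
 * commands as well.
 */
static struct scatterlist *cmd_data_sg(struct scsi_cmnd *sc, int to_device,
				       unsigned int *nents, unsigned int *len)
{
	struct scsi_data_buffer *sdb = to_device ? scsi_out(sc) : scsi_in(sc);

	*nents = sdb->table.nents;
	*len = sdb->length;
	return sdb->table.sgl;
}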
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index bdd7de7da39a..010c1b9b178c 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -137,6 +137,70 @@ static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len) | |||
137 | return 0; | 137 | return 0; |
138 | } | 138 | } |
139 | 139 | ||
140 | /* | ||
141 | * make an extended cdb AHS | ||
142 | */ | ||
143 | static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask) | ||
144 | { | ||
145 | struct scsi_cmnd *cmd = ctask->sc; | ||
146 | unsigned rlen, pad_len; | ||
147 | unsigned short ahslength; | ||
148 | struct iscsi_ecdb_ahdr *ecdb_ahdr; | ||
149 | int rc; | ||
150 | |||
151 | ecdb_ahdr = iscsi_next_hdr(ctask); | ||
152 | rlen = cmd->cmd_len - ISCSI_CDB_SIZE; | ||
153 | |||
154 | BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb)); | ||
155 | ahslength = rlen + sizeof(ecdb_ahdr->reserved); | ||
156 | |||
157 | pad_len = iscsi_padding(rlen); | ||
158 | |||
159 | rc = iscsi_add_hdr(ctask, sizeof(ecdb_ahdr->ahslength) + | ||
160 | sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len); | ||
161 | if (rc) | ||
162 | return rc; | ||
163 | |||
164 | if (pad_len) | ||
165 | memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len); | ||
166 | |||
167 | ecdb_ahdr->ahslength = cpu_to_be16(ahslength); | ||
168 | ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB; | ||
169 | ecdb_ahdr->reserved = 0; | ||
170 | memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen); | ||
171 | |||
172 | debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d " | ||
173 | "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n", | ||
174 | cmd->cmd_len, rlen, pad_len, ahslength, ctask->hdr_len); | ||
175 | |||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask) | ||
180 | { | ||
181 | struct scsi_cmnd *sc = ctask->sc; | ||
182 | struct iscsi_rlength_ahdr *rlen_ahdr; | ||
183 | int rc; | ||
184 | |||
185 | rlen_ahdr = iscsi_next_hdr(ctask); | ||
186 | rc = iscsi_add_hdr(ctask, sizeof(*rlen_ahdr)); | ||
187 | if (rc) | ||
188 | return rc; | ||
189 | |||
190 | rlen_ahdr->ahslength = | ||
191 | cpu_to_be16(sizeof(rlen_ahdr->read_length) + | ||
192 | sizeof(rlen_ahdr->reserved)); | ||
193 | rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH; | ||
194 | rlen_ahdr->reserved = 0; | ||
195 | rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length); | ||
196 | |||
197 | debug_scsi("bidi-in rlen_ahdr->read_length(%d) " | ||
198 | "rlen_ahdr->ahslength(%d)\n", | ||
199 | be32_to_cpu(rlen_ahdr->read_length), | ||
200 | be16_to_cpu(rlen_ahdr->ahslength)); | ||
201 | return 0; | ||
202 | } | ||
203 | |||
140 | /** | 204 | /** |
141 | * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu | 205 | * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu |
142 | * @ctask: iscsi cmd task | 206 | * @ctask: iscsi cmd task |
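iscsi_prep_ecdb_ahs() above rounds the CDB spill-over up to the 4-byte AHS alignment before adding it to the header length. The padding rule it relies on, written out as a stand-alone helper (pad4() is an illustrative stand-in for the driver's iscsi_padding() macro, assumed to compute the same value):

/* Bytes needed to round len up to the next 4-byte AHS boundary (0..3). */
static inline unsigned int pad4(unsigned int len)
{
	return (4 - (len & 3)) & 3;
}

/* e.g. a 6-byte extended-CDB tail gets 2 pad bytes; a multiple of 4 gets 0. */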
@@ -150,7 +214,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) | |||
150 | struct iscsi_session *session = conn->session; | 214 | struct iscsi_session *session = conn->session; |
151 | struct iscsi_cmd *hdr = ctask->hdr; | 215 | struct iscsi_cmd *hdr = ctask->hdr; |
152 | struct scsi_cmnd *sc = ctask->sc; | 216 | struct scsi_cmnd *sc = ctask->sc; |
153 | unsigned hdrlength; | 217 | unsigned hdrlength, cmd_len; |
154 | int rc; | 218 | int rc; |
155 | 219 | ||
156 | ctask->hdr_len = 0; | 220 | ctask->hdr_len = 0; |
@@ -161,17 +225,30 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) | |||
161 | hdr->flags = ISCSI_ATTR_SIMPLE; | 225 | hdr->flags = ISCSI_ATTR_SIMPLE; |
162 | int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); | 226 | int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); |
163 | hdr->itt = build_itt(ctask->itt, session->age); | 227 | hdr->itt = build_itt(ctask->itt, session->age); |
164 | hdr->data_length = cpu_to_be32(scsi_bufflen(sc)); | ||
165 | hdr->cmdsn = cpu_to_be32(session->cmdsn); | 228 | hdr->cmdsn = cpu_to_be32(session->cmdsn); |
166 | session->cmdsn++; | 229 | session->cmdsn++; |
167 | hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); | 230 | hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); |
168 | memcpy(hdr->cdb, sc->cmnd, sc->cmd_len); | 231 | cmd_len = sc->cmd_len; |
169 | if (sc->cmd_len < MAX_COMMAND_SIZE) | 232 | if (cmd_len < ISCSI_CDB_SIZE) |
170 | memset(&hdr->cdb[sc->cmd_len], 0, | 233 | memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len); |
171 | MAX_COMMAND_SIZE - sc->cmd_len); | 234 | else if (cmd_len > ISCSI_CDB_SIZE) { |
235 | rc = iscsi_prep_ecdb_ahs(ctask); | ||
236 | if (rc) | ||
237 | return rc; | ||
238 | cmd_len = ISCSI_CDB_SIZE; | ||
239 | } | ||
240 | memcpy(hdr->cdb, sc->cmnd, cmd_len); | ||
172 | 241 | ||
173 | ctask->imm_count = 0; | 242 | ctask->imm_count = 0; |
243 | if (scsi_bidi_cmnd(sc)) { | ||
244 | hdr->flags |= ISCSI_FLAG_CMD_READ; | ||
245 | rc = iscsi_prep_bidi_ahs(ctask); | ||
246 | if (rc) | ||
247 | return rc; | ||
248 | } | ||
174 | if (sc->sc_data_direction == DMA_TO_DEVICE) { | 249 | if (sc->sc_data_direction == DMA_TO_DEVICE) { |
250 | unsigned out_len = scsi_out(sc)->length; | ||
251 | hdr->data_length = cpu_to_be32(out_len); | ||
175 | hdr->flags |= ISCSI_FLAG_CMD_WRITE; | 252 | hdr->flags |= ISCSI_FLAG_CMD_WRITE; |
176 | /* | 253 | /* |
177 | * Write counters: | 254 | * Write counters: |
@@ -192,19 +269,19 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) | |||
192 | ctask->unsol_datasn = 0; | 269 | ctask->unsol_datasn = 0; |
193 | 270 | ||
194 | if (session->imm_data_en) { | 271 | if (session->imm_data_en) { |
195 | if (scsi_bufflen(sc) >= session->first_burst) | 272 | if (out_len >= session->first_burst) |
196 | ctask->imm_count = min(session->first_burst, | 273 | ctask->imm_count = min(session->first_burst, |
197 | conn->max_xmit_dlength); | 274 | conn->max_xmit_dlength); |
198 | else | 275 | else |
199 | ctask->imm_count = min(scsi_bufflen(sc), | 276 | ctask->imm_count = min(out_len, |
200 | conn->max_xmit_dlength); | 277 | conn->max_xmit_dlength); |
201 | hton24(hdr->dlength, ctask->imm_count); | 278 | hton24(hdr->dlength, ctask->imm_count); |
202 | } else | 279 | } else |
203 | zero_data(hdr->dlength); | 280 | zero_data(hdr->dlength); |
204 | 281 | ||
205 | if (!session->initial_r2t_en) { | 282 | if (!session->initial_r2t_en) { |
206 | ctask->unsol_count = min((session->first_burst), | 283 | ctask->unsol_count = min(session->first_burst, out_len) |
207 | (scsi_bufflen(sc))) - ctask->imm_count; | 284 | - ctask->imm_count; |
208 | ctask->unsol_offset = ctask->imm_count; | 285 | ctask->unsol_offset = ctask->imm_count; |
209 | } | 286 | } |
210 | 287 | ||
@@ -214,6 +291,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) | |||
214 | } else { | 291 | } else { |
215 | hdr->flags |= ISCSI_FLAG_CMD_FINAL; | 292 | hdr->flags |= ISCSI_FLAG_CMD_FINAL; |
216 | zero_data(hdr->dlength); | 293 | zero_data(hdr->dlength); |
294 | hdr->data_length = cpu_to_be32(scsi_in(sc)->length); | ||
217 | 295 | ||
218 | if (sc->sc_data_direction == DMA_FROM_DEVICE) | 296 | if (sc->sc_data_direction == DMA_FROM_DEVICE) |
219 | hdr->flags |= ISCSI_FLAG_CMD_READ; | 297 | hdr->flags |= ISCSI_FLAG_CMD_READ; |
@@ -232,10 +310,12 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) | |||
232 | return EIO; | 310 | return EIO; |
233 | 311 | ||
234 | conn->scsicmd_pdus_cnt++; | 312 | conn->scsicmd_pdus_cnt++; |
235 | debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d " | 313 | debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x " |
236 | "cmdsn %d win %d]\n", | 314 | "len %d bidi_len %d cmdsn %d win %d]\n", |
237 | sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", | 315 | scsi_bidi_cmnd(sc) ? "bidirectional" : |
238 | conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc), | 316 | sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", |
317 | conn->id, sc, sc->cmnd[0], ctask->itt, | ||
318 | scsi_bufflen(sc), scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0, | ||
239 | session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); | 319 | session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); |
240 | return 0; | 320 | return 0; |
241 | } | 321 | } |
@@ -298,7 +378,12 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, | |||
298 | conn->session->tt->cleanup_cmd_task(conn, ctask); | 378 | conn->session->tt->cleanup_cmd_task(conn, ctask); |
299 | 379 | ||
300 | sc->result = err; | 380 | sc->result = err; |
301 | scsi_set_resid(sc, scsi_bufflen(sc)); | 381 | if (!scsi_bidi_cmnd(sc)) |
382 | scsi_set_resid(sc, scsi_bufflen(sc)); | ||
383 | else { | ||
384 | scsi_out(sc)->resid = scsi_out(sc)->length; | ||
385 | scsi_in(sc)->resid = scsi_in(sc)->length; | ||
386 | } | ||
302 | if (conn->ctask == ctask) | 387 | if (conn->ctask == ctask) |
303 | conn->ctask = NULL; | 388 | conn->ctask = NULL; |
304 | /* release ref from queuecommand */ | 389 | /* release ref from queuecommand */ |
@@ -433,6 +518,18 @@ invalid_datalen: | |||
433 | min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE)); | 518 | min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE)); |
434 | } | 519 | } |
435 | 520 | ||
521 | if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW | | ||
522 | ISCSI_FLAG_CMD_BIDI_OVERFLOW)) { | ||
523 | int res_count = be32_to_cpu(rhdr->bi_residual_count); | ||
524 | |||
525 | if (scsi_bidi_cmnd(sc) && res_count > 0 && | ||
526 | (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW || | ||
527 | res_count <= scsi_in(sc)->length)) | ||
528 | scsi_in(sc)->resid = res_count; | ||
529 | else | ||
530 | sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; | ||
531 | } | ||
532 | |||
436 | if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW | | 533 | if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW | |
437 | ISCSI_FLAG_CMD_OVERFLOW)) { | 534 | ISCSI_FLAG_CMD_OVERFLOW)) { |
438 | int res_count = be32_to_cpu(rhdr->residual_count); | 535 | int res_count = be32_to_cpu(rhdr->residual_count); |
@@ -440,13 +537,11 @@ invalid_datalen: | |||
440 | if (res_count > 0 && | 537 | if (res_count > 0 && |
441 | (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW || | 538 | (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW || |
442 | res_count <= scsi_bufflen(sc))) | 539 | res_count <= scsi_bufflen(sc))) |
540 | /* write side for bidi or uni-io set_resid */ | ||
443 | scsi_set_resid(sc, res_count); | 541 | scsi_set_resid(sc, res_count); |
444 | else | 542 | else |
445 | sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; | 543 | sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; |
446 | } else if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW | | 544 | } |
447 | ISCSI_FLAG_CMD_BIDI_OVERFLOW)) | ||
448 | sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; | ||
449 | |||
450 | out: | 545 | out: |
451 | debug_scsi("done [sc %lx res %d itt 0x%x]\n", | 546 | debug_scsi("done [sc %lx res %d itt 0x%x]\n", |
452 | (long)sc, sc->result, ctask->itt); | 547 | (long)sc, sc->result, ctask->itt); |
@@ -1102,7 +1197,12 @@ reject: | |||
1102 | fault: | 1197 | fault: |
1103 | spin_unlock(&session->lock); | 1198 | spin_unlock(&session->lock); |
1104 | debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason); | 1199 | debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason); |
1105 | scsi_set_resid(sc, scsi_bufflen(sc)); | 1200 | if (!scsi_bidi_cmnd(sc)) |
1201 | scsi_set_resid(sc, scsi_bufflen(sc)); | ||
1202 | else { | ||
1203 | scsi_out(sc)->resid = scsi_out(sc)->length; | ||
1204 | scsi_in(sc)->resid = scsi_in(sc)->length; | ||
1205 | } | ||
1106 | sc->scsi_done(sc); | 1206 | sc->scsi_done(sc); |
1107 | spin_lock(host->host_lock); | 1207 | spin_lock(host->host_lock); |
1108 | return 0; | 1208 | return 0; |
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index a4811e4106df..744f06d04a36 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c | |||
@@ -691,7 +691,7 @@ static int sas_discover_sata_dev(struct domain_device *dev) | |||
691 | /* incomplete response */ | 691 | /* incomplete response */ |
692 | SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to " | 692 | SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to " |
693 | "dev %llx\n", SAS_ADDR(dev->sas_addr)); | 693 | "dev %llx\n", SAS_ADDR(dev->sas_addr)); |
694 | if (!le16_to_cpu(identify_x[83] & (1<<6))) | 694 | if (!(identify_x[83] & cpu_to_le16(1<<6))) |
695 | goto cont1; | 695 | goto cont1; |
696 | res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES, | 696 | res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES, |
697 | ATA_FEATURE_PUP_STBY_SPIN_UP, | 697 | ATA_FEATURE_PUP_STBY_SPIN_UP, |
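The sas_ata fix above tests a bit of a little-endian IDENTIFY word by converting the constant instead of masking the raw __le16 with a CPU-order constant, which would test the wrong bit on big-endian hosts. A small sketch of the endian-safe test (word 83 bit 6 is the spin-up-after-power-up capability the hunk checks; identify_x is assumed to be the raw __le16 IDENTIFY buffer):

#include <linux/types.h>
#include <asm/byteorder.h>

/* identify_x is the raw IDENTIFY DEVICE data, an array of __le16 words. */
static int needs_set_features_spinup(const __le16 *identify_x)
{
	/* Convert the constant, not the masked value, so the same bit is
	 * tested whatever the host byte order.
	 */
	return (identify_x[83] & cpu_to_le16(1 << 6)) != 0;
}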
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 1f8241563c6c..601ec5b6a7f6 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c | |||
@@ -24,6 +24,8 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/kthread.h> | 26 | #include <linux/kthread.h> |
27 | #include <linux/firmware.h> | ||
28 | #include <linux/ctype.h> | ||
27 | 29 | ||
28 | #include "sas_internal.h" | 30 | #include "sas_internal.h" |
29 | 31 | ||
@@ -1064,6 +1066,45 @@ void sas_target_destroy(struct scsi_target *starget) | |||
1064 | return; | 1066 | return; |
1065 | } | 1067 | } |
1066 | 1068 | ||
1069 | static void sas_parse_addr(u8 *sas_addr, const char *p) | ||
1070 | { | ||
1071 | int i; | ||
1072 | for (i = 0; i < SAS_ADDR_SIZE; i++) { | ||
1073 | u8 h, l; | ||
1074 | if (!*p) | ||
1075 | break; | ||
1076 | h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10; | ||
1077 | p++; | ||
1078 | l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10; | ||
1079 | p++; | ||
1080 | sas_addr[i] = (h<<4) | l; | ||
1081 | } | ||
1082 | } | ||
1083 | |||
1084 | #define SAS_STRING_ADDR_SIZE 16 | ||
1085 | |||
1086 | int sas_request_addr(struct Scsi_Host *shost, u8 *addr) | ||
1087 | { | ||
1088 | int res; | ||
1089 | const struct firmware *fw; | ||
1090 | |||
1091 | res = request_firmware(&fw, "sas_addr", &shost->shost_gendev); | ||
1092 | if (res) | ||
1093 | return res; | ||
1094 | |||
1095 | if (fw->size < SAS_STRING_ADDR_SIZE) { | ||
1096 | res = -ENODEV; | ||
1097 | goto out; | ||
1098 | } | ||
1099 | |||
1100 | sas_parse_addr(addr, fw->data); | ||
1101 | |||
1102 | out: | ||
1103 | release_firmware(fw); | ||
1104 | return res; | ||
1105 | } | ||
1106 | EXPORT_SYMBOL_GPL(sas_request_addr); | ||
1107 | |||
1067 | EXPORT_SYMBOL_GPL(sas_queuecommand); | 1108 | EXPORT_SYMBOL_GPL(sas_queuecommand); |
1068 | EXPORT_SYMBOL_GPL(sas_target_alloc); | 1109 | EXPORT_SYMBOL_GPL(sas_target_alloc); |
1069 | EXPORT_SYMBOL_GPL(sas_slave_configure); | 1110 | EXPORT_SYMBOL_GPL(sas_slave_configure); |
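sas_request_addr() above gives a libsas LLDD with no usable NVRAM a way to obtain its SAS address from userspace: request_firmware() fetches a blob named "sas_addr", whose first 16 hex characters sas_parse_addr() packs into the 8-byte address. A hedged usage sketch from a driver attach path (my_host_attach() is illustrative, and the declaration is assumed to be exported through scsi/libsas.h):

#include <scsi/libsas.h>

/* Fall back to a firmware-provided address when the adapter exposes none. */
static int my_host_attach(struct Scsi_Host *shost, u8 *sas_addr)
{
	int err;

	err = sas_request_addr(shost, sas_addr);
	if (err)
		/* no "sas_addr" blob was supplied via the firmware loader */
		return err;

	return 0;
}

The blob itself is plain text: the address written as 16 hex digits, placed wherever the system's firmware loader looks for it (commonly /lib/firmware/sas_addr, depending on the distribution's firmware agent).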
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 2ab2d24dcc15..ec0b0f6e5e1a 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -23,7 +23,7 @@ | |||
23 | 23 | ||
24 | struct lpfc_sli2_slim; | 24 | struct lpfc_sli2_slim; |
25 | 25 | ||
26 | #define LPFC_MAX_TARGET 256 /* max number of targets supported */ | 26 | #define LPFC_MAX_TARGET 4096 /* max number of targets supported */ |
27 | #define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els | 27 | #define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els |
28 | requests */ | 28 | requests */ |
29 | #define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact | 29 | #define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact |
@@ -268,7 +268,6 @@ struct lpfc_vport { | |||
268 | #define FC_NLP_MORE 0x40 /* More node to process in node tbl */ | 268 | #define FC_NLP_MORE 0x40 /* More node to process in node tbl */ |
269 | #define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */ | 269 | #define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */ |
270 | #define FC_FABRIC 0x100 /* We are fabric attached */ | 270 | #define FC_FABRIC 0x100 /* We are fabric attached */ |
271 | #define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */ | ||
272 | #define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */ | 271 | #define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */ |
273 | #define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ | 272 | #define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ |
274 | #define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ | 273 | #define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ |
@@ -433,8 +432,6 @@ struct lpfc_hba { | |||
433 | 432 | ||
434 | uint32_t fc_eventTag; /* event tag for link attention */ | 433 | uint32_t fc_eventTag; /* event tag for link attention */ |
435 | 434 | ||
436 | |||
437 | struct timer_list fc_estabtmo; /* link establishment timer */ | ||
438 | /* These fields used to be binfo */ | 435 | /* These fields used to be binfo */ |
439 | uint32_t fc_pref_DID; /* preferred D_ID */ | 436 | uint32_t fc_pref_DID; /* preferred D_ID */ |
440 | uint8_t fc_pref_ALPA; /* preferred AL_PA */ | 437 | uint8_t fc_pref_ALPA; /* preferred AL_PA */ |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index b12a841703ca..74c9fc204211 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -1954,7 +1954,9 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
1954 | (phba->sysfs_mbox.mbox->mb.mbxCommand != | 1954 | (phba->sysfs_mbox.mbox->mb.mbxCommand != |
1955 | MBX_DUMP_MEMORY && | 1955 | MBX_DUMP_MEMORY && |
1956 | phba->sysfs_mbox.mbox->mb.mbxCommand != | 1956 | phba->sysfs_mbox.mbox->mb.mbxCommand != |
1957 | MBX_RESTART)) { | 1957 | MBX_RESTART && |
1958 | phba->sysfs_mbox.mbox->mb.mbxCommand != | ||
1959 | MBX_WRITE_VPARMS)) { | ||
1958 | sysfs_mbox_idle(phba); | 1960 | sysfs_mbox_idle(phba); |
1959 | spin_unlock_irq(&phba->hbalock); | 1961 | spin_unlock_irq(&phba->hbalock); |
1960 | return -EPERM; | 1962 | return -EPERM; |
@@ -1962,7 +1964,11 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
1962 | 1964 | ||
1963 | phba->sysfs_mbox.mbox->vport = vport; | 1965 | phba->sysfs_mbox.mbox->vport = vport; |
1964 | 1966 | ||
1965 | if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { | 1967 | /* Don't allow mailbox commands to be sent when blocked |
1968 | * or when in the middle of discovery | ||
1969 | */ | ||
1970 | if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO || | ||
1971 | vport->fc_flag & FC_NDISC_ACTIVE) { | ||
1966 | sysfs_mbox_idle(phba); | 1972 | sysfs_mbox_idle(phba); |
1967 | spin_unlock_irq(&phba->hbalock); | 1973 | spin_unlock_irq(&phba->hbalock); |
1968 | return -EAGAIN; | 1974 | return -EAGAIN; |
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 3d0ccd9b341d..153afae567b5 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c | |||
@@ -63,7 +63,7 @@ lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, | |||
63 | { | 63 | { |
64 | if (!mp) { | 64 | if (!mp) { |
65 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | 65 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
66 | "0146 Ignoring unsolicted CT No HBQ " | 66 | "0146 Ignoring unsolicited CT No HBQ " |
67 | "status = x%x\n", | 67 | "status = x%x\n", |
68 | piocbq->iocb.ulpStatus); | 68 | piocbq->iocb.ulpStatus); |
69 | } | 69 | } |
@@ -438,7 +438,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) | |||
438 | (!(vport->ct_flags & FC_CT_RFF_ID)) || | 438 | (!(vport->ct_flags & FC_CT_RFF_ID)) || |
439 | (!vport->cfg_restrict_login)) { | 439 | (!vport->cfg_restrict_login)) { |
440 | ndlp = lpfc_setup_disc_node(vport, Did); | 440 | ndlp = lpfc_setup_disc_node(vport, Did); |
441 | if (ndlp) { | 441 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { |
442 | lpfc_debugfs_disc_trc(vport, | 442 | lpfc_debugfs_disc_trc(vport, |
443 | LPFC_DISC_TRC_CT, | 443 | LPFC_DISC_TRC_CT, |
444 | "Parse GID_FTrsp: " | 444 | "Parse GID_FTrsp: " |
@@ -543,7 +543,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
543 | struct lpfc_dmabuf *outp; | 543 | struct lpfc_dmabuf *outp; |
544 | struct lpfc_sli_ct_request *CTrsp; | 544 | struct lpfc_sli_ct_request *CTrsp; |
545 | struct lpfc_nodelist *ndlp; | 545 | struct lpfc_nodelist *ndlp; |
546 | int rc, retry; | 546 | int rc; |
547 | 547 | ||
548 | /* First save ndlp, before we overwrite it */ | 548 | /* First save ndlp, before we overwrite it */ |
549 | ndlp = cmdiocb->context_un.ndlp; | 549 | ndlp = cmdiocb->context_un.ndlp; |
@@ -563,45 +563,29 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
563 | if (vport->load_flag & FC_UNLOADING) | 563 | if (vport->load_flag & FC_UNLOADING) |
564 | goto out; | 564 | goto out; |
565 | 565 | ||
566 | if (lpfc_els_chk_latt(vport) || lpfc_error_lost_link(irsp)) { | 566 | if (lpfc_els_chk_latt(vport)) { |
567 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 567 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
568 | "0216 Link event during NS query\n"); | 568 | "0216 Link event during NS query\n"); |
569 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | 569 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); |
570 | goto out; | 570 | goto out; |
571 | } | 571 | } |
572 | 572 | if (lpfc_error_lost_link(irsp)) { | |
573 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | ||
574 | "0226 NS query failed due to link event\n"); | ||
575 | goto out; | ||
576 | } | ||
573 | if (irsp->ulpStatus) { | 577 | if (irsp->ulpStatus) { |
574 | /* Check for retry */ | 578 | /* Check for retry */ |
575 | if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { | 579 | if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { |
576 | retry = 1; | 580 | if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT || |
577 | if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { | 581 | irsp->un.ulpWord[4] != IOERR_NO_RESOURCES) |
578 | switch (irsp->un.ulpWord[4]) { | ||
579 | case IOERR_NO_RESOURCES: | ||
580 | /* We don't increment the retry | ||
581 | * count for this case. | ||
582 | */ | ||
583 | break; | ||
584 | case IOERR_LINK_DOWN: | ||
585 | case IOERR_SLI_ABORTED: | ||
586 | case IOERR_SLI_DOWN: | ||
587 | retry = 0; | ||
588 | break; | ||
589 | default: | ||
590 | vport->fc_ns_retry++; | ||
591 | } | ||
592 | } | ||
593 | else | ||
594 | vport->fc_ns_retry++; | 582 | vport->fc_ns_retry++; |
595 | 583 | ||
596 | if (retry) { | 584 | /* CT command is being retried */ |
597 | /* CT command is being retried */ | 585 | rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, |
598 | rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, | ||
599 | vport->fc_ns_retry, 0); | 586 | vport->fc_ns_retry, 0); |
600 | if (rc == 0) { | 587 | if (rc == 0) |
601 | /* success */ | 588 | goto out; |
602 | goto out; | ||
603 | } | ||
604 | } | ||
605 | } | 589 | } |
606 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | 590 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); |
607 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 591 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
@@ -780,7 +764,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
780 | 764 | ||
781 | /* This is a target port, unregistered port, or the GFF_ID failed */ | 765 | /* This is a target port, unregistered port, or the GFF_ID failed */ |
782 | ndlp = lpfc_setup_disc_node(vport, did); | 766 | ndlp = lpfc_setup_disc_node(vport, did); |
783 | if (ndlp) { | 767 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { |
784 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 768 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
785 | "0242 Process x%x GFF " | 769 | "0242 Process x%x GFF " |
786 | "NameServer Rsp Data: x%x x%x x%x\n", | 770 | "NameServer Rsp Data: x%x x%x x%x\n", |
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 783d1eea13ef..90272e65957a 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c | |||
@@ -503,6 +503,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) | |||
503 | ndlp->nlp_sid); | 503 | ndlp->nlp_sid); |
504 | if (ndlp->nlp_type & NLP_FCP_INITIATOR) | 504 | if (ndlp->nlp_type & NLP_FCP_INITIATOR) |
505 | len += snprintf(buf+len, size-len, "FCP_INITIATOR "); | 505 | len += snprintf(buf+len, size-len, "FCP_INITIATOR "); |
506 | len += snprintf(buf+len, size-len, "usgmap:%x ", | ||
507 | ndlp->nlp_usg_map); | ||
506 | len += snprintf(buf+len, size-len, "refcnt:%x", | 508 | len += snprintf(buf+len, size-len, "refcnt:%x", |
507 | atomic_read(&ndlp->kref.refcount)); | 509 | atomic_read(&ndlp->kref.refcount)); |
508 | len += snprintf(buf+len, size-len, "\n"); | 510 | len += snprintf(buf+len, size-len, "\n"); |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index cbb68a942255..886c5f1b11d2 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -719,9 +719,9 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba) | |||
719 | if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR && | 719 | if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR && |
720 | icmd->un.elsreq64.bdl.ulpIoTag32) { | 720 | icmd->un.elsreq64.bdl.ulpIoTag32) { |
721 | ndlp = (struct lpfc_nodelist *)(iocb->context1); | 721 | ndlp = (struct lpfc_nodelist *)(iocb->context1); |
722 | if (ndlp && (ndlp->nlp_DID == Fabric_DID)) { | 722 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) && |
723 | (ndlp->nlp_DID == Fabric_DID)) | ||
723 | lpfc_sli_issue_abort_iotag(phba, pring, iocb); | 724 | lpfc_sli_issue_abort_iotag(phba, pring, iocb); |
724 | } | ||
725 | } | 725 | } |
726 | } | 726 | } |
727 | spin_unlock_irq(&phba->hbalock); | 727 | spin_unlock_irq(&phba->hbalock); |
@@ -829,7 +829,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | |||
829 | struct fc_rport *rport; | 829 | struct fc_rport *rport; |
830 | struct serv_parm *sp; | 830 | struct serv_parm *sp; |
831 | uint8_t name[sizeof(struct lpfc_name)]; | 831 | uint8_t name[sizeof(struct lpfc_name)]; |
832 | uint32_t rc; | 832 | uint32_t rc, keepDID = 0; |
833 | 833 | ||
834 | /* Fabric nodes can have the same WWPN so we don't bother searching | 834 | /* Fabric nodes can have the same WWPN so we don't bother searching |
835 | * by WWPN. Just return the ndlp that was given to us. | 835 | * by WWPN. Just return the ndlp that was given to us. |
@@ -858,11 +858,17 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | |||
858 | return ndlp; | 858 | return ndlp; |
859 | lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); | 859 | lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); |
860 | } else if (!NLP_CHK_NODE_ACT(new_ndlp)) { | 860 | } else if (!NLP_CHK_NODE_ACT(new_ndlp)) { |
861 | rc = memcmp(&ndlp->nlp_portname, name, | ||
862 | sizeof(struct lpfc_name)); | ||
863 | if (!rc) | ||
864 | return ndlp; | ||
861 | new_ndlp = lpfc_enable_node(vport, new_ndlp, | 865 | new_ndlp = lpfc_enable_node(vport, new_ndlp, |
862 | NLP_STE_UNUSED_NODE); | 866 | NLP_STE_UNUSED_NODE); |
863 | if (!new_ndlp) | 867 | if (!new_ndlp) |
864 | return ndlp; | 868 | return ndlp; |
865 | } | 869 | keepDID = new_ndlp->nlp_DID; |
870 | } else | ||
871 | keepDID = new_ndlp->nlp_DID; | ||
866 | 872 | ||
867 | lpfc_unreg_rpi(vport, new_ndlp); | 873 | lpfc_unreg_rpi(vport, new_ndlp); |
868 | new_ndlp->nlp_DID = ndlp->nlp_DID; | 874 | new_ndlp->nlp_DID = ndlp->nlp_DID; |
@@ -893,12 +899,24 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | |||
893 | } | 899 | } |
894 | new_ndlp->nlp_type = ndlp->nlp_type; | 900 | new_ndlp->nlp_type = ndlp->nlp_type; |
895 | } | 901 | } |
902 | /* We shall actually free the ndlp with both nlp_DID and | ||
903 | * nlp_portname fields equals 0 to avoid any ndlp on the | ||
904 | * nodelist never to be used. | ||
905 | */ | ||
906 | if (ndlp->nlp_DID == 0) { | ||
907 | spin_lock_irq(&phba->ndlp_lock); | ||
908 | NLP_SET_FREE_REQ(ndlp); | ||
909 | spin_unlock_irq(&phba->ndlp_lock); | ||
910 | } | ||
896 | 911 | ||
912 | /* Two ndlps cannot have the same did on the nodelist */ | ||
913 | ndlp->nlp_DID = keepDID; | ||
897 | lpfc_drop_node(vport, ndlp); | 914 | lpfc_drop_node(vport, ndlp); |
898 | } | 915 | } |
899 | else { | 916 | else { |
900 | lpfc_unreg_rpi(vport, ndlp); | 917 | lpfc_unreg_rpi(vport, ndlp); |
901 | ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ | 918 | /* Two ndlps cannot have the same did */ |
919 | ndlp->nlp_DID = keepDID; | ||
902 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | 920 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
903 | } | 921 | } |
904 | return new_ndlp; | 922 | return new_ndlp; |
@@ -2091,7 +2109,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2091 | } | 2109 | } |
2092 | 2110 | ||
2093 | phba->fc_stat.elsXmitRetry++; | 2111 | phba->fc_stat.elsXmitRetry++; |
2094 | if (ndlp && delay) { | 2112 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) { |
2095 | phba->fc_stat.elsDelayRetry++; | 2113 | phba->fc_stat.elsDelayRetry++; |
2096 | ndlp->nlp_retry = cmdiocb->retry; | 2114 | ndlp->nlp_retry = cmdiocb->retry; |
2097 | 2115 | ||
@@ -2121,7 +2139,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2121 | lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); | 2139 | lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); |
2122 | return 1; | 2140 | return 1; |
2123 | case ELS_CMD_PLOGI: | 2141 | case ELS_CMD_PLOGI: |
2124 | if (ndlp) { | 2142 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { |
2125 | ndlp->nlp_prev_state = ndlp->nlp_state; | 2143 | ndlp->nlp_prev_state = ndlp->nlp_state; |
2126 | lpfc_nlp_set_state(vport, ndlp, | 2144 | lpfc_nlp_set_state(vport, ndlp, |
2127 | NLP_STE_PLOGI_ISSUE); | 2145 | NLP_STE_PLOGI_ISSUE); |
@@ -2302,7 +2320,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
2302 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 2320 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
2303 | kfree(mp); | 2321 | kfree(mp); |
2304 | mempool_free(pmb, phba->mbox_mem_pool); | 2322 | mempool_free(pmb, phba->mbox_mem_pool); |
2305 | if (ndlp) { | 2323 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { |
2306 | lpfc_nlp_put(ndlp); | 2324 | lpfc_nlp_put(ndlp); |
2307 | /* This is the end of the default RPI cleanup logic for this | 2325 | /* This is the end of the default RPI cleanup logic for this |
2308 | * ndlp. If no other discovery threads are using this ndlp. | 2326 | * ndlp. If no other discovery threads are using this ndlp. |
@@ -2335,7 +2353,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2335 | * function can have cmdiocb->contest1 (ndlp) field set to NULL. | 2353 | * function can have cmdiocb->contest1 (ndlp) field set to NULL. |
2336 | */ | 2354 | */ |
2337 | pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); | 2355 | pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); |
2338 | if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) { | 2356 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) && |
2357 | (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) { | ||
2339 | /* A LS_RJT associated with Default RPI cleanup has its own | 2358 | /* A LS_RJT associated with Default RPI cleanup has its own |
2340 | * seperate code path. | 2359 | * seperate code path. |
2341 | */ | 2360 | */ |
@@ -2344,7 +2363,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2344 | } | 2363 | } |
2345 | 2364 | ||
2346 | /* Check to see if link went down during discovery */ | 2365 | /* Check to see if link went down during discovery */ |
2347 | if (!ndlp || lpfc_els_chk_latt(vport)) { | 2366 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) { |
2348 | if (mbox) { | 2367 | if (mbox) { |
2349 | mp = (struct lpfc_dmabuf *) mbox->context1; | 2368 | mp = (struct lpfc_dmabuf *) mbox->context1; |
2350 | if (mp) { | 2369 | if (mp) { |
@@ -2353,7 +2372,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2353 | } | 2372 | } |
2354 | mempool_free(mbox, phba->mbox_mem_pool); | 2373 | mempool_free(mbox, phba->mbox_mem_pool); |
2355 | } | 2374 | } |
2356 | if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI)) | 2375 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) && |
2376 | (ndlp->nlp_flag & NLP_RM_DFLT_RPI)) | ||
2357 | if (lpfc_nlp_not_used(ndlp)) { | 2377 | if (lpfc_nlp_not_used(ndlp)) { |
2358 | ndlp = NULL; | 2378 | ndlp = NULL; |
2359 | /* Indicate the node has already released, | 2379 | /* Indicate the node has already released, |
@@ -2443,7 +2463,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2443 | mempool_free(mbox, phba->mbox_mem_pool); | 2463 | mempool_free(mbox, phba->mbox_mem_pool); |
2444 | } | 2464 | } |
2445 | out: | 2465 | out: |
2446 | if (ndlp) { | 2466 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { |
2447 | spin_lock_irq(shost->host_lock); | 2467 | spin_lock_irq(shost->host_lock); |
2448 | ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); | 2468 | ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); |
2449 | spin_unlock_irq(shost->host_lock); | 2469 | spin_unlock_irq(shost->host_lock); |
@@ -3139,6 +3159,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3139 | /* Another thread is walking fc_rscn_id_list on this vport */ | 3159 | /* Another thread is walking fc_rscn_id_list on this vport */ |
3140 | spin_unlock_irq(shost->host_lock); | 3160 | spin_unlock_irq(shost->host_lock); |
3141 | vport->fc_flag |= FC_RSCN_DISCOVERY; | 3161 | vport->fc_flag |= FC_RSCN_DISCOVERY; |
3162 | /* Send back ACC */ | ||
3163 | lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); | ||
3142 | return 0; | 3164 | return 0; |
3143 | } | 3165 | } |
3144 | /* Indicate we are walking fc_rscn_id_list on this vport */ | 3166 | /* Indicate we are walking fc_rscn_id_list on this vport */ |
@@ -3928,7 +3950,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) | |||
3928 | else { | 3950 | else { |
3929 | struct lpfc_nodelist *ndlp; | 3951 | struct lpfc_nodelist *ndlp; |
3930 | ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); | 3952 | ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); |
3931 | if (ndlp) | 3953 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) |
3932 | remote_ID = ndlp->nlp_DID; | 3954 | remote_ID = ndlp->nlp_DID; |
3933 | } | 3955 | } |
3934 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 3956 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
@@ -4097,21 +4119,22 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
4097 | newnode = 1; | 4119 | newnode = 1; |
4098 | if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) | 4120 | if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) |
4099 | ndlp->nlp_type |= NLP_FABRIC; | 4121 | ndlp->nlp_type |= NLP_FABRIC; |
4100 | } else { | 4122 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { |
4101 | if (!NLP_CHK_NODE_ACT(ndlp)) { | 4123 | ndlp = lpfc_enable_node(vport, ndlp, |
4102 | ndlp = lpfc_enable_node(vport, ndlp, | 4124 | NLP_STE_UNUSED_NODE); |
4103 | NLP_STE_UNUSED_NODE); | 4125 | if (!ndlp) |
4104 | if (!ndlp) | 4126 | goto dropit; |
4105 | goto dropit; | 4127 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
4106 | } | 4128 | newnode = 1; |
4107 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { | 4129 | if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) |
4108 | /* This is simular to the new node path */ | 4130 | ndlp->nlp_type |= NLP_FABRIC; |
4109 | ndlp = lpfc_nlp_get(ndlp); | 4131 | } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { |
4110 | if (!ndlp) | 4132 | /* This is similar to the new node path */ |
4111 | goto dropit; | 4133 | ndlp = lpfc_nlp_get(ndlp); |
4112 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | 4134 | if (!ndlp) |
4113 | newnode = 1; | 4135 | goto dropit; |
4114 | } | 4136 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
4137 | newnode = 1; | ||
4115 | } | 4138 | } |
4116 | 4139 | ||
4117 | phba->fc_stat.elsRcvFrame++; | 4140 | phba->fc_stat.elsRcvFrame++; |
@@ -4451,7 +4474,6 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) | |||
4451 | return; | 4474 | return; |
4452 | } | 4475 | } |
4453 | lpfc_nlp_init(vport, ndlp, NameServer_DID); | 4476 | lpfc_nlp_init(vport, ndlp, NameServer_DID); |
4454 | ndlp->nlp_type |= NLP_FABRIC; | ||
4455 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | 4477 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { |
4456 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); | 4478 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); |
4457 | if (!ndlp) { | 4479 | if (!ndlp) { |
@@ -4465,6 +4487,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) | |||
4465 | return; | 4487 | return; |
4466 | } | 4488 | } |
4467 | } | 4489 | } |
4490 | ndlp->nlp_type |= NLP_FABRIC; | ||
4468 | 4491 | ||
4469 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); | 4492 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); |
4470 | 4493 | ||
@@ -4481,8 +4504,8 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) | |||
4481 | if (ndlp_fdmi) { | 4504 | if (ndlp_fdmi) { |
4482 | lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID); | 4505 | lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID); |
4483 | ndlp_fdmi->nlp_type |= NLP_FABRIC; | 4506 | ndlp_fdmi->nlp_type |= NLP_FABRIC; |
4484 | ndlp_fdmi->nlp_state = | 4507 | lpfc_nlp_set_state(vport, ndlp_fdmi, |
4485 | NLP_STE_PLOGI_ISSUE; | 4508 | NLP_STE_PLOGI_ISSUE); |
4486 | lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, | 4509 | lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, |
4487 | 0); | 4510 | 0); |
4488 | } | 4511 | } |
@@ -5074,39 +5097,3 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba) | |||
5074 | (piocb->iocb_cmpl) (phba, piocb, piocb); | 5097 | (piocb->iocb_cmpl) (phba, piocb, piocb); |
5075 | } | 5098 | } |
5076 | } | 5099 | } |
5077 | |||
5078 | |||
5079 | #if 0 | ||
5080 | void lpfc_fabric_abort_flogi(struct lpfc_hba *phba) | ||
5081 | { | ||
5082 | LIST_HEAD(completions); | ||
5083 | struct lpfc_iocbq *tmp_iocb, *piocb; | ||
5084 | IOCB_t *cmd; | ||
5085 | struct lpfc_nodelist *ndlp; | ||
5086 | |||
5087 | spin_lock_irq(&phba->hbalock); | ||
5088 | list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, | ||
5089 | list) { | ||
5090 | |||
5091 | cmd = &piocb->iocb; | ||
5092 | ndlp = (struct lpfc_nodelist *) piocb->context1; | ||
5093 | if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR && | ||
5094 | ndlp != NULL && | ||
5095 | ndlp->nlp_DID == Fabric_DID) | ||
5096 | list_move_tail(&piocb->list, &completions); | ||
5097 | } | ||
5098 | spin_unlock_irq(&phba->hbalock); | ||
5099 | |||
5100 | while (!list_empty(&completions)) { | ||
5101 | piocb = list_get_first(&completions, struct lpfc_iocbq, list); | ||
5102 | list_del_init(&piocb->list); | ||
5103 | |||
5104 | cmd = &piocb->iocb; | ||
5105 | cmd->ulpStatus = IOSTAT_LOCAL_REJECT; | ||
5106 | cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; | ||
5107 | (piocb->iocb_cmpl) (phba, piocb, piocb); | ||
5108 | } | ||
5109 | } | ||
5110 | #endif /* 0 */ | ||
5111 | |||
5112 | |||
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 976653440fba..7cb68feb04fd 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -69,7 +69,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport) | |||
69 | rdata = rport->dd_data; | 69 | rdata = rport->dd_data; |
70 | ndlp = rdata->pnode; | 70 | ndlp = rdata->pnode; |
71 | 71 | ||
72 | if (!ndlp) { | 72 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
73 | if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) | 73 | if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) |
74 | printk(KERN_ERR "Cannot find remote node" | 74 | printk(KERN_ERR "Cannot find remote node" |
75 | " to terminate I/O Data x%x\n", | 75 | " to terminate I/O Data x%x\n", |
@@ -114,7 +114,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) | |||
114 | 114 | ||
115 | rdata = rport->dd_data; | 115 | rdata = rport->dd_data; |
116 | ndlp = rdata->pnode; | 116 | ndlp = rdata->pnode; |
117 | if (!ndlp) | 117 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) |
118 | return; | 118 | return; |
119 | 119 | ||
120 | vport = ndlp->vport; | 120 | vport = ndlp->vport; |
@@ -243,8 +243,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) | |||
243 | if (warn_on) { | 243 | if (warn_on) { |
244 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, | 244 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, |
245 | "0203 Devloss timeout on " | 245 | "0203 Devloss timeout on " |
246 | "WWPN %x:%x:%x:%x:%x:%x:%x:%x " | 246 | "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " |
247 | "NPort x%x Data: x%x x%x x%x\n", | 247 | "NPort x%06x Data: x%x x%x x%x\n", |
248 | *name, *(name+1), *(name+2), *(name+3), | 248 | *name, *(name+1), *(name+2), *(name+3), |
249 | *(name+4), *(name+5), *(name+6), *(name+7), | 249 | *(name+4), *(name+5), *(name+6), *(name+7), |
250 | ndlp->nlp_DID, ndlp->nlp_flag, | 250 | ndlp->nlp_DID, ndlp->nlp_flag, |
@@ -252,8 +252,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) | |||
252 | } else { | 252 | } else { |
253 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 253 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
254 | "0204 Devloss timeout on " | 254 | "0204 Devloss timeout on " |
255 | "WWPN %x:%x:%x:%x:%x:%x:%x:%x " | 255 | "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " |
256 | "NPort x%x Data: x%x x%x x%x\n", | 256 | "NPort x%06x Data: x%x x%x x%x\n", |
257 | *name, *(name+1), *(name+2), *(name+3), | 257 | *name, *(name+1), *(name+2), *(name+3), |
258 | *(name+4), *(name+5), *(name+6), *(name+7), | 258 | *(name+4), *(name+5), *(name+6), *(name+7), |
259 | ndlp->nlp_DID, ndlp->nlp_flag, | 259 | ndlp->nlp_DID, ndlp->nlp_flag, |
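The two hunks above are pure format-string fixes: each WWPN byte is now printed with %02x so values below 0x10 keep their leading zero, and the N_Port ID is padded to six hex digits with %06x, matching the conventional notation. A stand-alone illustration of the difference (the sample WWPN is made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* hypothetical WWPN 10:00:00:00:c9:3a:07:5b stored byte by byte */
        uint8_t name[8] = { 0x10, 0x00, 0x00, 0x00, 0xc9, 0x3a, 0x07, 0x5b };

        /* old format: bytes below 0x10 lose their leading zero */
        printf("%x:%x:%x:%x:%x:%x:%x:%x\n",
               name[0], name[1], name[2], name[3],
               name[4], name[5], name[6], name[7]);
        /* prints 10:0:0:0:c9:3a:7:5b */

        /* new format: fixed width, the usual WWPN notation */
        printf("%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
               name[0], name[1], name[2], name[3],
               name[4], name[5], name[6], name[7]);
        /* prints 10:00:00:00:c9:3a:07:5b */
        return 0;
    }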
@@ -399,7 +399,10 @@ lpfc_work_done(struct lpfc_hba *phba) | |||
399 | vport = vports[i]; | 399 | vport = vports[i]; |
400 | if (vport == NULL) | 400 | if (vport == NULL) |
401 | break; | 401 | break; |
402 | spin_lock_irq(&vport->work_port_lock); | ||
402 | work_port_events = vport->work_port_events; | 403 | work_port_events = vport->work_port_events; |
404 | vport->work_port_events &= ~work_port_events; | ||
405 | spin_unlock_irq(&vport->work_port_lock); | ||
403 | if (work_port_events & WORKER_DISC_TMO) | 406 | if (work_port_events & WORKER_DISC_TMO) |
404 | lpfc_disc_timeout_handler(vport); | 407 | lpfc_disc_timeout_handler(vport); |
405 | if (work_port_events & WORKER_ELS_TMO) | 408 | if (work_port_events & WORKER_ELS_TMO) |
@@ -416,9 +419,6 @@ lpfc_work_done(struct lpfc_hba *phba) | |||
416 | lpfc_ramp_down_queue_handler(phba); | 419 | lpfc_ramp_down_queue_handler(phba); |
417 | if (work_port_events & WORKER_RAMP_UP_QUEUE) | 420 | if (work_port_events & WORKER_RAMP_UP_QUEUE) |
418 | lpfc_ramp_up_queue_handler(phba); | 421 | lpfc_ramp_up_queue_handler(phba); |
419 | spin_lock_irq(&vport->work_port_lock); | ||
420 | vport->work_port_events &= ~work_port_events; | ||
421 | spin_unlock_irq(&vport->work_port_lock); | ||
422 | } | 422 | } |
423 | lpfc_destroy_vport_work_array(phba, vports); | 423 | lpfc_destroy_vport_work_array(phba, vports); |
424 | 424 | ||
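Taken together, these two hunks make the per-vport event handling race-free: the work_port_events bits are snapshotted and cleared in one critical section under work_port_lock before any handler runs, instead of being cleared afterwards, where a bit posted while a handler was executing would have been wiped out without ever being serviced. A compilable sketch of the snapshot-and-clear idiom, with a pthread mutex standing in for the spinlock (names are illustrative):

    #include <pthread.h>

    static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int work_events;        /* analogue of vport->work_port_events */

    /* producer side: post an event bit */
    void post_event(unsigned int bit)
    {
        pthread_mutex_lock(&work_lock);
        work_events |= bit;
        pthread_mutex_unlock(&work_lock);
    }

    /* consumer side: atomically snapshot and clear, then act on the snapshot */
    unsigned int take_events(void)
    {
        unsigned int ev;

        pthread_mutex_lock(&work_lock);
        ev = work_events;
        work_events &= ~ev;                 /* clear only the bits we will handle */
        pthread_mutex_unlock(&work_lock);
        return ev;                          /* caller dispatches handlers for ev */
    }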
@@ -430,10 +430,10 @@ lpfc_work_done(struct lpfc_hba *phba) | |||
430 | if (pring->flag & LPFC_STOP_IOCB_EVENT) { | 430 | if (pring->flag & LPFC_STOP_IOCB_EVENT) { |
431 | pring->flag |= LPFC_DEFERRED_RING_EVENT; | 431 | pring->flag |= LPFC_DEFERRED_RING_EVENT; |
432 | } else { | 432 | } else { |
433 | pring->flag &= ~LPFC_DEFERRED_RING_EVENT; | ||
433 | lpfc_sli_handle_slow_ring_event(phba, pring, | 434 | lpfc_sli_handle_slow_ring_event(phba, pring, |
434 | (status & | 435 | (status & |
435 | HA_RXMASK)); | 436 | HA_RXMASK)); |
436 | pring->flag &= ~LPFC_DEFERRED_RING_EVENT; | ||
437 | } | 437 | } |
438 | /* | 438 | /* |
439 | * Turn on Ring interrupts | 439 | * Turn on Ring interrupts |
@@ -519,7 +519,9 @@ lpfc_do_work(void *p) | |||
519 | schedule(); | 519 | schedule(); |
520 | } | 520 | } |
521 | } | 521 | } |
522 | spin_lock_irq(&phba->hbalock); | ||
522 | phba->work_wait = NULL; | 523 | phba->work_wait = NULL; |
524 | spin_unlock_irq(&phba->hbalock); | ||
523 | return 0; | 525 | return 0; |
524 | } | 526 | } |
525 | 527 | ||
@@ -809,11 +811,9 @@ out: | |||
809 | mempool_free(pmb, phba->mbox_mem_pool); | 811 | mempool_free(pmb, phba->mbox_mem_pool); |
810 | 812 | ||
811 | spin_lock_irq(shost->host_lock); | 813 | spin_lock_irq(shost->host_lock); |
812 | vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK); | 814 | vport->fc_flag &= ~FC_ABORT_DISCOVERY; |
813 | spin_unlock_irq(shost->host_lock); | 815 | spin_unlock_irq(shost->host_lock); |
814 | 816 | ||
815 | del_timer_sync(&phba->fc_estabtmo); | ||
816 | |||
817 | lpfc_can_disctmo(vport); | 817 | lpfc_can_disctmo(vport); |
818 | 818 | ||
819 | /* turn on Link Attention interrupts */ | 819 | /* turn on Link Attention interrupts */ |
@@ -1340,10 +1340,14 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1340 | i++) { | 1340 | i++) { |
1341 | if (vports[i]->port_type == LPFC_PHYSICAL_PORT) | 1341 | if (vports[i]->port_type == LPFC_PHYSICAL_PORT) |
1342 | continue; | 1342 | continue; |
1343 | if (phba->fc_topology == TOPOLOGY_LOOP) { | ||
1344 | lpfc_vport_set_state(vports[i], | ||
1345 | FC_VPORT_LINKDOWN); | ||
1346 | continue; | ||
1347 | } | ||
1343 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) | 1348 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) |
1344 | lpfc_initial_fdisc(vports[i]); | 1349 | lpfc_initial_fdisc(vports[i]); |
1345 | else if (phba->sli3_options & | 1350 | else { |
1346 | LPFC_SLI3_NPIV_ENABLED) { | ||
1347 | lpfc_vport_set_state(vports[i], | 1351 | lpfc_vport_set_state(vports[i], |
1348 | FC_VPORT_NO_FABRIC_SUPP); | 1352 | FC_VPORT_NO_FABRIC_SUPP); |
1349 | lpfc_printf_vlog(vport, KERN_ERR, | 1353 | lpfc_printf_vlog(vport, KERN_ERR, |
@@ -2190,10 +2194,6 @@ lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
2190 | if (did == Bcast_DID) | 2194 | if (did == Bcast_DID) |
2191 | return 0; | 2195 | return 0; |
2192 | 2196 | ||
2193 | if (ndlp->nlp_DID == 0) { | ||
2194 | return 0; | ||
2195 | } | ||
2196 | |||
2197 | /* First check for Direct match */ | 2197 | /* First check for Direct match */ |
2198 | if (ndlp->nlp_DID == did) | 2198 | if (ndlp->nlp_DID == did) |
2199 | return 1; | 2199 | return 1; |
@@ -2301,7 +2301,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) | |||
2301 | return ndlp; | 2301 | return ndlp; |
2302 | } | 2302 | } |
2303 | 2303 | ||
2304 | if (vport->fc_flag & FC_RSCN_MODE) { | 2304 | if ((vport->fc_flag & FC_RSCN_MODE) && |
2305 | !(vport->fc_flag & FC_NDISC_ACTIVE)) { | ||
2305 | if (lpfc_rscn_payload_check(vport, did)) { | 2306 | if (lpfc_rscn_payload_check(vport, did)) { |
2306 | /* If we've already received a PLOGI from this NPort | 2307 |
2307 | * we don't need to try to discover it again. | 2308 | * we don't need to try to discover it again. |
@@ -2947,24 +2948,6 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) | |||
2947 | return NULL; | 2948 | return NULL; |
2948 | } | 2949 | } |
2949 | 2950 | ||
2950 | #if 0 | ||
2951 | /* | ||
2952 | * Search node lists for a remote port matching filter criteria | ||
2953 | * Caller needs to hold host_lock before calling this routine. | ||
2954 | */ | ||
2955 | struct lpfc_nodelist * | ||
2956 | lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) | ||
2957 | { | ||
2958 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
2959 | struct lpfc_nodelist *ndlp; | ||
2960 | |||
2961 | spin_lock_irq(shost->host_lock); | ||
2962 | ndlp = __lpfc_find_node(vport, filter, param); | ||
2963 | spin_unlock_irq(shost->host_lock); | ||
2964 | return ndlp; | ||
2965 | } | ||
2966 | #endif /* 0 */ | ||
2967 | |||
2968 | /* | 2951 | /* |
2969 | * This routine looks up the ndlp lists for the given RPI. If rpi found it | 2952 | * This routine looks up the ndlp lists for the given RPI. If rpi found it |
2970 | * returns the node list element pointer else return NULL. | 2953 | * returns the node list element pointer else return NULL. |
@@ -2975,20 +2958,6 @@ __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) | |||
2975 | return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi); | 2958 | return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi); |
2976 | } | 2959 | } |
2977 | 2960 | ||
2978 | #if 0 | ||
2979 | struct lpfc_nodelist * | ||
2980 | lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) | ||
2981 | { | ||
2982 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
2983 | struct lpfc_nodelist *ndlp; | ||
2984 | |||
2985 | spin_lock_irq(shost->host_lock); | ||
2986 | ndlp = __lpfc_findnode_rpi(vport, rpi); | ||
2987 | spin_unlock_irq(shost->host_lock); | ||
2988 | return ndlp; | ||
2989 | } | ||
2990 | #endif /* 0 */ | ||
2991 | |||
2992 | /* | 2961 | /* |
2993 | * This routine looks up the ndlp lists for the given WWPN. If WWPN found it | 2962 | * This routine looks up the ndlp lists for the given WWPN. If WWPN found it |
2994 | * returns the node element list pointer else return NULL. | 2963 | * returns the node element list pointer else return NULL. |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 22843751c2ca..fa757b251f82 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -559,8 +559,10 @@ lpfc_hb_timeout(unsigned long ptr) | |||
559 | phba->pport->work_port_events |= WORKER_HB_TMO; | 559 | phba->pport->work_port_events |= WORKER_HB_TMO; |
560 | spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); | 560 | spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); |
561 | 561 | ||
562 | spin_lock_irqsave(&phba->hbalock, iflag); | ||
562 | if (phba->work_wait) | 563 | if (phba->work_wait) |
563 | wake_up(phba->work_wait); | 564 | wake_up(phba->work_wait); |
565 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
564 | return; | 566 | return; |
565 | } | 567 | } |
566 | 568 | ||
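This hunk is the waker-side half of the change made in lpfc_do_work() earlier in the patch: the worker thread now clears phba->work_wait under hbalock before it exits, and every waker, such as this heartbeat timer, tests and dereferences the pointer under the same lock, so wake_up() can no longer race with the wait-queue pointer being torn down. Stripped to its essentials (kernel-style fragment, not a drop-in):

    /* waker (timer / interrupt context) */
    spin_lock_irqsave(&phba->hbalock, iflag);
    if (phba->work_wait)                    /* pointer only valid under hbalock */
        wake_up(phba->work_wait);
    spin_unlock_irqrestore(&phba->hbalock, iflag);

    /* worker thread, on exit */
    spin_lock_irq(&phba->hbalock);
    phba->work_wait = NULL;                 /* no waker can see a stale pointer */
    spin_unlock_irq(&phba->hbalock);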
@@ -714,12 +716,10 @@ lpfc_handle_eratt(struct lpfc_hba *phba) | |||
714 | struct lpfc_vport *vport = phba->pport; | 716 | struct lpfc_vport *vport = phba->pport; |
715 | struct lpfc_sli *psli = &phba->sli; | 717 | struct lpfc_sli *psli = &phba->sli; |
716 | struct lpfc_sli_ring *pring; | 718 | struct lpfc_sli_ring *pring; |
717 | struct lpfc_vport **vports; | ||
718 | uint32_t event_data; | 719 | uint32_t event_data; |
719 | unsigned long temperature; | 720 | unsigned long temperature; |
720 | struct temp_event temp_event_data; | 721 | struct temp_event temp_event_data; |
721 | struct Scsi_Host *shost; | 722 | struct Scsi_Host *shost; |
722 | int i; | ||
723 | 723 | ||
724 | /* If the pci channel is offline, ignore possible errors, | 724 | /* If the pci channel is offline, ignore possible errors, |
725 | * since we cannot communicate with the pci card anyway. */ | 725 | * since we cannot communicate with the pci card anyway. */ |
@@ -729,25 +729,14 @@ lpfc_handle_eratt(struct lpfc_hba *phba) | |||
729 | if (!phba->cfg_enable_hba_reset) | 729 | if (!phba->cfg_enable_hba_reset) |
730 | return; | 730 | return; |
731 | 731 | ||
732 | if (phba->work_hs & HS_FFER6 || | 732 | if (phba->work_hs & HS_FFER6) { |
733 | phba->work_hs & HS_FFER5) { | ||
734 | /* Re-establishing Link */ | 733 | /* Re-establishing Link */ |
735 | lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, | 734 | lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, |
736 | "1301 Re-establishing Link " | 735 | "1301 Re-establishing Link " |
737 | "Data: x%x x%x x%x\n", | 736 | "Data: x%x x%x x%x\n", |
738 | phba->work_hs, | 737 | phba->work_hs, |
739 | phba->work_status[0], phba->work_status[1]); | 738 | phba->work_status[0], phba->work_status[1]); |
740 | vports = lpfc_create_vport_work_array(phba); | 739 | |
741 | if (vports != NULL) | ||
742 | for(i = 0; | ||
743 | i <= phba->max_vpi && vports[i] != NULL; | ||
744 | i++){ | ||
745 | shost = lpfc_shost_from_vport(vports[i]); | ||
746 | spin_lock_irq(shost->host_lock); | ||
747 | vports[i]->fc_flag |= FC_ESTABLISH_LINK; | ||
748 | spin_unlock_irq(shost->host_lock); | ||
749 | } | ||
750 | lpfc_destroy_vport_work_array(phba, vports); | ||
751 | spin_lock_irq(&phba->hbalock); | 740 | spin_lock_irq(&phba->hbalock); |
752 | psli->sli_flag &= ~LPFC_SLI2_ACTIVE; | 741 | psli->sli_flag &= ~LPFC_SLI2_ACTIVE; |
753 | spin_unlock_irq(&phba->hbalock); | 742 | spin_unlock_irq(&phba->hbalock); |
@@ -761,7 +750,6 @@ lpfc_handle_eratt(struct lpfc_hba *phba) | |||
761 | pring = &psli->ring[psli->fcp_ring]; | 750 | pring = &psli->ring[psli->fcp_ring]; |
762 | lpfc_sli_abort_iocb_ring(phba, pring); | 751 | lpfc_sli_abort_iocb_ring(phba, pring); |
763 | 752 | ||
764 | |||
765 | /* | 753 | /* |
766 | * There was a firmware error. Take the hba offline and then | 754 | * There was a firmware error. Take the hba offline and then |
767 | * attempt to restart it. | 755 | * attempt to restart it. |
@@ -770,7 +758,6 @@ lpfc_handle_eratt(struct lpfc_hba *phba) | |||
770 | lpfc_offline(phba); | 758 | lpfc_offline(phba); |
771 | lpfc_sli_brdrestart(phba); | 759 | lpfc_sli_brdrestart(phba); |
772 | if (lpfc_online(phba) == 0) { /* Initialize the HBA */ | 760 | if (lpfc_online(phba) == 0) { /* Initialize the HBA */ |
773 | mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60); | ||
774 | lpfc_unblock_mgmt_io(phba); | 761 | lpfc_unblock_mgmt_io(phba); |
775 | return; | 762 | return; |
776 | } | 763 | } |
@@ -1454,6 +1441,13 @@ lpfc_cleanup(struct lpfc_vport *vport) | |||
1454 | NLP_SET_FREE_REQ(ndlp); | 1441 | NLP_SET_FREE_REQ(ndlp); |
1455 | spin_unlock_irq(&phba->ndlp_lock); | 1442 | spin_unlock_irq(&phba->ndlp_lock); |
1456 | 1443 | ||
1444 | if (vport->port_type != LPFC_PHYSICAL_PORT && | ||
1445 | ndlp->nlp_DID == Fabric_DID) { | ||
1446 | /* Just free up ndlp with Fabric_DID for vports */ | ||
1447 | lpfc_nlp_put(ndlp); | ||
1448 | continue; | ||
1449 | } | ||
1450 | |||
1457 | if (ndlp->nlp_type & NLP_FABRIC) | 1451 | if (ndlp->nlp_type & NLP_FABRIC) |
1458 | lpfc_disc_state_machine(vport, ndlp, NULL, | 1452 | lpfc_disc_state_machine(vport, ndlp, NULL, |
1459 | NLP_EVT_DEVICE_RECOVERY); | 1453 | NLP_EVT_DEVICE_RECOVERY); |
@@ -1491,31 +1485,6 @@ lpfc_cleanup(struct lpfc_vport *vport) | |||
1491 | return; | 1485 | return; |
1492 | } | 1486 | } |
1493 | 1487 | ||
1494 | static void | ||
1495 | lpfc_establish_link_tmo(unsigned long ptr) | ||
1496 | { | ||
1497 | struct lpfc_hba *phba = (struct lpfc_hba *) ptr; | ||
1498 | struct lpfc_vport **vports; | ||
1499 | unsigned long iflag; | ||
1500 | int i; | ||
1501 | |||
1502 | /* Re-establishing Link, timer expired */ | ||
1503 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | ||
1504 | "1300 Re-establishing Link, timer expired " | ||
1505 | "Data: x%x x%x\n", | ||
1506 | phba->pport->fc_flag, phba->pport->port_state); | ||
1507 | vports = lpfc_create_vport_work_array(phba); | ||
1508 | if (vports != NULL) | ||
1509 | for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | ||
1510 | struct Scsi_Host *shost; | ||
1511 | shost = lpfc_shost_from_vport(vports[i]); | ||
1512 | spin_lock_irqsave(shost->host_lock, iflag); | ||
1513 | vports[i]->fc_flag &= ~FC_ESTABLISH_LINK; | ||
1514 | spin_unlock_irqrestore(shost->host_lock, iflag); | ||
1515 | } | ||
1516 | lpfc_destroy_vport_work_array(phba, vports); | ||
1517 | } | ||
1518 | |||
1519 | void | 1488 | void |
1520 | lpfc_stop_vport_timers(struct lpfc_vport *vport) | 1489 | lpfc_stop_vport_timers(struct lpfc_vport *vport) |
1521 | { | 1490 | { |
@@ -1529,7 +1498,6 @@ static void | |||
1529 | lpfc_stop_phba_timers(struct lpfc_hba *phba) | 1498 | lpfc_stop_phba_timers(struct lpfc_hba *phba) |
1530 | { | 1499 | { |
1531 | del_timer_sync(&phba->fcp_poll_timer); | 1500 | del_timer_sync(&phba->fcp_poll_timer); |
1532 | del_timer_sync(&phba->fc_estabtmo); | ||
1533 | lpfc_stop_vport_timers(phba->pport); | 1501 | lpfc_stop_vport_timers(phba->pport); |
1534 | del_timer_sync(&phba->sli.mbox_tmo); | 1502 | del_timer_sync(&phba->sli.mbox_tmo); |
1535 | del_timer_sync(&phba->fabric_block_timer); | 1503 | del_timer_sync(&phba->fabric_block_timer); |
@@ -2005,10 +1973,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2005 | phba->max_vpi = LPFC_MAX_VPI; | 1973 | phba->max_vpi = LPFC_MAX_VPI; |
2006 | 1974 | ||
2007 | /* Initialize timers used by driver */ | 1975 | /* Initialize timers used by driver */ |
2008 | init_timer(&phba->fc_estabtmo); | ||
2009 | phba->fc_estabtmo.function = lpfc_establish_link_tmo; | ||
2010 | phba->fc_estabtmo.data = (unsigned long)phba; | ||
2011 | |||
2012 | init_timer(&phba->hb_tmofunc); | 1976 | init_timer(&phba->hb_tmofunc); |
2013 | phba->hb_tmofunc.function = lpfc_hb_timeout; | 1977 | phba->hb_tmofunc.function = lpfc_hb_timeout; |
2014 | phba->hb_tmofunc.data = (unsigned long)phba; | 1978 | phba->hb_tmofunc.data = (unsigned long)phba; |
@@ -2406,6 +2370,7 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) | |||
2406 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | 2370 | struct Scsi_Host *shost = pci_get_drvdata(pdev); |
2407 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | 2371 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; |
2408 | struct lpfc_sli *psli = &phba->sli; | 2372 | struct lpfc_sli *psli = &phba->sli; |
2373 | int error, retval; | ||
2409 | 2374 | ||
2410 | dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); | 2375 | dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); |
2411 | if (pci_enable_device_mem(pdev)) { | 2376 | if (pci_enable_device_mem(pdev)) { |
@@ -2416,15 +2381,40 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) | |||
2416 | 2381 | ||
2417 | pci_set_master(pdev); | 2382 | pci_set_master(pdev); |
2418 | 2383 | ||
2419 | /* Re-establishing Link */ | ||
2420 | spin_lock_irq(shost->host_lock); | ||
2421 | phba->pport->fc_flag |= FC_ESTABLISH_LINK; | ||
2422 | spin_unlock_irq(shost->host_lock); | ||
2423 | |||
2424 | spin_lock_irq(&phba->hbalock); | 2384 | spin_lock_irq(&phba->hbalock); |
2425 | psli->sli_flag &= ~LPFC_SLI2_ACTIVE; | 2385 | psli->sli_flag &= ~LPFC_SLI2_ACTIVE; |
2426 | spin_unlock_irq(&phba->hbalock); | 2386 | spin_unlock_irq(&phba->hbalock); |
2427 | 2387 | ||
2388 | /* Enable configured interrupt method */ | ||
2389 | phba->intr_type = NONE; | ||
2390 | if (phba->cfg_use_msi == 2) { | ||
2391 | error = lpfc_enable_msix(phba); | ||
2392 | if (!error) | ||
2393 | phba->intr_type = MSIX; | ||
2394 | } | ||
2395 | |||
2396 | /* Fallback to MSI if MSI-X initialization failed */ | ||
2397 | if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { | ||
2398 | retval = pci_enable_msi(phba->pcidev); | ||
2399 | if (!retval) | ||
2400 | phba->intr_type = MSI; | ||
2401 | else | ||
2402 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2403 | "0470 Enable MSI failed, continuing " | ||
2404 | "with IRQ\n"); | ||
2405 | } | ||
2406 | |||
2407 | /* MSI-X is the only case that doesn't need to call request_irq */ | ||
2408 | if (phba->intr_type != MSIX) { | ||
2409 | retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, | ||
2410 | IRQF_SHARED, LPFC_DRIVER_NAME, phba); | ||
2411 | if (retval) { | ||
2412 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2413 | "0471 Enable interrupt handler " | ||
2414 | "failed\n"); | ||
2415 | } else if (phba->intr_type != MSI) | ||
2416 | phba->intr_type = INTx; | ||
2417 | } | ||
2428 | 2418 | ||
2429 | /* Take device offline; this will perform cleanup */ | 2419 | /* Take device offline; this will perform cleanup */ |
2430 | lpfc_offline(phba); | 2420 | lpfc_offline(phba); |
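The slot-reset path now re-enables interrupts the same way the normal probe path does instead of assuming INTx: MSI-X is tried when cfg_use_msi is 2, MSI when it is at least 1, and a shared legacy line is the final fallback; only the non-MSI-X cases call request_irq() here because lpfc_enable_msix() requests its own vectors. Condensed, with the error logging trimmed, the ladder is:

    phba->intr_type = NONE;

    if (phba->cfg_use_msi == 2 && lpfc_enable_msix(phba) == 0)
        phba->intr_type = MSIX;             /* vectors requested inside the helper */

    if (phba->intr_type == NONE && phba->cfg_use_msi >= 1 &&
        pci_enable_msi(phba->pcidev) == 0)
        phba->intr_type = MSI;              /* single message-signaled vector */

    if (phba->intr_type != MSIX) {
        /* MSI and plain INTx share one handler and still need request_irq() */
        if (request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
                        LPFC_DRIVER_NAME, phba) == 0 &&
            phba->intr_type != MSI)
            phba->intr_type = INTx;
    }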
@@ -2445,9 +2435,7 @@ static void lpfc_io_resume(struct pci_dev *pdev) | |||
2445 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | 2435 | struct Scsi_Host *shost = pci_get_drvdata(pdev); |
2446 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | 2436 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; |
2447 | 2437 | ||
2448 | if (lpfc_online(phba) == 0) { | 2438 | lpfc_online(phba); |
2449 | mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60); | ||
2450 | } | ||
2451 | } | 2439 | } |
2452 | 2440 | ||
2453 | static struct pci_device_id lpfc_id_table[] = { | 2441 | static struct pci_device_id lpfc_id_table[] = { |
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index d513813f6697..d08c4c890744 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
@@ -451,7 +451,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
451 | spin_unlock_irq(shost->host_lock); | 451 | spin_unlock_irq(shost->host_lock); |
452 | 452 | ||
453 | if ((ndlp->nlp_flag & NLP_ADISC_SND) && | 453 | if ((ndlp->nlp_flag & NLP_ADISC_SND) && |
454 | (vport->num_disc_nodes)) { | 454 | (vport->num_disc_nodes)) { |
455 | /* Check to see if there are more | 455 | /* Check to see if there are more |
456 | * ADISCs to be sent | 456 | * ADISCs to be sent |
457 | */ | 457 | */ |
@@ -469,20 +469,23 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
469 | lpfc_end_rscn(vport); | 469 | lpfc_end_rscn(vport); |
470 | } | 470 | } |
471 | } | 471 | } |
472 | else if (vport->num_disc_nodes) { | 472 | } |
473 | /* Check to see if there are more | 473 | } else if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) && |
474 | * PLOGIs to be sent | 474 | (ndlp->nlp_flag & NLP_NPR_2B_DISC) && |
475 | */ | 475 | (vport->num_disc_nodes)) { |
476 | lpfc_more_plogi(vport); | 476 | spin_lock_irq(shost->host_lock); |
477 | 477 | ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; | |
478 | if (vport->num_disc_nodes == 0) { | 478 | spin_unlock_irq(shost->host_lock); |
479 | spin_lock_irq(shost->host_lock); | 479 | /* Check to see if there are more |
480 | vport->fc_flag &= ~FC_NDISC_ACTIVE; | 480 | * PLOGIs to be sent |
481 | spin_unlock_irq(shost->host_lock); | 481 | */ |
482 | lpfc_can_disctmo(vport); | 482 | lpfc_more_plogi(vport); |
483 | lpfc_end_rscn(vport); | 483 | if (vport->num_disc_nodes == 0) { |
484 | } | 484 | spin_lock_irq(shost->host_lock); |
485 | } | 485 | vport->fc_flag &= ~FC_NDISC_ACTIVE; |
486 | spin_unlock_irq(shost->host_lock); | ||
487 | lpfc_can_disctmo(vport); | ||
488 | lpfc_end_rscn(vport); | ||
486 | } | 489 | } |
487 | } | 490 | } |
488 | 491 | ||
@@ -869,8 +872,11 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, | |||
869 | 872 | ||
870 | lp = (uint32_t *) prsp->virt; | 873 | lp = (uint32_t *) prsp->virt; |
871 | sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); | 874 | sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); |
872 | if (wwn_to_u64(sp->portName.u.wwn) == 0 || | 875 | |
873 | wwn_to_u64(sp->nodeName.u.wwn) == 0) { | 876 | /* Some switches have FDMI servers returning 0 for WWN */ |
877 | if ((ndlp->nlp_DID != FDMI_DID) && | ||
878 | (wwn_to_u64(sp->portName.u.wwn) == 0 || | ||
879 | wwn_to_u64(sp->nodeName.u.wwn) == 0)) { | ||
874 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 880 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
875 | "0142 PLOGI RSP: Invalid WWN.\n"); | 881 | "0142 PLOGI RSP: Invalid WWN.\n"); |
876 | goto out; | 882 | goto out; |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 70255c11d3ad..0910a9ab76a5 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -169,6 +169,9 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) | |||
169 | for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | 169 | for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { |
170 | shost = lpfc_shost_from_vport(vports[i]); | 170 | shost = lpfc_shost_from_vport(vports[i]); |
171 | shost_for_each_device(sdev, shost) { | 171 | shost_for_each_device(sdev, shost) { |
172 | if (vports[i]->cfg_lun_queue_depth <= | ||
173 | sdev->queue_depth) | ||
174 | continue; | ||
172 | if (sdev->ordered_tags) | 175 | if (sdev->ordered_tags) |
173 | scsi_adjust_queue_depth(sdev, | 176 | scsi_adjust_queue_depth(sdev, |
174 | MSG_ORDERED_TAG, | 177 | MSG_ORDERED_TAG, |
@@ -578,14 +581,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
578 | lpfc_cmd->result == IOERR_NO_RESOURCES || | 581 | lpfc_cmd->result == IOERR_NO_RESOURCES || |
579 | lpfc_cmd->result == RJT_LOGIN_REQUIRED) { | 582 | lpfc_cmd->result == RJT_LOGIN_REQUIRED) { |
580 | cmd->result = ScsiResult(DID_REQUEUE, 0); | 583 | cmd->result = ScsiResult(DID_REQUEUE, 0); |
581 | break; | 584 | break; |
582 | } /* else: fall through */ | 585 | } /* else: fall through */ |
583 | default: | 586 | default: |
584 | cmd->result = ScsiResult(DID_ERROR, 0); | 587 | cmd->result = ScsiResult(DID_ERROR, 0); |
585 | break; | 588 | break; |
586 | } | 589 | } |
587 | 590 | ||
588 | if ((pnode == NULL ) | 591 | if (!pnode || !NLP_CHK_NODE_ACT(pnode) |
589 | || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) | 592 | || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) |
590 | cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY); | 593 | cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY); |
591 | } else { | 594 | } else { |
@@ -606,6 +609,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
606 | result = cmd->result; | 609 | result = cmd->result; |
607 | sdev = cmd->device; | 610 | sdev = cmd->device; |
608 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); | 611 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); |
612 | spin_lock_irqsave(sdev->host->host_lock, flags); | ||
613 | lpfc_cmd->pCmd = NULL; /* This must be done before scsi_done */ | ||
614 | spin_unlock_irqrestore(sdev->host->host_lock, flags); | ||
609 | cmd->scsi_done(cmd); | 615 | cmd->scsi_done(cmd); |
610 | 616 | ||
611 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { | 617 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { |
@@ -614,7 +620,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
614 | * wake up the thread. | 620 | * wake up the thread. |
615 | */ | 621 | */ |
616 | spin_lock_irqsave(sdev->host->host_lock, flags); | 622 | spin_lock_irqsave(sdev->host->host_lock, flags); |
617 | lpfc_cmd->pCmd = NULL; | ||
618 | if (lpfc_cmd->waitq) | 623 | if (lpfc_cmd->waitq) |
619 | wake_up(lpfc_cmd->waitq); | 624 | wake_up(lpfc_cmd->waitq); |
620 | spin_unlock_irqrestore(sdev->host->host_lock, flags); | 625 | spin_unlock_irqrestore(sdev->host->host_lock, flags); |
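The point of these two hunks is ordering: lpfc_cmd->pCmd is cleared, under the host lock, before cmd->scsi_done() is called, so the midlayer can never see a command it has already been told is finished while the driver still claims ownership of it through pCmd; the later clears in the polling and wait-queue paths become redundant and are removed. The ordering that matters is simply:

    spin_lock_irqsave(sdev->host->host_lock, flags);
    lpfc_cmd->pCmd = NULL;      /* driver gives up the command first ...        */
    spin_unlock_irqrestore(sdev->host->host_lock, flags);

    cmd->scsi_done(cmd);        /* ... then the midlayer is told it completed   */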
@@ -626,7 +631,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
626 | if (!result) | 631 | if (!result) |
627 | lpfc_rampup_queue_depth(vport, sdev); | 632 | lpfc_rampup_queue_depth(vport, sdev); |
628 | 633 | ||
629 | if (!result && pnode != NULL && | 634 | if (!result && pnode && NLP_CHK_NODE_ACT(pnode) && |
630 | ((jiffies - pnode->last_ramp_up_time) > | 635 | ((jiffies - pnode->last_ramp_up_time) > |
631 | LPFC_Q_RAMP_UP_INTERVAL * HZ) && | 636 | LPFC_Q_RAMP_UP_INTERVAL * HZ) && |
632 | ((jiffies - pnode->last_q_full_time) > | 637 | ((jiffies - pnode->last_q_full_time) > |
@@ -654,7 +659,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
654 | * Check for queue full. If the lun is reporting queue full, then | 659 | * Check for queue full. If the lun is reporting queue full, then |
655 | * back off the lun queue depth to prevent target overloads. | 660 | * back off the lun queue depth to prevent target overloads. |
656 | */ | 661 | */ |
657 | if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) { | 662 | if (result == SAM_STAT_TASK_SET_FULL && pnode && |
663 | NLP_CHK_NODE_ACT(pnode)) { | ||
658 | pnode->last_q_full_time = jiffies; | 664 | pnode->last_q_full_time = jiffies; |
659 | 665 | ||
660 | shost_for_each_device(tmp_sdev, sdev->host) { | 666 | shost_for_each_device(tmp_sdev, sdev->host) { |
@@ -684,7 +690,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
684 | * wake up the thread. | 690 | * wake up the thread. |
685 | */ | 691 | */ |
686 | spin_lock_irqsave(sdev->host->host_lock, flags); | 692 | spin_lock_irqsave(sdev->host->host_lock, flags); |
687 | lpfc_cmd->pCmd = NULL; | ||
688 | if (lpfc_cmd->waitq) | 693 | if (lpfc_cmd->waitq) |
689 | wake_up(lpfc_cmd->waitq); | 694 | wake_up(lpfc_cmd->waitq); |
690 | spin_unlock_irqrestore(sdev->host->host_lock, flags); | 695 | spin_unlock_irqrestore(sdev->host->host_lock, flags); |
@@ -704,6 +709,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
704 | int datadir = scsi_cmnd->sc_data_direction; | 709 | int datadir = scsi_cmnd->sc_data_direction; |
705 | char tag[2]; | 710 | char tag[2]; |
706 | 711 | ||
712 | if (!pnode || !NLP_CHK_NODE_ACT(pnode)) | ||
713 | return; | ||
714 | |||
707 | lpfc_cmd->fcp_rsp->rspSnsLen = 0; | 715 | lpfc_cmd->fcp_rsp->rspSnsLen = 0; |
708 | /* clear task management bits */ | 716 | /* clear task management bits */ |
709 | lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; | 717 | lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; |
@@ -785,9 +793,9 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, | |||
785 | struct lpfc_rport_data *rdata = lpfc_cmd->rdata; | 793 | struct lpfc_rport_data *rdata = lpfc_cmd->rdata; |
786 | struct lpfc_nodelist *ndlp = rdata->pnode; | 794 | struct lpfc_nodelist *ndlp = rdata->pnode; |
787 | 795 | ||
788 | if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { | 796 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || |
797 | ndlp->nlp_state != NLP_STE_MAPPED_NODE) | ||
789 | return 0; | 798 | return 0; |
790 | } | ||
791 | 799 | ||
792 | piocbq = &(lpfc_cmd->cur_iocbq); | 800 | piocbq = &(lpfc_cmd->cur_iocbq); |
793 | piocbq->vport = vport; | 801 | piocbq->vport = vport; |
@@ -842,7 +850,7 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, | |||
842 | struct lpfc_iocbq *iocbqrsp; | 850 | struct lpfc_iocbq *iocbqrsp; |
843 | int ret; | 851 | int ret; |
844 | 852 | ||
845 | if (!rdata->pnode) | 853 | if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode)) |
846 | return FAILED; | 854 | return FAILED; |
847 | 855 | ||
848 | lpfc_cmd->rdata = rdata; | 856 | lpfc_cmd->rdata = rdata; |
@@ -959,7 +967,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
959 | * Catch race where our node has transitioned, but the | 967 | * Catch race where our node has transitioned, but the |
960 | * transport is still transitioning. | 968 | * transport is still transitioning. |
961 | */ | 969 | */ |
962 | if (!ndlp) { | 970 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
963 | cmnd->result = ScsiResult(DID_BUS_BUSY, 0); | 971 | cmnd->result = ScsiResult(DID_BUS_BUSY, 0); |
964 | goto out_fail_command; | 972 | goto out_fail_command; |
965 | } | 973 | } |
@@ -1146,7 +1154,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) | |||
1146 | * target is rediscovered or devloss timeout expires. | 1154 | * target is rediscovered or devloss timeout expires. |
1147 | */ | 1155 | */ |
1148 | while (1) { | 1156 | while (1) { |
1149 | if (!pnode) | 1157 | if (!pnode || !NLP_CHK_NODE_ACT(pnode)) |
1150 | goto out; | 1158 | goto out; |
1151 | 1159 | ||
1152 | if (pnode->nlp_state != NLP_STE_MAPPED_NODE) { | 1160 | if (pnode->nlp_state != NLP_STE_MAPPED_NODE) { |
@@ -1162,7 +1170,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) | |||
1162 | goto out; | 1170 | goto out; |
1163 | } | 1171 | } |
1164 | pnode = rdata->pnode; | 1172 | pnode = rdata->pnode; |
1165 | if (!pnode) | 1173 | if (!pnode || !NLP_CHK_NODE_ACT(pnode)) |
1166 | goto out; | 1174 | goto out; |
1167 | } | 1175 | } |
1168 | if (pnode->nlp_state == NLP_STE_MAPPED_NODE) | 1176 | if (pnode->nlp_state == NLP_STE_MAPPED_NODE) |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index fc0d9501aba6..70a0a9eab211 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -2648,7 +2648,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) | |||
2648 | spin_unlock_irq(&phba->pport->work_port_lock); | 2648 | spin_unlock_irq(&phba->pport->work_port_lock); |
2649 | spin_lock_irq(&phba->hbalock); | 2649 | spin_lock_irq(&phba->hbalock); |
2650 | phba->link_state = LPFC_LINK_UNKNOWN; | 2650 | phba->link_state = LPFC_LINK_UNKNOWN; |
2651 | phba->pport->fc_flag |= FC_ESTABLISH_LINK; | ||
2652 | psli->sli_flag &= ~LPFC_SLI2_ACTIVE; | 2651 | psli->sli_flag &= ~LPFC_SLI2_ACTIVE; |
2653 | spin_unlock_irq(&phba->hbalock); | 2652 | spin_unlock_irq(&phba->hbalock); |
2654 | 2653 | ||
@@ -2669,8 +2668,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) | |||
2669 | lpfc_offline_prep(phba); | 2668 | lpfc_offline_prep(phba); |
2670 | lpfc_offline(phba); | 2669 | lpfc_offline(phba); |
2671 | lpfc_sli_brdrestart(phba); | 2670 | lpfc_sli_brdrestart(phba); |
2672 | if (lpfc_online(phba) == 0) /* Initialize the HBA */ | 2671 | lpfc_online(phba); |
2673 | mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60); | ||
2674 | lpfc_unblock_mgmt_io(phba); | 2672 | lpfc_unblock_mgmt_io(phba); |
2675 | return; | 2673 | return; |
2676 | } | 2674 | } |
@@ -2687,28 +2685,41 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
2687 | unsigned long drvr_flag = 0; | 2685 | unsigned long drvr_flag = 0; |
2688 | volatile uint32_t word0, ldata; | 2686 | volatile uint32_t word0, ldata; |
2689 | void __iomem *to_slim; | 2687 | void __iomem *to_slim; |
2688 | int processing_queue = 0; | ||
2689 | |||
2690 | spin_lock_irqsave(&phba->hbalock, drvr_flag); | ||
2691 | if (!pmbox) { | ||
2692 | /* processing mbox queue from intr_handler */ | ||
2693 | processing_queue = 1; | ||
2694 | phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | ||
2695 | pmbox = lpfc_mbox_get(phba); | ||
2696 | if (!pmbox) { | ||
2697 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | ||
2698 | return MBX_SUCCESS; | ||
2699 | } | ||
2700 | } | ||
2690 | 2701 | ||
2691 | if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && | 2702 | if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && |
2692 | pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { | 2703 | pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { |
2693 | if(!pmbox->vport) { | 2704 | if(!pmbox->vport) { |
2705 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | ||
2694 | lpfc_printf_log(phba, KERN_ERR, | 2706 | lpfc_printf_log(phba, KERN_ERR, |
2695 | LOG_MBOX | LOG_VPORT, | 2707 | LOG_MBOX | LOG_VPORT, |
2696 | "1806 Mbox x%x failed. No vport\n", | 2708 | "1806 Mbox x%x failed. No vport\n", |
2697 | pmbox->mb.mbxCommand); | 2709 | pmbox->mb.mbxCommand); |
2698 | dump_stack(); | 2710 | dump_stack(); |
2699 | return MBX_NOT_FINISHED; | 2711 | goto out_not_finished; |
2700 | } | 2712 | } |
2701 | } | 2713 | } |
2702 | 2714 | ||
2703 | |||
2704 | /* If the PCI channel is in offline state, do not post mbox. */ | 2715 | /* If the PCI channel is in offline state, do not post mbox. */ |
2705 | if (unlikely(pci_channel_offline(phba->pcidev))) | 2716 | if (unlikely(pci_channel_offline(phba->pcidev))) { |
2706 | return MBX_NOT_FINISHED; | 2717 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
2718 | goto out_not_finished; | ||
2719 | } | ||
2707 | 2720 | ||
2708 | spin_lock_irqsave(&phba->hbalock, drvr_flag); | ||
2709 | psli = &phba->sli; | 2721 | psli = &phba->sli; |
2710 | 2722 | ||
2711 | |||
2712 | mb = &pmbox->mb; | 2723 | mb = &pmbox->mb; |
2713 | status = MBX_SUCCESS; | 2724 | status = MBX_SUCCESS; |
2714 | 2725 | ||
@@ -2717,14 +2728,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
2717 | 2728 | ||
2718 | /* Mbox command <mbxCommand> cannot issue */ | 2729 | /* Mbox command <mbxCommand> cannot issue */ |
2719 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); | 2730 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); |
2720 | return MBX_NOT_FINISHED; | 2731 | goto out_not_finished; |
2721 | } | 2732 | } |
2722 | 2733 | ||
2723 | if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && | 2734 | if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && |
2724 | !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { | 2735 | !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { |
2725 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 2736 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
2726 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); | 2737 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); |
2727 | return MBX_NOT_FINISHED; | 2738 | goto out_not_finished; |
2728 | } | 2739 | } |
2729 | 2740 | ||
2730 | if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { | 2741 | if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { |
@@ -2738,14 +2749,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
2738 | 2749 | ||
2739 | /* Mbox command <mbxCommand> cannot issue */ | 2750 | /* Mbox command <mbxCommand> cannot issue */ |
2740 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); | 2751 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); |
2741 | return MBX_NOT_FINISHED; | 2752 | goto out_not_finished; |
2742 | } | 2753 | } |
2743 | 2754 | ||
2744 | if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { | 2755 | if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { |
2745 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 2756 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
2746 | /* Mbox command <mbxCommand> cannot issue */ | 2757 | /* Mbox command <mbxCommand> cannot issue */ |
2747 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); | 2758 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); |
2748 | return MBX_NOT_FINISHED; | 2759 | goto out_not_finished; |
2749 | } | 2760 | } |
2750 | 2761 | ||
2751 | /* Another mailbox command is still being processed, queue this | 2762 | /* Another mailbox command is still being processed, queue this |
@@ -2792,7 +2803,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
2792 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 2803 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
2793 | /* Mbox command <mbxCommand> cannot issue */ | 2804 | /* Mbox command <mbxCommand> cannot issue */ |
2794 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); | 2805 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); |
2795 | return MBX_NOT_FINISHED; | 2806 | goto out_not_finished; |
2796 | } | 2807 | } |
2797 | /* timeout active mbox command */ | 2808 | /* timeout active mbox command */ |
2798 | mod_timer(&psli->mbox_tmo, (jiffies + | 2809 | mod_timer(&psli->mbox_tmo, (jiffies + |
@@ -2900,7 +2911,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
2900 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | 2911 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
2901 | spin_unlock_irqrestore(&phba->hbalock, | 2912 | spin_unlock_irqrestore(&phba->hbalock, |
2902 | drvr_flag); | 2913 | drvr_flag); |
2903 | return MBX_NOT_FINISHED; | 2914 | goto out_not_finished; |
2904 | } | 2915 | } |
2905 | 2916 | ||
2906 | /* Check if we took a mbox interrupt while we were | 2917 | /* Check if we took a mbox interrupt while we were |
@@ -2967,6 +2978,13 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
2967 | 2978 | ||
2968 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 2979 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
2969 | return status; | 2980 | return status; |
2981 | |||
2982 | out_not_finished: | ||
2983 | if (processing_queue) { | ||
2984 | pmbox->mb.mbxStatus = MBX_NOT_FINISHED; | ||
2985 | lpfc_mbox_cmpl_put(phba, pmbox); | ||
2986 | } | ||
2987 | return MBX_NOT_FINISHED; | ||
2970 | } | 2988 | } |
2971 | 2989 | ||
2972 | /* | 2990 | /* |
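All the early return MBX_NOT_FINISHED exits in lpfc_sli_issue_mbox() now converge on the out_not_finished label because the function can also be called with pmbox == NULL, meaning take the next command off the mailbox queue; a queued command that cannot be issued must then be completed as failed rather than silently dropped. The interrupt-handler hunk further down uses exactly this mode. Condensed from the two hunks (locking and sli_flag handling abridged):

    /* interrupt handler: drain the queued mailbox commands */
    do {
        rc = lpfc_sli_issue_mbox(phba, NULL, MBX_NOWAIT);
    } while (rc == MBX_NOT_FINISHED);

    /* lpfc_sli_issue_mbox(), abridged: NULL selects the next queued command */
    if (!pmbox) {
        processing_queue = 1;
        pmbox = lpfc_mbox_get(phba);
        if (!pmbox)
            return MBX_SUCCESS;             /* an empty queue is not an error */
    }
    /* ... every failure path jumps here instead of returning directly ... */
    out_not_finished:
    if (processing_queue) {
        pmbox->mb.mbxStatus = MBX_NOT_FINISHED;
        lpfc_mbox_cmpl_put(phba, pmbox);    /* complete the queued command as failed */
    }
    return MBX_NOT_FINISHED;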
@@ -3463,26 +3481,21 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) | |||
3463 | phba->pport->work_port_events &= ~WORKER_MBOX_TMO; | 3481 | phba->pport->work_port_events &= ~WORKER_MBOX_TMO; |
3464 | spin_unlock(&phba->pport->work_port_lock); | 3482 | spin_unlock(&phba->pport->work_port_lock); |
3465 | 3483 | ||
3484 | /* Return any pending or completed mbox cmds */ | ||
3485 | list_splice_init(&phba->sli.mboxq, &completions); | ||
3466 | if (psli->mbox_active) { | 3486 | if (psli->mbox_active) { |
3467 | list_add_tail(&psli->mbox_active->list, &completions); | 3487 | list_add_tail(&psli->mbox_active->list, &completions); |
3468 | psli->mbox_active = NULL; | 3488 | psli->mbox_active = NULL; |
3469 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | 3489 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
3470 | } | 3490 | } |
3471 | |||
3472 | /* Return any pending or completed mbox cmds */ | ||
3473 | list_splice_init(&phba->sli.mboxq, &completions); | ||
3474 | list_splice_init(&phba->sli.mboxq_cmpl, &completions); | 3491 | list_splice_init(&phba->sli.mboxq_cmpl, &completions); |
3475 | INIT_LIST_HEAD(&psli->mboxq); | ||
3476 | INIT_LIST_HEAD(&psli->mboxq_cmpl); | ||
3477 | |||
3478 | spin_unlock_irqrestore(&phba->hbalock, flags); | 3492 | spin_unlock_irqrestore(&phba->hbalock, flags); |
3479 | 3493 | ||
3480 | while (!list_empty(&completions)) { | 3494 | while (!list_empty(&completions)) { |
3481 | list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); | 3495 | list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); |
3482 | pmb->mb.mbxStatus = MBX_NOT_FINISHED; | 3496 | pmb->mb.mbxStatus = MBX_NOT_FINISHED; |
3483 | if (pmb->mbox_cmpl) { | 3497 | if (pmb->mbox_cmpl) |
3484 | pmb->mbox_cmpl(phba,pmb); | 3498 | pmb->mbox_cmpl(phba,pmb); |
3485 | } | ||
3486 | } | 3499 | } |
3487 | return 1; | 3500 | return 1; |
3488 | } | 3501 | } |
@@ -3613,6 +3626,15 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
3613 | irsp->ulpStatus, irsp->un.ulpWord[4]); | 3626 | irsp->ulpStatus, irsp->un.ulpWord[4]); |
3614 | 3627 | ||
3615 | /* | 3628 | /* |
3629 | * If the iocb is not found in Firmware queue the iocb | ||
3630 | * might have completed already. Do not free it again. | ||
3631 | */ | ||
3632 | if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { | ||
3633 | spin_unlock_irq(&phba->hbalock); | ||
3634 | lpfc_sli_release_iocbq(phba, cmdiocb); | ||
3635 | return; | ||
3636 | } | ||
3637 | /* | ||
3616 | * make sure we have the right iocbq before taking it | 3638 | * make sure we have the right iocbq before taking it |
3617 | * off the txcmplq and try to call completion routine. | 3639 | * off the txcmplq and try to call completion routine. |
3618 | */ | 3640 | */ |
@@ -4174,6 +4196,7 @@ lpfc_intr_handler(int irq, void *dev_id) | |||
4174 | phba->pport->stopped = 1; | 4196 | phba->pport->stopped = 1; |
4175 | } | 4197 | } |
4176 | 4198 | ||
4199 | spin_lock(&phba->hbalock); | ||
4177 | if ((work_ha_copy & HA_MBATT) && | 4200 | if ((work_ha_copy & HA_MBATT) && |
4178 | (phba->sli.mbox_active)) { | 4201 | (phba->sli.mbox_active)) { |
4179 | pmb = phba->sli.mbox_active; | 4202 | pmb = phba->sli.mbox_active; |
@@ -4184,6 +4207,7 @@ lpfc_intr_handler(int irq, void *dev_id) | |||
4184 | /* First check out the status word */ | 4207 | /* First check out the status word */ |
4185 | lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); | 4208 | lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); |
4186 | if (pmbox->mbxOwner != OWN_HOST) { | 4209 | if (pmbox->mbxOwner != OWN_HOST) { |
4210 | spin_unlock(&phba->hbalock); | ||
4187 | /* | 4211 | /* |
4188 | * Stray Mailbox Interrupt, mbxCommand <cmd> | 4212 | * Stray Mailbox Interrupt, mbxCommand <cmd> |
4189 | * mbxStatus <status> | 4213 | * mbxStatus <status> |
@@ -4199,10 +4223,10 @@ lpfc_intr_handler(int irq, void *dev_id) | |||
4199 | /* clear mailbox attention bit */ | 4223 | /* clear mailbox attention bit */ |
4200 | work_ha_copy &= ~HA_MBATT; | 4224 | work_ha_copy &= ~HA_MBATT; |
4201 | } else { | 4225 | } else { |
4226 | phba->sli.mbox_active = NULL; | ||
4227 | spin_unlock(&phba->hbalock); | ||
4202 | phba->last_completion_time = jiffies; | 4228 | phba->last_completion_time = jiffies; |
4203 | del_timer(&phba->sli.mbox_tmo); | 4229 | del_timer(&phba->sli.mbox_tmo); |
4204 | |||
4205 | phba->sli.mbox_active = NULL; | ||
4206 | if (pmb->mbox_cmpl) { | 4230 | if (pmb->mbox_cmpl) { |
4207 | lpfc_sli_pcimem_bcopy(mbox, pmbox, | 4231 | lpfc_sli_pcimem_bcopy(mbox, pmbox, |
4208 | MAILBOX_CMD_SIZE); | 4232 | MAILBOX_CMD_SIZE); |
@@ -4237,10 +4261,15 @@ lpfc_intr_handler(int irq, void *dev_id) | |||
4237 | pmb->context1 = mp; | 4261 | pmb->context1 = mp; |
4238 | pmb->context2 = ndlp; | 4262 | pmb->context2 = ndlp; |
4239 | pmb->vport = vport; | 4263 | pmb->vport = vport; |
4240 | spin_lock(&phba->hbalock); | 4264 | rc = lpfc_sli_issue_mbox(phba, |
4241 | phba->sli.sli_flag &= | 4265 | pmb, |
4242 | ~LPFC_SLI_MBOX_ACTIVE; | 4266 | MBX_NOWAIT); |
4243 | spin_unlock(&phba->hbalock); | 4267 | if (rc != MBX_BUSY) |
4268 | lpfc_printf_log(phba, | ||
4269 | KERN_ERR, | ||
4270 | LOG_MBOX | LOG_SLI, | ||
4271 | "0306 rc should have" | ||
4272 | "been MBX_BUSY"); | ||
4244 | goto send_current_mbox; | 4273 | goto send_current_mbox; |
4245 | } | 4274 | } |
4246 | } | 4275 | } |
@@ -4250,25 +4279,20 @@ lpfc_intr_handler(int irq, void *dev_id) | |||
4250 | spin_unlock(&phba->pport->work_port_lock); | 4279 | spin_unlock(&phba->pport->work_port_lock); |
4251 | lpfc_mbox_cmpl_put(phba, pmb); | 4280 | lpfc_mbox_cmpl_put(phba, pmb); |
4252 | } | 4281 | } |
4253 | } | 4282 | } else |
4283 | spin_unlock(&phba->hbalock); | ||
4254 | if ((work_ha_copy & HA_MBATT) && | 4284 | if ((work_ha_copy & HA_MBATT) && |
4255 | (phba->sli.mbox_active == NULL)) { | 4285 | (phba->sli.mbox_active == NULL)) { |
4256 | send_next_mbox: | ||
4257 | spin_lock(&phba->hbalock); | ||
4258 | phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | ||
4259 | pmb = lpfc_mbox_get(phba); | ||
4260 | spin_unlock(&phba->hbalock); | ||
4261 | send_current_mbox: | 4286 | send_current_mbox: |
4262 | /* Process next mailbox command if there is one */ | 4287 | /* Process next mailbox command if there is one */ |
4263 | if (pmb != NULL) { | 4288 | do { |
4264 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); | 4289 | rc = lpfc_sli_issue_mbox(phba, NULL, |
4265 | if (rc == MBX_NOT_FINISHED) { | 4290 | MBX_NOWAIT); |
4266 | pmb->mb.mbxStatus = MBX_NOT_FINISHED; | 4291 | } while (rc == MBX_NOT_FINISHED); |
4267 | lpfc_mbox_cmpl_put(phba, pmb); | 4292 | if (rc != MBX_SUCCESS) |
4268 | goto send_next_mbox; | 4293 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | |
4269 | } | 4294 | LOG_SLI, "0349 rc should be " |
4270 | } | 4295 | "MBX_SUCCESS"); |
4271 | |||
4272 | } | 4296 | } |
4273 | 4297 | ||
4274 | spin_lock(&phba->hbalock); | 4298 | spin_lock(&phba->hbalock); |
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index ca540d1d041e..b22b893019f4 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.2.5" | 21 | #define LPFC_DRIVER_VERSION "8.2.6" |
22 | 22 | ||
23 | #define LPFC_DRIVER_NAME "lpfc" | 23 | #define LPFC_DRIVER_NAME "lpfc" |
24 | 24 | ||
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 86d05beb00b8..6feaf59b0b1b 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c | |||
@@ -538,7 +538,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport) | |||
538 | /* Otherwise, we will perform fabric logo as needed */ | 538 | /* Otherwise, we will perform fabric logo as needed */ |
539 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) && | 539 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) && |
540 | ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && | 540 | ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && |
541 | phba->link_state >= LPFC_LINK_UP) { | 541 | phba->link_state >= LPFC_LINK_UP && |
542 | phba->fc_topology != TOPOLOGY_LOOP) { | ||
542 | if (vport->cfg_enable_da_id) { | 543 | if (vport->cfg_enable_da_id) { |
543 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); | 544 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); |
544 | if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0)) | 545 | if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0)) |
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index 3b09ab21d701..0248919bc2df 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c | |||
@@ -592,7 +592,6 @@ static struct scsi_host_template driver_template = { | |||
592 | .this_id = 7, | 592 | .this_id = 7, |
593 | .sg_tablesize = SG_ALL, | 593 | .sg_tablesize = SG_ALL, |
594 | .cmd_per_lun = CMD_PER_LUN, | 594 | .cmd_per_lun = CMD_PER_LUN, |
595 | .unchecked_isa_dma = 0, | ||
596 | .use_clustering = DISABLE_CLUSTERING | 595 | .use_clustering = DISABLE_CLUSTERING |
597 | }; | 596 | }; |
598 | 597 | ||
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 77a62a1b12c3..b937e9cddb23 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c | |||
@@ -68,6 +68,8 @@ static struct pci_device_id megasas_pci_table[] = { | |||
68 | /* xscale IOP */ | 68 | /* xscale IOP */ |
69 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)}, | 69 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)}, |
70 | /* ppc IOP */ | 70 | /* ppc IOP */ |
71 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, | ||
72 | /* ppc IOP */ | ||
71 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, | 73 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, |
72 | /* xscale IOP, vega */ | 74 | /* xscale IOP, vega */ |
73 | {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, | 75 | {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, |
@@ -488,12 +490,13 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
488 | 490 | ||
489 | /** | 491 | /** |
490 | * megasas_get_frame_count - Computes the number of frames | 492 | * megasas_get_frame_count - Computes the number of frames |
493 | * @frame_type : type of frame- io or pthru frame | ||
491 | * @sge_count : number of sg elements | 494 | * @sge_count : number of sg elements |
492 | * | 495 | * |
493 | * Returns the number of frames required for number of sge's (sge_count) | 496 |
494 | */ | 497 | */ |
495 | 498 | ||
496 | static u32 megasas_get_frame_count(u8 sge_count) | 499 | static u32 megasas_get_frame_count(u8 sge_count, u8 frame_type) |
497 | { | 500 | { |
498 | int num_cnt; | 501 | int num_cnt; |
499 | int sge_bytes; | 502 | int sge_bytes; |
@@ -504,13 +507,22 @@ static u32 megasas_get_frame_count(u8 sge_count) | |||
504 | sizeof(struct megasas_sge32); | 507 | sizeof(struct megasas_sge32); |
505 | 508 | ||
506 | /* | 509 | /* |
507 | * Main frame can contain 2 SGEs for 64-bit SGLs and | 510 | * Main frame can contain 2 SGEs for 64-bit SGLs and |
508 | * 3 SGEs for 32-bit SGLs | 511 | * 3 SGEs for 32-bit SGLs for ldio & |
509 | */ | 512 | * 1 SGE for 64-bit SGLs and |
510 | if (IS_DMA64) | 513 | * 2 SGEs for 32-bit SGLs for pthru frame |
511 | num_cnt = sge_count - 2; | 514 | */ |
512 | else | 515 | if (unlikely(frame_type == PTHRU_FRAME)) { |
513 | num_cnt = sge_count - 3; | 516 | if (IS_DMA64) |
517 | num_cnt = sge_count - 1; | ||
518 | else | ||
519 | num_cnt = sge_count - 2; | ||
520 | } else { | ||
521 | if (IS_DMA64) | ||
522 | num_cnt = sge_count - 2; | ||
523 | else | ||
524 | num_cnt = sge_count - 3; | ||
525 | } | ||
514 | 526 | ||
515 | if(num_cnt>0){ | 527 | if(num_cnt>0){ |
516 | sge_bytes = sge_sz * num_cnt; | 528 | sge_bytes = sge_sz * num_cnt; |
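The new frame_type argument is there because a pass-through frame spends more of the main frame on the CDB and so has room for one SGE fewer than an LD read/write frame: 1 instead of 2 with 64-bit SGLs, 2 instead of 3 with 32-bit SGLs. Only the SGEs that do not fit in the main frame (num_cnt) feed into the byte count and frame rounding that follow outside this hunk. A tiny stand-alone version of the part shown here (spilled_sges() is an illustrative name, and the rounding to whole extra frames is deliberately left out):

    /* How many SGEs spill out of the main frame and need chained frames.
     * is_dma64 mirrors the driver's IS_DMA64 test. */
    static int spilled_sges(int sge_count, int pthru_frame, int is_dma64)
    {
        int in_main;

        if (pthru_frame)
            in_main = is_dma64 ? 1 : 2;     /* pass-through main frame */
        else
            in_main = is_dma64 ? 2 : 3;     /* LD read/write main frame */

        return sge_count > in_main ? sge_count - in_main : 0;
    }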
@@ -592,7 +604,8 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
592 | * Compute the total number of frames this command consumes. FW uses | 604 | * Compute the total number of frames this command consumes. FW uses |
593 | * this number to pull sufficient number of frames from host memory. | 605 | * this number to pull sufficient number of frames from host memory. |
594 | */ | 606 | */ |
595 | cmd->frame_count = megasas_get_frame_count(pthru->sge_count); | 607 | cmd->frame_count = megasas_get_frame_count(pthru->sge_count, |
608 | PTHRU_FRAME); | ||
596 | 609 | ||
597 | return cmd->frame_count; | 610 | return cmd->frame_count; |
598 | } | 611 | } |
@@ -709,7 +722,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
709 | * Compute the total number of frames this command consumes. FW uses | 722 | * Compute the total number of frames this command consumes. FW uses |
710 | * this number to pull sufficient number of frames from host memory. | 723 | * this number to pull sufficient number of frames from host memory. |
711 | */ | 724 | */ |
712 | cmd->frame_count = megasas_get_frame_count(ldio->sge_count); | 725 | cmd->frame_count = megasas_get_frame_count(ldio->sge_count, IO_FRAME); |
713 | 726 | ||
714 | return cmd->frame_count; | 727 | return cmd->frame_count; |
715 | } | 728 | } |
@@ -1460,7 +1473,7 @@ megasas_transition_to_ready(struct megasas_instance* instance) | |||
1460 | instance->instancet->disable_intr(instance->reg_set); | 1473 | instance->instancet->disable_intr(instance->reg_set); |
1461 | writel(MFI_RESET_FLAGS, &instance->reg_set->inbound_doorbell); | 1474 | writel(MFI_RESET_FLAGS, &instance->reg_set->inbound_doorbell); |
1462 | 1475 | ||
1463 | max_wait = 10; | 1476 | max_wait = 60; |
1464 | cur_state = MFI_STATE_OPERATIONAL; | 1477 | cur_state = MFI_STATE_OPERATIONAL; |
1465 | break; | 1478 | break; |
1466 | 1479 | ||
@@ -1980,7 +1993,8 @@ static int megasas_init_mfi(struct megasas_instance *instance) | |||
1980 | 1993 | ||
1981 | switch(instance->pdev->device) | 1994 | switch(instance->pdev->device) |
1982 | { | 1995 | { |
1983 | case PCI_DEVICE_ID_LSI_SAS1078R: | 1996 | case PCI_DEVICE_ID_LSI_SAS1078R: |
1997 | case PCI_DEVICE_ID_LSI_SAS1078DE: | ||
1984 | instance->instancet = &megasas_instance_template_ppc; | 1998 | instance->instancet = &megasas_instance_template_ppc; |
1985 | break; | 1999 | break; |
1986 | case PCI_DEVICE_ID_LSI_SAS1064R: | 2000 | case PCI_DEVICE_ID_LSI_SAS1064R: |
@@ -2909,7 +2923,6 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, | |||
2909 | void *sense = NULL; | 2923 | void *sense = NULL; |
2910 | dma_addr_t sense_handle; | 2924 | dma_addr_t sense_handle; |
2911 | u32 *sense_ptr; | 2925 | u32 *sense_ptr; |
2912 | unsigned long *sense_buff; | ||
2913 | 2926 | ||
2914 | memset(kbuff_arr, 0, sizeof(kbuff_arr)); | 2927 | memset(kbuff_arr, 0, sizeof(kbuff_arr)); |
2915 | 2928 | ||
@@ -3014,14 +3027,14 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, | |||
3014 | */ | 3027 | */ |
3015 | if (ioc->sense_len) { | 3028 | if (ioc->sense_len) { |
3016 | /* | 3029 | /* |
3017 | * sense_buff points to the location that has the user | 3030 | * sense_ptr points to the location that has the user |
3018 | * sense buffer address | 3031 | * sense buffer address |
3019 | */ | 3032 | */ |
3020 | sense_buff = (unsigned long *) ((unsigned long)ioc->frame.raw + | 3033 | sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw + |
3021 | ioc->sense_off); | 3034 | ioc->sense_off); |
3022 | 3035 | ||
3023 | if (copy_to_user((void __user *)(unsigned long)(*sense_buff), | 3036 | if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), |
3024 | sense, ioc->sense_len)) { | 3037 | sense, ioc->sense_len)) { |
3025 | printk(KERN_ERR "megasas: Failed to copy out to user " | 3038 | printk(KERN_ERR "megasas: Failed to copy out to user " |
3026 | "sense data\n"); | 3039 | "sense data\n"); |
3027 | error = -EFAULT; | 3040 | error = -EFAULT; |
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 6466bdf548c2..3a997eb457bf 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h | |||
@@ -26,6 +26,7 @@ | |||
26 | * Device IDs | 26 | * Device IDs |
27 | */ | 27 | */ |
28 | #define PCI_DEVICE_ID_LSI_SAS1078R 0x0060 | 28 | #define PCI_DEVICE_ID_LSI_SAS1078R 0x0060 |
29 | #define PCI_DEVICE_ID_LSI_SAS1078DE 0x007C | ||
29 | #define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413 | 30 | #define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413 |
30 | 31 | ||
31 | /* | 32 | /* |
@@ -542,6 +543,10 @@ struct megasas_ctrl_info { | |||
542 | 543 | ||
543 | #define MEGASAS_FW_BUSY 1 | 544 | #define MEGASAS_FW_BUSY 1 |
544 | 545 | ||
546 | /* Frame Type */ | ||
547 | #define IO_FRAME 0 | ||
548 | #define PTHRU_FRAME 1 | ||
549 | |||
545 | /* | 550 | /* |
546 | * When SCSI mid-layer calls driver's reset routine, driver waits for | 551 | * When SCSI mid-layer calls driver's reset routine, driver waits for |
547 | * MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note | 552 | * MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note |
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c index be41aadccae5..d722235111a8 100644 --- a/drivers/scsi/mvme147.c +++ b/drivers/scsi/mvme147.c | |||
@@ -82,6 +82,9 @@ int mvme147_detect(struct scsi_host_template *tpnt) | |||
82 | mvme147_host->irq = MVME147_IRQ_SCSI_PORT; | 82 | mvme147_host->irq = MVME147_IRQ_SCSI_PORT; |
83 | regs.SASR = (volatile unsigned char *)0xfffe4000; | 83 | regs.SASR = (volatile unsigned char *)0xfffe4000; |
84 | regs.SCMD = (volatile unsigned char *)0xfffe4001; | 84 | regs.SCMD = (volatile unsigned char *)0xfffe4001; |
85 | HDATA(mvme147_host)->no_sync = 0xff; | ||
86 | HDATA(mvme147_host)->fast = 0; | ||
87 | HDATA(mvme147_host)->dma_mode = CTRL_DMA; | ||
85 | wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10); | 88 | wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10); |
86 | 89 | ||
87 | if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, "MVME147 SCSI PORT", mvme147_intr)) | 90 | if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, "MVME147 SCSI PORT", mvme147_intr)) |
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c index fad6cb5cba28..ce48e2d0193c 100644 --- a/drivers/scsi/ps3rom.c +++ b/drivers/scsi/ps3rom.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <scsi/scsi_dbg.h> | 26 | #include <scsi/scsi_dbg.h> |
27 | #include <scsi/scsi_device.h> | 27 | #include <scsi/scsi_device.h> |
28 | #include <scsi/scsi_host.h> | 28 | #include <scsi/scsi_host.h> |
29 | #include <scsi/scsi_eh.h> | ||
29 | 30 | ||
30 | #include <asm/lv1call.h> | 31 | #include <asm/lv1call.h> |
31 | #include <asm/ps3stor.h> | 32 | #include <asm/ps3stor.h> |
@@ -90,78 +91,6 @@ static int ps3rom_slave_configure(struct scsi_device *scsi_dev) | |||
90 | return 0; | 91 | return 0; |
91 | } | 92 | } |
92 | 93 | ||
93 | /* | ||
94 | * copy data from device into scatter/gather buffer | ||
95 | */ | ||
96 | static int fill_from_dev_buffer(struct scsi_cmnd *cmd, const void *buf) | ||
97 | { | ||
98 | int k, req_len, act_len, len, active; | ||
99 | void *kaddr; | ||
100 | struct scatterlist *sgpnt; | ||
101 | unsigned int buflen; | ||
102 | |||
103 | buflen = scsi_bufflen(cmd); | ||
104 | if (!buflen) | ||
105 | return 0; | ||
106 | |||
107 | if (!scsi_sglist(cmd)) | ||
108 | return -1; | ||
109 | |||
110 | active = 1; | ||
111 | req_len = act_len = 0; | ||
112 | scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) { | ||
113 | if (active) { | ||
114 | kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0); | ||
115 | len = sgpnt->length; | ||
116 | if ((req_len + len) > buflen) { | ||
117 | active = 0; | ||
118 | len = buflen - req_len; | ||
119 | } | ||
120 | memcpy(kaddr + sgpnt->offset, buf + req_len, len); | ||
121 | flush_kernel_dcache_page(sg_page(sgpnt)); | ||
122 | kunmap_atomic(kaddr, KM_IRQ0); | ||
123 | act_len += len; | ||
124 | } | ||
125 | req_len += sgpnt->length; | ||
126 | } | ||
127 | scsi_set_resid(cmd, buflen - act_len); | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * copy data from scatter/gather into device's buffer | ||
133 | */ | ||
134 | static int fetch_to_dev_buffer(struct scsi_cmnd *cmd, void *buf) | ||
135 | { | ||
136 | int k, req_len, len, fin; | ||
137 | void *kaddr; | ||
138 | struct scatterlist *sgpnt; | ||
139 | unsigned int buflen; | ||
140 | |||
141 | buflen = scsi_bufflen(cmd); | ||
142 | if (!buflen) | ||
143 | return 0; | ||
144 | |||
145 | if (!scsi_sglist(cmd)) | ||
146 | return -1; | ||
147 | |||
148 | req_len = fin = 0; | ||
149 | scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) { | ||
150 | kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0); | ||
151 | len = sgpnt->length; | ||
152 | if ((req_len + len) > buflen) { | ||
153 | len = buflen - req_len; | ||
154 | fin = 1; | ||
155 | } | ||
156 | memcpy(buf + req_len, kaddr + sgpnt->offset, len); | ||
157 | kunmap_atomic(kaddr, KM_IRQ0); | ||
158 | if (fin) | ||
159 | return req_len + len; | ||
160 | req_len += sgpnt->length; | ||
161 | } | ||
162 | return req_len; | ||
163 | } | ||
164 | |||
165 | static int ps3rom_atapi_request(struct ps3_storage_device *dev, | 94 | static int ps3rom_atapi_request(struct ps3_storage_device *dev, |
166 | struct scsi_cmnd *cmd) | 95 | struct scsi_cmnd *cmd) |
167 | { | 96 | { |
@@ -195,9 +124,7 @@ static int ps3rom_atapi_request(struct ps3_storage_device *dev, | |||
195 | else | 124 | else |
196 | atapi_cmnd.proto = PIO_DATA_OUT_PROTO; | 125 | atapi_cmnd.proto = PIO_DATA_OUT_PROTO; |
197 | atapi_cmnd.in_out = DIR_WRITE; | 126 | atapi_cmnd.in_out = DIR_WRITE; |
198 | res = fetch_to_dev_buffer(cmd, dev->bounce_buf); | 127 | scsi_sg_copy_to_buffer(cmd, dev->bounce_buf, dev->bounce_size); |
199 | if (res < 0) | ||
200 | return DID_ERROR << 16; | ||
201 | break; | 128 | break; |
202 | 129 | ||
203 | default: | 130 | default: |
@@ -269,9 +196,7 @@ static int ps3rom_write_request(struct ps3_storage_device *dev, | |||
269 | dev_dbg(&dev->sbd.core, "%s:%u: write %u sectors starting at %u\n", | 196 | dev_dbg(&dev->sbd.core, "%s:%u: write %u sectors starting at %u\n", |
270 | __func__, __LINE__, sectors, start_sector); | 197 | __func__, __LINE__, sectors, start_sector); |
271 | 198 | ||
272 | res = fetch_to_dev_buffer(cmd, dev->bounce_buf); | 199 | scsi_sg_copy_to_buffer(cmd, dev->bounce_buf, dev->bounce_size); |
273 | if (res < 0) | ||
274 | return DID_ERROR << 16; | ||
275 | 200 | ||
276 | res = lv1_storage_write(dev->sbd.dev_id, | 201 | res = lv1_storage_write(dev->sbd.dev_id, |
277 | dev->regions[dev->region_idx].id, start_sector, | 202 | dev->regions[dev->region_idx].id, start_sector, |
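
Both the ATAPI and block-write paths now stage outgoing data with the mid-layer helper scsi_sg_copy_to_buffer() instead of the driver's removed fetch_to_dev_buffer(). A minimal sketch of that pattern follows; the bounce buffer and its size are placeholder parameters.

#include <scsi/scsi_cmnd.h>

/*
 * Hypothetical helper: stage an outgoing command's scatter/gather data into
 * a contiguous bounce buffer before handing it to the storage firmware.
 */
static void stage_outgoing_data(struct scsi_cmnd *cmd, void *bounce_buf,
				size_t bounce_size)
{
	/* Copies at most min(scsi_bufflen(cmd), bounce_size) bytes from the SG list. */
	scsi_sg_copy_to_buffer(cmd, bounce_buf, bounce_size);
}
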
@@ -381,11 +306,13 @@ static irqreturn_t ps3rom_interrupt(int irq, void *data) | |||
381 | if (!status) { | 306 | if (!status) { |
382 | /* OK, completed */ | 307 | /* OK, completed */ |
383 | if (cmd->sc_data_direction == DMA_FROM_DEVICE) { | 308 | if (cmd->sc_data_direction == DMA_FROM_DEVICE) { |
384 | res = fill_from_dev_buffer(cmd, dev->bounce_buf); | 309 | int len; |
385 | if (res) { | 310 | |
386 | cmd->result = DID_ERROR << 16; | 311 | len = scsi_sg_copy_from_buffer(cmd, |
387 | goto done; | 312 | dev->bounce_buf, |
388 | } | 313 | dev->bounce_size); |
314 | |||
315 | scsi_set_resid(cmd, scsi_bufflen(cmd) - len); | ||
389 | } | 316 | } |
390 | cmd->result = DID_OK << 16; | 317 | cmd->result = DID_OK << 16; |
391 | goto done; | 318 | goto done; |
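
On the read-completion side, scsi_sg_copy_from_buffer() returns how many bytes it actually placed into the command's scatter/gather list, which is what lets the interrupt handler report a residual with scsi_set_resid(). A minimal sketch, assuming only the helpers visible in the hunk above:

#include <scsi/scsi_cmnd.h>

/*
 * Hypothetical completion helper: copy bounced read data back into the
 * command's scatter/gather list and report anything that did not fit as
 * a transfer residual.
 */
static void complete_incoming_data(struct scsi_cmnd *cmd, void *bounce_buf,
				   size_t bounce_size)
{
	int len;

	len = scsi_sg_copy_from_buffer(cmd, bounce_buf, bounce_size);
	scsi_set_resid(cmd, scsi_bufflen(cmd) - len);
}
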
@@ -404,11 +331,7 @@ static irqreturn_t ps3rom_interrupt(int irq, void *data) | |||
404 | goto done; | 331 | goto done; |
405 | } | 332 | } |
406 | 333 | ||
407 | cmd->sense_buffer[0] = 0x70; | 334 | scsi_build_sense_buffer(0, cmd->sense_buffer, sense_key, asc, ascq); |
408 | cmd->sense_buffer[2] = sense_key; | ||
409 | cmd->sense_buffer[7] = 16 - 6; | ||
410 | cmd->sense_buffer[12] = asc; | ||
411 | cmd->sense_buffer[13] = ascq; | ||
412 | cmd->result = SAM_STAT_CHECK_CONDITION; | 335 | cmd->result = SAM_STAT_CHECK_CONDITION; |
413 | 336 | ||
414 | done: | 337 | done: |
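
scsi_build_sense_buffer() (hence the new <scsi/scsi_eh.h> include earlier in this patch) writes the same fixed-format sense bytes the removed open-coded assignments produced: response code 0x70, the sense key in byte 2, the additional length in byte 7, and ASC/ASCQ in bytes 12 and 13. A minimal sketch of reporting a check condition this way:

#include <linux/types.h>
#include <scsi/scsi.h>		/* SAM_STAT_CHECK_CONDITION */
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>	/* scsi_build_sense_buffer() */

/* Hypothetical helper: finish a command with a fixed-format check condition. */
static void fail_with_sense(struct scsi_cmnd *cmd, u8 key, u8 asc, u8 ascq)
{
	/* First argument 0 selects fixed format (0x70), not descriptor format. */
	scsi_build_sense_buffer(0, cmd->sense_buffer, key, asc, ascq);
	cmd->result = SAM_STAT_CHECK_CONDITION;
}
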
@@ -427,7 +350,7 @@ static struct scsi_host_template ps3rom_host_template = { | |||
427 | .cmd_per_lun = 1, | 350 | .cmd_per_lun = 1, |
428 | .emulated = 1, /* only sg driver uses this */ | 351 | .emulated = 1, /* only sg driver uses this */ |
429 | .max_sectors = PS3ROM_MAX_SECTORS, | 352 | .max_sectors = PS3ROM_MAX_SECTORS, |
430 | .use_clustering = DISABLE_CLUSTERING, | 353 | .use_clustering = ENABLE_CLUSTERING, |
431 | .module = THIS_MODULE, | 354 | .module = THIS_MODULE, |
432 | }; | 355 | }; |
433 | 356 | ||
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 68c0d09ffe78..09ab3eac1c1a 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c | |||
@@ -333,7 +333,6 @@ | |||
333 | 333 | ||
334 | #include <linux/module.h> | 334 | #include <linux/module.h> |
335 | 335 | ||
336 | #include <linux/version.h> | ||
337 | #include <linux/types.h> | 336 | #include <linux/types.h> |
338 | #include <linux/string.h> | 337 | #include <linux/string.h> |
339 | #include <linux/errno.h> | 338 | #include <linux/errno.h> |
@@ -367,10 +366,6 @@ | |||
367 | #include <asm/sn/io.h> | 366 | #include <asm/sn/io.h> |
368 | #endif | 367 | #endif |
369 | 368 | ||
370 | #if LINUX_VERSION_CODE < 0x020600 | ||
371 | #error "Kernels older than 2.6.0 are no longer supported" | ||
372 | #endif | ||
373 | |||
374 | 369 | ||
375 | /* | 370 | /* |
376 | * Compile time Options: | 371 | * Compile time Options: |
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 8c865b9e02b5..6208d562890d 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig | |||
@@ -16,7 +16,8 @@ config SCSI_QLA_FC | |||
16 | 22xx ql2200_fw.bin | 16 | 22xx ql2200_fw.bin |
17 | 2300, 2312, 6312 ql2300_fw.bin | 17 | 2300, 2312, 6312 ql2300_fw.bin |
18 | 2322, 6322 ql2322_fw.bin | 18 | 2322, 6322 ql2322_fw.bin |
19 | 24xx ql2400_fw.bin | 19 | 24xx, 54xx ql2400_fw.bin |
20 | 25xx ql2500_fw.bin | ||
20 | 21 | ||
21 | Upon request, the driver caches the firmware image until | 22 | Upon request, the driver caches the firmware image until |
22 | the driver is unloaded. | 23 | the driver is unloaded. |
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 4894dc886b62..413d8cd6a324 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -849,20 +849,20 @@ static void | |||
849 | qla2x00_get_host_speed(struct Scsi_Host *shost) | 849 | qla2x00_get_host_speed(struct Scsi_Host *shost) |
850 | { | 850 | { |
851 | scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost)); | 851 | scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost)); |
852 | uint32_t speed = 0; | 852 | u32 speed = FC_PORTSPEED_UNKNOWN; |
853 | 853 | ||
854 | switch (ha->link_data_rate) { | 854 | switch (ha->link_data_rate) { |
855 | case PORT_SPEED_1GB: | 855 | case PORT_SPEED_1GB: |
856 | speed = 1; | 856 | speed = FC_PORTSPEED_1GBIT; |
857 | break; | 857 | break; |
858 | case PORT_SPEED_2GB: | 858 | case PORT_SPEED_2GB: |
859 | speed = 2; | 859 | speed = FC_PORTSPEED_2GBIT; |
860 | break; | 860 | break; |
861 | case PORT_SPEED_4GB: | 861 | case PORT_SPEED_4GB: |
862 | speed = 4; | 862 | speed = FC_PORTSPEED_4GBIT; |
863 | break; | 863 | break; |
864 | case PORT_SPEED_8GB: | 864 | case PORT_SPEED_8GB: |
865 | speed = 8; | 865 | speed = FC_PORTSPEED_8GBIT; |
866 | break; | 866 | break; |
867 | } | 867 | } |
868 | fc_host_speed(shost) = speed; | 868 | fc_host_speed(shost) = speed; |
@@ -900,7 +900,8 @@ qla2x00_get_starget_node_name(struct scsi_target *starget) | |||
900 | u64 node_name = 0; | 900 | u64 node_name = 0; |
901 | 901 | ||
902 | list_for_each_entry(fcport, &ha->fcports, list) { | 902 | list_for_each_entry(fcport, &ha->fcports, list) { |
903 | if (starget->id == fcport->os_target_id) { | 903 | if (fcport->rport && |
904 | starget->id == fcport->rport->scsi_target_id) { | ||
904 | node_name = wwn_to_u64(fcport->node_name); | 905 | node_name = wwn_to_u64(fcport->node_name); |
905 | break; | 906 | break; |
906 | } | 907 | } |
@@ -918,7 +919,8 @@ qla2x00_get_starget_port_name(struct scsi_target *starget) | |||
918 | u64 port_name = 0; | 919 | u64 port_name = 0; |
919 | 920 | ||
920 | list_for_each_entry(fcport, &ha->fcports, list) { | 921 | list_for_each_entry(fcport, &ha->fcports, list) { |
921 | if (starget->id == fcport->os_target_id) { | 922 | if (fcport->rport && |
923 | starget->id == fcport->rport->scsi_target_id) { | ||
922 | port_name = wwn_to_u64(fcport->port_name); | 924 | port_name = wwn_to_u64(fcport->port_name); |
923 | break; | 925 | break; |
924 | } | 926 | } |
@@ -936,7 +938,8 @@ qla2x00_get_starget_port_id(struct scsi_target *starget) | |||
936 | uint32_t port_id = ~0U; | 938 | uint32_t port_id = ~0U; |
937 | 939 | ||
938 | list_for_each_entry(fcport, &ha->fcports, list) { | 940 | list_for_each_entry(fcport, &ha->fcports, list) { |
939 | if (starget->id == fcport->os_target_id) { | 941 | if (fcport->rport && |
942 | starget->id == fcport->rport->scsi_target_id) { | ||
940 | port_id = fcport->d_id.b.domain << 16 | | 943 | port_id = fcport->d_id.b.domain << 16 | |
941 | fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; | 944 | fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; |
942 | break; | 945 | break; |
@@ -1196,6 +1199,7 @@ struct fc_function_template qla2xxx_transport_functions = { | |||
1196 | .show_host_node_name = 1, | 1199 | .show_host_node_name = 1, |
1197 | .show_host_port_name = 1, | 1200 | .show_host_port_name = 1, |
1198 | .show_host_supported_classes = 1, | 1201 | .show_host_supported_classes = 1, |
1202 | .show_host_supported_speeds = 1, | ||
1199 | 1203 | ||
1200 | .get_host_port_id = qla2x00_get_host_port_id, | 1204 | .get_host_port_id = qla2x00_get_host_port_id, |
1201 | .show_host_port_id = 1, | 1205 | .show_host_port_id = 1, |
@@ -1276,9 +1280,23 @@ struct fc_function_template qla2xxx_transport_vport_functions = { | |||
1276 | void | 1280 | void |
1277 | qla2x00_init_host_attr(scsi_qla_host_t *ha) | 1281 | qla2x00_init_host_attr(scsi_qla_host_t *ha) |
1278 | { | 1282 | { |
1283 | u32 speed = FC_PORTSPEED_UNKNOWN; | ||
1284 | |||
1279 | fc_host_node_name(ha->host) = wwn_to_u64(ha->node_name); | 1285 | fc_host_node_name(ha->host) = wwn_to_u64(ha->node_name); |
1280 | fc_host_port_name(ha->host) = wwn_to_u64(ha->port_name); | 1286 | fc_host_port_name(ha->host) = wwn_to_u64(ha->port_name); |
1281 | fc_host_supported_classes(ha->host) = FC_COS_CLASS3; | 1287 | fc_host_supported_classes(ha->host) = FC_COS_CLASS3; |
1282 | fc_host_max_npiv_vports(ha->host) = ha->max_npiv_vports; | 1288 | fc_host_max_npiv_vports(ha->host) = ha->max_npiv_vports; |
1283 | fc_host_npiv_vports_inuse(ha->host) = ha->cur_vport_count; | 1289 | fc_host_npiv_vports_inuse(ha->host) = ha->cur_vport_count; |
1290 | |||
1291 | if (IS_QLA25XX(ha)) | ||
1292 | speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | | ||
1293 | FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; | ||
1294 | else if (IS_QLA24XX_TYPE(ha)) | ||
1295 | speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | | ||
1296 | FC_PORTSPEED_1GBIT; | ||
1297 | else if (IS_QLA23XX(ha)) | ||
1298 | speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; | ||
1299 | else | ||
1300 | speed = FC_PORTSPEED_1GBIT; | ||
1301 | fc_host_supported_speeds(ha->host) = speed; | ||
1284 | } | 1302 | } |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index d88e98c476b0..9d12d9f26209 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -1410,125 +1410,3 @@ qla2x00_dump_buffer(uint8_t * b, uint32_t size) | |||
1410 | if (cnt % 16) | 1410 | if (cnt % 16) |
1411 | printk("\n"); | 1411 | printk("\n"); |
1412 | } | 1412 | } |
1413 | |||
1414 | /************************************************************************** | ||
1415 | * qla2x00_print_scsi_cmd | ||
1416 | * Dumps out info about the scsi cmd and srb. | ||
1417 | * Input | ||
1418 | * cmd : struct scsi_cmnd | ||
1419 | **************************************************************************/ | ||
1420 | void | ||
1421 | qla2x00_print_scsi_cmd(struct scsi_cmnd * cmd) | ||
1422 | { | ||
1423 | int i; | ||
1424 | struct scsi_qla_host *ha; | ||
1425 | srb_t *sp; | ||
1426 | |||
1427 | ha = shost_priv(cmd->device->host); | ||
1428 | |||
1429 | sp = (srb_t *) cmd->SCp.ptr; | ||
1430 | printk("SCSI Command @=0x%p, Handle=0x%p\n", cmd, cmd->host_scribble); | ||
1431 | printk(" chan=0x%02x, target=0x%02x, lun=0x%02x, cmd_len=0x%02x\n", | ||
1432 | cmd->device->channel, cmd->device->id, cmd->device->lun, | ||
1433 | cmd->cmd_len); | ||
1434 | printk(" CDB: "); | ||
1435 | for (i = 0; i < cmd->cmd_len; i++) { | ||
1436 | printk("0x%02x ", cmd->cmnd[i]); | ||
1437 | } | ||
1438 | printk("\n seg_cnt=%d, allowed=%d, retries=%d\n", | ||
1439 | scsi_sg_count(cmd), cmd->allowed, cmd->retries); | ||
1440 | printk(" request buffer=0x%p, request buffer len=0x%x\n", | ||
1441 | scsi_sglist(cmd), scsi_bufflen(cmd)); | ||
1442 | printk(" tag=%d, transfersize=0x%x\n", | ||
1443 | cmd->tag, cmd->transfersize); | ||
1444 | printk(" serial_number=%lx, SP=%p\n", cmd->serial_number, sp); | ||
1445 | printk(" data direction=%d\n", cmd->sc_data_direction); | ||
1446 | |||
1447 | if (!sp) | ||
1448 | return; | ||
1449 | |||
1450 | printk(" sp flags=0x%x\n", sp->flags); | ||
1451 | } | ||
1452 | |||
1453 | #if defined(QL_DEBUG_ROUTINES) | ||
1454 | /* | ||
1455 | * qla2x00_formatted_dump_buffer | ||
1456 | * Prints string plus buffer. | ||
1457 | * | ||
1458 | * Input: | ||
1459 | * string = Null terminated string (no newline at end). | ||
1460 | * buffer = buffer address. | ||
1461 | * wd_size = word size 8, 16, 32 or 64 bits | ||
1462 | * count = number of words. | ||
1463 | */ | ||
1464 | void | ||
1465 | qla2x00_formatted_dump_buffer(char *string, uint8_t * buffer, | ||
1466 | uint8_t wd_size, uint32_t count) | ||
1467 | { | ||
1468 | uint32_t cnt; | ||
1469 | uint16_t *buf16; | ||
1470 | uint32_t *buf32; | ||
1471 | |||
1472 | if (strcmp(string, "") != 0) | ||
1473 | printk("%s\n",string); | ||
1474 | |||
1475 | switch (wd_size) { | ||
1476 | case 8: | ||
1477 | printk(" 0 1 2 3 4 5 6 7 " | ||
1478 | "8 9 Ah Bh Ch Dh Eh Fh\n"); | ||
1479 | printk("-----------------------------------------" | ||
1480 | "-------------------------------------\n"); | ||
1481 | |||
1482 | for (cnt = 1; cnt <= count; cnt++, buffer++) { | ||
1483 | printk("%02x",*buffer); | ||
1484 | if (cnt % 16 == 0) | ||
1485 | printk("\n"); | ||
1486 | else | ||
1487 | printk(" "); | ||
1488 | } | ||
1489 | if (cnt % 16 != 0) | ||
1490 | printk("\n"); | ||
1491 | break; | ||
1492 | case 16: | ||
1493 | printk(" 0 2 4 6 8 Ah " | ||
1494 | " Ch Eh\n"); | ||
1495 | printk("-----------------------------------------" | ||
1496 | "-------------\n"); | ||
1497 | |||
1498 | buf16 = (uint16_t *) buffer; | ||
1499 | for (cnt = 1; cnt <= count; cnt++, buf16++) { | ||
1500 | printk("%4x",*buf16); | ||
1501 | |||
1502 | if (cnt % 8 == 0) | ||
1503 | printk("\n"); | ||
1504 | else if (*buf16 < 10) | ||
1505 | printk(" "); | ||
1506 | else | ||
1507 | printk(" "); | ||
1508 | } | ||
1509 | if (cnt % 8 != 0) | ||
1510 | printk("\n"); | ||
1511 | break; | ||
1512 | case 32: | ||
1513 | printk(" 0 4 8 Ch\n"); | ||
1514 | printk("------------------------------------------\n"); | ||
1515 | |||
1516 | buf32 = (uint32_t *) buffer; | ||
1517 | for (cnt = 1; cnt <= count; cnt++, buf32++) { | ||
1518 | printk("%8x", *buf32); | ||
1519 | |||
1520 | if (cnt % 4 == 0) | ||
1521 | printk("\n"); | ||
1522 | else if (*buf32 < 10) | ||
1523 | printk(" "); | ||
1524 | else | ||
1525 | printk(" "); | ||
1526 | } | ||
1527 | if (cnt % 4 != 0) | ||
1528 | printk("\n"); | ||
1529 | break; | ||
1530 | default: | ||
1531 | break; | ||
1532 | } | ||
1533 | } | ||
1534 | #endif | ||
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index 524598afc81c..2e9c0c097f5e 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -22,19 +22,7 @@ | |||
22 | /* #define QL_DEBUG_LEVEL_13 */ /* Output fdmi function trace msgs */ | 22 | /* #define QL_DEBUG_LEVEL_13 */ /* Output fdmi function trace msgs */ |
23 | /* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ | 23 | /* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ |
24 | /* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ | 24 | /* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ |
25 | /* | 25 | /* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ |
26 | * Local Macro Definitions. | ||
27 | */ | ||
28 | #if defined(QL_DEBUG_LEVEL_1) || defined(QL_DEBUG_LEVEL_2) || \ | ||
29 | defined(QL_DEBUG_LEVEL_3) || defined(QL_DEBUG_LEVEL_4) || \ | ||
30 | defined(QL_DEBUG_LEVEL_5) || defined(QL_DEBUG_LEVEL_6) || \ | ||
31 | defined(QL_DEBUG_LEVEL_7) || defined(QL_DEBUG_LEVEL_8) || \ | ||
32 | defined(QL_DEBUG_LEVEL_9) || defined(QL_DEBUG_LEVEL_10) || \ | ||
33 | defined(QL_DEBUG_LEVEL_11) || defined(QL_DEBUG_LEVEL_12) || \ | ||
34 | defined(QL_DEBUG_LEVEL_13) || defined(QL_DEBUG_LEVEL_14) || \ | ||
35 | defined(QL_DEBUG_LEVEL_15) | ||
36 | #define QL_DEBUG_ROUTINES | ||
37 | #endif | ||
38 | 26 | ||
39 | /* | 27 | /* |
40 | * Macros used for debugging the driver. | 28 | * Macros used for debugging the driver. |
@@ -54,6 +42,7 @@ | |||
54 | #define DEBUG2_9_10(x) do { if (ql2xextended_error_logging) { x; } } while (0) | 42 | #define DEBUG2_9_10(x) do { if (ql2xextended_error_logging) { x; } } while (0) |
55 | #define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0) | 43 | #define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0) |
56 | #define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0) | 44 | #define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0) |
45 | #define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0) | ||
57 | 46 | ||
58 | #if defined(QL_DEBUG_LEVEL_3) | 47 | #if defined(QL_DEBUG_LEVEL_3) |
59 | #define DEBUG3(x) do {x;} while (0) | 48 | #define DEBUG3(x) do {x;} while (0) |
@@ -133,6 +122,12 @@ | |||
133 | #define DEBUG15(x) do {} while (0) | 122 | #define DEBUG15(x) do {} while (0) |
134 | #endif | 123 | #endif |
135 | 124 | ||
125 | #if defined(QL_DEBUG_LEVEL_16) | ||
126 | #define DEBUG16(x) do {x;} while (0) | ||
127 | #else | ||
128 | #define DEBUG16(x) do {} while (0) | ||
129 | #endif | ||
130 | |||
136 | /* | 131 | /* |
137 | * Firmware Dump structure definition | 132 | * Firmware Dump structure definition |
138 | */ | 133 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 3750319f4968..094d95f0764c 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/workqueue.h> | 24 | #include <linux/workqueue.h> |
25 | #include <linux/firmware.h> | 25 | #include <linux/firmware.h> |
26 | #include <linux/aer.h> | 26 | #include <linux/aer.h> |
27 | #include <linux/mutex.h> | ||
27 | #include <asm/semaphore.h> | 28 | #include <asm/semaphore.h> |
28 | 29 | ||
29 | #include <scsi/scsi.h> | 30 | #include <scsi/scsi.h> |
@@ -192,9 +193,6 @@ typedef struct srb { | |||
192 | 193 | ||
193 | uint16_t flags; | 194 | uint16_t flags; |
194 | 195 | ||
195 | /* Single transfer DMA context */ | ||
196 | dma_addr_t dma_handle; | ||
197 | |||
198 | uint32_t request_sense_length; | 196 | uint32_t request_sense_length; |
199 | uint8_t *request_sense_ptr; | 197 | uint8_t *request_sense_ptr; |
200 | } srb_t; | 198 | } srb_t; |
@@ -1542,8 +1540,6 @@ typedef struct fc_port { | |||
1542 | atomic_t state; | 1540 | atomic_t state; |
1543 | uint32_t flags; | 1541 | uint32_t flags; |
1544 | 1542 | ||
1545 | unsigned int os_target_id; | ||
1546 | |||
1547 | int port_login_retry_count; | 1543 | int port_login_retry_count; |
1548 | int login_retry; | 1544 | int login_retry; |
1549 | atomic_t port_down_timer; | 1545 | atomic_t port_down_timer; |
@@ -1613,6 +1609,7 @@ typedef struct fc_port { | |||
1613 | #define CT_ACCEPT_RESPONSE 0x8002 | 1609 | #define CT_ACCEPT_RESPONSE 0x8002 |
1614 | #define CT_REASON_INVALID_COMMAND_CODE 0x01 | 1610 | #define CT_REASON_INVALID_COMMAND_CODE 0x01 |
1615 | #define CT_REASON_CANNOT_PERFORM 0x09 | 1611 | #define CT_REASON_CANNOT_PERFORM 0x09 |
1612 | #define CT_REASON_COMMAND_UNSUPPORTED 0x0b | ||
1616 | #define CT_EXPL_ALREADY_REGISTERED 0x10 | 1613 | #define CT_EXPL_ALREADY_REGISTERED 0x10 |
1617 | 1614 | ||
1618 | #define NS_N_PORT_TYPE 0x01 | 1615 | #define NS_N_PORT_TYPE 0x01 |
@@ -2063,7 +2060,8 @@ struct isp_operations { | |||
2063 | void (*disable_intrs) (struct scsi_qla_host *); | 2060 | void (*disable_intrs) (struct scsi_qla_host *); |
2064 | 2061 | ||
2065 | int (*abort_command) (struct scsi_qla_host *, srb_t *); | 2062 | int (*abort_command) (struct scsi_qla_host *, srb_t *); |
2066 | int (*abort_target) (struct fc_port *); | 2063 | int (*target_reset) (struct fc_port *, unsigned int); |
2064 | int (*lun_reset) (struct fc_port *, unsigned int); | ||
2067 | int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, | 2065 | int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, |
2068 | uint8_t, uint8_t, uint16_t *, uint8_t); | 2066 | uint8_t, uint8_t, uint16_t *, uint8_t); |
2069 | int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t, | 2067 | int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t, |
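
The single abort_target hook is replaced by separate target_reset and lun_reset operations, so error handling can issue a task-management function scoped to either the whole target or a single LUN. Below is a hedged sketch of dispatching through such an ops table; how isp_ops is reached from the host structure in this kernel is an assumption here.

#include "qla_def.h"	/* scsi_qla_host_t, struct fc_port, struct isp_operations */

/*
 * Hypothetical dispatch helper; whether isp_ops is embedded in or pointed to
 * by the host structure is assumed, not taken from this patch.
 */
static int issue_device_reset(scsi_qla_host_t *ha, struct fc_port *fcport,
			      unsigned int lun, int whole_target)
{
	if (whole_target)
		return ha->isp_ops->target_reset(fcport, lun);

	return ha->isp_ops->lun_reset(fcport, lun);
}
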
@@ -2117,6 +2115,46 @@ struct qla_msix_entry { | |||
2117 | 2115 | ||
2118 | #define WATCH_INTERVAL 1 /* number of seconds */ | 2116 | #define WATCH_INTERVAL 1 /* number of seconds */ |
2119 | 2117 | ||
2118 | /* Work events. */ | ||
2119 | enum qla_work_type { | ||
2120 | QLA_EVT_AEN, | ||
2121 | QLA_EVT_HWE_LOG, | ||
2122 | }; | ||
2123 | |||
2124 | |||
2125 | struct qla_work_evt { | ||
2126 | struct list_head list; | ||
2127 | enum qla_work_type type; | ||
2128 | u32 flags; | ||
2129 | #define QLA_EVT_FLAG_FREE 0x1 | ||
2130 | |||
2131 | union { | ||
2132 | struct { | ||
2133 | enum fc_host_event_code code; | ||
2134 | u32 data; | ||
2135 | } aen; | ||
2136 | struct { | ||
2137 | uint16_t code; | ||
2138 | uint16_t d1, d2, d3; | ||
2139 | } hwe; | ||
2140 | } u; | ||
2141 | }; | ||
2142 | |||
2143 | struct qla_chip_state_84xx { | ||
2144 | struct list_head list; | ||
2145 | struct kref kref; | ||
2146 | |||
2147 | void *bus; | ||
2148 | spinlock_t access_lock; | ||
2149 | struct mutex fw_update_mutex; | ||
2150 | uint32_t fw_update; | ||
2151 | uint32_t op_fw_version; | ||
2152 | uint32_t op_fw_size; | ||
2153 | uint32_t op_fw_seq_size; | ||
2154 | uint32_t diag_fw_version; | ||
2155 | uint32_t gold_fw_version; | ||
2156 | }; | ||
2157 | |||
2120 | /* | 2158 | /* |
2121 | * Linux Host Adapter structure | 2159 | * Linux Host Adapter structure |
2122 | */ | 2160 | */ |
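
struct qla_work_evt carries deferred events (FC transport async-event notifications, hardware-event log writes) from interrupt context to process context via the per-host work_list added further down in this file. A minimal sketch of queueing an AEN entry under those definitions; the locking and consumer wake-up performed by the driver's real qla2x00_post_*_work() helpers are intentionally omitted and assumed.

#include <linux/list.h>
#include <linux/slab.h>
#include <scsi/scsi_transport_fc.h>	/* enum fc_host_event_code */
#include "qla_def.h"			/* scsi_qla_host_t, struct qla_work_evt */

/* Hypothetical enqueue helper built only on the definitions shown above. */
static int post_aen_event(scsi_qla_host_t *ha, enum fc_host_event_code code,
			  u32 data)
{
	struct qla_work_evt *e;

	e = kzalloc(sizeof(*e), GFP_ATOMIC);	/* may be called from IRQ context */
	if (!e)
		return -ENOMEM;

	INIT_LIST_HEAD(&e->list);
	e->type = QLA_EVT_AEN;
	e->flags = QLA_EVT_FLAG_FREE;		/* consumer frees the entry */
	e->u.aen.code = code;
	e->u.aen.data = data;

	list_add_tail(&e->list, &ha->work_list);
	return 0;
}
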
@@ -2155,6 +2193,7 @@ typedef struct scsi_qla_host { | |||
2155 | uint32_t vsan_enabled :1; | 2193 | uint32_t vsan_enabled :1; |
2156 | uint32_t npiv_supported :1; | 2194 | uint32_t npiv_supported :1; |
2157 | uint32_t fce_enabled :1; | 2195 | uint32_t fce_enabled :1; |
2196 | uint32_t hw_event_marker_found :1; | ||
2158 | } flags; | 2197 | } flags; |
2159 | 2198 | ||
2160 | atomic_t loop_state; | 2199 | atomic_t loop_state; |
@@ -2204,6 +2243,7 @@ typedef struct scsi_qla_host { | |||
2204 | #define DFLG_NO_CABLE BIT_4 | 2243 | #define DFLG_NO_CABLE BIT_4 |
2205 | 2244 | ||
2206 | #define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532 | 2245 | #define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532 |
2246 | #define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432 | ||
2207 | uint32_t device_type; | 2247 | uint32_t device_type; |
2208 | #define DT_ISP2100 BIT_0 | 2248 | #define DT_ISP2100 BIT_0 |
2209 | #define DT_ISP2200 BIT_1 | 2249 | #define DT_ISP2200 BIT_1 |
@@ -2217,7 +2257,8 @@ typedef struct scsi_qla_host { | |||
2217 | #define DT_ISP5422 BIT_9 | 2257 | #define DT_ISP5422 BIT_9 |
2218 | #define DT_ISP5432 BIT_10 | 2258 | #define DT_ISP5432 BIT_10 |
2219 | #define DT_ISP2532 BIT_11 | 2259 | #define DT_ISP2532 BIT_11 |
2220 | #define DT_ISP_LAST (DT_ISP2532 << 1) | 2260 | #define DT_ISP8432 BIT_12 |
2261 | #define DT_ISP_LAST (DT_ISP8432 << 1) | ||
2221 | 2262 | ||
2222 | #define DT_IIDMA BIT_26 | 2263 | #define DT_IIDMA BIT_26 |
2223 | #define DT_FWI2 BIT_27 | 2264 | #define DT_FWI2 BIT_27 |
@@ -2239,12 +2280,16 @@ typedef struct scsi_qla_host { | |||
2239 | #define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422) | 2280 | #define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422) |
2240 | #define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432) | 2281 | #define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432) |
2241 | #define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532) | 2282 | #define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532) |
2283 | #define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432) | ||
2242 | 2284 | ||
2243 | #define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ | 2285 | #define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ |
2244 | IS_QLA6312(ha) || IS_QLA6322(ha)) | 2286 | IS_QLA6312(ha) || IS_QLA6322(ha)) |
2245 | #define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha)) | 2287 | #define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha)) |
2246 | #define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha)) | 2288 | #define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha)) |
2247 | #define IS_QLA25XX(ha) (IS_QLA2532(ha)) | 2289 | #define IS_QLA25XX(ha) (IS_QLA2532(ha)) |
2290 | #define IS_QLA84XX(ha) (IS_QLA8432(ha)) | ||
2291 | #define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ | ||
2292 | IS_QLA84XX(ha)) | ||
2248 | 2293 | ||
2249 | #define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA) | 2294 | #define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA) |
2250 | #define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2) | 2295 | #define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2) |
@@ -2356,6 +2401,8 @@ typedef struct scsi_qla_host { | |||
2356 | uint32_t login_retry_count; | 2401 | uint32_t login_retry_count; |
2357 | int max_q_depth; | 2402 | int max_q_depth; |
2358 | 2403 | ||
2404 | struct list_head work_list; | ||
2405 | |||
2359 | /* Fibre Channel Device List. */ | 2406 | /* Fibre Channel Device List. */ |
2360 | struct list_head fcports; | 2407 | struct list_head fcports; |
2361 | 2408 | ||
@@ -2423,8 +2470,6 @@ typedef struct scsi_qla_host { | |||
2423 | #define MBX_TIMEDOUT BIT_5 | 2470 | #define MBX_TIMEDOUT BIT_5 |
2424 | #define MBX_ACCESS_TIMEDOUT BIT_6 | 2471 | #define MBX_ACCESS_TIMEDOUT BIT_6 |
2425 | 2472 | ||
2426 | mbx_cmd_t mc; | ||
2427 | |||
2428 | /* Basic firmware related information. */ | 2473 | /* Basic firmware related information. */ |
2429 | uint16_t fw_major_version; | 2474 | uint16_t fw_major_version; |
2430 | uint16_t fw_minor_version; | 2475 | uint16_t fw_minor_version; |
@@ -2458,6 +2503,10 @@ typedef struct scsi_qla_host { | |||
2458 | uint64_t fce_wr, fce_rd; | 2503 | uint64_t fce_wr, fce_rd; |
2459 | struct mutex fce_mutex; | 2504 | struct mutex fce_mutex; |
2460 | 2505 | ||
2506 | uint32_t hw_event_start; | ||
2507 | uint32_t hw_event_ptr; | ||
2508 | uint32_t hw_event_pause_errors; | ||
2509 | |||
2461 | uint8_t host_str[16]; | 2510 | uint8_t host_str[16]; |
2462 | uint32_t pci_attr; | 2511 | uint32_t pci_attr; |
2463 | uint16_t chip_revision; | 2512 | uint16_t chip_revision; |
@@ -2493,6 +2542,13 @@ typedef struct scsi_qla_host { | |||
2493 | uint8_t fcode_revision[16]; | 2542 | uint8_t fcode_revision[16]; |
2494 | uint32_t fw_revision[4]; | 2543 | uint32_t fw_revision[4]; |
2495 | 2544 | ||
2545 | uint16_t fdt_odd_index; | ||
2546 | uint32_t fdt_wrt_disable; | ||
2547 | uint32_t fdt_erase_cmd; | ||
2548 | uint32_t fdt_block_size; | ||
2549 | uint32_t fdt_unprotect_sec_cmd; | ||
2550 | uint32_t fdt_protect_sec_cmd; | ||
2551 | |||
2496 | /* Needed for BEACON */ | 2552 | /* Needed for BEACON */ |
2497 | uint16_t beacon_blink_led; | 2553 | uint16_t beacon_blink_led; |
2498 | uint8_t beacon_color_state; | 2554 | uint8_t beacon_color_state; |
@@ -2538,6 +2594,8 @@ typedef struct scsi_qla_host { | |||
2538 | #define VP_ERR_ADAP_NORESOURCES 5 | 2594 | #define VP_ERR_ADAP_NORESOURCES 5 |
2539 | uint16_t max_npiv_vports; /* 63 or 125 per topology */ | 2595 | uint16_t max_npiv_vports; /* 63 or 125 per topology */ |
2540 | int cur_vport_count; | 2596 | int cur_vport_count; |
2597 | |||
2598 | struct qla_chip_state_84xx *cs84xx; | ||
2541 | } scsi_qla_host_t; | 2599 | } scsi_qla_host_t; |
2542 | 2600 | ||
2543 | 2601 | ||
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 2cd899bfe84b..561a4411719d 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 9337e138ed63..078f2a15f40b 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -719,7 +719,7 @@ struct tsk_mgmt_entry { | |||
719 | 719 | ||
720 | uint16_t timeout; /* Command timeout. */ | 720 | uint16_t timeout; /* Command timeout. */ |
721 | 721 | ||
722 | uint8_t lun[8]; /* FCP LUN (BE). */ | 722 | struct scsi_lun lun; /* FCP LUN (BE). */ |
723 | 723 | ||
724 | uint32_t control_flags; /* Control Flags. */ | 724 | uint32_t control_flags; /* Control Flags. */ |
725 | #define TCF_NOTMCMD_TO_TARGET BIT_31 | 725 | #define TCF_NOTMCMD_TO_TARGET BIT_31 |
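
Changing the task-management IOCB's LUN field from a raw 8-byte array to struct scsi_lun lets callers fill it with the mid-layer helper int_to_scsilun(), which encodes an integer LUN in the big-endian SAM format the firmware expects. A minimal sketch; which headers declare these helpers in this kernel is assumed.

#include <scsi/scsi.h>		/* struct scsi_lun (location assumed) */
#include <scsi/scsi_device.h>	/* int_to_scsilun() (location assumed) */
#include "qla_fw.h"		/* struct tsk_mgmt_entry */

/* Hypothetical helper: encode an integer LUN into the IOCB's SAM-format field. */
static void set_tmf_lun(struct tsk_mgmt_entry *tsk, unsigned int lun)
{
	int_to_scsilun(lun, &tsk->lun);
}
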
@@ -793,7 +793,19 @@ struct device_reg_24xx { | |||
793 | #define FA_VPD_NVRAM_ADDR 0x48000 | 793 | #define FA_VPD_NVRAM_ADDR 0x48000 |
794 | #define FA_FEATURE_ADDR 0x4C000 | 794 | #define FA_FEATURE_ADDR 0x4C000 |
795 | #define FA_FLASH_DESCR_ADDR 0x50000 | 795 | #define FA_FLASH_DESCR_ADDR 0x50000 |
796 | #define FA_HW_EVENT_ADDR 0x54000 | 796 | #define FA_HW_EVENT0_ADDR 0x54000 |
797 | #define FA_HW_EVENT1_ADDR 0x54200 | ||
798 | #define FA_HW_EVENT_SIZE 0x200 | ||
799 | #define FA_HW_EVENT_ENTRY_SIZE 4 | ||
800 | /* | ||
801 | * Flash Error Log Event Codes. | ||
802 | */ | ||
803 | #define HW_EVENT_RESET_ERR 0xF00B | ||
804 | #define HW_EVENT_ISP_ERR 0xF020 | ||
805 | #define HW_EVENT_PARITY_ERR 0xF022 | ||
806 | #define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023 | ||
807 | #define HW_EVENT_FLASH_FW_ERR 0xF024 | ||
808 | |||
797 | #define FA_BOOT_LOG_ADDR 0x58000 | 809 | #define FA_BOOT_LOG_ADDR 0x58000 |
798 | #define FA_FW_DUMP0_ADDR 0x60000 | 810 | #define FA_FW_DUMP0_ADDR 0x60000 |
799 | #define FA_FW_DUMP1_ADDR 0x70000 | 811 | #define FA_FW_DUMP1_ADDR 0x70000 |
@@ -1174,4 +1186,159 @@ struct vf_evfp_entry_24xx { | |||
1174 | }; | 1186 | }; |
1175 | 1187 | ||
1176 | /* END MID Support ***********************************************************/ | 1188 | /* END MID Support ***********************************************************/ |
1189 | |||
1190 | /* Flash Description Table ***************************************************/ | ||
1191 | |||
1192 | struct qla_fdt_layout { | ||
1193 | uint8_t sig[4]; | ||
1194 | uint16_t version; | ||
1195 | uint16_t len; | ||
1196 | uint16_t checksum; | ||
1197 | uint8_t unused1[2]; | ||
1198 | uint8_t model[16]; | ||
1199 | uint16_t man_id; | ||
1200 | uint16_t id; | ||
1201 | uint8_t flags; | ||
1202 | uint8_t erase_cmd; | ||
1203 | uint8_t alt_erase_cmd; | ||
1204 | uint8_t wrt_enable_cmd; | ||
1205 | uint8_t wrt_enable_bits; | ||
1206 | uint8_t wrt_sts_reg_cmd; | ||
1207 | uint8_t unprotect_sec_cmd; | ||
1208 | uint8_t read_man_id_cmd; | ||
1209 | uint32_t block_size; | ||
1210 | uint32_t alt_block_size; | ||
1211 | uint32_t flash_size; | ||
1212 | uint32_t wrt_enable_data; | ||
1213 | uint8_t read_id_addr_len; | ||
1214 | uint8_t wrt_disable_bits; | ||
1215 | uint8_t read_dev_id_len; | ||
1216 | uint8_t chip_erase_cmd; | ||
1217 | uint16_t read_timeout; | ||
1218 | uint8_t protect_sec_cmd; | ||
1219 | uint8_t unused2[65]; | ||
1220 | }; | ||
1221 | |||
1222 | /* 84XX Support **************************************************************/ | ||
1223 | |||
1224 | #define MBA_ISP84XX_ALERT 0x800f /* Alert Notification. */ | ||
1225 | #define A84_PANIC_RECOVERY 0x1 | ||
1226 | #define A84_OP_LOGIN_COMPLETE 0x2 | ||
1227 | #define A84_DIAG_LOGIN_COMPLETE 0x3 | ||
1228 | #define A84_GOLD_LOGIN_COMPLETE 0x4 | ||
1229 | |||
1230 | #define MBC_ISP84XX_RESET 0x3a /* Reset. */ | ||
1231 | |||
1232 | #define FSTATE_REMOTE_FC_DOWN BIT_0 | ||
1233 | #define FSTATE_NSL_LINK_DOWN BIT_1 | ||
1234 | #define FSTATE_IS_DIAG_FW BIT_2 | ||
1235 | #define FSTATE_LOGGED_IN BIT_3 | ||
1236 | #define FSTATE_WAITING_FOR_VERIFY BIT_4 | ||
1237 | |||
1238 | #define VERIFY_CHIP_IOCB_TYPE 0x1B | ||
1239 | struct verify_chip_entry_84xx { | ||
1240 | uint8_t entry_type; | ||
1241 | uint8_t entry_count; | ||
1242 | uint8_t sys_defined; | ||
1243 | uint8_t entry_status; | ||
1244 | |||
1245 | uint32_t handle; | ||
1246 | |||
1247 | uint16_t options; | ||
1248 | #define VCO_DONT_UPDATE_FW BIT_0 | ||
1249 | #define VCO_FORCE_UPDATE BIT_1 | ||
1250 | #define VCO_DONT_RESET_UPDATE BIT_2 | ||
1251 | #define VCO_DIAG_FW BIT_3 | ||
1252 | #define VCO_END_OF_DATA BIT_14 | ||
1253 | #define VCO_ENABLE_DSD BIT_15 | ||
1254 | |||
1255 | uint16_t reserved_1; | ||
1256 | |||
1257 | uint16_t data_seg_cnt; | ||
1258 | uint16_t reserved_2[3]; | ||
1259 | |||
1260 | uint32_t fw_ver; | ||
1261 | uint32_t exchange_address; | ||
1262 | |||
1263 | uint32_t reserved_3[3]; | ||
1264 | uint32_t fw_size; | ||
1265 | uint32_t fw_seq_size; | ||
1266 | uint32_t relative_offset; | ||
1267 | |||
1268 | uint32_t dseg_address[2]; | ||
1269 | uint32_t dseg_length; | ||
1270 | }; | ||
1271 | |||
1272 | struct verify_chip_rsp_84xx { | ||
1273 | uint8_t entry_type; | ||
1274 | uint8_t entry_count; | ||
1275 | uint8_t sys_defined; | ||
1276 | uint8_t entry_status; | ||
1277 | |||
1278 | uint32_t handle; | ||
1279 | |||
1280 | uint16_t comp_status; | ||
1281 | #define CS_VCS_CHIP_FAILURE 0x3 | ||
1282 | #define CS_VCS_BAD_EXCHANGE 0x8 | ||
1283 | #define CS_VCS_SEQ_COMPLETEi 0x40 | ||
1284 | |||
1285 | uint16_t failure_code; | ||
1286 | #define VFC_CHECKSUM_ERROR 0x1 | ||
1287 | #define VFC_INVALID_LEN 0x2 | ||
1288 | #define VFC_ALREADY_IN_PROGRESS 0x8 | ||
1289 | |||
1290 | uint16_t reserved_1[4]; | ||
1291 | |||
1292 | uint32_t fw_ver; | ||
1293 | uint32_t exchange_address; | ||
1294 | |||
1295 | uint32_t reserved_2[6]; | ||
1296 | }; | ||
1297 | |||
1298 | #define ACCESS_CHIP_IOCB_TYPE 0x2B | ||
1299 | struct access_chip_84xx { | ||
1300 | uint8_t entry_type; | ||
1301 | uint8_t entry_count; | ||
1302 | uint8_t sys_defined; | ||
1303 | uint8_t entry_status; | ||
1304 | |||
1305 | uint32_t handle; | ||
1306 | |||
1307 | uint16_t options; | ||
1308 | #define ACO_DUMP_MEMORY 0x0 | ||
1309 | #define ACO_LOAD_MEMORY 0x1 | ||
1310 | #define ACO_CHANGE_CONFIG_PARAM 0x2 | ||
1311 | #define ACO_REQUEST_INFO 0x3 | ||
1312 | |||
1313 | uint16_t reserved1; | ||
1314 | |||
1315 | uint16_t dseg_count; | ||
1316 | uint16_t reserved2[3]; | ||
1317 | |||
1318 | uint32_t parameter1; | ||
1319 | uint32_t parameter2; | ||
1320 | uint32_t parameter3; | ||
1321 | |||
1322 | uint32_t reserved3[3]; | ||
1323 | uint32_t total_byte_cnt; | ||
1324 | uint32_t reserved4; | ||
1325 | |||
1326 | uint32_t dseg_address[2]; | ||
1327 | uint32_t dseg_length; | ||
1328 | }; | ||
1329 | |||
1330 | struct access_chip_rsp_84xx { | ||
1331 | uint8_t entry_type; | ||
1332 | uint8_t entry_count; | ||
1333 | uint8_t sys_defined; | ||
1334 | uint8_t entry_status; | ||
1335 | |||
1336 | uint32_t handle; | ||
1337 | |||
1338 | uint16_t comp_status; | ||
1339 | uint16_t failure_code; | ||
1340 | uint32_t residual_count; | ||
1341 | |||
1342 | uint32_t reserved[12]; | ||
1343 | }; | ||
1177 | #endif | 1344 | #endif |
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 193f688ec3d7..a9571c214a9e 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -38,9 +38,6 @@ extern int qla2x00_loop_resync(scsi_qla_host_t *); | |||
38 | extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *); | 38 | extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *); |
39 | extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *); | 39 | extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *); |
40 | 40 | ||
41 | extern void qla2x00_restart_queues(scsi_qla_host_t *, uint8_t); | ||
42 | |||
43 | extern void qla2x00_rescan_fcports(scsi_qla_host_t *); | ||
44 | extern void qla2x00_update_fcports(scsi_qla_host_t *); | 41 | extern void qla2x00_update_fcports(scsi_qla_host_t *); |
45 | 42 | ||
46 | extern int qla2x00_abort_isp(scsi_qla_host_t *); | 43 | extern int qla2x00_abort_isp(scsi_qla_host_t *); |
@@ -50,6 +47,8 @@ extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *); | |||
50 | extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *); | 47 | extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *); |
51 | extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *); | 48 | extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *); |
52 | 49 | ||
50 | extern void qla84xx_put_chip(struct scsi_qla_host *); | ||
51 | |||
53 | /* | 52 | /* |
54 | * Global Data in qla_os.c source file. | 53 | * Global Data in qla_os.c source file. |
55 | */ | 54 | */ |
@@ -67,6 +66,10 @@ extern int num_hosts; | |||
67 | 66 | ||
68 | extern int qla2x00_loop_reset(scsi_qla_host_t *); | 67 | extern int qla2x00_loop_reset(scsi_qla_host_t *); |
69 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); | 68 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); |
69 | extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum | ||
70 | fc_host_event_code, u32); | ||
71 | extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t, | ||
72 | uint16_t, uint16_t); | ||
70 | 73 | ||
71 | /* | 74 | /* |
72 | * Global Functions in qla_mid.c source file. | 75 | * Global Functions in qla_mid.c source file. |
@@ -149,12 +152,17 @@ extern int | |||
149 | qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); | 152 | qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); |
150 | 153 | ||
151 | extern int | 154 | extern int |
155 | qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *, dma_addr_t, size_t, | ||
156 | uint32_t); | ||
157 | |||
158 | extern int | ||
152 | qla2x00_abort_command(scsi_qla_host_t *, srb_t *); | 159 | qla2x00_abort_command(scsi_qla_host_t *, srb_t *); |
153 | 160 | ||
154 | #if USE_ABORT_TGT | ||
155 | extern int | 161 | extern int |
156 | qla2x00_abort_target(fc_port_t *); | 162 | qla2x00_abort_target(struct fc_port *, unsigned int); |
157 | #endif | 163 | |
164 | extern int | ||
165 | qla2x00_lun_reset(struct fc_port *, unsigned int); | ||
158 | 166 | ||
159 | extern int | 167 | extern int |
160 | qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, | 168 | qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, |
@@ -220,7 +228,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, | |||
220 | dma_addr_t); | 228 | dma_addr_t); |
221 | 229 | ||
222 | extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *); | 230 | extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *); |
223 | extern int qla24xx_abort_target(fc_port_t *); | 231 | extern int qla24xx_abort_target(struct fc_port *, unsigned int); |
232 | extern int qla24xx_lun_reset(struct fc_port *, unsigned int); | ||
224 | 233 | ||
225 | extern int | 234 | extern int |
226 | qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t); | 235 | qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t); |
@@ -246,6 +255,8 @@ qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t, uint16_t); | |||
246 | extern int | 255 | extern int |
247 | qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *); | 256 | qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *); |
248 | 257 | ||
258 | extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *); | ||
259 | |||
249 | /* | 260 | /* |
250 | * Global Function Prototypes in qla_isr.c source file. | 261 | * Global Function Prototypes in qla_isr.c source file. |
251 | */ | 262 | */ |
@@ -298,6 +309,11 @@ extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, | |||
298 | extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *); | 309 | extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *); |
299 | extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *); | 310 | extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *); |
300 | 311 | ||
312 | extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t, | ||
313 | uint16_t, uint16_t); | ||
314 | |||
315 | extern void qla2xxx_get_flash_info(scsi_qla_host_t *); | ||
316 | |||
301 | /* | 317 | /* |
302 | * Global Function Prototypes in qla_dbg.c source file. | 318 | * Global Function Prototypes in qla_dbg.c source file. |
303 | */ | 319 | */ |
@@ -307,7 +323,6 @@ extern void qla24xx_fw_dump(scsi_qla_host_t *, int); | |||
307 | extern void qla25xx_fw_dump(scsi_qla_host_t *, int); | 323 | extern void qla25xx_fw_dump(scsi_qla_host_t *, int); |
308 | extern void qla2x00_dump_regs(scsi_qla_host_t *); | 324 | extern void qla2x00_dump_regs(scsi_qla_host_t *); |
309 | extern void qla2x00_dump_buffer(uint8_t *, uint32_t); | 325 | extern void qla2x00_dump_buffer(uint8_t *, uint32_t); |
310 | extern void qla2x00_print_scsi_cmd(struct scsi_cmnd *); | ||
311 | 326 | ||
312 | /* | 327 | /* |
313 | * Global Function Prototypes in qla_gs.c source file. | 328 | * Global Function Prototypes in qla_gs.c source file. |
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index c1808763d40e..750d7ef83aae 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c | |||
@@ -1,17 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | #include "qla_def.h" | 7 | #include "qla_def.h" |
8 | 8 | ||
9 | static inline struct ct_sns_req * | ||
10 | qla2x00_prep_ct_req(struct ct_sns_req *, uint16_t, uint16_t); | ||
11 | |||
12 | static inline struct sns_cmd_pkt * | ||
13 | qla2x00_prep_sns_cmd(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t); | ||
14 | |||
15 | static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *); | 9 | static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *); |
16 | static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *); | 10 | static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *); |
17 | static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *); | 11 | static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *); |
@@ -1538,7 +1532,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha) | |||
1538 | eiter->a.sup_speed = __constant_cpu_to_be32( | 1532 | eiter->a.sup_speed = __constant_cpu_to_be32( |
1539 | FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB| | 1533 | FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB| |
1540 | FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB); | 1534 | FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB); |
1541 | else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) | 1535 | else if (IS_QLA24XX_TYPE(ha)) |
1542 | eiter->a.sup_speed = __constant_cpu_to_be32( | 1536 | eiter->a.sup_speed = __constant_cpu_to_be32( |
1543 | FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB| | 1537 | FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB| |
1544 | FDMI_PORT_SPEED_4GB); | 1538 | FDMI_PORT_SPEED_4GB); |
@@ -1847,8 +1841,10 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list) | |||
1847 | "GPSC")) != QLA_SUCCESS) { | 1841 | "GPSC")) != QLA_SUCCESS) { |
1848 | /* FM command unsupported? */ | 1842 | /* FM command unsupported? */ |
1849 | if (rval == QLA_INVALID_COMMAND && | 1843 | if (rval == QLA_INVALID_COMMAND && |
1850 | ct_rsp->header.reason_code == | 1844 | (ct_rsp->header.reason_code == |
1851 | CT_REASON_INVALID_COMMAND_CODE) { | 1845 | CT_REASON_INVALID_COMMAND_CODE || |
1846 | ct_rsp->header.reason_code == | ||
1847 | CT_REASON_COMMAND_UNSUPPORTED)) { | ||
1852 | DEBUG2(printk("scsi(%ld): GPSC command " | 1848 | DEBUG2(printk("scsi(%ld): GPSC command " |
1853 | "unsupported, disabling query...\n", | 1849 | "unsupported, disabling query...\n", |
1854 | ha->host_no)); | 1850 | ha->host_no)); |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 364be7d06875..01e26087c1dd 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -15,14 +15,6 @@ | |||
15 | #include <asm/prom.h> | 15 | #include <asm/prom.h> |
16 | #endif | 16 | #endif |
17 | 17 | ||
18 | /* XXX(hch): this is ugly, but we don't want to pull in exioctl.h */ | ||
19 | #ifndef EXT_IS_LUN_BIT_SET | ||
20 | #define EXT_IS_LUN_BIT_SET(P,L) \ | ||
21 | (((P)->mask[L/8] & (0x80 >> (L%8)))?1:0) | ||
22 | #define EXT_SET_LUN_BIT(P,L) \ | ||
23 | ((P)->mask[L/8] |= (0x80 >> (L%8))) | ||
24 | #endif | ||
25 | |||
26 | /* | 18 | /* |
27 | * QLogic ISP2x00 Hardware Support Function Prototypes. | 19 | * QLogic ISP2x00 Hardware Support Function Prototypes. |
28 | */ | 20 | */ |
@@ -45,6 +37,9 @@ static int qla2x00_restart_isp(scsi_qla_host_t *); | |||
45 | 37 | ||
46 | static int qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev); | 38 | static int qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev); |
47 | 39 | ||
40 | static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); | ||
41 | static int qla84xx_init_chip(scsi_qla_host_t *); | ||
42 | |||
48 | /****************************************************************************/ | 43 | /****************************************************************************/ |
49 | /* QLogic ISP2x00 Hardware Support Functions. */ | 44 | /* QLogic ISP2x00 Hardware Support Functions. */ |
50 | /****************************************************************************/ | 45 | /****************************************************************************/ |
@@ -114,6 +109,15 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha) | |||
114 | rval = qla2x00_setup_chip(ha); | 109 | rval = qla2x00_setup_chip(ha); |
115 | if (rval) | 110 | if (rval) |
116 | return (rval); | 111 | return (rval); |
112 | qla2xxx_get_flash_info(ha); | ||
113 | } | ||
114 | if (IS_QLA84XX(ha)) { | ||
115 | ha->cs84xx = qla84xx_get_chip(ha); | ||
116 | if (!ha->cs84xx) { | ||
117 | qla_printk(KERN_ERR, ha, | ||
118 | "Unable to configure ISP84XX.\n"); | ||
119 | return QLA_FUNCTION_FAILED; | ||
120 | } | ||
117 | } | 121 | } |
118 | rval = qla2x00_init_rings(ha); | 122 | rval = qla2x00_init_rings(ha); |
119 | 123 | ||
@@ -500,6 +504,7 @@ qla2x00_reset_chip(scsi_qla_host_t *ha) | |||
500 | static inline void | 504 | static inline void |
501 | qla24xx_reset_risc(scsi_qla_host_t *ha) | 505 | qla24xx_reset_risc(scsi_qla_host_t *ha) |
502 | { | 506 | { |
507 | int hw_evt = 0; | ||
503 | unsigned long flags = 0; | 508 | unsigned long flags = 0; |
504 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | 509 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; |
505 | uint32_t cnt, d2; | 510 | uint32_t cnt, d2; |
@@ -528,6 +533,8 @@ qla24xx_reset_risc(scsi_qla_host_t *ha) | |||
528 | d2 = (uint32_t) RD_REG_WORD(®->mailbox0); | 533 | d2 = (uint32_t) RD_REG_WORD(®->mailbox0); |
529 | barrier(); | 534 | barrier(); |
530 | } | 535 | } |
536 | if (cnt == 0) | ||
537 | hw_evt = 1; | ||
531 | 538 | ||
532 | /* Wait for soft-reset to complete. */ | 539 | /* Wait for soft-reset to complete. */ |
533 | d2 = RD_REG_DWORD(®->ctrl_status); | 540 | d2 = RD_REG_DWORD(®->ctrl_status); |
@@ -536,6 +543,10 @@ qla24xx_reset_risc(scsi_qla_host_t *ha) | |||
536 | d2 = RD_REG_DWORD(®->ctrl_status); | 543 | d2 = RD_REG_DWORD(®->ctrl_status); |
537 | barrier(); | 544 | barrier(); |
538 | } | 545 | } |
546 | if (cnt == 0 || hw_evt) | ||
547 | qla2xxx_hw_event_log(ha, HW_EVENT_RESET_ERR, | ||
548 | RD_REG_WORD(®->mailbox1), RD_REG_WORD(®->mailbox2), | ||
549 | RD_REG_WORD(®->mailbox3)); | ||
539 | 550 | ||
540 | WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET); | 551 | WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET); |
541 | RD_REG_DWORD(®->hccr); | 552 | RD_REG_DWORD(®->hccr); |
@@ -1243,10 +1254,10 @@ static int | |||
1243 | qla2x00_fw_ready(scsi_qla_host_t *ha) | 1254 | qla2x00_fw_ready(scsi_qla_host_t *ha) |
1244 | { | 1255 | { |
1245 | int rval; | 1256 | int rval; |
1246 | unsigned long wtime, mtime; | 1257 | unsigned long wtime, mtime, cs84xx_time; |
1247 | uint16_t min_wait; /* Minimum wait time if loop is down */ | 1258 | uint16_t min_wait; /* Minimum wait time if loop is down */ |
1248 | uint16_t wait_time; /* Wait time if loop is coming ready */ | 1259 | uint16_t wait_time; /* Wait time if loop is coming ready */ |
1249 | uint16_t fw_state; | 1260 | uint16_t state[3]; |
1250 | 1261 | ||
1251 | rval = QLA_SUCCESS; | 1262 | rval = QLA_SUCCESS; |
1252 | 1263 | ||
@@ -1275,12 +1286,34 @@ qla2x00_fw_ready(scsi_qla_host_t *ha) | |||
1275 | ha->host_no)); | 1286 | ha->host_no)); |
1276 | 1287 | ||
1277 | do { | 1288 | do { |
1278 | rval = qla2x00_get_firmware_state(ha, &fw_state); | 1289 | rval = qla2x00_get_firmware_state(ha, state); |
1279 | if (rval == QLA_SUCCESS) { | 1290 | if (rval == QLA_SUCCESS) { |
1280 | if (fw_state < FSTATE_LOSS_OF_SYNC) { | 1291 | if (state[0] < FSTATE_LOSS_OF_SYNC) { |
1281 | ha->device_flags &= ~DFLG_NO_CABLE; | 1292 | ha->device_flags &= ~DFLG_NO_CABLE; |
1282 | } | 1293 | } |
1283 | if (fw_state == FSTATE_READY) { | 1294 | if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { |
1295 | DEBUG16(printk("scsi(%ld): fw_state=%x " | ||
1296 | "84xx=%x.\n", ha->host_no, state[0], | ||
1297 | state[2])); | ||
1298 | if ((state[2] & FSTATE_LOGGED_IN) && | ||
1299 | (state[2] & FSTATE_WAITING_FOR_VERIFY)) { | ||
1300 | DEBUG16(printk("scsi(%ld): Sending " | ||
1301 | "verify iocb.\n", ha->host_no)); | ||
1302 | |||
1303 | cs84xx_time = jiffies; | ||
1304 | rval = qla84xx_init_chip(ha); | ||
1305 | if (rval != QLA_SUCCESS) | ||
1306 | break; | ||
1307 | |||
1308 | /* Add time taken to initialize. */ | ||
1309 | cs84xx_time = jiffies - cs84xx_time; | ||
1310 | wtime += cs84xx_time; | ||
1311 | mtime += cs84xx_time; | ||
1312 | DEBUG16(printk("scsi(%ld): Increasing " | ||
1313 | "wait time by %ld. New time %ld\n", | ||
1314 | ha->host_no, cs84xx_time, wtime)); | ||
1315 | } | ||
1316 | } else if (state[0] == FSTATE_READY) { | ||
1284 | DEBUG(printk("scsi(%ld): F/W Ready - OK \n", | 1317 | DEBUG(printk("scsi(%ld): F/W Ready - OK \n", |
1285 | ha->host_no)); | 1318 | ha->host_no)); |
1286 | 1319 | ||
@@ -1294,7 +1327,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha) | |||
1294 | rval = QLA_FUNCTION_FAILED; | 1327 | rval = QLA_FUNCTION_FAILED; |
1295 | 1328 | ||
1296 | if (atomic_read(&ha->loop_down_timer) && | 1329 | if (atomic_read(&ha->loop_down_timer) && |
1297 | fw_state != FSTATE_READY) { | 1330 | state[0] != FSTATE_READY) { |
1298 | /* Loop down. Timeout on min_wait for states | 1331 | /* Loop down. Timeout on min_wait for states |
1299 | * other than Wait for Login. | 1332 | * other than Wait for Login. |
1300 | */ | 1333 | */ |
@@ -1319,11 +1352,11 @@ qla2x00_fw_ready(scsi_qla_host_t *ha) | |||
1319 | msleep(500); | 1352 | msleep(500); |
1320 | 1353 | ||
1321 | DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", | 1354 | DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", |
1322 | ha->host_no, fw_state, jiffies)); | 1355 | ha->host_no, state[0], jiffies)); |
1323 | } while (1); | 1356 | } while (1); |
1324 | 1357 | ||
1325 | DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", | 1358 | DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", |
1326 | ha->host_no, fw_state, jiffies)); | 1359 | ha->host_no, state[0], jiffies)); |
1327 | 1360 | ||
1328 | if (rval) { | 1361 | if (rval) { |
1329 | DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", | 1362 | DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", |
@@ -1555,6 +1588,10 @@ qla2x00_nvram_config(scsi_qla_host_t *ha) | |||
1555 | qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " | 1588 | qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " |
1556 | "invalid -- WWPN) defaults.\n"); | 1589 | "invalid -- WWPN) defaults.\n"); |
1557 | 1590 | ||
1591 | if (chksum) | ||
1592 | qla2xxx_hw_event_log(ha, HW_EVENT_NVRAM_CHKSUM_ERR, 0, | ||
1593 | MSW(chksum), LSW(chksum)); | ||
1594 | |||
1558 | /* | 1595 | /* |
1559 | * Set default initialization control block. | 1596 | * Set default initialization control block. |
1560 | */ | 1597 | */ |
@@ -2165,20 +2202,6 @@ cleanup_allocation: | |||
2165 | } | 2202 | } |
2166 | 2203 | ||
2167 | static void | 2204 | static void |
2168 | qla2x00_probe_for_all_luns(scsi_qla_host_t *ha) | ||
2169 | { | ||
2170 | fc_port_t *fcport; | ||
2171 | |||
2172 | qla2x00_mark_all_devices_lost(ha, 0); | ||
2173 | list_for_each_entry(fcport, &ha->fcports, list) { | ||
2174 | if (fcport->port_type != FCT_TARGET) | ||
2175 | continue; | ||
2176 | |||
2177 | qla2x00_update_fcport(ha, fcport); | ||
2178 | } | ||
2179 | } | ||
2180 | |||
2181 | static void | ||
2182 | qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport) | 2205 | qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport) |
2183 | { | 2206 | { |
2184 | #define LS_UNKNOWN 2 | 2207 | #define LS_UNKNOWN 2 |
@@ -2251,10 +2274,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport) | |||
2251 | if (fcport->port_type == FCT_TARGET) | 2274 | if (fcport->port_type == FCT_TARGET) |
2252 | rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; | 2275 | rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; |
2253 | fc_remote_port_rolechg(rport, rport_ids.roles); | 2276 | fc_remote_port_rolechg(rport, rport_ids.roles); |
2254 | |||
2255 | if (rport->scsi_target_id != -1 && | ||
2256 | rport->scsi_target_id < ha->host->max_id) | ||
2257 | fcport->os_target_id = rport->scsi_target_id; | ||
2258 | } | 2277 | } |
2259 | 2278 | ||
2260 | /* | 2279 | /* |
@@ -2434,7 +2453,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha) | |||
2434 | 2453 | ||
2435 | if (fcport->loop_id == FC_NO_LOOP_ID) { | 2454 | if (fcport->loop_id == FC_NO_LOOP_ID) { |
2436 | fcport->loop_id = next_loopid; | 2455 | fcport->loop_id = next_loopid; |
2437 | rval = qla2x00_find_new_loop_id(ha, fcport); | 2456 | rval = qla2x00_find_new_loop_id( |
2457 | to_qla_parent(ha), fcport); | ||
2438 | if (rval != QLA_SUCCESS) { | 2458 | if (rval != QLA_SUCCESS) { |
2439 | /* Ran out of IDs to use */ | 2459 | /* Ran out of IDs to use */ |
2440 | break; | 2460 | break; |
@@ -2459,7 +2479,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha) | |||
2459 | 2479 | ||
2460 | /* Find a new loop ID to use. */ | 2480 | /* Find a new loop ID to use. */ |
2461 | fcport->loop_id = next_loopid; | 2481 | fcport->loop_id = next_loopid; |
2462 | rval = qla2x00_find_new_loop_id(ha, fcport); | 2482 | rval = qla2x00_find_new_loop_id(to_qla_parent(ha), |
2483 | fcport); | ||
2463 | if (rval != QLA_SUCCESS) { | 2484 | if (rval != QLA_SUCCESS) { |
2464 | /* Ran out of IDs to use */ | 2485 | /* Ran out of IDs to use */ |
2465 | break; | 2486 | break; |
@@ -3193,25 +3214,6 @@ qla2x00_loop_resync(scsi_qla_host_t *ha) | |||
3193 | } | 3214 | } |
3194 | 3215 | ||
3195 | void | 3216 | void |
3196 | qla2x00_rescan_fcports(scsi_qla_host_t *ha) | ||
3197 | { | ||
3198 | int rescan_done; | ||
3199 | fc_port_t *fcport; | ||
3200 | |||
3201 | rescan_done = 0; | ||
3202 | list_for_each_entry(fcport, &ha->fcports, list) { | ||
3203 | if ((fcport->flags & FCF_RESCAN_NEEDED) == 0) | ||
3204 | continue; | ||
3205 | |||
3206 | qla2x00_update_fcport(ha, fcport); | ||
3207 | fcport->flags &= ~FCF_RESCAN_NEEDED; | ||
3208 | |||
3209 | rescan_done = 1; | ||
3210 | } | ||
3211 | qla2x00_probe_for_all_luns(ha); | ||
3212 | } | ||
3213 | |||
3214 | void | ||
3215 | qla2x00_update_fcports(scsi_qla_host_t *ha) | 3217 | qla2x00_update_fcports(scsi_qla_host_t *ha) |
3216 | { | 3218 | { |
3217 | fc_port_t *fcport; | 3219 | fc_port_t *fcport; |
@@ -4044,16 +4046,16 @@ qla24xx_configure_vhba(scsi_qla_host_t *ha) | |||
4044 | if (!ha->parent) | 4046 | if (!ha->parent) |
4045 | return -EINVAL; | 4047 | return -EINVAL; |
4046 | 4048 | ||
4047 | rval = qla2x00_fw_ready(ha); | 4049 | rval = qla2x00_fw_ready(ha->parent); |
4048 | if (rval == QLA_SUCCESS) { | 4050 | if (rval == QLA_SUCCESS) { |
4049 | clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); | 4051 | clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); |
4050 | qla2x00_marker(ha, 0, 0, MK_SYNC_ALL); | 4052 | qla2x00_marker(ha->parent, 0, 0, MK_SYNC_ALL); |
4051 | } | 4053 | } |
4052 | 4054 | ||
4053 | ha->flags.management_server_logged_in = 0; | 4055 | ha->flags.management_server_logged_in = 0; |
4054 | 4056 | ||
4055 | /* Login to SNS first */ | 4057 | /* Login to SNS first */ |
4056 | qla24xx_login_fabric(ha, NPH_SNS, 0xff, 0xff, 0xfc, | 4058 | qla24xx_login_fabric(ha->parent, NPH_SNS, 0xff, 0xff, 0xfc, |
4057 | mb, BIT_1); | 4059 | mb, BIT_1); |
4058 | if (mb[0] != MBS_COMMAND_COMPLETE) { | 4060 | if (mb[0] != MBS_COMMAND_COMPLETE) { |
4059 | DEBUG15(qla_printk(KERN_INFO, ha, | 4061 | DEBUG15(qla_printk(KERN_INFO, ha, |
@@ -4067,7 +4069,77 @@ qla24xx_configure_vhba(scsi_qla_host_t *ha) | |||
4067 | atomic_set(&ha->loop_state, LOOP_UP); | 4069 | atomic_set(&ha->loop_state, LOOP_UP); |
4068 | set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); | 4070 | set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); |
4069 | set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); | 4071 | set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); |
4070 | rval = qla2x00_loop_resync(ha); | 4072 | rval = qla2x00_loop_resync(ha->parent); |
4071 | 4073 | ||
4072 | return rval; | 4074 | return rval; |
4073 | } | 4075 | } |
4076 | |||
4077 | /* 84XX Support **************************************************************/ | ||
4078 | |||
4079 | static LIST_HEAD(qla_cs84xx_list); | ||
4080 | static DEFINE_MUTEX(qla_cs84xx_mutex); | ||
4081 | |||
4082 | static struct qla_chip_state_84xx * | ||
4083 | qla84xx_get_chip(struct scsi_qla_host *ha) | ||
4084 | { | ||
4085 | struct qla_chip_state_84xx *cs84xx; | ||
4086 | |||
4087 | mutex_lock(&qla_cs84xx_mutex); | ||
4088 | |||
4089 | /* Find any shared 84xx chip. */ | ||
4090 | list_for_each_entry(cs84xx, &qla_cs84xx_list, list) { | ||
4091 | if (cs84xx->bus == ha->pdev->bus) { | ||
4092 | kref_get(&cs84xx->kref); | ||
4093 | goto done; | ||
4094 | } | ||
4095 | } | ||
4096 | |||
4097 | cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL); | ||
4098 | if (!cs84xx) | ||
4099 | goto done; | ||
4100 | |||
4101 | kref_init(&cs84xx->kref); | ||
4102 | spin_lock_init(&cs84xx->access_lock); | ||
4103 | mutex_init(&cs84xx->fw_update_mutex); | ||
4104 | cs84xx->bus = ha->pdev->bus; | ||
4105 | |||
4106 | list_add_tail(&cs84xx->list, &qla_cs84xx_list); | ||
4107 | done: | ||
4108 | mutex_unlock(&qla_cs84xx_mutex); | ||
4109 | return cs84xx; | ||
4110 | } | ||
4111 | |||
4112 | static void | ||
4113 | __qla84xx_chip_release(struct kref *kref) | ||
4114 | { | ||
4115 | struct qla_chip_state_84xx *cs84xx = | ||
4116 | container_of(kref, struct qla_chip_state_84xx, kref); | ||
4117 | |||
4118 | mutex_lock(&qla_cs84xx_mutex); | ||
4119 | list_del(&cs84xx->list); | ||
4120 | mutex_unlock(&qla_cs84xx_mutex); | ||
4121 | kfree(cs84xx); | ||
4122 | } | ||
4123 | |||
4124 | void | ||
4125 | qla84xx_put_chip(struct scsi_qla_host *ha) | ||
4126 | { | ||
4127 | if (ha->cs84xx) | ||
4128 | kref_put(&ha->cs84xx->kref, __qla84xx_chip_release); | ||
4129 | } | ||
4130 | |||
4131 | static int | ||
4132 | qla84xx_init_chip(scsi_qla_host_t *ha) | ||
4133 | { | ||
4134 | int rval; | ||
4135 | uint16_t status[2]; | ||
4136 | |||
4137 | mutex_lock(&ha->cs84xx->fw_update_mutex); | ||
4138 | |||
4139 | rval = qla84xx_verify_chip(ha, status); | ||
4140 | |||
4141 | mutex_unlock(&ha->cs84xx->fw_update_mutex); | ||
4142 | |||
4143 | return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED: | ||
4144 | QLA_SUCCESS; | ||
4145 | } | ||
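
    [Editor's note] qla84xx_get_chip() and qla84xx_put_chip() above let every HBA function on the same PCI bus share one qla_chip_state_84xx object: a global list protected by qla_cs84xx_mutex is searched first, a new entry is allocated only on a miss, and a kref whose release callback unlinks and frees the entry governs its lifetime. The sketch below mirrors that lookup-or-allocate structure with invented names; it is not the driver's actual type or API.

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct shared_state {
        struct list_head list;
        struct kref kref;
        void *key;                      /* e.g. the PCI bus the HBAs sit on */
    };

    static LIST_HEAD(shared_list);
    static DEFINE_MUTEX(shared_lock);

    static struct shared_state *shared_state_get(void *key)
    {
        struct shared_state *s;

        mutex_lock(&shared_lock);
        list_for_each_entry(s, &shared_list, list)
            if (s->key == key) {
                kref_get(&s->kref);     /* reuse the existing object */
                goto out;
            }

        s = kzalloc(sizeof(*s), GFP_KERNEL);
        if (s) {
            kref_init(&s->kref);        /* refcount starts at 1 */
            s->key = key;
            list_add_tail(&s->list, &shared_list);
        }
    out:
        mutex_unlock(&shared_lock);
        return s;
    }

    static void shared_state_release(struct kref *kref)
    {
        struct shared_state *s = container_of(kref, struct shared_state, kref);

        mutex_lock(&shared_lock);
        list_del(&s->list);
        mutex_unlock(&shared_lock);
        kfree(s);
    }

    static void shared_state_put(struct shared_state *s)
    {
        if (s)
            kref_put(&s->kref, shared_state_release);
    }
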
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 5d1a3f7c408f..e9bae27737d1 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h | |||
@@ -1,11 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | static __inline__ uint16_t qla2x00_debounce_register(volatile uint16_t __iomem *); | ||
9 | /* | 8 | /* |
10 | * qla2x00_debounce_register | 9 | * qla2x00_debounce_register |
11 | * Debounce register. | 10 | * Debounce register. |
@@ -32,94 +31,12 @@ qla2x00_debounce_register(volatile uint16_t __iomem *addr) | |||
32 | return (first); | 31 | return (first); |
33 | } | 32 | } |
34 | 33 | ||
35 | static __inline__ int qla2x00_normalize_dma_addr( | ||
36 | dma_addr_t *e_addr, uint32_t *e_len, | ||
37 | dma_addr_t *ne_addr, uint32_t *ne_len); | ||
38 | |||
39 | /** | ||
40 | * qla2x00_normalize_dma_addr() - Normalize an DMA address. | ||
41 | * @e_addr: Raw DMA address | ||
42 | * @e_len: Raw DMA length | ||
43 | * @ne_addr: Normalized second DMA address | ||
44 | * @ne_len: Normalized second DMA length | ||
45 | * | ||
46 | * If the address does not span a 4GB page boundary, the contents of @ne_addr | ||
47 | * and @ne_len are undefined. @e_len is updated to reflect a normalization. | ||
48 | * | ||
49 | * Example: | ||
50 | * | ||
51 | * ffffabc0ffffeeee (e_addr) start of DMA address | ||
52 | * 0000000020000000 (e_len) length of DMA transfer | ||
53 | * ffffabc11fffeeed end of DMA transfer | ||
54 | * | ||
55 | * Is the 4GB boundary crossed? | ||
56 | * | ||
57 | * ffffabc0ffffeeee (e_addr) | ||
58 | * ffffabc11fffeeed (e_addr + e_len - 1) | ||
59 | * 00000001e0000003 ((e_addr ^ (e_addr + e_len - 1)) | ||
60 | * 0000000100000000 ((e_addr ^ (e_addr + e_len - 1)) & ~(0xffffffff) | ||
61 | * | ||
62 | * Compute start of second DMA segment: | ||
63 | * | ||
64 | * ffffabc0ffffeeee (e_addr) | ||
65 | * ffffabc1ffffeeee (0x100000000 + e_addr) | ||
66 | * ffffabc100000000 (0x100000000 + e_addr) & ~(0xffffffff) | ||
67 | * ffffabc100000000 (ne_addr) | ||
68 | * | ||
69 | * Compute length of second DMA segment: | ||
70 | * | ||
71 | * 00000000ffffeeee (e_addr & 0xffffffff) | ||
72 | * 0000000000001112 (0x100000000 - (e_addr & 0xffffffff)) | ||
73 | * 000000001fffeeee (e_len - (0x100000000 - (e_addr & 0xffffffff)) | ||
74 | * 000000001fffeeee (ne_len) | ||
75 | * | ||
76 | * Adjust length of first DMA segment | ||
77 | * | ||
78 | * 0000000020000000 (e_len) | ||
79 | * 0000000000001112 (e_len - ne_len) | ||
80 | * 0000000000001112 (e_len) | ||
81 | * | ||
82 | * Returns non-zero if the specified address was normalized, else zero. | ||
83 | */ | ||
84 | static __inline__ int | ||
85 | qla2x00_normalize_dma_addr( | ||
86 | dma_addr_t *e_addr, uint32_t *e_len, | ||
87 | dma_addr_t *ne_addr, uint32_t *ne_len) | ||
88 | { | ||
89 | int normalized; | ||
90 | |||
91 | normalized = 0; | ||
92 | if ((*e_addr ^ (*e_addr + *e_len - 1)) & ~(0xFFFFFFFFULL)) { | ||
93 | /* Compute normalized crossed address and len */ | ||
94 | *ne_addr = (0x100000000ULL + *e_addr) & ~(0xFFFFFFFFULL); | ||
95 | *ne_len = *e_len - (0x100000000ULL - (*e_addr & 0xFFFFFFFFULL)); | ||
96 | *e_len -= *ne_len; | ||
97 | |||
98 | normalized++; | ||
99 | } | ||
100 | return (normalized); | ||
101 | } | ||
102 | |||
103 | static __inline__ void qla2x00_poll(scsi_qla_host_t *); | ||
104 | static inline void | 34 | static inline void |
105 | qla2x00_poll(scsi_qla_host_t *ha) | 35 | qla2x00_poll(scsi_qla_host_t *ha) |
106 | { | 36 | { |
107 | ha->isp_ops->intr_handler(0, ha); | 37 | ha->isp_ops->intr_handler(0, ha); |
108 | } | 38 | } |
109 | 39 | ||
110 | static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *); | ||
111 | /* | ||
112 | * This routine will wait for fabric devices for | ||
113 | * the reset delay. | ||
114 | */ | ||
115 | static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *ha) | ||
116 | { | ||
117 | uint16_t fw_state; | ||
118 | |||
119 | qla2x00_get_firmware_state(ha, &fw_state); | ||
120 | } | ||
121 | |||
122 | static __inline__ scsi_qla_host_t * to_qla_parent(scsi_qla_host_t *); | ||
123 | static __inline__ scsi_qla_host_t * | 40 | static __inline__ scsi_qla_host_t * |
124 | to_qla_parent(scsi_qla_host_t *ha) | 41 | to_qla_parent(scsi_qla_host_t *ha) |
125 | { | 42 | { |
@@ -152,7 +69,6 @@ qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked) | |||
152 | return (QLA_SUCCESS); | 69 | return (QLA_SUCCESS); |
153 | } | 70 | } |
154 | 71 | ||
155 | static inline uint8_t *host_to_fcp_swap(uint8_t *, uint32_t); | ||
156 | static inline uint8_t * | 72 | static inline uint8_t * |
157 | host_to_fcp_swap(uint8_t *fcp, uint32_t bsize) | 73 | host_to_fcp_swap(uint8_t *fcp, uint32_t bsize) |
158 | { | 74 | { |
@@ -166,7 +82,6 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize) | |||
166 | return fcp; | 82 | return fcp; |
167 | } | 83 | } |
168 | 84 | ||
169 | static inline int qla2x00_is_reserved_id(scsi_qla_host_t *, uint16_t); | ||
170 | static inline int | 85 | static inline int |
171 | qla2x00_is_reserved_id(scsi_qla_host_t *ha, uint16_t loop_id) | 86 | qla2x00_is_reserved_id(scsi_qla_host_t *ha, uint16_t loop_id) |
172 | { | 87 | { |
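
    [Editor's note] Most of the qla_inline.h churn is the removal of forward declarations for helpers that are defined static inline in the same header before any caller appears, so the extra prototypes were dead weight (qla2x00_normalize_dma_addr and qla2x00_check_fabric_devices go away entirely as unused). A trivial illustration with a made-up helper:

    /* Redundant: the full definition below already precedes every use in
     * this translation unit, so no separate prototype is needed. */
    /* static inline int demo_ready(int state); */

    static inline int demo_ready(int state)
    {
        return state == 3;              /* e.g. "firmware ready" */
    }
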
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 024c662ec34d..5489d5024673 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -11,9 +11,6 @@ | |||
11 | 11 | ||
12 | #include <scsi/scsi_tcq.h> | 12 | #include <scsi/scsi_tcq.h> |
13 | 13 | ||
14 | static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd); | ||
15 | static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *); | ||
16 | static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *); | ||
17 | static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha); | 14 | static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha); |
18 | static void qla2x00_isp_cmd(scsi_qla_host_t *ha); | 15 | static void qla2x00_isp_cmd(scsi_qla_host_t *ha); |
19 | 16 | ||
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index f0337036c7bb..285479b62d8f 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -14,9 +14,6 @@ static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t); | |||
14 | static void qla2x00_status_entry(scsi_qla_host_t *, void *); | 14 | static void qla2x00_status_entry(scsi_qla_host_t *, void *); |
15 | static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); | 15 | static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); |
16 | static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *); | 16 | static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *); |
17 | static void qla2x00_ms_entry(scsi_qla_host_t *, ms_iocb_entry_t *); | ||
18 | |||
19 | static void qla24xx_ms_entry(scsi_qla_host_t *, struct ct_entry_24xx *); | ||
20 | 17 | ||
21 | /** | 18 | /** |
22 | * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. | 19 | * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. |
@@ -33,7 +30,6 @@ qla2100_intr_handler(int irq, void *dev_id) | |||
33 | scsi_qla_host_t *ha; | 30 | scsi_qla_host_t *ha; |
34 | struct device_reg_2xxx __iomem *reg; | 31 | struct device_reg_2xxx __iomem *reg; |
35 | int status; | 32 | int status; |
36 | unsigned long flags; | ||
37 | unsigned long iter; | 33 | unsigned long iter; |
38 | uint16_t hccr; | 34 | uint16_t hccr; |
39 | uint16_t mb[4]; | 35 | uint16_t mb[4]; |
@@ -48,7 +44,7 @@ qla2100_intr_handler(int irq, void *dev_id) | |||
48 | reg = &ha->iobase->isp; | 44 | reg = &ha->iobase->isp; |
49 | status = 0; | 45 | status = 0; |
50 | 46 | ||
51 | spin_lock_irqsave(&ha->hardware_lock, flags); | 47 | spin_lock(&ha->hardware_lock); |
52 | for (iter = 50; iter--; ) { | 48 | for (iter = 50; iter--; ) { |
53 | hccr = RD_REG_WORD(®->hccr); | 49 | hccr = RD_REG_WORD(®->hccr); |
54 | if (hccr & HCCR_RISC_PAUSE) { | 50 | if (hccr & HCCR_RISC_PAUSE) { |
@@ -99,7 +95,7 @@ qla2100_intr_handler(int irq, void *dev_id) | |||
99 | RD_REG_WORD(®->hccr); | 95 | RD_REG_WORD(®->hccr); |
100 | } | 96 | } |
101 | } | 97 | } |
102 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 98 | spin_unlock(&ha->hardware_lock); |
103 | 99 | ||
104 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && | 100 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && |
105 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { | 101 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { |
@@ -125,7 +121,6 @@ qla2300_intr_handler(int irq, void *dev_id) | |||
125 | scsi_qla_host_t *ha; | 121 | scsi_qla_host_t *ha; |
126 | struct device_reg_2xxx __iomem *reg; | 122 | struct device_reg_2xxx __iomem *reg; |
127 | int status; | 123 | int status; |
128 | unsigned long flags; | ||
129 | unsigned long iter; | 124 | unsigned long iter; |
130 | uint32_t stat; | 125 | uint32_t stat; |
131 | uint16_t hccr; | 126 | uint16_t hccr; |
@@ -141,7 +136,7 @@ qla2300_intr_handler(int irq, void *dev_id) | |||
141 | reg = &ha->iobase->isp; | 136 | reg = &ha->iobase->isp; |
142 | status = 0; | 137 | status = 0; |
143 | 138 | ||
144 | spin_lock_irqsave(&ha->hardware_lock, flags); | 139 | spin_lock(&ha->hardware_lock); |
145 | for (iter = 50; iter--; ) { | 140 | for (iter = 50; iter--; ) { |
146 | stat = RD_REG_DWORD(®->u.isp2300.host_status); | 141 | stat = RD_REG_DWORD(®->u.isp2300.host_status); |
147 | if (stat & HSR_RISC_PAUSED) { | 142 | if (stat & HSR_RISC_PAUSED) { |
@@ -211,7 +206,7 @@ qla2300_intr_handler(int irq, void *dev_id) | |||
211 | WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); | 206 | WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); |
212 | RD_REG_WORD_RELAXED(®->hccr); | 207 | RD_REG_WORD_RELAXED(®->hccr); |
213 | } | 208 | } |
214 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 209 | spin_unlock(&ha->hardware_lock); |
215 | 210 | ||
216 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && | 211 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && |
217 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { | 212 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { |
@@ -276,6 +271,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
276 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; | 271 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; |
277 | uint32_t rscn_entry, host_pid; | 272 | uint32_t rscn_entry, host_pid; |
278 | uint8_t rscn_queue_index; | 273 | uint8_t rscn_queue_index; |
274 | unsigned long flags; | ||
275 | scsi_qla_host_t *vha; | ||
276 | int i; | ||
279 | 277 | ||
280 | /* Setup to process RIO completion. */ | 278 | /* Setup to process RIO completion. */ |
281 | handle_cnt = 0; | 279 | handle_cnt = 0; |
@@ -351,6 +349,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
351 | "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", | 349 | "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", |
352 | mb[1], mb[2], mb[3]); | 350 | mb[1], mb[2], mb[3]); |
353 | 351 | ||
352 | qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); | ||
354 | ha->isp_ops->fw_dump(ha, 1); | 353 | ha->isp_ops->fw_dump(ha, 1); |
355 | 354 | ||
356 | if (IS_FWI2_CAPABLE(ha)) { | 355 | if (IS_FWI2_CAPABLE(ha)) { |
@@ -375,6 +374,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
375 | ha->host_no)); | 374 | ha->host_no)); |
376 | qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); | 375 | qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); |
377 | 376 | ||
377 | qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); | ||
378 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 378 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); |
379 | break; | 379 | break; |
380 | 380 | ||
@@ -383,6 +383,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
383 | ha->host_no)); | 383 | ha->host_no)); |
384 | qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n"); | 384 | qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n"); |
385 | 385 | ||
386 | qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); | ||
386 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 387 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); |
387 | break; | 388 | break; |
388 | 389 | ||
@@ -410,6 +411,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
410 | set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); | 411 | set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); |
411 | 412 | ||
412 | ha->flags.management_server_logged_in = 0; | 413 | ha->flags.management_server_logged_in = 0; |
414 | qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]); | ||
413 | break; | 415 | break; |
414 | 416 | ||
415 | case MBA_LOOP_UP: /* Loop Up Event */ | 417 | case MBA_LOOP_UP: /* Loop Up Event */ |
@@ -429,12 +431,14 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
429 | link_speed); | 431 | link_speed); |
430 | 432 | ||
431 | ha->flags.management_server_logged_in = 0; | 433 | ha->flags.management_server_logged_in = 0; |
434 | qla2x00_post_aen_work(ha, FCH_EVT_LINKUP, ha->link_data_rate); | ||
432 | break; | 435 | break; |
433 | 436 | ||
434 | case MBA_LOOP_DOWN: /* Loop Down Event */ | 437 | case MBA_LOOP_DOWN: /* Loop Down Event */ |
435 | DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN (%x).\n", | 438 | DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " |
436 | ha->host_no, mb[1])); | 439 | "(%x %x %x).\n", ha->host_no, mb[1], mb[2], mb[3])); |
437 | qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x).\n", mb[1]); | 440 | qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n", |
441 | mb[1], mb[2], mb[3]); | ||
438 | 442 | ||
439 | if (atomic_read(&ha->loop_state) != LOOP_DOWN) { | 443 | if (atomic_read(&ha->loop_state) != LOOP_DOWN) { |
440 | atomic_set(&ha->loop_state, LOOP_DOWN); | 444 | atomic_set(&ha->loop_state, LOOP_DOWN); |
@@ -452,6 +456,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
452 | ha->link_data_rate = PORT_SPEED_UNKNOWN; | 456 | ha->link_data_rate = PORT_SPEED_UNKNOWN; |
453 | if (ql2xfdmienable) | 457 | if (ql2xfdmienable) |
454 | set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); | 458 | set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); |
459 | qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0); | ||
455 | break; | 460 | break; |
456 | 461 | ||
457 | case MBA_LIP_RESET: /* LIP reset occurred */ | 462 | case MBA_LIP_RESET: /* LIP reset occurred */ |
@@ -475,6 +480,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
475 | 480 | ||
476 | ha->operating_mode = LOOP; | 481 | ha->operating_mode = LOOP; |
477 | ha->flags.management_server_logged_in = 0; | 482 | ha->flags.management_server_logged_in = 0; |
483 | qla2x00_post_aen_work(ha, FCH_EVT_LIPRESET, mb[1]); | ||
478 | break; | 484 | break; |
479 | 485 | ||
480 | case MBA_POINT_TO_POINT: /* Point-to-Point */ | 486 | case MBA_POINT_TO_POINT: /* Point-to-Point */ |
@@ -538,6 +544,18 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
538 | break; | 544 | break; |
539 | 545 | ||
540 | case MBA_PORT_UPDATE: /* Port database update */ | 546 | case MBA_PORT_UPDATE: /* Port database update */ |
547 | if ((ha->flags.npiv_supported) && (ha->num_vhosts)) { | ||
548 | for_each_mapped_vp_idx(ha, i) { | ||
549 | list_for_each_entry(vha, &ha->vp_list, | ||
550 | vp_list) { | ||
551 | if ((mb[3] & 0xff) | ||
552 | == vha->vp_idx) { | ||
553 | ha = vha; | ||
554 | break; | ||
555 | } | ||
556 | } | ||
557 | } | ||
558 | } | ||
541 | /* | 559 | /* |
542 | * If PORT UPDATE is global (recieved LIP_OCCURED/LIP_RESET | 560 | * If PORT UPDATE is global (recieved LIP_OCCURED/LIP_RESET |
543 | * event etc. earlier indicating loop is down) then process | 561 | * event etc. earlier indicating loop is down) then process |
@@ -572,12 +590,18 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
572 | break; | 590 | break; |
573 | 591 | ||
574 | case MBA_RSCN_UPDATE: /* State Change Registration */ | 592 | case MBA_RSCN_UPDATE: /* State Change Registration */ |
575 | /* Check if the Vport has issued a SCR */ | 593 | if ((ha->flags.npiv_supported) && (ha->num_vhosts)) { |
576 | if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags)) | 594 | for_each_mapped_vp_idx(ha, i) { |
577 | break; | 595 | list_for_each_entry(vha, &ha->vp_list, |
578 | /* Only handle SCNs for our Vport index. */ | 596 | vp_list) { |
579 | if (ha->flags.npiv_supported && ha->vp_idx != mb[3]) | 597 | if ((mb[3] & 0xff) |
580 | break; | 598 | == vha->vp_idx) { |
599 | ha = vha; | ||
600 | break; | ||
601 | } | ||
602 | } | ||
603 | } | ||
604 | } | ||
581 | 605 | ||
582 | DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", | 606 | DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", |
583 | ha->host_no)); | 607 | ha->host_no)); |
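
    [Editor's note] The PORT UPDATE and RSCN hunks above add the same NPIV redirection: when virtual hosts exist, the low byte of mailbox register 3 identifies the vport the event belongs to, and the handler walks the vp_list to retarget ha at the matching vport before processing continues. The sketch below shows that selection step simplified to a single list walk, with an invented demo_vport type in place of scsi_qla_host_t.

    #include <linux/list.h>
    #include <linux/types.h>

    /* Illustrative virtual-port record; the driver's host structure
     * carries far more state than this. */
    struct demo_vport {
        struct list_head vp_list;
        u8 vp_idx;
    };

    /* Redirect an async event to the vport whose index is carried in the
     * low byte of mailbox register 3. */
    static struct demo_vport *
    demo_pick_vport(struct list_head *vp_list, struct demo_vport *physical,
                    u16 mb3)
    {
        struct demo_vport *vha;

        list_for_each_entry(vha, vp_list, vp_list)
            if ((mb3 & 0xff) == vha->vp_idx)
                return vha;             /* handle the event as this vport */

        return physical;                /* fall back to the physical port */
    }
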
@@ -612,6 +636,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
612 | 636 | ||
613 | set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); | 637 | set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); |
614 | set_bit(RSCN_UPDATE, &ha->dpc_flags); | 638 | set_bit(RSCN_UPDATE, &ha->dpc_flags); |
639 | qla2x00_post_aen_work(ha, FCH_EVT_RSCN, rscn_entry); | ||
615 | break; | 640 | break; |
616 | 641 | ||
617 | /* case MBA_RIO_RESPONSE: */ | 642 | /* case MBA_RIO_RESPONSE: */ |
@@ -637,6 +662,42 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
637 | DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", | 662 | DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", |
638 | ha->host_no, mb[1], mb[2])); | 663 | ha->host_no, mb[1], mb[2])); |
639 | break; | 664 | break; |
665 | |||
666 | case MBA_ISP84XX_ALERT: | ||
667 | DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- " | ||
668 | "%04x %04x %04x\n", ha->host_no, mb[1], mb[2], mb[3])); | ||
669 | |||
670 | spin_lock_irqsave(&ha->cs84xx->access_lock, flags); | ||
671 | switch (mb[1]) { | ||
672 | case A84_PANIC_RECOVERY: | ||
673 | qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery " | ||
674 | "%04x %04x\n", mb[2], mb[3]); | ||
675 | break; | ||
676 | case A84_OP_LOGIN_COMPLETE: | ||
677 | ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; | ||
678 | DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" | ||
679 | "firmware version %x\n", ha->cs84xx->op_fw_version)); | ||
680 | break; | ||
681 | case A84_DIAG_LOGIN_COMPLETE: | ||
682 | ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; | ||
683 | DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" | ||
684 | "diagnostic firmware version %x\n", | ||
685 | ha->cs84xx->diag_fw_version)); | ||
686 | break; | ||
687 | case A84_GOLD_LOGIN_COMPLETE: | ||
688 | ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; | ||
689 | ha->cs84xx->fw_update = 1; | ||
690 | DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold " | ||
691 | "firmware version %x\n", | ||
692 | ha->cs84xx->gold_fw_version)); | ||
693 | break; | ||
694 | default: | ||
695 | qla_printk(KERN_ERR, ha, | ||
696 | "Alert 84xx: Invalid Alert %04x %04x %04x\n", | ||
697 | mb[1], mb[2], mb[3]); | ||
698 | } | ||
699 | spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); | ||
700 | break; | ||
640 | } | 701 | } |
641 | 702 | ||
642 | if (!ha->parent && ha->num_vhosts) | 703 | if (!ha->parent && ha->num_vhosts) |
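
    [Editor's note] The new MBA_ISP84XX_ALERT case takes the shared cs84xx->access_lock with irqsave, then folds mailbox registers 2 and 3 into a 32-bit firmware version (mb[3] carries the high word) depending on which login completed. A small hedged sketch of that decode-and-publish step; struct demo_chip_state and the helper are illustrative, not driver API.

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_chip_state {
        spinlock_t lock;
        u32 op_fw_version;
    };

    static void demo_record_fw_version(struct demo_chip_state *cs, u16 *mb)
    {
        unsigned long flags;

        spin_lock_irqsave(&cs->lock, flags);
        cs->op_fw_version = (u32)mb[3] << 16 | mb[2];  /* high word in mb[3] */
        spin_unlock_irqrestore(&cs->lock, flags);
    }
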
@@ -803,9 +864,6 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha) | |||
803 | case STATUS_CONT_TYPE: | 864 | case STATUS_CONT_TYPE: |
804 | qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt); | 865 | qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt); |
805 | break; | 866 | break; |
806 | case MS_IOCB_TYPE: | ||
807 | qla2x00_ms_entry(ha, (ms_iocb_entry_t *)pkt); | ||
808 | break; | ||
809 | default: | 867 | default: |
810 | /* Type Not Supported. */ | 868 | /* Type Not Supported. */ |
811 | DEBUG4(printk(KERN_WARNING | 869 | DEBUG4(printk(KERN_WARNING |
@@ -1340,44 +1398,6 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt) | |||
1340 | } | 1398 | } |
1341 | 1399 | ||
1342 | /** | 1400 | /** |
1343 | * qla2x00_ms_entry() - Process a Management Server entry. | ||
1344 | * @ha: SCSI driver HA context | ||
1345 | * @index: Response queue out pointer | ||
1346 | */ | ||
1347 | static void | ||
1348 | qla2x00_ms_entry(scsi_qla_host_t *ha, ms_iocb_entry_t *pkt) | ||
1349 | { | ||
1350 | srb_t *sp; | ||
1351 | |||
1352 | DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n", | ||
1353 | __func__, ha->host_no, pkt, pkt->handle1)); | ||
1354 | |||
1355 | /* Validate handle. */ | ||
1356 | if (pkt->handle1 < MAX_OUTSTANDING_COMMANDS) | ||
1357 | sp = ha->outstanding_cmds[pkt->handle1]; | ||
1358 | else | ||
1359 | sp = NULL; | ||
1360 | |||
1361 | if (sp == NULL) { | ||
1362 | DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n", | ||
1363 | ha->host_no)); | ||
1364 | qla_printk(KERN_WARNING, ha, "MS entry - invalid handle\n"); | ||
1365 | |||
1366 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | ||
1367 | return; | ||
1368 | } | ||
1369 | |||
1370 | CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->status); | ||
1371 | CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status; | ||
1372 | |||
1373 | /* Free outstanding command slot. */ | ||
1374 | ha->outstanding_cmds[pkt->handle1] = NULL; | ||
1375 | |||
1376 | qla2x00_sp_compl(ha, sp); | ||
1377 | } | ||
1378 | |||
1379 | |||
1380 | /** | ||
1381 | * qla24xx_mbx_completion() - Process mailbox command completions. | 1401 | * qla24xx_mbx_completion() - Process mailbox command completions. |
1382 | * @ha: SCSI driver HA context | 1402 | * @ha: SCSI driver HA context |
1383 | * @mb0: Mailbox0 register | 1403 | * @mb0: Mailbox0 register |
@@ -1449,9 +1469,6 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha) | |||
1449 | case STATUS_CONT_TYPE: | 1469 | case STATUS_CONT_TYPE: |
1450 | qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt); | 1470 | qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt); |
1451 | break; | 1471 | break; |
1452 | case MS_IOCB_TYPE: | ||
1453 | qla24xx_ms_entry(ha, (struct ct_entry_24xx *)pkt); | ||
1454 | break; | ||
1455 | case VP_RPT_ID_IOCB_TYPE: | 1472 | case VP_RPT_ID_IOCB_TYPE: |
1456 | qla24xx_report_id_acquisition(ha, | 1473 | qla24xx_report_id_acquisition(ha, |
1457 | (struct vp_rpt_id_entry_24xx *)pkt); | 1474 | (struct vp_rpt_id_entry_24xx *)pkt); |
@@ -1533,7 +1550,6 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
1533 | scsi_qla_host_t *ha; | 1550 | scsi_qla_host_t *ha; |
1534 | struct device_reg_24xx __iomem *reg; | 1551 | struct device_reg_24xx __iomem *reg; |
1535 | int status; | 1552 | int status; |
1536 | unsigned long flags; | ||
1537 | unsigned long iter; | 1553 | unsigned long iter; |
1538 | uint32_t stat; | 1554 | uint32_t stat; |
1539 | uint32_t hccr; | 1555 | uint32_t hccr; |
@@ -1549,13 +1565,19 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
1549 | reg = &ha->iobase->isp24; | 1565 | reg = &ha->iobase->isp24; |
1550 | status = 0; | 1566 | status = 0; |
1551 | 1567 | ||
1552 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1568 | spin_lock(&ha->hardware_lock); |
1553 | for (iter = 50; iter--; ) { | 1569 | for (iter = 50; iter--; ) { |
1554 | stat = RD_REG_DWORD(®->host_status); | 1570 | stat = RD_REG_DWORD(®->host_status); |
1555 | if (stat & HSRX_RISC_PAUSED) { | 1571 | if (stat & HSRX_RISC_PAUSED) { |
1556 | if (pci_channel_offline(ha->pdev)) | 1572 | if (pci_channel_offline(ha->pdev)) |
1557 | break; | 1573 | break; |
1558 | 1574 | ||
1575 | if (ha->hw_event_pause_errors == 0) | ||
1576 | qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR, | ||
1577 | 0, MSW(stat), LSW(stat)); | ||
1578 | else if (ha->hw_event_pause_errors < 0xffffffff) | ||
1579 | ha->hw_event_pause_errors++; | ||
1580 | |||
1559 | hccr = RD_REG_DWORD(®->hccr); | 1581 | hccr = RD_REG_DWORD(®->hccr); |
1560 | 1582 | ||
1561 | qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " | 1583 | qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " |
@@ -1597,7 +1619,7 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
1597 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 1619 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
1598 | RD_REG_DWORD_RELAXED(®->hccr); | 1620 | RD_REG_DWORD_RELAXED(®->hccr); |
1599 | } | 1621 | } |
1600 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1622 | spin_unlock(&ha->hardware_lock); |
1601 | 1623 | ||
1602 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && | 1624 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && |
1603 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { | 1625 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { |
@@ -1608,66 +1630,21 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
1608 | return IRQ_HANDLED; | 1630 | return IRQ_HANDLED; |
1609 | } | 1631 | } |
1610 | 1632 | ||
1611 | /** | ||
1612 | * qla24xx_ms_entry() - Process a Management Server entry. | ||
1613 | * @ha: SCSI driver HA context | ||
1614 | * @index: Response queue out pointer | ||
1615 | */ | ||
1616 | static void | ||
1617 | qla24xx_ms_entry(scsi_qla_host_t *ha, struct ct_entry_24xx *pkt) | ||
1618 | { | ||
1619 | srb_t *sp; | ||
1620 | |||
1621 | DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n", | ||
1622 | __func__, ha->host_no, pkt, pkt->handle)); | ||
1623 | |||
1624 | DEBUG9(printk("%s: ct pkt dump:\n", __func__)); | ||
1625 | DEBUG9(qla2x00_dump_buffer((void *)pkt, sizeof(struct ct_entry_24xx))); | ||
1626 | |||
1627 | /* Validate handle. */ | ||
1628 | if (pkt->handle < MAX_OUTSTANDING_COMMANDS) | ||
1629 | sp = ha->outstanding_cmds[pkt->handle]; | ||
1630 | else | ||
1631 | sp = NULL; | ||
1632 | |||
1633 | if (sp == NULL) { | ||
1634 | DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n", | ||
1635 | ha->host_no)); | ||
1636 | DEBUG10(printk("scsi(%ld): MS entry - invalid handle\n", | ||
1637 | ha->host_no)); | ||
1638 | qla_printk(KERN_WARNING, ha, "MS entry - invalid handle %d\n", | ||
1639 | pkt->handle); | ||
1640 | |||
1641 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | ||
1642 | return; | ||
1643 | } | ||
1644 | |||
1645 | CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->comp_status); | ||
1646 | CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status; | ||
1647 | |||
1648 | /* Free outstanding command slot. */ | ||
1649 | ha->outstanding_cmds[pkt->handle] = NULL; | ||
1650 | |||
1651 | qla2x00_sp_compl(ha, sp); | ||
1652 | } | ||
1653 | |||
1654 | static irqreturn_t | 1633 | static irqreturn_t |
1655 | qla24xx_msix_rsp_q(int irq, void *dev_id) | 1634 | qla24xx_msix_rsp_q(int irq, void *dev_id) |
1656 | { | 1635 | { |
1657 | scsi_qla_host_t *ha; | 1636 | scsi_qla_host_t *ha; |
1658 | struct device_reg_24xx __iomem *reg; | 1637 | struct device_reg_24xx __iomem *reg; |
1659 | unsigned long flags; | ||
1660 | 1638 | ||
1661 | ha = dev_id; | 1639 | ha = dev_id; |
1662 | reg = &ha->iobase->isp24; | 1640 | reg = &ha->iobase->isp24; |
1663 | 1641 | ||
1664 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1642 | spin_lock(&ha->hardware_lock); |
1665 | 1643 | ||
1666 | qla24xx_process_response_queue(ha); | 1644 | qla24xx_process_response_queue(ha); |
1667 | |||
1668 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 1645 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
1669 | 1646 | ||
1670 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1647 | spin_unlock(&ha->hardware_lock); |
1671 | 1648 | ||
1672 | return IRQ_HANDLED; | 1649 | return IRQ_HANDLED; |
1673 | } | 1650 | } |
@@ -1678,7 +1655,6 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
1678 | scsi_qla_host_t *ha; | 1655 | scsi_qla_host_t *ha; |
1679 | struct device_reg_24xx __iomem *reg; | 1656 | struct device_reg_24xx __iomem *reg; |
1680 | int status; | 1657 | int status; |
1681 | unsigned long flags; | ||
1682 | uint32_t stat; | 1658 | uint32_t stat; |
1683 | uint32_t hccr; | 1659 | uint32_t hccr; |
1684 | uint16_t mb[4]; | 1660 | uint16_t mb[4]; |
@@ -1687,13 +1663,19 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
1687 | reg = &ha->iobase->isp24; | 1663 | reg = &ha->iobase->isp24; |
1688 | status = 0; | 1664 | status = 0; |
1689 | 1665 | ||
1690 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1666 | spin_lock(&ha->hardware_lock); |
1691 | do { | 1667 | do { |
1692 | stat = RD_REG_DWORD(®->host_status); | 1668 | stat = RD_REG_DWORD(®->host_status); |
1693 | if (stat & HSRX_RISC_PAUSED) { | 1669 | if (stat & HSRX_RISC_PAUSED) { |
1694 | if (pci_channel_offline(ha->pdev)) | 1670 | if (pci_channel_offline(ha->pdev)) |
1695 | break; | 1671 | break; |
1696 | 1672 | ||
1673 | if (ha->hw_event_pause_errors == 0) | ||
1674 | qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR, | ||
1675 | 0, MSW(stat), LSW(stat)); | ||
1676 | else if (ha->hw_event_pause_errors < 0xffffffff) | ||
1677 | ha->hw_event_pause_errors++; | ||
1678 | |||
1697 | hccr = RD_REG_DWORD(®->hccr); | 1679 | hccr = RD_REG_DWORD(®->hccr); |
1698 | 1680 | ||
1699 | qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " | 1681 | qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " |
@@ -1734,7 +1716,7 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
1734 | } | 1716 | } |
1735 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 1717 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
1736 | } while (0); | 1718 | } while (0); |
1737 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1719 | spin_unlock(&ha->hardware_lock); |
1738 | 1720 | ||
1739 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && | 1721 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && |
1740 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { | 1722 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { |
@@ -1821,10 +1803,9 @@ qla2x00_request_irqs(scsi_qla_host_t *ha) | |||
1821 | { | 1803 | { |
1822 | int ret; | 1804 | int ret; |
1823 | device_reg_t __iomem *reg = ha->iobase; | 1805 | device_reg_t __iomem *reg = ha->iobase; |
1824 | unsigned long flags; | ||
1825 | 1806 | ||
1826 | /* If possible, enable MSI-X. */ | 1807 | /* If possible, enable MSI-X. */ |
1827 | if (!IS_QLA2432(ha) && !IS_QLA2532(ha)) | 1808 | if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha)) |
1828 | goto skip_msix; | 1809 | goto skip_msix; |
1829 | 1810 | ||
1830 | if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX || | 1811 | if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX || |
@@ -1859,7 +1840,7 @@ qla2x00_request_irqs(scsi_qla_host_t *ha) | |||
1859 | "MSI-X: Falling back-to INTa mode -- %d.\n", ret); | 1840 | "MSI-X: Falling back-to INTa mode -- %d.\n", ret); |
1860 | skip_msix: | 1841 | skip_msix: |
1861 | 1842 | ||
1862 | if (!IS_QLA24XX(ha) && !IS_QLA2532(ha)) | 1843 | if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha)) |
1863 | goto skip_msi; | 1844 | goto skip_msi; |
1864 | 1845 | ||
1865 | ret = pci_enable_msi(ha->pdev); | 1846 | ret = pci_enable_msi(ha->pdev); |
@@ -1882,7 +1863,7 @@ skip_msi: | |||
1882 | clear_risc_ints: | 1863 | clear_risc_ints: |
1883 | 1864 | ||
1884 | ha->isp_ops->disable_intrs(ha); | 1865 | ha->isp_ops->disable_intrs(ha); |
1885 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1866 | spin_lock_irq(&ha->hardware_lock); |
1886 | if (IS_FWI2_CAPABLE(ha)) { | 1867 | if (IS_FWI2_CAPABLE(ha)) { |
1887 | WRT_REG_DWORD(®->isp24.hccr, HCCRX_CLR_HOST_INT); | 1868 | WRT_REG_DWORD(®->isp24.hccr, HCCRX_CLR_HOST_INT); |
1888 | WRT_REG_DWORD(®->isp24.hccr, HCCRX_CLR_RISC_INT); | 1869 | WRT_REG_DWORD(®->isp24.hccr, HCCRX_CLR_RISC_INT); |
@@ -1891,7 +1872,7 @@ clear_risc_ints: | |||
1891 | WRT_REG_WORD(®->isp.hccr, HCCR_CLR_RISC_INT); | 1872 | WRT_REG_WORD(®->isp.hccr, HCCR_CLR_RISC_INT); |
1892 | WRT_REG_WORD(®->isp.hccr, HCCR_CLR_HOST_INT); | 1873 | WRT_REG_WORD(®->isp.hccr, HCCR_CLR_HOST_INT); |
1893 | } | 1874 | } |
1894 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1875 | spin_unlock_irq(&ha->hardware_lock); |
1895 | ha->isp_ops->enable_intrs(ha); | 1876 | ha->isp_ops->enable_intrs(ha); |
1896 | 1877 | ||
1897 | fail: | 1878 | fail: |
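
    [Editor's note] A change repeated through the qla_isr.c handlers above is dropping spin_lock_irqsave()/spin_unlock_irqrestore() in favour of plain spin_lock()/spin_unlock(): the hard-IRQ handlers already run with local interrupts disabled, so saving and restoring flags is pure overhead, while the one process-context user (the clear_risc_ints path in qla2x00_request_irqs()) switches to spin_lock_irq(). A hedged sketch of the convention with placeholder names:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_hw_lock);

    /* Hard-IRQ context: interrupts are already off on this CPU, so the
     * cheaper non-saving lock form is enough. */
    static irqreturn_t demo_intr(int irq, void *dev_id)
    {
        spin_lock(&demo_hw_lock);
        /* ... drain the response queue ... */
        spin_unlock(&demo_hw_lock);
        return IRQ_HANDLED;
    }

    /* Process context that races with the handler must still disable
     * local interrupts around the lock. */
    static void demo_clear_risc_ints(void)
    {
        spin_lock_irq(&demo_hw_lock);
        /* ... clear any stale RISC interrupt state ... */
        spin_unlock_irq(&demo_hw_lock);
    }
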
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index bb103580e1ba..7d0a8a4c7719 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -310,7 +310,7 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr, | |||
310 | } | 310 | } |
311 | 311 | ||
312 | mcp->in_mb = MBX_0; | 312 | mcp->in_mb = MBX_0; |
313 | mcp->tov = 30; | 313 | mcp->tov = MBX_TOV_SECONDS; |
314 | mcp->flags = 0; | 314 | mcp->flags = 0; |
315 | rval = qla2x00_mailbox_command(ha, mcp); | 315 | rval = qla2x00_mailbox_command(ha, mcp); |
316 | 316 | ||
@@ -367,7 +367,7 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr) | |||
367 | } | 367 | } |
368 | } | 368 | } |
369 | 369 | ||
370 | mcp->tov = 30; | 370 | mcp->tov = MBX_TOV_SECONDS; |
371 | mcp->flags = 0; | 371 | mcp->flags = 0; |
372 | rval = qla2x00_mailbox_command(ha, mcp); | 372 | rval = qla2x00_mailbox_command(ha, mcp); |
373 | 373 | ||
@@ -417,7 +417,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor, | |||
417 | mcp->out_mb = MBX_0; | 417 | mcp->out_mb = MBX_0; |
418 | mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | 418 | mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; |
419 | mcp->flags = 0; | 419 | mcp->flags = 0; |
420 | mcp->tov = 30; | 420 | mcp->tov = MBX_TOV_SECONDS; |
421 | rval = qla2x00_mailbox_command(ha, mcp); | 421 | rval = qla2x00_mailbox_command(ha, mcp); |
422 | 422 | ||
423 | /* Return mailbox data. */ | 423 | /* Return mailbox data. */ |
@@ -466,7 +466,7 @@ qla2x00_get_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts) | |||
466 | mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; | 466 | mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; |
467 | mcp->out_mb = MBX_0; | 467 | mcp->out_mb = MBX_0; |
468 | mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; | 468 | mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; |
469 | mcp->tov = 30; | 469 | mcp->tov = MBX_TOV_SECONDS; |
470 | mcp->flags = 0; | 470 | mcp->flags = 0; |
471 | rval = qla2x00_mailbox_command(ha, mcp); | 471 | rval = qla2x00_mailbox_command(ha, mcp); |
472 | 472 | ||
@@ -524,7 +524,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts) | |||
524 | mcp->mb[12] = 0; /* Undocumented, but used */ | 524 | mcp->mb[12] = 0; /* Undocumented, but used */ |
525 | mcp->out_mb |= MBX_12|MBX_11|MBX_10; | 525 | mcp->out_mb |= MBX_12|MBX_11|MBX_10; |
526 | } | 526 | } |
527 | mcp->tov = 30; | 527 | mcp->tov = MBX_TOV_SECONDS; |
528 | mcp->flags = 0; | 528 | mcp->flags = 0; |
529 | rval = qla2x00_mailbox_command(ha, mcp); | 529 | rval = qla2x00_mailbox_command(ha, mcp); |
530 | 530 | ||
@@ -576,7 +576,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha) | |||
576 | mcp->mb[7] = 0x2525; | 576 | mcp->mb[7] = 0x2525; |
577 | mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | 577 | mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; |
578 | mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | 578 | mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; |
579 | mcp->tov = 30; | 579 | mcp->tov = MBX_TOV_SECONDS; |
580 | mcp->flags = 0; | 580 | mcp->flags = 0; |
581 | rval = qla2x00_mailbox_command(ha, mcp); | 581 | rval = qla2x00_mailbox_command(ha, mcp); |
582 | 582 | ||
@@ -587,6 +587,14 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha) | |||
587 | if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A || | 587 | if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A || |
588 | mcp->mb[7] != 0x2525) | 588 | mcp->mb[7] != 0x2525) |
589 | rval = QLA_FUNCTION_FAILED; | 589 | rval = QLA_FUNCTION_FAILED; |
590 | if (rval == QLA_FUNCTION_FAILED) { | ||
591 | struct device_reg_24xx __iomem *reg = | ||
592 | &ha->iobase->isp24; | ||
593 | |||
594 | qla2xxx_hw_event_log(ha, HW_EVENT_ISP_ERR, 0, | ||
595 | LSW(RD_REG_DWORD(®->hccr)), | ||
596 | LSW(RD_REG_DWORD(®->istatus))); | ||
597 | } | ||
590 | } | 598 | } |
591 | 599 | ||
592 | if (rval != QLA_SUCCESS) { | 600 | if (rval != QLA_SUCCESS) { |
@@ -640,7 +648,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr) | |||
640 | mcp->in_mb |= MBX_1; | 648 | mcp->in_mb |= MBX_1; |
641 | } | 649 | } |
642 | 650 | ||
643 | mcp->tov = 30; | 651 | mcp->tov = MBX_TOV_SECONDS; |
644 | mcp->flags = 0; | 652 | mcp->flags = 0; |
645 | rval = qla2x00_mailbox_command(ha, mcp); | 653 | rval = qla2x00_mailbox_command(ha, mcp); |
646 | 654 | ||
@@ -674,8 +682,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr) | |||
674 | * Kernel context. | 682 | * Kernel context. |
675 | */ | 683 | */ |
676 | int | 684 | int |
677 | qla2x00_issue_iocb(scsi_qla_host_t *ha, void* buffer, dma_addr_t phys_addr, | 685 | qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer, |
678 | size_t size) | 686 | dma_addr_t phys_addr, size_t size, uint32_t tov) |
679 | { | 687 | { |
680 | int rval; | 688 | int rval; |
681 | mbx_cmd_t mc; | 689 | mbx_cmd_t mc; |
@@ -689,7 +697,7 @@ qla2x00_issue_iocb(scsi_qla_host_t *ha, void* buffer, dma_addr_t phys_addr, | |||
689 | mcp->mb[7] = LSW(MSD(phys_addr)); | 697 | mcp->mb[7] = LSW(MSD(phys_addr)); |
690 | mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | 698 | mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; |
691 | mcp->in_mb = MBX_2|MBX_0; | 699 | mcp->in_mb = MBX_2|MBX_0; |
692 | mcp->tov = 30; | 700 | mcp->tov = tov; |
693 | mcp->flags = 0; | 701 | mcp->flags = 0; |
694 | rval = qla2x00_mailbox_command(ha, mcp); | 702 | rval = qla2x00_mailbox_command(ha, mcp); |
695 | 703 | ||
@@ -710,6 +718,14 @@ qla2x00_issue_iocb(scsi_qla_host_t *ha, void* buffer, dma_addr_t phys_addr, | |||
710 | return rval; | 718 | return rval; |
711 | } | 719 | } |
712 | 720 | ||
721 | int | ||
722 | qla2x00_issue_iocb(scsi_qla_host_t *ha, void *buffer, dma_addr_t phys_addr, | ||
723 | size_t size) | ||
724 | { | ||
725 | return qla2x00_issue_iocb_timeout(ha, buffer, phys_addr, size, | ||
726 | MBX_TOV_SECONDS); | ||
727 | } | ||
728 | |||
713 | /* | 729 | /* |
714 | * qla2x00_abort_command | 730 | * qla2x00_abort_command |
715 | * Abort command aborts a specified IOCB. | 731 | * Abort command aborts a specified IOCB. |
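
    [Editor's note] The qla2x00_issue_iocb() change is the usual keep-the-signature refactor: the body becomes qla2x00_issue_iocb_timeout(), which takes the timeout as a parameter, and the old name turns into a thin wrapper passing MBX_TOV_SECONDS (the constant that replaces the literal 30 throughout this file). A generic sketch of the pattern with made-up names:

    #define DEMO_DEFAULT_TOV    30      /* stand-in for MBX_TOV_SECONDS */

    static int demo_issue_timeout(void *ctx, const void *buf, unsigned long len,
                                  unsigned int tov)
    {
        /* ... build the command and wait up to 'tov' seconds for it ... */
        (void)ctx; (void)buf; (void)len; (void)tov;
        return 0;
    }

    /* Existing callers keep compiling unchanged; only callers that need a
     * longer wait use the _timeout form directly. */
    static int demo_issue(void *ctx, const void *buf, unsigned long len)
    {
        return demo_issue_timeout(ctx, buf, len, DEMO_DEFAULT_TOV);
    }
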
@@ -760,7 +776,7 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp) | |||
760 | mcp->mb[6] = (uint16_t)sp->cmd->device->lun; | 776 | mcp->mb[6] = (uint16_t)sp->cmd->device->lun; |
761 | mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | 777 | mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; |
762 | mcp->in_mb = MBX_0; | 778 | mcp->in_mb = MBX_0; |
763 | mcp->tov = 30; | 779 | mcp->tov = MBX_TOV_SECONDS; |
764 | mcp->flags = 0; | 780 | mcp->flags = 0; |
765 | rval = qla2x00_mailbox_command(ha, mcp); | 781 | rval = qla2x00_mailbox_command(ha, mcp); |
766 | 782 | ||
@@ -776,36 +792,20 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp) | |||
776 | return rval; | 792 | return rval; |
777 | } | 793 | } |
778 | 794 | ||
779 | #if USE_ABORT_TGT | ||
780 | /* | ||
781 | * qla2x00_abort_target | ||
782 | * Issue abort target mailbox command. | ||
783 | * | ||
784 | * Input: | ||
785 | * ha = adapter block pointer. | ||
786 | * | ||
787 | * Returns: | ||
788 | * qla2x00 local function return status code. | ||
789 | * | ||
790 | * Context: | ||
791 | * Kernel context. | ||
792 | */ | ||
793 | int | 795 | int |
794 | qla2x00_abort_target(fc_port_t *fcport) | 796 | qla2x00_abort_target(struct fc_port *fcport, unsigned int l) |
795 | { | 797 | { |
796 | int rval; | 798 | int rval, rval2; |
797 | mbx_cmd_t mc; | 799 | mbx_cmd_t mc; |
798 | mbx_cmd_t *mcp = &mc; | 800 | mbx_cmd_t *mcp = &mc; |
799 | scsi_qla_host_t *ha; | 801 | scsi_qla_host_t *ha; |
800 | 802 | ||
801 | if (fcport == NULL) | ||
802 | return 0; | ||
803 | |||
804 | DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no)); | 803 | DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no)); |
805 | 804 | ||
805 | l = l; | ||
806 | ha = fcport->ha; | 806 | ha = fcport->ha; |
807 | mcp->mb[0] = MBC_ABORT_TARGET; | 807 | mcp->mb[0] = MBC_ABORT_TARGET; |
808 | mcp->out_mb = MBX_2|MBX_1|MBX_0; | 808 | mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; |
809 | if (HAS_EXTENDED_IDS(ha)) { | 809 | if (HAS_EXTENDED_IDS(ha)) { |
810 | mcp->mb[1] = fcport->loop_id; | 810 | mcp->mb[1] = fcport->loop_id; |
811 | mcp->mb[10] = 0; | 811 | mcp->mb[10] = 0; |
@@ -814,27 +814,70 @@ qla2x00_abort_target(fc_port_t *fcport) | |||
814 | mcp->mb[1] = fcport->loop_id << 8; | 814 | mcp->mb[1] = fcport->loop_id << 8; |
815 | } | 815 | } |
816 | mcp->mb[2] = ha->loop_reset_delay; | 816 | mcp->mb[2] = ha->loop_reset_delay; |
817 | mcp->mb[9] = ha->vp_idx; | ||
817 | 818 | ||
818 | mcp->in_mb = MBX_0; | 819 | mcp->in_mb = MBX_0; |
819 | mcp->tov = 30; | 820 | mcp->tov = MBX_TOV_SECONDS; |
820 | mcp->flags = 0; | 821 | mcp->flags = 0; |
821 | rval = qla2x00_mailbox_command(ha, mcp); | 822 | rval = qla2x00_mailbox_command(ha, mcp); |
823 | if (rval != QLA_SUCCESS) { | ||
824 | DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, | ||
825 | ha->host_no, rval)); | ||
826 | } | ||
822 | 827 | ||
823 | /* Issue marker command. */ | 828 | /* Issue marker IOCB. */ |
824 | ha->marker_needed = 1; | 829 | rval2 = qla2x00_marker(ha, fcport->loop_id, 0, MK_SYNC_ID); |
830 | if (rval2 != QLA_SUCCESS) { | ||
831 | DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " | ||
832 | "(%x).\n", __func__, ha->host_no, rval2)); | ||
833 | } else { | ||
834 | DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); | ||
835 | } | ||
836 | |||
837 | return rval; | ||
838 | } | ||
839 | |||
840 | int | ||
841 | qla2x00_lun_reset(struct fc_port *fcport, unsigned int l) | ||
842 | { | ||
843 | int rval, rval2; | ||
844 | mbx_cmd_t mc; | ||
845 | mbx_cmd_t *mcp = &mc; | ||
846 | scsi_qla_host_t *ha; | ||
847 | |||
848 | DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no)); | ||
849 | |||
850 | ha = fcport->ha; | ||
851 | mcp->mb[0] = MBC_LUN_RESET; | ||
852 | mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; | ||
853 | if (HAS_EXTENDED_IDS(ha)) | ||
854 | mcp->mb[1] = fcport->loop_id; | ||
855 | else | ||
856 | mcp->mb[1] = fcport->loop_id << 8; | ||
857 | mcp->mb[2] = l; | ||
858 | mcp->mb[3] = 0; | ||
859 | mcp->mb[9] = ha->vp_idx; | ||
825 | 860 | ||
861 | mcp->in_mb = MBX_0; | ||
862 | mcp->tov = MBX_TOV_SECONDS; | ||
863 | mcp->flags = 0; | ||
864 | rval = qla2x00_mailbox_command(ha, mcp); | ||
826 | if (rval != QLA_SUCCESS) { | 865 | if (rval != QLA_SUCCESS) { |
827 | DEBUG2_3_11(printk("qla2x00_abort_target(%ld): failed=%x.\n", | 866 | DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, |
828 | ha->host_no, rval)); | 867 | ha->host_no, rval)); |
868 | } | ||
869 | |||
870 | /* Issue marker IOCB. */ | ||
871 | rval2 = qla2x00_marker(ha, fcport->loop_id, l, MK_SYNC_ID_LUN); | ||
872 | if (rval2 != QLA_SUCCESS) { | ||
873 | DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " | ||
874 | "(%x).\n", __func__, ha->host_no, rval2)); | ||
829 | } else { | 875 | } else { |
830 | /*EMPTY*/ | 876 | DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); |
831 | DEBUG11(printk("qla2x00_abort_target(%ld): done.\n", | ||
832 | ha->host_no)); | ||
833 | } | 877 | } |
834 | 878 | ||
835 | return rval; | 879 | return rval; |
836 | } | 880 | } |
837 | #endif | ||
838 | 881 | ||
839 | /* | 882 | /* |
840 | * qla2x00_get_adapter_id | 883 | * qla2x00_get_adapter_id |
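
    [Editor's note] Both new task-management helpers, qla2x00_abort_target() and qla2x00_lun_reset(), follow the same two-step shape: issue the mailbox command, then unconditionally send a marker IOCB so the firmware resynchronizes the affected target or LUN, logging a marker failure but returning the status of the primary command. A compressed sketch of that shape; the demo_* helpers are stand-ins for qla2x00_mailbox_command() and qla2x00_marker(), not real driver calls.

    #include <linux/kernel.h>

    /* Stubs standing in for the mailbox command and the marker IOCB. */
    static int demo_mailbox_tmf(unsigned int loop_id, unsigned int lun) { return 0; }
    static int demo_marker(unsigned int loop_id, unsigned int lun) { return 0; }

    static int demo_send_tmf(unsigned int loop_id, unsigned int lun)
    {
        int rval, rval2;

        rval = demo_mailbox_tmf(loop_id, lun);          /* the reset itself */
        if (rval)
            pr_debug("TMF mailbox command failed (%d)\n", rval);

        /* Always resynchronize with a marker, even if the TMF failed. */
        rval2 = demo_marker(loop_id, lun);
        if (rval2)
            pr_debug("marker IOCB failed (%d)\n", rval2);

        return rval;    /* the marker status is logged, not propagated */
    }
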
@@ -871,7 +914,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa, | |||
871 | mcp->mb[9] = ha->vp_idx; | 914 | mcp->mb[9] = ha->vp_idx; |
872 | mcp->out_mb = MBX_9|MBX_0; | 915 | mcp->out_mb = MBX_9|MBX_0; |
873 | mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | 916 | mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; |
874 | mcp->tov = 30; | 917 | mcp->tov = MBX_TOV_SECONDS; |
875 | mcp->flags = 0; | 918 | mcp->flags = 0; |
876 | rval = qla2x00_mailbox_command(ha, mcp); | 919 | rval = qla2x00_mailbox_command(ha, mcp); |
877 | if (mcp->mb[0] == MBS_COMMAND_ERROR) | 920 | if (mcp->mb[0] == MBS_COMMAND_ERROR) |
@@ -928,7 +971,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov, | |||
928 | mcp->mb[0] = MBC_GET_RETRY_COUNT; | 971 | mcp->mb[0] = MBC_GET_RETRY_COUNT; |
929 | mcp->out_mb = MBX_0; | 972 | mcp->out_mb = MBX_0; |
930 | mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; | 973 | mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; |
931 | mcp->tov = 30; | 974 | mcp->tov = MBX_TOV_SECONDS; |
932 | mcp->flags = 0; | 975 | mcp->flags = 0; |
933 | rval = qla2x00_mailbox_command(ha, mcp); | 976 | rval = qla2x00_mailbox_command(ha, mcp); |
934 | 977 | ||
@@ -995,7 +1038,7 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size) | |||
995 | mcp->in_mb = MBX_5|MBX_4|MBX_0; | 1038 | mcp->in_mb = MBX_5|MBX_4|MBX_0; |
996 | mcp->buf_size = size; | 1039 | mcp->buf_size = size; |
997 | mcp->flags = MBX_DMA_OUT; | 1040 | mcp->flags = MBX_DMA_OUT; |
998 | mcp->tov = 30; | 1041 | mcp->tov = MBX_TOV_SECONDS; |
999 | rval = qla2x00_mailbox_command(ha, mcp); | 1042 | rval = qla2x00_mailbox_command(ha, mcp); |
1000 | 1043 | ||
1001 | if (rval != QLA_SUCCESS) { | 1044 | if (rval != QLA_SUCCESS) { |
@@ -1173,7 +1216,7 @@ gpd_error_out: | |||
1173 | * Kernel context. | 1216 | * Kernel context. |
1174 | */ | 1217 | */ |
1175 | int | 1218 | int |
1176 | qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *dptr) | 1219 | qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states) |
1177 | { | 1220 | { |
1178 | int rval; | 1221 | int rval; |
1179 | mbx_cmd_t mc; | 1222 | mbx_cmd_t mc; |
@@ -1184,13 +1227,15 @@ qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *dptr) | |||
1184 | 1227 | ||
1185 | mcp->mb[0] = MBC_GET_FIRMWARE_STATE; | 1228 | mcp->mb[0] = MBC_GET_FIRMWARE_STATE; |
1186 | mcp->out_mb = MBX_0; | 1229 | mcp->out_mb = MBX_0; |
1187 | mcp->in_mb = MBX_2|MBX_1|MBX_0; | 1230 | mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; |
1188 | mcp->tov = 30; | 1231 | mcp->tov = MBX_TOV_SECONDS; |
1189 | mcp->flags = 0; | 1232 | mcp->flags = 0; |
1190 | rval = qla2x00_mailbox_command(ha, mcp); | 1233 | rval = qla2x00_mailbox_command(ha, mcp); |
1191 | 1234 | ||
1192 | /* Return firmware state. */ | 1235 | /* Return firmware states. */ |
1193 | *dptr = mcp->mb[1]; | 1236 | states[0] = mcp->mb[1]; |
1237 | states[1] = mcp->mb[2]; | ||
1238 | states[2] = mcp->mb[3]; | ||
1194 | 1239 | ||
1195 | if (rval != QLA_SUCCESS) { | 1240 | if (rval != QLA_SUCCESS) { |
1196 | /*EMPTY*/ | 1241 | /*EMPTY*/ |
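
    [Editor's note] Every routine in qla_mbx.c fills the same mbx_cmd_t descriptor: outgoing values go in mb[], the out_mb/in_mb bitmasks say which mailbox registers to load and to read back, and tov bounds the wait, now spelled MBX_TOV_SECONDS instead of a bare 30. The sketch below shows how a caller might populate such a descriptor for a "get firmware state"-style request; the types, macros and opcode parameter are invented for illustration and are not the driver's implementation.

    #include <linux/types.h>

    #define DEMO_MBX(n)   (1u << (n))   /* analogue of MBX_0 .. MBX_31 */
    #define DEMO_TOV      30            /* what MBX_TOV_SECONDS denotes */

    struct demo_mbx_cmd {
        uint16_t mb[32];        /* mailbox register image */
        uint32_t out_mb;        /* registers the host writes before firing */
        uint32_t in_mb;         /* registers captured after completion */
        uint32_t tov;           /* seconds to wait for the completion */
    };

    /* Ask for a state-style command: write mb0, read back mb0..mb3. */
    static void demo_build_get_state(struct demo_mbx_cmd *mcp, uint16_t opcode)
    {
        mcp->mb[0] = opcode;
        mcp->out_mb = DEMO_MBX(0);
        mcp->in_mb  = DEMO_MBX(3) | DEMO_MBX(2) | DEMO_MBX(1) | DEMO_MBX(0);
        mcp->tov    = DEMO_TOV;
    }
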
@@ -1246,7 +1291,7 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name, | |||
1246 | } | 1291 | } |
1247 | 1292 | ||
1248 | mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | 1293 | mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; |
1249 | mcp->tov = 30; | 1294 | mcp->tov = MBX_TOV_SECONDS; |
1250 | mcp->flags = 0; | 1295 | mcp->flags = 0; |
1251 | rval = qla2x00_mailbox_command(ha, mcp); | 1296 | rval = qla2x00_mailbox_command(ha, mcp); |
1252 | 1297 | ||
@@ -1318,7 +1363,7 @@ qla2x00_lip_reset(scsi_qla_host_t *ha) | |||
1318 | mcp->mb[3] = 0; | 1363 | mcp->mb[3] = 0; |
1319 | } | 1364 | } |
1320 | mcp->in_mb = MBX_0; | 1365 | mcp->in_mb = MBX_0; |
1321 | mcp->tov = 30; | 1366 | mcp->tov = MBX_TOV_SECONDS; |
1322 | mcp->flags = 0; | 1367 | mcp->flags = 0; |
1323 | rval = qla2x00_mailbox_command(ha, mcp); | 1368 | rval = qla2x00_mailbox_command(ha, mcp); |
1324 | 1369 | ||
@@ -1743,7 +1788,7 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain, | |||
1743 | } | 1788 | } |
1744 | 1789 | ||
1745 | mcp->in_mb = MBX_1|MBX_0; | 1790 | mcp->in_mb = MBX_1|MBX_0; |
1746 | mcp->tov = 30; | 1791 | mcp->tov = MBX_TOV_SECONDS; |
1747 | mcp->flags = 0; | 1792 | mcp->flags = 0; |
1748 | rval = qla2x00_mailbox_command(ha, mcp); | 1793 | rval = qla2x00_mailbox_command(ha, mcp); |
1749 | 1794 | ||
@@ -1791,7 +1836,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *ha) | |||
1791 | mcp->mb[3] = 0; | 1836 | mcp->mb[3] = 0; |
1792 | mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; | 1837 | mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; |
1793 | mcp->in_mb = MBX_0; | 1838 | mcp->in_mb = MBX_0; |
1794 | mcp->tov = 30; | 1839 | mcp->tov = MBX_TOV_SECONDS; |
1795 | mcp->flags = 0; | 1840 | mcp->flags = 0; |
1796 | rval = qla2x00_mailbox_command(ha, mcp); | 1841 | rval = qla2x00_mailbox_command(ha, mcp); |
1797 | 1842 | ||
@@ -1852,7 +1897,7 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma, | |||
1852 | mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; | 1897 | mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; |
1853 | } | 1898 | } |
1854 | mcp->in_mb = MBX_1|MBX_0; | 1899 | mcp->in_mb = MBX_1|MBX_0; |
1855 | mcp->tov = 30; | 1900 | mcp->tov = MBX_TOV_SECONDS; |
1856 | mcp->flags = 0; | 1901 | mcp->flags = 0; |
1857 | rval = qla2x00_mailbox_command(ha, mcp); | 1902 | rval = qla2x00_mailbox_command(ha, mcp); |
1858 | 1903 | ||
@@ -1896,7 +1941,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt, | |||
1896 | mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; | 1941 | mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; |
1897 | mcp->out_mb = MBX_0; | 1942 | mcp->out_mb = MBX_0; |
1898 | mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | 1943 | mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; |
1899 | mcp->tov = 30; | 1944 | mcp->tov = MBX_TOV_SECONDS; |
1900 | mcp->flags = 0; | 1945 | mcp->flags = 0; |
1901 | rval = qla2x00_mailbox_command(ha, mcp); | 1946 | rval = qla2x00_mailbox_command(ha, mcp); |
1902 | 1947 | ||
@@ -2036,7 +2081,7 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id, | |||
2036 | mcp->mb[1] = loop_id << 8; | 2081 | mcp->mb[1] = loop_id << 8; |
2037 | mcp->out_mb |= MBX_1; | 2082 | mcp->out_mb |= MBX_1; |
2038 | } | 2083 | } |
2039 | mcp->tov = 30; | 2084 | mcp->tov = MBX_TOV_SECONDS; |
2040 | mcp->flags = IOCTL_CMD; | 2085 | mcp->flags = IOCTL_CMD; |
2041 | rval = qla2x00_mailbox_command(ha, mcp); | 2086 | rval = qla2x00_mailbox_command(ha, mcp); |
2042 | 2087 | ||
@@ -2082,7 +2127,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats, | |||
2082 | mcp->mb[10] = 0; | 2127 | mcp->mb[10] = 0; |
2083 | mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; | 2128 | mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; |
2084 | mcp->in_mb = MBX_2|MBX_1|MBX_0; | 2129 | mcp->in_mb = MBX_2|MBX_1|MBX_0; |
2085 | mcp->tov = 30; | 2130 | mcp->tov = MBX_TOV_SECONDS; |
2086 | mcp->flags = IOCTL_CMD; | 2131 | mcp->flags = IOCTL_CMD; |
2087 | rval = qla2x00_mailbox_command(ha, mcp); | 2132 | rval = qla2x00_mailbox_command(ha, mcp); |
2088 | 2133 | ||
@@ -2180,17 +2225,15 @@ struct tsk_mgmt_cmd { | |||
2180 | } p; | 2225 | } p; |
2181 | }; | 2226 | }; |
2182 | 2227 | ||
2183 | int | 2228 | static int |
2184 | qla24xx_abort_target(fc_port_t *fcport) | 2229 | __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, |
2230 | unsigned int l) | ||
2185 | { | 2231 | { |
2186 | int rval; | 2232 | int rval, rval2; |
2187 | struct tsk_mgmt_cmd *tsk; | 2233 | struct tsk_mgmt_cmd *tsk; |
2188 | dma_addr_t tsk_dma; | 2234 | dma_addr_t tsk_dma; |
2189 | scsi_qla_host_t *ha, *pha; | 2235 | scsi_qla_host_t *ha, *pha; |
2190 | 2236 | ||
2191 | if (fcport == NULL) | ||
2192 | return 0; | ||
2193 | |||
2194 | DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no)); | 2237 | DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no)); |
2195 | 2238 | ||
2196 | ha = fcport->ha; | 2239 | ha = fcport->ha; |
@@ -2207,47 +2250,61 @@ qla24xx_abort_target(fc_port_t *fcport) | |||
2207 | tsk->p.tsk.entry_count = 1; | 2250 | tsk->p.tsk.entry_count = 1; |
2208 | tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); | 2251 | tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); |
2209 | tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); | 2252 | tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
2210 | tsk->p.tsk.control_flags = __constant_cpu_to_le32(TCF_TARGET_RESET); | 2253 | tsk->p.tsk.control_flags = cpu_to_le32(type); |
2211 | tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; | 2254 | tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; |
2212 | tsk->p.tsk.port_id[1] = fcport->d_id.b.area; | 2255 | tsk->p.tsk.port_id[1] = fcport->d_id.b.area; |
2213 | tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; | 2256 | tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; |
2214 | tsk->p.tsk.vp_index = fcport->vp_idx; | 2257 | tsk->p.tsk.vp_index = fcport->vp_idx; |
2258 | if (type == TCF_LUN_RESET) { | ||
2259 | int_to_scsilun(l, &tsk->p.tsk.lun); | ||
2260 | host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, | ||
2261 | sizeof(tsk->p.tsk.lun)); | ||
2262 | } | ||
2215 | 2263 | ||
2216 | rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0); | 2264 | rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0); |
2217 | if (rval != QLA_SUCCESS) { | 2265 | if (rval != QLA_SUCCESS) { |
2218 | DEBUG2_3_11(printk("%s(%ld): failed to issue Target Reset IOCB " | 2266 | DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB " |
2219 | "(%x).\n", __func__, ha->host_no, rval)); | 2267 | "(%x).\n", __func__, ha->host_no, name, rval)); |
2220 | goto atarget_done; | ||
2221 | } else if (tsk->p.sts.entry_status != 0) { | 2268 | } else if (tsk->p.sts.entry_status != 0) { |
2222 | DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " | 2269 | DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " |
2223 | "-- error status (%x).\n", __func__, ha->host_no, | 2270 | "-- error status (%x).\n", __func__, ha->host_no, |
2224 | tsk->p.sts.entry_status)); | 2271 | tsk->p.sts.entry_status)); |
2225 | rval = QLA_FUNCTION_FAILED; | 2272 | rval = QLA_FUNCTION_FAILED; |
2226 | goto atarget_done; | ||
2227 | } else if (tsk->p.sts.comp_status != | 2273 | } else if (tsk->p.sts.comp_status != |
2228 | __constant_cpu_to_le16(CS_COMPLETE)) { | 2274 | __constant_cpu_to_le16(CS_COMPLETE)) { |
2229 | DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " | 2275 | DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " |
2230 | "-- completion status (%x).\n", __func__, | 2276 | "-- completion status (%x).\n", __func__, |
2231 | ha->host_no, le16_to_cpu(tsk->p.sts.comp_status))); | 2277 | ha->host_no, le16_to_cpu(tsk->p.sts.comp_status))); |
2232 | rval = QLA_FUNCTION_FAILED; | 2278 | rval = QLA_FUNCTION_FAILED; |
2233 | goto atarget_done; | ||
2234 | } | 2279 | } |
2235 | 2280 | ||
2236 | /* Issue marker IOCB. */ | 2281 | /* Issue marker IOCB. */ |
2237 | rval = qla2x00_marker(ha, fcport->loop_id, 0, MK_SYNC_ID); | 2282 | rval2 = qla2x00_marker(ha, fcport->loop_id, l, |
2238 | if (rval != QLA_SUCCESS) { | 2283 | type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID); |
2284 | if (rval2 != QLA_SUCCESS) { | ||
2239 | DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " | 2285 | DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " |
2240 | "(%x).\n", __func__, ha->host_no, rval)); | 2286 | "(%x).\n", __func__, ha->host_no, rval2)); |
2241 | } else { | 2287 | } else { |
2242 | DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); | 2288 | DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); |
2243 | } | 2289 | } |
2244 | 2290 | ||
2245 | atarget_done: | ||
2246 | dma_pool_free(pha->s_dma_pool, tsk, tsk_dma); | 2291 | dma_pool_free(pha->s_dma_pool, tsk, tsk_dma); |
2247 | 2292 | ||
2248 | return rval; | 2293 | return rval; |
2249 | } | 2294 | } |
2250 | 2295 | ||
2296 | int | ||
2297 | qla24xx_abort_target(struct fc_port *fcport, unsigned int l) | ||
2298 | { | ||
2299 | return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l); | ||
2300 | } | ||
2301 | |||
2302 | int | ||
2303 | qla24xx_lun_reset(struct fc_port *fcport, unsigned int l) | ||
2304 | { | ||
2305 | return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l); | ||
2306 | } | ||
2307 | |||
2251 | #if 0 | 2308 | #if 0 |
2252 | 2309 | ||
2253 | int | 2310 | int |
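
Target reset and the new LUN reset now share one task-management helper, __qla24xx_issue_tmf(), which only varies the control flags, the FCP LUN field, and the marker type; the two exported wrappers are the pieces wired into the isp_ops table later in this patch as .target_reset and .lun_reset. A hedged usage sketch (fcport and lun stand in for real objects):

	/* Reset every LUN behind the remote port. */
	rval = qla24xx_abort_target(fcport, 0);

	/* Reset a single LUN; the helper converts it to FCP format internally. */
	rval = qla24xx_lun_reset(fcport, lun);
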
@@ -2304,7 +2361,7 @@ qla2x00_set_serdes_params(scsi_qla_host_t *ha, uint16_t sw_em_1g, | |||
2304 | mcp->mb[4] = sw_em_4g | BIT_15; | 2361 | mcp->mb[4] = sw_em_4g | BIT_15; |
2305 | mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | 2362 | mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; |
2306 | mcp->in_mb = MBX_0; | 2363 | mcp->in_mb = MBX_0; |
2307 | mcp->tov = 30; | 2364 | mcp->tov = MBX_TOV_SECONDS; |
2308 | mcp->flags = 0; | 2365 | mcp->flags = 0; |
2309 | rval = qla2x00_mailbox_command(ha, mcp); | 2366 | rval = qla2x00_mailbox_command(ha, mcp); |
2310 | 2367 | ||
@@ -2372,7 +2429,7 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *ha, dma_addr_t eft_dma, | |||
2372 | mcp->mb[7] = TC_AEN_DISABLE; | 2429 | mcp->mb[7] = TC_AEN_DISABLE; |
2373 | mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | 2430 | mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; |
2374 | mcp->in_mb = MBX_1|MBX_0; | 2431 | mcp->in_mb = MBX_1|MBX_0; |
2375 | mcp->tov = 30; | 2432 | mcp->tov = MBX_TOV_SECONDS; |
2376 | mcp->flags = 0; | 2433 | mcp->flags = 0; |
2377 | rval = qla2x00_mailbox_command(ha, mcp); | 2434 | rval = qla2x00_mailbox_command(ha, mcp); |
2378 | if (rval != QLA_SUCCESS) { | 2435 | if (rval != QLA_SUCCESS) { |
@@ -2401,7 +2458,7 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *ha) | |||
2401 | mcp->mb[1] = TC_EFT_DISABLE; | 2458 | mcp->mb[1] = TC_EFT_DISABLE; |
2402 | mcp->out_mb = MBX_1|MBX_0; | 2459 | mcp->out_mb = MBX_1|MBX_0; |
2403 | mcp->in_mb = MBX_1|MBX_0; | 2460 | mcp->in_mb = MBX_1|MBX_0; |
2404 | mcp->tov = 30; | 2461 | mcp->tov = MBX_TOV_SECONDS; |
2405 | mcp->flags = 0; | 2462 | mcp->flags = 0; |
2406 | rval = qla2x00_mailbox_command(ha, mcp); | 2463 | rval = qla2x00_mailbox_command(ha, mcp); |
2407 | if (rval != QLA_SUCCESS) { | 2464 | if (rval != QLA_SUCCESS) { |
@@ -2441,7 +2498,7 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma, | |||
2441 | mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| | 2498 | mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| |
2442 | MBX_1|MBX_0; | 2499 | MBX_1|MBX_0; |
2443 | mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | 2500 | mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; |
2444 | mcp->tov = 30; | 2501 | mcp->tov = MBX_TOV_SECONDS; |
2445 | mcp->flags = 0; | 2502 | mcp->flags = 0; |
2446 | rval = qla2x00_mailbox_command(ha, mcp); | 2503 | rval = qla2x00_mailbox_command(ha, mcp); |
2447 | if (rval != QLA_SUCCESS) { | 2504 | if (rval != QLA_SUCCESS) { |
@@ -2477,7 +2534,7 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd) | |||
2477 | mcp->out_mb = MBX_2|MBX_1|MBX_0; | 2534 | mcp->out_mb = MBX_2|MBX_1|MBX_0; |
2478 | mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| | 2535 | mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| |
2479 | MBX_1|MBX_0; | 2536 | MBX_1|MBX_0; |
2480 | mcp->tov = 30; | 2537 | mcp->tov = MBX_TOV_SECONDS; |
2481 | mcp->flags = 0; | 2538 | mcp->flags = 0; |
2482 | rval = qla2x00_mailbox_command(ha, mcp); | 2539 | rval = qla2x00_mailbox_command(ha, mcp); |
2483 | if (rval != QLA_SUCCESS) { | 2540 | if (rval != QLA_SUCCESS) { |
@@ -2525,7 +2582,7 @@ qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr, | |||
2525 | mcp->mb[10] = 0; | 2582 | mcp->mb[10] = 0; |
2526 | mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | 2583 | mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; |
2527 | mcp->in_mb = MBX_0; | 2584 | mcp->in_mb = MBX_0; |
2528 | mcp->tov = 30; | 2585 | mcp->tov = MBX_TOV_SECONDS; |
2529 | mcp->flags = 0; | 2586 | mcp->flags = 0; |
2530 | rval = qla2x00_mailbox_command(ha, mcp); | 2587 | rval = qla2x00_mailbox_command(ha, mcp); |
2531 | 2588 | ||
@@ -2559,7 +2616,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id, | |||
2559 | mcp->mb[4] = mcp->mb[5] = 0; | 2616 | mcp->mb[4] = mcp->mb[5] = 0; |
2560 | mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | 2617 | mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; |
2561 | mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; | 2618 | mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; |
2562 | mcp->tov = 30; | 2619 | mcp->tov = MBX_TOV_SECONDS; |
2563 | mcp->flags = 0; | 2620 | mcp->flags = 0; |
2564 | rval = qla2x00_mailbox_command(ha, mcp); | 2621 | rval = qla2x00_mailbox_command(ha, mcp); |
2565 | 2622 | ||
@@ -2877,7 +2934,7 @@ qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr, | |||
2877 | } | 2934 | } |
2878 | 2935 | ||
2879 | mcp->in_mb = MBX_0; | 2936 | mcp->in_mb = MBX_0; |
2880 | mcp->tov = 30; | 2937 | mcp->tov = MBX_TOV_SECONDS; |
2881 | mcp->flags = 0; | 2938 | mcp->flags = 0; |
2882 | rval = qla2x00_mailbox_command(ha, mcp); | 2939 | rval = qla2x00_mailbox_command(ha, mcp); |
2883 | 2940 | ||
@@ -2890,3 +2947,104 @@ qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr, | |||
2890 | 2947 | ||
2891 | return rval; | 2948 | return rval; |
2892 | } | 2949 | } |
2950 | |||
2951 | /* 84XX Support **************************************************************/ | ||
2952 | |||
2953 | struct cs84xx_mgmt_cmd { | ||
2954 | union { | ||
2955 | struct verify_chip_entry_84xx req; | ||
2956 | struct verify_chip_rsp_84xx rsp; | ||
2957 | } p; | ||
2958 | }; | ||
2959 | |||
2960 | int | ||
2961 | qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status) | ||
2962 | { | ||
2963 | int rval, retry; | ||
2964 | struct cs84xx_mgmt_cmd *mn; | ||
2965 | dma_addr_t mn_dma; | ||
2966 | uint16_t options; | ||
2967 | unsigned long flags; | ||
2968 | |||
2969 | DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no)); | ||
2970 | |||
2971 | mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); | ||
2972 | if (mn == NULL) { | ||
2973 | DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX " | ||
2974 | "IOCB.\n", __func__, ha->host_no)); | ||
2975 | return QLA_MEMORY_ALLOC_FAILED; | ||
2976 | } | ||
2977 | |||
2978 | /* Force Update? */ | ||
2979 | options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; | ||
2980 | /* Diagnostic firmware? */ | ||
2981 | /* options |= MENLO_DIAG_FW; */ | ||
2982 | /* We update the firmware with only one data sequence. */ | ||
2983 | options |= VCO_END_OF_DATA; | ||
2984 | |||
2985 | retry = 0; | ||
2986 | do { | ||
2987 | memset(mn, 0, sizeof(*mn)); | ||
2988 | mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; | ||
2989 | mn->p.req.entry_count = 1; | ||
2990 | mn->p.req.options = cpu_to_le16(options); | ||
2991 | |||
2992 | DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__, | ||
2993 | ha->host_no)); | ||
2994 | DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, | ||
2995 | sizeof(*mn))); | ||
2996 | |||
2997 | rval = qla2x00_issue_iocb_timeout(ha, mn, mn_dma, 0, 120); | ||
2998 | if (rval != QLA_SUCCESS) { | ||
2999 | DEBUG2_16(printk("%s(%ld): failed to issue Verify " | ||
3000 | "IOCB (%x).\n", __func__, ha->host_no, rval)); | ||
3001 | goto verify_done; | ||
3002 | } | ||
3003 | |||
3004 | DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__, | ||
3005 | ha->host_no)); | ||
3006 | DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, | ||
3007 | sizeof(*mn))); | ||
3008 | |||
3009 | status[0] = le16_to_cpu(mn->p.rsp.comp_status); | ||
3010 | status[1] = status[0] == CS_VCS_CHIP_FAILURE ? | ||
3011 | le16_to_cpu(mn->p.rsp.failure_code) : 0; | ||
3012 | DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__, | ||
3013 | ha->host_no, status[0], status[1])); | ||
3014 | |||
3015 | if (status[0] != CS_COMPLETE) { | ||
3016 | rval = QLA_FUNCTION_FAILED; | ||
3017 | if (!(options & VCO_DONT_UPDATE_FW)) { | ||
3018 | DEBUG2_16(printk("%s(%ld): Firmware update " | ||
3019 | "failed. Retrying without update " | ||
3020 | "firmware.\n", __func__, ha->host_no)); | ||
3021 | options |= VCO_DONT_UPDATE_FW; | ||
3022 | options &= ~VCO_FORCE_UPDATE; | ||
3023 | retry = 1; | ||
3024 | } | ||
3025 | } else { | ||
3026 | DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n", | ||
3027 | __func__, ha->host_no, | ||
3028 | le32_to_cpu(mn->p.rsp.fw_ver))); | ||
3029 | |||
3030 | /* NOTE: we only update OP firmware. */ | ||
3031 | spin_lock_irqsave(&ha->cs84xx->access_lock, flags); | ||
3032 | ha->cs84xx->op_fw_version = | ||
3033 | le32_to_cpu(mn->p.rsp.fw_ver); | ||
3034 | spin_unlock_irqrestore(&ha->cs84xx->access_lock, | ||
3035 | flags); | ||
3036 | } | ||
3037 | } while (retry); | ||
3038 | |||
3039 | verify_done: | ||
3040 | dma_pool_free(ha->s_dma_pool, mn, mn_dma); | ||
3041 | |||
3042 | if (rval != QLA_SUCCESS) { | ||
3043 | DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__, | ||
3044 | ha->host_no, rval)); | ||
3045 | } else { | ||
3046 | DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no)); | ||
3047 | } | ||
3048 | |||
3049 | return rval; | ||
3050 | } | ||
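
qla84xx_verify_chip() attempts a firmware-updating VERIFY CHIP first and, if the chip reports anything other than CS_COMPLETE, falls back to a verify-only pass with VCO_DONT_UPDATE_FW set and VCO_FORCE_UPDATE cleared. A simplified restatement of that control flow (paraphrase only; the IOCB issue and response handling are elided):

	options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
	options |= VCO_END_OF_DATA;
	do {
		retry = 0;
		/* build and issue the VERIFY CHIP IOCB with 'options' ... */
		if (comp_status != CS_COMPLETE &&
		    !(options & VCO_DONT_UPDATE_FW)) {
			options |= VCO_DONT_UPDATE_FW;
			options &= ~VCO_FORCE_UPDATE;
			retry = 1;	/* second pass: verify only, no update */
		}
	} while (retry);
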
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index cf784cdafb01..f2b04979e5f0 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
@@ -1,20 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * QLOGIC LINUX SOFTWARE | 2 | * QLogic Fibre Channel HBA Driver |
3 | * | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * QLogic ISP2x00 device driver for Linux 2.6.x | ||
5 | * Copyright (C) 2003-2005 QLogic Corporation | ||
6 | * (www.qlogic.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2, or (at your option) any | ||
11 | * later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | ||
18 | */ | 6 | */ |
19 | #include "qla_def.h" | 7 | #include "qla_def.h" |
20 | 8 | ||
@@ -28,8 +16,6 @@ | |||
28 | #include <scsi/scsicam.h> | 16 | #include <scsi/scsicam.h> |
29 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
30 | 18 | ||
31 | void qla2x00_vp_stop_timer(scsi_qla_host_t *); | ||
32 | |||
33 | void | 19 | void |
34 | qla2x00_vp_stop_timer(scsi_qla_host_t *vha) | 20 | qla2x00_vp_stop_timer(scsi_qla_host_t *vha) |
35 | { | 21 | { |
@@ -268,9 +254,17 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha) | |||
268 | static int | 254 | static int |
269 | qla2x00_do_dpc_vp(scsi_qla_host_t *vha) | 255 | qla2x00_do_dpc_vp(scsi_qla_host_t *vha) |
270 | { | 256 | { |
257 | scsi_qla_host_t *ha = vha->parent; | ||
258 | |||
271 | if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { | 259 | if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { |
272 | /* VP acquired. complete port configuration */ | 260 | /* VP acquired. complete port configuration */ |
273 | qla24xx_configure_vp(vha); | 261 | if (atomic_read(&ha->loop_state) == LOOP_READY) { |
262 | qla24xx_configure_vp(vha); | ||
263 | } else { | ||
264 | set_bit(VP_IDX_ACQUIRED, &vha->vp_flags); | ||
265 | set_bit(VP_DPC_NEEDED, &ha->dpc_flags); | ||
266 | } | ||
267 | |||
274 | return 0; | 268 | return 0; |
275 | } | 269 | } |
276 | 270 | ||
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 3c1b43356adb..8b33b163b1d4 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -26,9 +26,6 @@ char qla2x00_version_str[40]; | |||
26 | */ | 26 | */ |
27 | static struct kmem_cache *srb_cachep; | 27 | static struct kmem_cache *srb_cachep; |
28 | 28 | ||
29 | /* | ||
30 | * Ioctl related information. | ||
31 | */ | ||
32 | int num_hosts; | 29 | int num_hosts; |
33 | int ql2xlogintimeout = 20; | 30 | int ql2xlogintimeout = 20; |
34 | module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR); | 31 | module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR); |
@@ -103,9 +100,9 @@ static int qla24xx_queuecommand(struct scsi_cmnd *cmd, | |||
103 | void (*fn)(struct scsi_cmnd *)); | 100 | void (*fn)(struct scsi_cmnd *)); |
104 | static int qla2xxx_eh_abort(struct scsi_cmnd *); | 101 | static int qla2xxx_eh_abort(struct scsi_cmnd *); |
105 | static int qla2xxx_eh_device_reset(struct scsi_cmnd *); | 102 | static int qla2xxx_eh_device_reset(struct scsi_cmnd *); |
103 | static int qla2xxx_eh_target_reset(struct scsi_cmnd *); | ||
106 | static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); | 104 | static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); |
107 | static int qla2xxx_eh_host_reset(struct scsi_cmnd *); | 105 | static int qla2xxx_eh_host_reset(struct scsi_cmnd *); |
108 | static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *); | ||
109 | 106 | ||
110 | static int qla2x00_change_queue_depth(struct scsi_device *, int); | 107 | static int qla2x00_change_queue_depth(struct scsi_device *, int); |
111 | static int qla2x00_change_queue_type(struct scsi_device *, int); | 108 | static int qla2x00_change_queue_type(struct scsi_device *, int); |
@@ -117,6 +114,7 @@ static struct scsi_host_template qla2x00_driver_template = { | |||
117 | 114 | ||
118 | .eh_abort_handler = qla2xxx_eh_abort, | 115 | .eh_abort_handler = qla2xxx_eh_abort, |
119 | .eh_device_reset_handler = qla2xxx_eh_device_reset, | 116 | .eh_device_reset_handler = qla2xxx_eh_device_reset, |
117 | .eh_target_reset_handler = qla2xxx_eh_target_reset, | ||
120 | .eh_bus_reset_handler = qla2xxx_eh_bus_reset, | 118 | .eh_bus_reset_handler = qla2xxx_eh_bus_reset, |
121 | .eh_host_reset_handler = qla2xxx_eh_host_reset, | 119 | .eh_host_reset_handler = qla2xxx_eh_host_reset, |
122 | 120 | ||
@@ -148,6 +146,7 @@ struct scsi_host_template qla24xx_driver_template = { | |||
148 | 146 | ||
149 | .eh_abort_handler = qla2xxx_eh_abort, | 147 | .eh_abort_handler = qla2xxx_eh_abort, |
150 | .eh_device_reset_handler = qla2xxx_eh_device_reset, | 148 | .eh_device_reset_handler = qla2xxx_eh_device_reset, |
149 | .eh_target_reset_handler = qla2xxx_eh_target_reset, | ||
151 | .eh_bus_reset_handler = qla2xxx_eh_bus_reset, | 150 | .eh_bus_reset_handler = qla2xxx_eh_bus_reset, |
152 | .eh_host_reset_handler = qla2xxx_eh_host_reset, | 151 | .eh_host_reset_handler = qla2xxx_eh_host_reset, |
153 | 152 | ||
@@ -253,9 +252,9 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str) | |||
253 | 252 | ||
254 | strcpy(str, "PCIe ("); | 253 | strcpy(str, "PCIe ("); |
255 | if (lspeed == 1) | 254 | if (lspeed == 1) |
256 | strcat(str, "2.5Gb/s "); | 255 | strcat(str, "2.5GT/s "); |
257 | else if (lspeed == 2) | 256 | else if (lspeed == 2) |
258 | strcat(str, "5.0Gb/s "); | 257 | strcat(str, "5.0GT/s "); |
259 | else | 258 | else |
260 | strcat(str, "<unknown> "); | 259 | strcat(str, "<unknown> "); |
261 | snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth); | 260 | snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth); |
@@ -340,6 +339,8 @@ qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str) | |||
340 | strcat(str, "[T10 CRC] "); | 339 | strcat(str, "[T10 CRC] "); |
341 | if (ha->fw_attributes & BIT_5) | 340 | if (ha->fw_attributes & BIT_5) |
342 | strcat(str, "[VI] "); | 341 | strcat(str, "[VI] "); |
342 | if (ha->fw_attributes & BIT_10) | ||
343 | strcat(str, "[84XX] "); | ||
343 | if (ha->fw_attributes & BIT_13) | 344 | if (ha->fw_attributes & BIT_13) |
344 | strcat(str, "[Experimental]"); | 345 | strcat(str, "[Experimental]"); |
345 | return str; | 346 | return str; |
@@ -570,8 +571,6 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *ha) | |||
570 | else | 571 | else |
571 | return_status = QLA_FUNCTION_FAILED; | 572 | return_status = QLA_FUNCTION_FAILED; |
572 | 573 | ||
573 | DEBUG2(printk("%s return_status=%d\n",__func__,return_status)); | ||
574 | |||
575 | return (return_status); | 574 | return (return_status); |
576 | } | 575 | } |
577 | 576 | ||
@@ -685,7 +684,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
685 | 684 | ||
686 | DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n", | 685 | DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n", |
687 | __func__, ha->host_no, sp, serial)); | 686 | __func__, ha->host_no, sp, serial)); |
688 | DEBUG3(qla2x00_print_scsi_cmd(cmd)); | ||
689 | 687 | ||
690 | spin_unlock_irqrestore(&pha->hardware_lock, flags); | 688 | spin_unlock_irqrestore(&pha->hardware_lock, flags); |
691 | if (ha->isp_ops->abort_command(ha, sp)) { | 689 | if (ha->isp_ops->abort_command(ha, sp)) { |
@@ -719,190 +717,122 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
719 | return ret; | 717 | return ret; |
720 | } | 718 | } |
721 | 719 | ||
722 | /************************************************************************** | 720 | enum nexus_wait_type { |
723 | * qla2x00_eh_wait_for_pending_target_commands | 721 | WAIT_HOST = 0, |
724 | * | 722 | WAIT_TARGET, |
725 | * Description: | 723 | WAIT_LUN, |
726 | * Waits for all the commands to come back from the specified target. | 724 | }; |
727 | * | 725 | |
728 | * Input: | ||
729 | * ha - pointer to scsi_qla_host structure. | ||
730 | * t - target | ||
731 | * Returns: | ||
732 | * Either SUCCESS or FAILED. | ||
733 | * | ||
734 | * Note: | ||
735 | **************************************************************************/ | ||
736 | static int | 726 | static int |
737 | qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t) | 727 | qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t, |
728 | unsigned int l, enum nexus_wait_type type) | ||
738 | { | 729 | { |
739 | int cnt; | 730 | int cnt, match, status; |
740 | int status; | 731 | srb_t *sp; |
741 | srb_t *sp; | ||
742 | struct scsi_cmnd *cmd; | ||
743 | unsigned long flags; | 732 | unsigned long flags; |
744 | scsi_qla_host_t *pha = to_qla_parent(ha); | 733 | scsi_qla_host_t *pha = to_qla_parent(ha); |
745 | 734 | ||
746 | status = 0; | 735 | status = QLA_SUCCESS; |
747 | 736 | spin_lock_irqsave(&pha->hardware_lock, flags); | |
748 | /* | 737 | for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS; |
749 | * Waiting for all commands for the designated target in the active | 738 | cnt++) { |
750 | * array | ||
751 | */ | ||
752 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { | ||
753 | spin_lock_irqsave(&pha->hardware_lock, flags); | ||
754 | sp = pha->outstanding_cmds[cnt]; | 739 | sp = pha->outstanding_cmds[cnt]; |
755 | if (sp) { | 740 | if (!sp) |
756 | cmd = sp->cmd; | 741 | continue; |
757 | spin_unlock_irqrestore(&pha->hardware_lock, flags); | 742 | if (ha->vp_idx != sp->ha->vp_idx) |
758 | if (cmd->device->id == t && | 743 | continue; |
759 | ha->vp_idx == sp->ha->vp_idx) { | 744 | match = 0; |
760 | if (!qla2x00_eh_wait_on_command(ha, cmd)) { | 745 | switch (type) { |
761 | status = 1; | 746 | case WAIT_HOST: |
762 | break; | 747 | match = 1; |
763 | } | 748 | break; |
764 | } | 749 | case WAIT_TARGET: |
765 | } else { | 750 | match = sp->cmd->device->id == t; |
766 | spin_unlock_irqrestore(&pha->hardware_lock, flags); | 751 | break; |
752 | case WAIT_LUN: | ||
753 | match = (sp->cmd->device->id == t && | ||
754 | sp->cmd->device->lun == l); | ||
755 | break; | ||
767 | } | 756 | } |
757 | if (!match) | ||
758 | continue; | ||
759 | |||
760 | spin_unlock_irqrestore(&pha->hardware_lock, flags); | ||
761 | status = qla2x00_eh_wait_on_command(ha, sp->cmd); | ||
762 | spin_lock_irqsave(&pha->hardware_lock, flags); | ||
768 | } | 763 | } |
769 | return (status); | 764 | spin_unlock_irqrestore(&pha->hardware_lock, flags); |
765 | |||
766 | return status; | ||
770 | } | 767 | } |
771 | 768 | ||
769 | static char *reset_errors[] = { | ||
770 | "HBA not online", | ||
771 | "HBA not ready", | ||
772 | "Task management failed", | ||
773 | "Waiting for command completions", | ||
774 | }; | ||
772 | 775 | ||
773 | /************************************************************************** | ||
774 | * qla2xxx_eh_device_reset | ||
775 | * | ||
776 | * Description: | ||
777 | * The device reset function will reset the target and abort any | ||
778 | * executing commands. | ||
779 | * | ||
780 | * NOTE: The use of SP is undefined within this context. Do *NOT* | ||
781 | * attempt to use this value, even if you determine it is | ||
782 | * non-null. | ||
783 | * | ||
784 | * Input: | ||
785 | * cmd = Linux SCSI command packet of the command that cause the | ||
786 | * bus device reset. | ||
787 | * | ||
788 | * Returns: | ||
789 | * SUCCESS/FAILURE (defined as macro in scsi.h). | ||
790 | * | ||
791 | **************************************************************************/ | ||
792 | static int | 776 | static int |
793 | qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) | 777 | __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, |
778 | struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) | ||
794 | { | 779 | { |
795 | scsi_qla_host_t *ha = shost_priv(cmd->device->host); | 780 | scsi_qla_host_t *ha = shost_priv(cmd->device->host); |
796 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; | 781 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; |
797 | int ret = FAILED; | 782 | int err; |
798 | unsigned int id, lun; | ||
799 | unsigned long serial; | ||
800 | 783 | ||
801 | qla2x00_block_error_handler(cmd); | 784 | qla2x00_block_error_handler(cmd); |
802 | 785 | ||
803 | id = cmd->device->id; | ||
804 | lun = cmd->device->lun; | ||
805 | serial = cmd->serial_number; | ||
806 | |||
807 | if (!fcport) | 786 | if (!fcport) |
808 | return ret; | 787 | return FAILED; |
809 | 788 | ||
810 | qla_printk(KERN_INFO, ha, | 789 | qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET ISSUED.\n", |
811 | "scsi(%ld:%d:%d): DEVICE RESET ISSUED.\n", ha->host_no, id, lun); | 790 | ha->host_no, cmd->device->id, cmd->device->lun, name); |
812 | 791 | ||
792 | err = 0; | ||
813 | if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) | 793 | if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) |
814 | goto eh_dev_reset_done; | 794 | goto eh_reset_failed; |
815 | 795 | err = 1; | |
816 | if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) { | 796 | if (qla2x00_wait_for_loop_ready(ha) != QLA_SUCCESS) |
817 | if (qla2x00_device_reset(ha, fcport) == 0) | 797 | goto eh_reset_failed; |
818 | ret = SUCCESS; | 798 | err = 2; |
819 | 799 | if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) | |
820 | #if defined(LOGOUT_AFTER_DEVICE_RESET) | 800 | goto eh_reset_failed; |
821 | if (ret == SUCCESS) { | 801 | err = 3; |
822 | if (fcport->flags & FC_FABRIC_DEVICE) { | 802 | if (qla2x00_eh_wait_for_pending_commands(ha, cmd->device->id, |
823 | ha->isp_ops->fabric_logout(ha, fcport->loop_id); | 803 | cmd->device->lun, type) != QLA_SUCCESS) |
824 | qla2x00_mark_device_lost(ha, fcport, 0, 0); | 804 | goto eh_reset_failed; |
825 | } | 805 | |
826 | } | 806 | qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n", |
827 | #endif | 807 | ha->host_no, cmd->device->id, cmd->device->lun, name); |
828 | } else { | 808 | |
829 | DEBUG2(printk(KERN_INFO | 809 | return SUCCESS; |
830 | "%s failed: loop not ready\n",__func__)); | 810 | |
831 | } | 811 | eh_reset_failed: |
832 | 812 | qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n", | |
833 | if (ret == FAILED) { | 813 | ha->host_no, cmd->device->id, cmd->device->lun, name, |
834 | DEBUG3(printk("%s(%ld): device reset failed\n", | 814 | reset_errors[err]); |
835 | __func__, ha->host_no)); | 815 | return FAILED; |
836 | qla_printk(KERN_INFO, ha, "%s: device reset failed\n", | 816 | } |
837 | __func__); | ||
838 | 817 | ||
839 | goto eh_dev_reset_done; | 818 | static int |
840 | } | 819 | qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) |
820 | { | ||
821 | scsi_qla_host_t *ha = shost_priv(cmd->device->host); | ||
841 | 822 | ||
842 | /* Flush outstanding commands. */ | 823 | return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd, |
843 | if (qla2x00_eh_wait_for_pending_target_commands(ha, id)) | 824 | ha->isp_ops->lun_reset); |
844 | ret = FAILED; | ||
845 | if (ret == FAILED) { | ||
846 | DEBUG3(printk("%s(%ld): failed while waiting for commands\n", | ||
847 | __func__, ha->host_no)); | ||
848 | qla_printk(KERN_INFO, ha, | ||
849 | "%s: failed while waiting for commands\n", __func__); | ||
850 | } else | ||
851 | qla_printk(KERN_INFO, ha, | ||
852 | "scsi(%ld:%d:%d): DEVICE RESET SUCCEEDED.\n", ha->host_no, | ||
853 | id, lun); | ||
854 | eh_dev_reset_done: | ||
855 | return ret; | ||
856 | } | 825 | } |
857 | 826 | ||
858 | /************************************************************************** | ||
859 | * qla2x00_eh_wait_for_pending_commands | ||
860 | * | ||
861 | * Description: | ||
862 | * Waits for all the commands to come back from the specified host. | ||
863 | * | ||
864 | * Input: | ||
865 | * ha - pointer to scsi_qla_host structure. | ||
866 | * | ||
867 | * Returns: | ||
868 | * 1 : SUCCESS | ||
869 | * 0 : FAILED | ||
870 | * | ||
871 | * Note: | ||
872 | **************************************************************************/ | ||
873 | static int | 827 | static int |
874 | qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha) | 828 | qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) |
875 | { | 829 | { |
876 | int cnt; | 830 | scsi_qla_host_t *ha = shost_priv(cmd->device->host); |
877 | int status; | ||
878 | srb_t *sp; | ||
879 | struct scsi_cmnd *cmd; | ||
880 | unsigned long flags; | ||
881 | |||
882 | status = 1; | ||
883 | 831 | ||
884 | /* | 832 | return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd, |
885 | * Waiting for all commands for the designated target in the active | 833 | ha->isp_ops->target_reset); |
886 | * array | ||
887 | */ | ||
888 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { | ||
889 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
890 | sp = ha->outstanding_cmds[cnt]; | ||
891 | if (sp) { | ||
892 | cmd = sp->cmd; | ||
893 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
894 | status = qla2x00_eh_wait_on_command(ha, cmd); | ||
895 | if (status == 0) | ||
896 | break; | ||
897 | } | ||
898 | else { | ||
899 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
900 | } | ||
901 | } | ||
902 | return (status); | ||
903 | } | 834 | } |
904 | 835 | ||
905 | |||
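
Both new error-handler entry points reduce to __qla2xxx_eh_generic_reset(), which checks HBA and loop readiness, invokes the supplied reset callback, and then drains outstanding commands for the matching nexus. The callbacks come from the per-ISP operations table; judging from the do_reset prototype above and the assignments later in this patch, the relevant isp_ops members presumably look like:

	struct isp_operations {
		...
		int (*target_reset)(struct fc_port *, unsigned int);
		int (*lun_reset)(struct fc_port *, unsigned int);
		...
	};

so the device-reset handler dispatches through ha->isp_ops->lun_reset with WAIT_LUN, and the target-reset handler through ha->isp_ops->target_reset with WAIT_TARGET.
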
906 | /************************************************************************** | 836 | /************************************************************************** |
907 | * qla2xxx_eh_bus_reset | 837 | * qla2xxx_eh_bus_reset |
908 | * | 838 | * |
@@ -953,7 +883,8 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) | |||
953 | goto eh_bus_reset_done; | 883 | goto eh_bus_reset_done; |
954 | 884 | ||
955 | /* Flush outstanding commands. */ | 885 | /* Flush outstanding commands. */ |
956 | if (!qla2x00_eh_wait_for_pending_commands(pha)) | 886 | if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) != |
887 | QLA_SUCCESS) | ||
957 | ret = FAILED; | 888 | ret = FAILED; |
958 | 889 | ||
959 | eh_bus_reset_done: | 890 | eh_bus_reset_done: |
@@ -1024,7 +955,8 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
1024 | clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags); | 955 | clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags); |
1025 | 956 | ||
1026 | /* Waiting for our command in done_queue to be returned to OS.*/ | 957 | /* Waiting for our command in done_queue to be returned to OS.*/ |
1027 | if (qla2x00_eh_wait_for_pending_commands(pha)) | 958 | if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) == |
959 | QLA_SUCCESS) | ||
1028 | ret = SUCCESS; | 960 | ret = SUCCESS; |
1029 | 961 | ||
1030 | if (ha->parent) | 962 | if (ha->parent) |
@@ -1080,7 +1012,7 @@ qla2x00_loop_reset(scsi_qla_host_t *ha) | |||
1080 | if (fcport->port_type != FCT_TARGET) | 1012 | if (fcport->port_type != FCT_TARGET) |
1081 | continue; | 1013 | continue; |
1082 | 1014 | ||
1083 | ret = qla2x00_device_reset(ha, fcport); | 1015 | ret = ha->isp_ops->target_reset(fcport, 0); |
1084 | if (ret != QLA_SUCCESS) { | 1016 | if (ret != QLA_SUCCESS) { |
1085 | DEBUG2_3(printk("%s(%ld): bus_reset failed: " | 1017 | DEBUG2_3(printk("%s(%ld): bus_reset failed: " |
1086 | "target_reset=%d d_id=%x.\n", __func__, | 1018 | "target_reset=%d d_id=%x.\n", __func__, |
@@ -1095,26 +1027,6 @@ qla2x00_loop_reset(scsi_qla_host_t *ha) | |||
1095 | return QLA_SUCCESS; | 1027 | return QLA_SUCCESS; |
1096 | } | 1028 | } |
1097 | 1029 | ||
1098 | /* | ||
1099 | * qla2x00_device_reset | ||
1100 | * Issue bus device reset message to the target. | ||
1101 | * | ||
1102 | * Input: | ||
1103 | * ha = adapter block pointer. | ||
1104 | * t = SCSI ID. | ||
1105 | * TARGET_QUEUE_LOCK must be released. | ||
1106 | * ADAPTER_STATE_LOCK must be released. | ||
1107 | * | ||
1108 | * Context: | ||
1109 | * Kernel context. | ||
1110 | */ | ||
1111 | static int | ||
1112 | qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport) | ||
1113 | { | ||
1114 | /* Abort Target command will clear Reservation */ | ||
1115 | return ha->isp_ops->abort_target(reset_fcport); | ||
1116 | } | ||
1117 | |||
1118 | void | 1030 | void |
1119 | qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res) | 1031 | qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res) |
1120 | { | 1032 | { |
@@ -1292,7 +1204,8 @@ static struct isp_operations qla2100_isp_ops = { | |||
1292 | .enable_intrs = qla2x00_enable_intrs, | 1204 | .enable_intrs = qla2x00_enable_intrs, |
1293 | .disable_intrs = qla2x00_disable_intrs, | 1205 | .disable_intrs = qla2x00_disable_intrs, |
1294 | .abort_command = qla2x00_abort_command, | 1206 | .abort_command = qla2x00_abort_command, |
1295 | .abort_target = qla2x00_abort_target, | 1207 | .target_reset = qla2x00_abort_target, |
1208 | .lun_reset = qla2x00_lun_reset, | ||
1296 | .fabric_login = qla2x00_login_fabric, | 1209 | .fabric_login = qla2x00_login_fabric, |
1297 | .fabric_logout = qla2x00_fabric_logout, | 1210 | .fabric_logout = qla2x00_fabric_logout, |
1298 | .calc_req_entries = qla2x00_calc_iocbs_32, | 1211 | .calc_req_entries = qla2x00_calc_iocbs_32, |
@@ -1325,7 +1238,8 @@ static struct isp_operations qla2300_isp_ops = { | |||
1325 | .enable_intrs = qla2x00_enable_intrs, | 1238 | .enable_intrs = qla2x00_enable_intrs, |
1326 | .disable_intrs = qla2x00_disable_intrs, | 1239 | .disable_intrs = qla2x00_disable_intrs, |
1327 | .abort_command = qla2x00_abort_command, | 1240 | .abort_command = qla2x00_abort_command, |
1328 | .abort_target = qla2x00_abort_target, | 1241 | .target_reset = qla2x00_abort_target, |
1242 | .lun_reset = qla2x00_lun_reset, | ||
1329 | .fabric_login = qla2x00_login_fabric, | 1243 | .fabric_login = qla2x00_login_fabric, |
1330 | .fabric_logout = qla2x00_fabric_logout, | 1244 | .fabric_logout = qla2x00_fabric_logout, |
1331 | .calc_req_entries = qla2x00_calc_iocbs_32, | 1245 | .calc_req_entries = qla2x00_calc_iocbs_32, |
@@ -1358,7 +1272,8 @@ static struct isp_operations qla24xx_isp_ops = { | |||
1358 | .enable_intrs = qla24xx_enable_intrs, | 1272 | .enable_intrs = qla24xx_enable_intrs, |
1359 | .disable_intrs = qla24xx_disable_intrs, | 1273 | .disable_intrs = qla24xx_disable_intrs, |
1360 | .abort_command = qla24xx_abort_command, | 1274 | .abort_command = qla24xx_abort_command, |
1361 | .abort_target = qla24xx_abort_target, | 1275 | .target_reset = qla24xx_abort_target, |
1276 | .lun_reset = qla24xx_lun_reset, | ||
1362 | .fabric_login = qla24xx_login_fabric, | 1277 | .fabric_login = qla24xx_login_fabric, |
1363 | .fabric_logout = qla24xx_fabric_logout, | 1278 | .fabric_logout = qla24xx_fabric_logout, |
1364 | .calc_req_entries = NULL, | 1279 | .calc_req_entries = NULL, |
@@ -1391,7 +1306,8 @@ static struct isp_operations qla25xx_isp_ops = { | |||
1391 | .enable_intrs = qla24xx_enable_intrs, | 1306 | .enable_intrs = qla24xx_enable_intrs, |
1392 | .disable_intrs = qla24xx_disable_intrs, | 1307 | .disable_intrs = qla24xx_disable_intrs, |
1393 | .abort_command = qla24xx_abort_command, | 1308 | .abort_command = qla24xx_abort_command, |
1394 | .abort_target = qla24xx_abort_target, | 1309 | .target_reset = qla24xx_abort_target, |
1310 | .lun_reset = qla24xx_lun_reset, | ||
1395 | .fabric_login = qla24xx_login_fabric, | 1311 | .fabric_login = qla24xx_login_fabric, |
1396 | .fabric_logout = qla24xx_fabric_logout, | 1312 | .fabric_logout = qla24xx_fabric_logout, |
1397 | .calc_req_entries = NULL, | 1313 | .calc_req_entries = NULL, |
@@ -1464,6 +1380,13 @@ qla2x00_set_isp_flags(scsi_qla_host_t *ha) | |||
1464 | ha->device_type |= DT_IIDMA; | 1380 | ha->device_type |= DT_IIDMA; |
1465 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; | 1381 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
1466 | break; | 1382 | break; |
1383 | case PCI_DEVICE_ID_QLOGIC_ISP8432: | ||
1384 | ha->device_type |= DT_ISP8432; | ||
1385 | ha->device_type |= DT_ZIO_SUPPORTED; | ||
1386 | ha->device_type |= DT_FWI2; | ||
1387 | ha->device_type |= DT_IIDMA; | ||
1388 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; | ||
1389 | break; | ||
1467 | case PCI_DEVICE_ID_QLOGIC_ISP5422: | 1390 | case PCI_DEVICE_ID_QLOGIC_ISP5422: |
1468 | ha->device_type |= DT_ISP5422; | 1391 | ha->device_type |= DT_ISP5422; |
1469 | ha->device_type |= DT_FWI2; | 1392 | ha->device_type |= DT_FWI2; |
@@ -1587,6 +1510,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1587 | sht = &qla2x00_driver_template; | 1510 | sht = &qla2x00_driver_template; |
1588 | if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || | 1511 | if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || |
1589 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || | 1512 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || |
1513 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 || | ||
1590 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || | 1514 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || |
1591 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || | 1515 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || |
1592 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532) { | 1516 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532) { |
@@ -1677,7 +1601,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1677 | if (IS_QLA2322(ha) || IS_QLA6322(ha)) | 1601 | if (IS_QLA2322(ha) || IS_QLA6322(ha)) |
1678 | ha->optrom_size = OPTROM_SIZE_2322; | 1602 | ha->optrom_size = OPTROM_SIZE_2322; |
1679 | ha->isp_ops = &qla2300_isp_ops; | 1603 | ha->isp_ops = &qla2300_isp_ops; |
1680 | } else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { | 1604 | } else if (IS_QLA24XX_TYPE(ha)) { |
1681 | host->max_id = MAX_TARGETS_2200; | 1605 | host->max_id = MAX_TARGETS_2200; |
1682 | ha->mbx_count = MAILBOX_REGISTER_COUNT; | 1606 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
1683 | ha->request_q_length = REQUEST_ENTRY_CNT_24XX; | 1607 | ha->request_q_length = REQUEST_ENTRY_CNT_24XX; |
@@ -1699,6 +1623,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1699 | ha->gid_list_info_size = 8; | 1623 | ha->gid_list_info_size = 8; |
1700 | ha->optrom_size = OPTROM_SIZE_25XX; | 1624 | ha->optrom_size = OPTROM_SIZE_25XX; |
1701 | ha->isp_ops = &qla25xx_isp_ops; | 1625 | ha->isp_ops = &qla25xx_isp_ops; |
1626 | ha->hw_event_start = PCI_FUNC(pdev->devfn) ? | ||
1627 | FA_HW_EVENT1_ADDR: FA_HW_EVENT0_ADDR; | ||
1702 | } | 1628 | } |
1703 | host->can_queue = ha->request_q_length + 128; | 1629 | host->can_queue = ha->request_q_length + 128; |
1704 | 1630 | ||
@@ -1713,6 +1639,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1713 | INIT_LIST_HEAD(&ha->list); | 1639 | INIT_LIST_HEAD(&ha->list); |
1714 | INIT_LIST_HEAD(&ha->fcports); | 1640 | INIT_LIST_HEAD(&ha->fcports); |
1715 | INIT_LIST_HEAD(&ha->vp_list); | 1641 | INIT_LIST_HEAD(&ha->vp_list); |
1642 | INIT_LIST_HEAD(&ha->work_list); | ||
1716 | 1643 | ||
1717 | set_bit(0, (unsigned long *) ha->vp_idx_map); | 1644 | set_bit(0, (unsigned long *) ha->vp_idx_map); |
1718 | 1645 | ||
@@ -1819,6 +1746,8 @@ qla2x00_remove_one(struct pci_dev *pdev) | |||
1819 | 1746 | ||
1820 | qla2x00_dfs_remove(ha); | 1747 | qla2x00_dfs_remove(ha); |
1821 | 1748 | ||
1749 | qla84xx_put_chip(ha); | ||
1750 | |||
1822 | qla2x00_free_sysfs_attr(ha); | 1751 | qla2x00_free_sysfs_attr(ha); |
1823 | 1752 | ||
1824 | fc_remove_host(ha->host); | 1753 | fc_remove_host(ha->host); |
@@ -2206,6 +2135,97 @@ qla2x00_mem_free(scsi_qla_host_t *ha) | |||
2206 | kfree(ha->nvram); | 2135 | kfree(ha->nvram); |
2207 | } | 2136 | } |
2208 | 2137 | ||
2138 | struct qla_work_evt * | ||
2139 | qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type, | ||
2140 | int locked) | ||
2141 | { | ||
2142 | struct qla_work_evt *e; | ||
2143 | |||
2144 | e = kzalloc(sizeof(struct qla_work_evt), locked ? GFP_ATOMIC: | ||
2145 | GFP_KERNEL); | ||
2146 | if (!e) | ||
2147 | return NULL; | ||
2148 | |||
2149 | INIT_LIST_HEAD(&e->list); | ||
2150 | e->type = type; | ||
2151 | e->flags = QLA_EVT_FLAG_FREE; | ||
2152 | return e; | ||
2153 | } | ||
2154 | |||
2155 | int | ||
2156 | qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked) | ||
2157 | { | ||
2158 | unsigned long flags; | ||
2159 | |||
2160 | if (!locked) | ||
2161 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
2162 | list_add_tail(&e->list, &ha->work_list); | ||
2163 | qla2xxx_wake_dpc(ha); | ||
2164 | if (!locked) | ||
2165 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
2166 | return QLA_SUCCESS; | ||
2167 | } | ||
2168 | |||
2169 | int | ||
2170 | qla2x00_post_aen_work(struct scsi_qla_host *ha, enum fc_host_event_code code, | ||
2171 | u32 data) | ||
2172 | { | ||
2173 | struct qla_work_evt *e; | ||
2174 | |||
2175 | e = qla2x00_alloc_work(ha, QLA_EVT_AEN, 1); | ||
2176 | if (!e) | ||
2177 | return QLA_FUNCTION_FAILED; | ||
2178 | |||
2179 | e->u.aen.code = code; | ||
2180 | e->u.aen.data = data; | ||
2181 | return qla2x00_post_work(ha, e, 1); | ||
2182 | } | ||
2183 | |||
2184 | int | ||
2185 | qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1, | ||
2186 | uint16_t d2, uint16_t d3) | ||
2187 | { | ||
2188 | struct qla_work_evt *e; | ||
2189 | |||
2190 | e = qla2x00_alloc_work(ha, QLA_EVT_HWE_LOG, 1); | ||
2191 | if (!e) | ||
2192 | return QLA_FUNCTION_FAILED; | ||
2193 | |||
2194 | e->u.hwe.code = code; | ||
2195 | e->u.hwe.d1 = d1; | ||
2196 | e->u.hwe.d2 = d2; | ||
2197 | e->u.hwe.d3 = d3; | ||
2198 | return qla2x00_post_work(ha, e, 1); | ||
2199 | } | ||
2200 | |||
2201 | static void | ||
2202 | qla2x00_do_work(struct scsi_qla_host *ha) | ||
2203 | { | ||
2204 | struct qla_work_evt *e; | ||
2205 | |||
2206 | spin_lock_irq(&ha->hardware_lock); | ||
2207 | while (!list_empty(&ha->work_list)) { | ||
2208 | e = list_entry(ha->work_list.next, struct qla_work_evt, list); | ||
2209 | list_del_init(&e->list); | ||
2210 | spin_unlock_irq(&ha->hardware_lock); | ||
2211 | |||
2212 | switch (e->type) { | ||
2213 | case QLA_EVT_AEN: | ||
2214 | fc_host_post_event(ha->host, fc_get_event_number(), | ||
2215 | e->u.aen.code, e->u.aen.data); | ||
2216 | break; | ||
2217 | case QLA_EVT_HWE_LOG: | ||
2218 | qla2xxx_hw_event_log(ha, e->u.hwe.code, e->u.hwe.d1, | ||
2219 | e->u.hwe.d2, e->u.hwe.d3); | ||
2220 | break; | ||
2221 | } | ||
2222 | if (e->flags & QLA_EVT_FLAG_FREE) | ||
2223 | kfree(e); | ||
2224 | spin_lock_irq(&ha->hardware_lock); | ||
2225 | } | ||
2226 | spin_unlock_irq(&ha->hardware_lock); | ||
2227 | } | ||
2228 | |||
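
The new work-list machinery lets interrupt-time code defer fc_host event posting and hardware-event logging to the DPC thread: qla2x00_post_aen_work() and qla2x00_post_hwe_work() allocate an event with GFP_ATOMIC, queue it on ha->work_list, and wake the DPC, which drains the list in qla2x00_do_work(). A hedged example of an interrupt-context caller (the FCH_EVT_LIP code and mb[1] payload are illustrative, not taken from this hunk):

	/* e.g. from async-event handling in the ISR, with hardware_lock held */
	qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]);
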
2209 | /************************************************************************** | 2229 | /************************************************************************** |
2210 | * qla2x00_do_dpc | 2230 | * qla2x00_do_dpc |
2211 | * This kernel thread is a task that is schedule by the interrupt handler | 2231 | * This kernel thread is a task that is schedule by the interrupt handler |
@@ -2257,6 +2277,8 @@ qla2x00_do_dpc(void *data) | |||
2257 | continue; | 2277 | continue; |
2258 | } | 2278 | } |
2259 | 2279 | ||
2280 | qla2x00_do_work(ha); | ||
2281 | |||
2260 | if (test_and_clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) { | 2282 | if (test_and_clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) { |
2261 | 2283 | ||
2262 | DEBUG(printk("scsi(%ld): dpc: sched " | 2284 | DEBUG(printk("scsi(%ld): dpc: sched " |
@@ -2291,12 +2313,6 @@ qla2x00_do_dpc(void *data) | |||
2291 | if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) | 2313 | if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) |
2292 | qla2x00_update_fcports(ha); | 2314 | qla2x00_update_fcports(ha); |
2293 | 2315 | ||
2294 | if (test_and_clear_bit(LOOP_RESET_NEEDED, &ha->dpc_flags)) { | ||
2295 | DEBUG(printk("scsi(%ld): dpc: sched loop_reset()\n", | ||
2296 | ha->host_no)); | ||
2297 | qla2x00_loop_reset(ha); | ||
2298 | } | ||
2299 | |||
2300 | if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) && | 2316 | if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) && |
2301 | (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) { | 2317 | (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) { |
2302 | 2318 | ||
@@ -2367,19 +2383,6 @@ qla2x00_do_dpc(void *data) | |||
2367 | ha->host_no)); | 2383 | ha->host_no)); |
2368 | } | 2384 | } |
2369 | 2385 | ||
2370 | if ((test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags)) && | ||
2371 | atomic_read(&ha->loop_state) != LOOP_DOWN) { | ||
2372 | |||
2373 | clear_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags); | ||
2374 | DEBUG(printk("scsi(%ld): qla2x00_login_retry()\n", | ||
2375 | ha->host_no)); | ||
2376 | |||
2377 | set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); | ||
2378 | |||
2379 | DEBUG(printk("scsi(%ld): qla2x00_login_retry - end\n", | ||
2380 | ha->host_no)); | ||
2381 | } | ||
2382 | |||
2383 | if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { | 2386 | if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { |
2384 | 2387 | ||
2385 | DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n", | 2388 | DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n", |
@@ -2397,18 +2400,6 @@ qla2x00_do_dpc(void *data) | |||
2397 | ha->host_no)); | 2400 | ha->host_no)); |
2398 | } | 2401 | } |
2399 | 2402 | ||
2400 | if (test_and_clear_bit(FCPORT_RESCAN_NEEDED, &ha->dpc_flags)) { | ||
2401 | |||
2402 | DEBUG(printk("scsi(%ld): Rescan flagged fcports...\n", | ||
2403 | ha->host_no)); | ||
2404 | |||
2405 | qla2x00_rescan_fcports(ha); | ||
2406 | |||
2407 | DEBUG(printk("scsi(%ld): Rescan flagged fcports..." | ||
2408 | "end.\n", | ||
2409 | ha->host_no)); | ||
2410 | } | ||
2411 | |||
2412 | if (!ha->interrupts_on) | 2403 | if (!ha->interrupts_on) |
2413 | ha->isp_ops->enable_intrs(ha); | 2404 | ha->isp_ops->enable_intrs(ha); |
2414 | 2405 | ||
@@ -2586,7 +2577,8 @@ qla2x00_timer(scsi_qla_host_t *ha) | |||
2586 | set_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags); | 2577 | set_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags); |
2587 | start_dpc++; | 2578 | start_dpc++; |
2588 | 2579 | ||
2589 | if (!(ha->device_flags & DFLG_NO_CABLE)) { | 2580 | if (!(ha->device_flags & DFLG_NO_CABLE) && |
2581 | !ha->parent) { | ||
2590 | DEBUG(printk("scsi(%ld): Loop down - " | 2582 | DEBUG(printk("scsi(%ld): Loop down - " |
2591 | "aborting ISP.\n", | 2583 | "aborting ISP.\n", |
2592 | ha->host_no)); | 2584 | ha->host_no)); |
@@ -2610,10 +2602,8 @@ qla2x00_timer(scsi_qla_host_t *ha) | |||
2610 | /* Schedule the DPC routine if needed */ | 2602 | /* Schedule the DPC routine if needed */ |
2611 | if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || | 2603 | if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || |
2612 | test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || | 2604 | test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || |
2613 | test_bit(LOOP_RESET_NEEDED, &ha->dpc_flags) || | ||
2614 | test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags) || | 2605 | test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags) || |
2615 | start_dpc || | 2606 | start_dpc || |
2616 | test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) || | ||
2617 | test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || | 2607 | test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || |
2618 | test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) || | 2608 | test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) || |
2619 | test_bit(VP_DPC_NEEDED, &ha->dpc_flags) || | 2609 | test_bit(VP_DPC_NEEDED, &ha->dpc_flags) || |
@@ -2665,7 +2655,7 @@ qla2x00_request_firmware(scsi_qla_host_t *ha) | |||
2665 | blob = &qla_fw_blobs[FW_ISP2300]; | 2655 | blob = &qla_fw_blobs[FW_ISP2300]; |
2666 | } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) { | 2656 | } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) { |
2667 | blob = &qla_fw_blobs[FW_ISP2322]; | 2657 | blob = &qla_fw_blobs[FW_ISP2322]; |
2668 | } else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { | 2658 | } else if (IS_QLA24XX_TYPE(ha)) { |
2669 | blob = &qla_fw_blobs[FW_ISP24XX]; | 2659 | blob = &qla_fw_blobs[FW_ISP24XX]; |
2670 | } else if (IS_QLA25XX(ha)) { | 2660 | } else if (IS_QLA25XX(ha)) { |
2671 | blob = &qla_fw_blobs[FW_ISP25XX]; | 2661 | blob = &qla_fw_blobs[FW_ISP25XX]; |
@@ -2815,6 +2805,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = { | |||
2815 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) }, | 2805 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) }, |
2816 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) }, | 2806 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) }, |
2817 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) }, | 2807 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) }, |
2808 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) }, | ||
2818 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, | 2809 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, |
2819 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, | 2810 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, |
2820 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, | 2811 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, |
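
The device table gains the ISP8432 entry, and checks that previously spelled out IS_QLA24XX(ha) || IS_QLA54XX(ha) now use IS_QLA24XX_TYPE(ha). That macro is defined in a driver header outside this diff; given the DT_ISP8432 flag set in qla2x00_set_isp_flags() above, it presumably expands to something along the lines of:

	/* assumed shape of the consolidated check; not part of this diff */
	#define IS_QLA24XX_TYPE(ha)	(IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
					 IS_QLA84XX(ha))
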
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h index 249e4d90fdc5..2801c2664b40 100644 --- a/drivers/scsi/qla2xxx/qla_settings.h +++ b/drivers/scsi/qla2xxx/qla_settings.h | |||
@@ -1,26 +1,12 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | /* | ||
8 | * Compile time Options: | ||
9 | * 0 - Disable and 1 - Enable | ||
10 | */ | ||
11 | #define DEBUG_QLA2100 0 /* For Debug of qla2x00 */ | ||
12 | |||
13 | #define USE_ABORT_TGT 1 /* Use Abort Target mbx cmd */ | ||
14 | |||
15 | #define MAX_RETRIES_OF_ISP_ABORT 5 | 7 | #define MAX_RETRIES_OF_ISP_ABORT 5 |
16 | 8 | ||
17 | /* Max time to wait for the loop to be in LOOP_READY state */ | 9 | /* Max time to wait for the loop to be in LOOP_READY state */ |
18 | #define MAX_LOOP_TIMEOUT (60 * 5) | 10 | #define MAX_LOOP_TIMEOUT (60 * 5) |
19 | 11 | ||
20 | /* | ||
21 | * Some vendor subsystems do not recover properly after a device reset. Define | ||
22 | * the following to force a logout after a successful device reset. | ||
23 | */ | ||
24 | #undef LOGOUT_AFTER_DEVICE_RESET | ||
25 | |||
26 | #include "qla_version.h" | 12 | #include "qla_version.h" |
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 26822c8807ee..1728ab3ccb20 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -543,79 +543,174 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, | |||
543 | } | 543 | } |
544 | } | 544 | } |
545 | 545 | ||
546 | static int | 546 | void |
547 | qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, | 547 | qla2xxx_get_flash_info(scsi_qla_host_t *ha) |
548 | uint32_t dwords) | ||
549 | { | 548 | { |
550 | int ret; | 549 | #define FLASH_BLK_SIZE_32K 0x8000 |
551 | uint32_t liter, miter; | 550 | #define FLASH_BLK_SIZE_64K 0x10000 |
552 | uint32_t sec_mask, rest_addr, conf_addr; | 551 | uint16_t cnt, chksum; |
553 | uint32_t fdata, findex, cnt; | 552 | uint16_t *wptr; |
553 | struct qla_fdt_layout *fdt; | ||
554 | uint8_t man_id, flash_id; | 554 | uint8_t man_id, flash_id; |
555 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | ||
556 | dma_addr_t optrom_dma; | ||
557 | void *optrom = NULL; | ||
558 | uint32_t *s, *d; | ||
559 | 555 | ||
560 | ret = QLA_SUCCESS; | 556 | if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) |
557 | return; | ||
561 | 558 | ||
562 | /* Prepare burst-capable write on supported ISPs. */ | 559 | wptr = (uint16_t *)ha->request_ring; |
563 | if (IS_QLA25XX(ha) && !(faddr & 0xfff) && | 560 | fdt = (struct qla_fdt_layout *)ha->request_ring; |
564 | dwords > OPTROM_BURST_DWORDS) { | 561 | ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring, |
565 | optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, | 562 | FA_FLASH_DESCR_ADDR << 2, OPTROM_BURST_SIZE); |
566 | &optrom_dma, GFP_KERNEL); | 563 | if (*wptr == __constant_cpu_to_le16(0xffff)) |
567 | if (!optrom) { | 564 | goto no_flash_data; |
568 | qla_printk(KERN_DEBUG, ha, | 565 | if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' || |
569 | "Unable to allocate memory for optrom burst write " | 566 | fdt->sig[3] != 'D') |
570 | "(%x KB).\n", OPTROM_BURST_SIZE / 1024); | 567 | goto no_flash_data; |
571 | } | 568 | |
569 | for (cnt = 0, chksum = 0; cnt < sizeof(struct qla_fdt_layout) >> 1; | ||
570 | cnt++) | ||
571 | chksum += le16_to_cpu(*wptr++); | ||
572 | if (chksum) { | ||
573 | DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FDT detected: " | ||
574 | "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0], | ||
575 | le16_to_cpu(fdt->version))); | ||
576 | DEBUG9(qla2x00_dump_buffer((uint8_t *)fdt, sizeof(*fdt))); | ||
577 | goto no_flash_data; | ||
572 | } | 578 | } |
573 | 579 | ||
574 | qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id); | 580 | ha->fdt_odd_index = le16_to_cpu(fdt->man_id) == 0x1f; |
575 | DEBUG9(printk("%s(%ld): Flash man_id=%d flash_id=%d\n", __func__, | 581 | ha->fdt_wrt_disable = fdt->wrt_disable_bits; |
576 | ha->host_no, man_id, flash_id)); | 582 | ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd); |
583 | ha->fdt_block_size = le32_to_cpu(fdt->block_size); | ||
584 | if (fdt->unprotect_sec_cmd) { | ||
585 | ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0300 | | ||
586 | fdt->unprotect_sec_cmd); | ||
587 | ha->fdt_protect_sec_cmd = fdt->protect_sec_cmd ? | ||
588 | flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd): | ||
589 | flash_conf_to_access_addr(0x0336); | ||
590 | } | ||
577 | 591 | ||
578 | conf_addr = flash_conf_to_access_addr(0x03d8); | 592 | DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[FDT]: (0x%x/0x%x) erase=0x%x " |
593 | "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", | ||
594 | le16_to_cpu(fdt->man_id), le16_to_cpu(fdt->id), ha->fdt_erase_cmd, | ||
595 | ha->fdt_protect_sec_cmd, ha->fdt_unprotect_sec_cmd, | ||
596 | ha->fdt_odd_index, ha->fdt_wrt_disable, ha->fdt_block_size)); | ||
597 | return; | ||
598 | |||
599 | no_flash_data: | ||
600 | qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id); | ||
601 | ha->fdt_wrt_disable = 0x9c; | ||
602 | ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8); | ||
579 | switch (man_id) { | 603 | switch (man_id) { |
580 | case 0xbf: /* STT flash. */ | 604 | case 0xbf: /* STT flash. */ |
581 | if (flash_id == 0x8e) { | 605 | if (flash_id == 0x8e) |
582 | rest_addr = 0x3fff; | 606 | ha->fdt_block_size = FLASH_BLK_SIZE_64K; |
583 | sec_mask = 0x7c000; | 607 | else |
584 | } else { | 608 | ha->fdt_block_size = FLASH_BLK_SIZE_32K; |
585 | rest_addr = 0x1fff; | 609 | |
586 | sec_mask = 0x7e000; | ||
587 | } | ||
588 | if (flash_id == 0x80) | 610 | if (flash_id == 0x80) |
589 | conf_addr = flash_conf_to_access_addr(0x0352); | 611 | ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0352); |
590 | break; | 612 | break; |
591 | case 0x13: /* ST M25P80. */ | 613 | case 0x13: /* ST M25P80. */ |
592 | rest_addr = 0x3fff; | 614 | ha->fdt_block_size = FLASH_BLK_SIZE_64K; |
593 | sec_mask = 0x7c000; | ||
594 | break; | 615 | break; |
595 | case 0x1f: // Atmel 26DF081A | 616 | case 0x1f: /* Atmel 26DF081A. */ |
596 | rest_addr = 0x3fff; | 617 | ha->fdt_odd_index = 1; |
597 | sec_mask = 0x7c000; | 618 | ha->fdt_block_size = FLASH_BLK_SIZE_64K; |
598 | conf_addr = flash_conf_to_access_addr(0x0320); | 619 | ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0320); |
620 | ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0339); | ||
621 | ha->fdt_protect_sec_cmd = flash_conf_to_access_addr(0x0336); | ||
599 | break; | 622 | break; |
600 | default: | 623 | default: |
601 | /* Default to 64 kb sector size. */ | 624 | /* Default to 64 kb sector size. */ |
602 | rest_addr = 0x3fff; | 625 | ha->fdt_block_size = FLASH_BLK_SIZE_64K; |
603 | sec_mask = 0x7c000; | ||
604 | break; | 626 | break; |
605 | } | 627 | } |
606 | 628 | ||
629 | DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[MID]: (0x%x/0x%x) erase=0x%x " | ||
630 | "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", man_id, flash_id, | ||
631 | ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, | ||
632 | ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable, | ||
633 | ha->fdt_block_size)); | ||
634 | } | ||
635 | |||
636 | static void | ||
637 | qla24xx_unprotect_flash(scsi_qla_host_t *ha) | ||
638 | { | ||
639 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | ||
640 | |||
607 | /* Enable flash write. */ | 641 | /* Enable flash write. */ |
608 | WRT_REG_DWORD(&reg->ctrl_status, | 642 | WRT_REG_DWORD(&reg->ctrl_status, |
609 | RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE); | 643 | RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE); |
610 | RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */ | 644 | RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */ |
611 | 645 | ||
646 | if (!ha->fdt_wrt_disable) | ||
647 | return; | ||
648 | |||
612 | /* Disable flash write-protection. */ | 649 | /* Disable flash write-protection. */ |
613 | qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0); | 650 | qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0); |
614 | /* Some flash parts need an additional zero-write to clear bits.*/ | 651 | /* Some flash parts need an additional zero-write to clear bits.*/ |
615 | qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0); | 652 | qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0); |
653 | } | ||
654 | |||
655 | static void | ||
656 | qla24xx_protect_flash(scsi_qla_host_t *ha) | ||
657 | { | ||
658 | uint32_t cnt; | ||
659 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | ||
660 | |||
661 | if (!ha->fdt_wrt_disable) | ||
662 | goto skip_wrt_protect; | ||
663 | |||
664 | /* Enable flash write-protection and wait for completion. */ | ||
665 | qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), | ||
666 | ha->fdt_wrt_disable); | ||
667 | for (cnt = 300; cnt && | ||
668 | qla24xx_read_flash_dword(ha, | ||
669 | flash_conf_to_access_addr(0x005)) & BIT_0; | ||
670 | cnt--) { | ||
671 | udelay(10); | ||
672 | } | ||
673 | |||
674 | skip_wrt_protect: | ||
675 | /* Disable flash write. */ | ||
676 | WRT_REG_DWORD(&reg->ctrl_status, | ||
677 | RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE); | ||
678 | RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */ | ||
679 | } | ||
680 | |||
681 | static int | ||
682 | qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, | ||
683 | uint32_t dwords) | ||
684 | { | ||
685 | int ret; | ||
686 | uint32_t liter, miter; | ||
687 | uint32_t sec_mask, rest_addr; | ||
688 | uint32_t fdata, findex; | ||
689 | dma_addr_t optrom_dma; | ||
690 | void *optrom = NULL; | ||
691 | uint32_t *s, *d; | ||
692 | |||
693 | ret = QLA_SUCCESS; | ||
694 | |||
695 | /* Prepare burst-capable write on supported ISPs. */ | ||
696 | if (IS_QLA25XX(ha) && !(faddr & 0xfff) && | ||
697 | dwords > OPTROM_BURST_DWORDS) { | ||
698 | optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, | ||
699 | &optrom_dma, GFP_KERNEL); | ||
700 | if (!optrom) { | ||
701 | qla_printk(KERN_DEBUG, ha, | ||
702 | "Unable to allocate memory for optrom burst write " | ||
703 | "(%x KB).\n", OPTROM_BURST_SIZE / 1024); | ||
704 | } | ||
705 | } | ||
706 | |||
707 | rest_addr = (ha->fdt_block_size >> 2) - 1; | ||
708 | sec_mask = 0x80000 - (ha->fdt_block_size >> 2); | ||
709 | |||
710 | qla24xx_unprotect_flash(ha); | ||
616 | 711 | ||
617 | for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) { | 712 | for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) { |
618 | if (man_id == 0x1f) { | 713 | if (ha->fdt_odd_index) { |
619 | findex = faddr << 2; | 714 | findex = faddr << 2; |
620 | fdata = findex & sec_mask; | 715 | fdata = findex & sec_mask; |
621 | } else { | 716 | } else { |
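
The qla2xxx_get_flash_info() hunk above only trusts the Flash Descriptor Table when the "QLID" signature is present and the 16-bit word checksum over the structure sums to zero; anything else falls through to the per-manufacturer defaults at no_flash_data. A minimal userspace sketch of that sanity check, using an invented 16-byte table rather than struct qla_fdt_layout read out of flash, might look like this:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reject the table unless it starts with "QLID" and its little-endian
 * 16-bit words sum to zero, the same two tests the driver applies. */
static int fdt_valid(const uint8_t *p, size_t len)
{
        uint16_t chksum = 0;
        size_t i;

        if (len < 4 || memcmp(p, "QLID", 4) != 0)
                return 0;
        for (i = 0; i + 1 < len; i += 2)
                chksum += (uint16_t)(p[i] | p[i + 1] << 8);
        return chksum == 0;
}

int main(void)
{
        uint8_t fdt[16] = { 'Q', 'L', 'I', 'D' };       /* invented table */
        uint16_t sum;

        /* Pick the word at offset 4 so the whole table sums to zero;
         * the offset is illustrative, not the real FDT layout. */
        sum = (uint16_t)('Q' | 'L' << 8) + (uint16_t)('I' | 'D' << 8);
        fdt[4] = (uint16_t)(0 - sum) & 0xff;
        fdt[5] = ((uint16_t)(0 - sum) >> 8) & 0xff;

        printf("fdt_valid=%d\n", fdt_valid(fdt, sizeof(fdt)));
        return 0;
}

Storing the complement of the running sum in one word is what lets the whole table wrap to zero, which is all the driver checks before using the FDT-supplied erase and protect commands.
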
@@ -625,13 +720,13 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, | |||
625 | 720 | ||
626 | /* Are we at the beginning of a sector? */ | 721 | /* Are we at the beginning of a sector? */ |
627 | if ((findex & rest_addr) == 0) { | 722 | if ((findex & rest_addr) == 0) { |
628 | /* Do sector unprotect at 4K boundry for Atmel part. */ | 723 | /* Do sector unprotect. */ |
629 | if (man_id == 0x1f) | 724 | if (ha->fdt_unprotect_sec_cmd) |
630 | qla24xx_write_flash_dword(ha, | 725 | qla24xx_write_flash_dword(ha, |
631 | flash_conf_to_access_addr(0x0339), | 726 | ha->fdt_unprotect_sec_cmd, |
632 | (fdata & 0xff00) | ((fdata << 16) & | 727 | (fdata & 0xff00) | ((fdata << 16) & |
633 | 0xff0000) | ((fdata >> 16) & 0xff)); | 728 | 0xff0000) | ((fdata >> 16) & 0xff)); |
634 | ret = qla24xx_write_flash_dword(ha, conf_addr, | 729 | ret = qla24xx_write_flash_dword(ha, ha->fdt_erase_cmd, |
635 | (fdata & 0xff00) |((fdata << 16) & | 730 | (fdata & 0xff00) |((fdata << 16) & |
636 | 0xff0000) | ((fdata >> 16) & 0xff)); | 731 | 0xff0000) | ((fdata >> 16) & 0xff)); |
637 | if (ret != QLA_SUCCESS) { | 732 | if (ret != QLA_SUCCESS) { |
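
Instead of the per-part rest_addr/sec_mask constants deleted above, the write path now derives both from ha->fdt_block_size, and the loop issues an erase whenever (findex & rest_addr) == 0. The standalone sketch below (sample block sizes only) shows that arithmetic and reproduces the values the old code hard-wired for the 32K and 64K parts:

#include <stdio.h>

/* Reproduce the mask arithmetic from the rewritten qla24xx_write_flash_data():
 * rest_addr masks the dword offset inside one erase sector, sec_mask keeps
 * the sector-number bits below the 0x80000-dword flash window. */
static void flash_masks(unsigned int block_size_bytes,
                        unsigned int *rest_addr, unsigned int *sec_mask)
{
        unsigned int blk_dwords = block_size_bytes >> 2;    /* bytes -> dwords */

        *rest_addr = blk_dwords - 1;
        *sec_mask = 0x80000 - blk_dwords;
}

int main(void)
{
        static const unsigned int sizes[] = { 0x8000, 0x10000 };  /* 32K, 64K */
        unsigned int rest, mask, faddr, i;

        for (i = 0; i < 2; i++) {
                flash_masks(sizes[i], &rest, &mask);
                printf("blk=0x%05x rest_addr=0x%05x sec_mask=0x%05x\n",
                       sizes[i], rest, mask);
        }

        /* A new erase sector starts whenever no offset bits are set. */
        flash_masks(0x10000, &rest, &mask);
        for (faddr = 0x3ffe; faddr <= 0x4001; faddr++)
                printf("faddr=0x%04x sector_start=%d\n",
                       faddr, (faddr & rest) == 0);
        return 0;
}

The two printed mask pairs (0x1fff/0x7e000 and 0x3fff/0x7c000) are exactly the constants that used to be selected per manufacturer ID.
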
@@ -681,28 +776,16 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, | |||
681 | break; | 776 | break; |
682 | } | 777 | } |
683 | 778 | ||
684 | /* Do sector protect at 4K boundry for Atmel part. */ | 779 | /* Do sector protect. */ |
685 | if (man_id == 0x1f && | 780 | if (ha->fdt_unprotect_sec_cmd && |
686 | ((faddr & rest_addr) == rest_addr)) | 781 | ((faddr & rest_addr) == rest_addr)) |
687 | qla24xx_write_flash_dword(ha, | 782 | qla24xx_write_flash_dword(ha, |
688 | flash_conf_to_access_addr(0x0336), | 783 | ha->fdt_protect_sec_cmd, |
689 | (fdata & 0xff00) | ((fdata << 16) & | 784 | (fdata & 0xff00) | ((fdata << 16) & |
690 | 0xff0000) | ((fdata >> 16) & 0xff)); | 785 | 0xff0000) | ((fdata >> 16) & 0xff)); |
691 | } | 786 | } |
692 | 787 | ||
693 | /* Enable flash write-protection and wait for completion. */ | 788 | qla24xx_protect_flash(ha); |
694 | qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0x9c); | ||
695 | for (cnt = 300; cnt && | ||
696 | qla24xx_read_flash_dword(ha, | ||
697 | flash_conf_to_access_addr(0x005)) & BIT_0; | ||
698 | cnt--) { | ||
699 | udelay(10); | ||
700 | } | ||
701 | |||
702 | /* Disable flash write. */ | ||
703 | WRT_REG_DWORD(&reg->ctrl_status, | ||
704 | RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE); | ||
705 | RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */ | ||
706 | 789 | ||
707 | if (optrom) | 790 | if (optrom) |
708 | dma_free_coherent(&ha->pdev->dev, | 791 | dma_free_coherent(&ha->pdev->dev, |
@@ -2221,3 +2304,107 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf) | |||
2221 | 2304 | ||
2222 | return ret; | 2305 | return ret; |
2223 | } | 2306 | } |
2307 | |||
2308 | static int | ||
2309 | qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata) | ||
2310 | { | ||
2311 | uint32_t d[2], faddr; | ||
2312 | |||
2313 | /* Locate first empty entry. */ | ||
2314 | for (;;) { | ||
2315 | if (ha->hw_event_ptr >= | ||
2316 | ha->hw_event_start + FA_HW_EVENT_SIZE) { | ||
2317 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
2318 | "HW event -- Log Full!\n")); | ||
2319 | return QLA_MEMORY_ALLOC_FAILED; | ||
2320 | } | ||
2321 | |||
2322 | qla24xx_read_flash_data(ha, d, ha->hw_event_ptr, 2); | ||
2323 | faddr = flash_data_to_access_addr(ha->hw_event_ptr); | ||
2324 | ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE; | ||
2325 | if (d[0] == __constant_cpu_to_le32(0xffffffff) && | ||
2326 | d[1] == __constant_cpu_to_le32(0xffffffff)) { | ||
2327 | qla24xx_unprotect_flash(ha); | ||
2328 | |||
2329 | qla24xx_write_flash_dword(ha, faddr++, | ||
2330 | cpu_to_le32(jiffies)); | ||
2331 | qla24xx_write_flash_dword(ha, faddr++, 0); | ||
2332 | qla24xx_write_flash_dword(ha, faddr++, *fdata++); | ||
2333 | qla24xx_write_flash_dword(ha, faddr++, *fdata); | ||
2334 | |||
2335 | qla24xx_protect_flash(ha); | ||
2336 | break; | ||
2337 | } | ||
2338 | } | ||
2339 | return QLA_SUCCESS; | ||
2340 | } | ||
2341 | |||
2342 | int | ||
2343 | qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1, | ||
2344 | uint16_t d2, uint16_t d3) | ||
2345 | { | ||
2346 | #define QMARK(a, b, c, d) \ | ||
2347 | cpu_to_le32(LSB(a) << 24 | LSB(b) << 16 | LSB(c) << 8 | LSB(d)) | ||
2348 | |||
2349 | int rval; | ||
2350 | uint32_t marker[2], fdata[4]; | ||
2351 | |||
2352 | if (ha->hw_event_start == 0) | ||
2353 | return QLA_FUNCTION_FAILED; | ||
2354 | |||
2355 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
2356 | "HW event -- code=%x, d1=%x, d2=%x, d3=%x.\n", code, d1, d2, d3)); | ||
2357 | |||
2358 | /* If marker not already found, locate or write. */ | ||
2359 | if (!ha->flags.hw_event_marker_found) { | ||
2360 | /* Create marker. */ | ||
2361 | marker[0] = QMARK('L', ha->fw_major_version, | ||
2362 | ha->fw_minor_version, ha->fw_subminor_version); | ||
2363 | marker[1] = QMARK(QLA_DRIVER_MAJOR_VER, QLA_DRIVER_MINOR_VER, | ||
2364 | QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER); | ||
2365 | |||
2366 | /* Locate marker. */ | ||
2367 | ha->hw_event_ptr = ha->hw_event_start; | ||
2368 | for (;;) { | ||
2369 | qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr, | ||
2370 | 4); | ||
2371 | if (fdata[0] == __constant_cpu_to_le32(0xffffffff) && | ||
2372 | fdata[1] == __constant_cpu_to_le32(0xffffffff)) | ||
2373 | break; | ||
2374 | ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE; | ||
2375 | if (ha->hw_event_ptr >= | ||
2376 | ha->hw_event_start + FA_HW_EVENT_SIZE) { | ||
2377 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
2378 | "HW event -- Log Full!\n")); | ||
2379 | return QLA_MEMORY_ALLOC_FAILED; | ||
2380 | } | ||
2381 | if (fdata[2] == marker[0] && fdata[3] == marker[1]) { | ||
2382 | ha->flags.hw_event_marker_found = 1; | ||
2383 | break; | ||
2384 | } | ||
2385 | } | ||
2386 | /* No marker, write it. */ | ||
2387 | if (!ha->flags.hw_event_marker_found) { | ||
2388 | rval = qla2xxx_hw_event_store(ha, marker); | ||
2389 | if (rval != QLA_SUCCESS) { | ||
2390 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
2391 | "HW event -- Failed marker write=%x.!\n", | ||
2392 | rval)); | ||
2393 | return rval; | ||
2394 | } | ||
2395 | ha->flags.hw_event_marker_found = 1; | ||
2396 | } | ||
2397 | } | ||
2398 | |||
2399 | /* Store error. */ | ||
2400 | fdata[0] = cpu_to_le32(code << 16 | d1); | ||
2401 | fdata[1] = cpu_to_le32(d2 << 16 | d3); | ||
2402 | rval = qla2xxx_hw_event_store(ha, fdata); | ||
2403 | if (rval != QLA_SUCCESS) { | ||
2404 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
2405 | "HW event -- Failed error write=%x.!\n", | ||
2406 | rval)); | ||
2407 | } | ||
2408 | |||
2409 | return rval; | ||
2410 | } | ||
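
The new hardware-event log writes four-dword entries to flash: a jiffies timestamp, a zero word, and two payload words. For the marker entry the payload bytes are packed with QMARK() from the firmware and driver version numbers; for an error entry they carry code/d1 and d2/d3. The standalone sketch below mirrors that packing but drops the cpu_to_le32() conversion and uses made-up version and event values:

#include <stdint.h>
#include <stdio.h>

/* Payload packing for the two-word log entries; LSB()/QMARK() follow the
 * shape of the macros in the patch, minus the endian conversion. */
#define LSB(x)  ((uint32_t)((x) & 0xff))
#define QMARK(a, b, c, d) \
        (LSB(a) << 24 | LSB(b) << 16 | LSB(c) << 8 | LSB(d))

int main(void)
{
        /* Marker entry payload: firmware version, then driver version. */
        uint32_t marker[2] = {
                QMARK('L', 4, 0, 27),   /* sample firmware 4.00.27 */
                QMARK(8, 2, 1, 0),      /* sample driver 8.02.01 */
        };

        /* Error entry payload: code/d1 in word 0, d2/d3 in word 1. */
        uint16_t code = 0xbeef, d1 = 1, d2 = 2, d3 = 3;
        uint32_t fdata[2] = {
                (uint32_t)code << 16 | d1,
                (uint32_t)d2 << 16 | d3,
        };

        printf("marker: %08x %08x\n", marker[0], marker[1]);
        printf("event:  %08x %08x\n", fdata[0], fdata[1]);
        return 0;
}

qla2xxx_hw_event_store() then scans the log region for the first pair of 0xffffffff words and writes the timestamp plus these two payload words there, inside an unprotect/protect bracket.
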
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index ea08a129fee9..f42f17acf2cf 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -1,15 +1,15 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic Fibre Channel HBA Driver |
3 | * Copyright (c) 2003-2005 QLogic Corporation | 3 | * Copyright (c) 2003-2008 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.02.00-k9" | 10 | #define QLA2XXX_VERSION "8.02.01-k1" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 2 | 13 | #define QLA_DRIVER_MINOR_VER 2 |
14 | #define QLA_DRIVER_PATCH_VER 0 | 14 | #define QLA_DRIVER_PATCH_VER 1 |
15 | #define QLA_DRIVER_BETA_VER 0 | 15 | #define QLA_DRIVER_BETA_VER 0 |
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index fe415ec85655..1b667a70cffa 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h | |||
@@ -216,6 +216,7 @@ union external_hw_config_reg { | |||
216 | #define MBOX_CMD_ABOUT_FW 0x0009 | 216 | #define MBOX_CMD_ABOUT_FW 0x0009 |
217 | #define MBOX_CMD_PING 0x000B | 217 | #define MBOX_CMD_PING 0x000B |
218 | #define MBOX_CMD_LUN_RESET 0x0016 | 218 | #define MBOX_CMD_LUN_RESET 0x0016 |
219 | #define MBOX_CMD_TARGET_WARM_RESET 0x0017 | ||
219 | #define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E | 220 | #define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E |
220 | #define MBOX_CMD_GET_FW_STATUS 0x001F | 221 | #define MBOX_CMD_GET_FW_STATUS 0x001F |
221 | #define MBOX_CMD_SET_ISNS_SERVICE 0x0021 | 222 | #define MBOX_CMD_SET_ISNS_SERVICE 0x0021 |
@@ -677,7 +678,8 @@ struct qla4_marker_entry { | |||
677 | uint32_t system_defined; /* 04-07 */ | 678 | uint32_t system_defined; /* 04-07 */ |
678 | uint16_t target; /* 08-09 */ | 679 | uint16_t target; /* 08-09 */ |
679 | uint16_t modifier; /* 0A-0B */ | 680 | uint16_t modifier; /* 0A-0B */ |
680 | #define MM_LUN_RESET 0 | 681 | #define MM_LUN_RESET 0 |
682 | #define MM_TGT_WARM_RESET 1 | ||
681 | 683 | ||
682 | uint16_t flags; /* 0C-0D */ | 684 | uint16_t flags; /* 0C-0D */ |
683 | uint16_t reserved1; /* 0E-0F */ | 685 | uint16_t reserved1; /* 0E-0F */ |
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index a3608e028bf6..96ebfb021f6c 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h | |||
@@ -27,6 +27,8 @@ int qla4xxx_relogin_device(struct scsi_qla_host * ha, | |||
27 | struct ddb_entry * ddb_entry); | 27 | struct ddb_entry * ddb_entry); |
28 | int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry, | 28 | int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry, |
29 | int lun); | 29 | int lun); |
30 | int qla4xxx_reset_target(struct scsi_qla_host * ha, | ||
31 | struct ddb_entry * ddb_entry); | ||
30 | int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr, | 32 | int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr, |
31 | uint32_t offset, uint32_t len); | 33 | uint32_t offset, uint32_t len); |
32 | int qla4xxx_get_firmware_status(struct scsi_qla_host * ha); | 34 | int qla4xxx_get_firmware_status(struct scsi_qla_host * ha); |
@@ -68,6 +70,8 @@ int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha); | |||
68 | int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha, | 70 | int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha, |
69 | uint32_t fw_ddb_index, uint32_t state); | 71 | uint32_t fw_ddb_index, uint32_t state); |
70 | void qla4xxx_dump_buffer(void *b, uint32_t size); | 72 | void qla4xxx_dump_buffer(void *b, uint32_t size); |
73 | int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha, | ||
74 | struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod); | ||
71 | 75 | ||
72 | extern int ql4xextended_error_logging; | 76 | extern int ql4xextended_error_logging; |
73 | extern int ql4xdiscoverywait; | 77 | extern int ql4xdiscoverywait; |
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c index e4461b5d767a..912a67494adf 100644 --- a/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/drivers/scsi/qla4xxx/ql4_iocb.c | |||
@@ -66,8 +66,8 @@ static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha, | |||
66 | * | 66 | * |
67 | * This routine issues a marker IOCB. | 67 | * This routine issues a marker IOCB. |
68 | **/ | 68 | **/ |
69 | static int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha, | 69 | int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha, |
70 | struct ddb_entry *ddb_entry, int lun) | 70 | struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod) |
71 | { | 71 | { |
72 | struct qla4_marker_entry *marker_entry; | 72 | struct qla4_marker_entry *marker_entry; |
73 | unsigned long flags = 0; | 73 | unsigned long flags = 0; |
@@ -87,7 +87,7 @@ static int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha, | |||
87 | marker_entry->hdr.entryType = ET_MARKER; | 87 | marker_entry->hdr.entryType = ET_MARKER; |
88 | marker_entry->hdr.entryCount = 1; | 88 | marker_entry->hdr.entryCount = 1; |
89 | marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index); | 89 | marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index); |
90 | marker_entry->modifier = cpu_to_le16(MM_LUN_RESET); | 90 | marker_entry->modifier = cpu_to_le16(mrkr_mod); |
91 | int_to_scsilun(lun, &marker_entry->lun); | 91 | int_to_scsilun(lun, &marker_entry->lun); |
92 | wmb(); | 92 | wmb(); |
93 | 93 | ||
@@ -210,14 +210,6 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb) | |||
210 | /* Get real lun and adapter */ | 210 | /* Get real lun and adapter */ |
211 | ddb_entry = srb->ddb; | 211 | ddb_entry = srb->ddb; |
212 | 212 | ||
213 | /* Send marker(s) if needed. */ | ||
214 | if (ha->marker_needed == 1) { | ||
215 | if (qla4xxx_send_marker_iocb(ha, ddb_entry, | ||
216 | cmd->device->lun) != QLA_SUCCESS) | ||
217 | return QLA_ERROR; | ||
218 | |||
219 | ha->marker_needed = 0; | ||
220 | } | ||
221 | tot_dsds = 0; | 213 | tot_dsds = 0; |
222 | 214 | ||
223 | /* Acquire hardware specific lock */ | 215 | /* Acquire hardware specific lock */ |
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index fc84db4069f4..a91a57c57bff 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c | |||
@@ -11,28 +11,6 @@ | |||
11 | #include "ql4_inline.h" | 11 | #include "ql4_inline.h" |
12 | 12 | ||
13 | /** | 13 | /** |
14 | * qla2x00_process_completed_request() - Process a Fast Post response. | ||
15 | * @ha: SCSI driver HA context | ||
16 | * @index: SRB index | ||
17 | **/ | ||
18 | static void qla4xxx_process_completed_request(struct scsi_qla_host *ha, | ||
19 | uint32_t index) | ||
20 | { | ||
21 | struct srb *srb; | ||
22 | |||
23 | srb = qla4xxx_del_from_active_array(ha, index); | ||
24 | if (srb) { | ||
25 | /* Save ISP completion status */ | ||
26 | srb->cmd->result = DID_OK << 16; | ||
27 | qla4xxx_srb_compl(ha, srb); | ||
28 | } else { | ||
29 | DEBUG2(printk("scsi%ld: Invalid ISP SCSI completion handle = " | ||
30 | "%d\n", ha->host_no, index)); | ||
31 | set_bit(DPC_RESET_HA, &ha->dpc_flags); | ||
32 | } | ||
33 | } | ||
34 | |||
35 | /** | ||
36 | * qla4xxx_status_entry - processes status IOCBs | 14 | * qla4xxx_status_entry - processes status IOCBs |
37 | * @ha: Pointer to host adapter structure. | 15 | * @ha: Pointer to host adapter structure. |
38 | * @sts_entry: Pointer to status entry structure. | 16 | * @sts_entry: Pointer to status entry structure. |
@@ -47,14 +25,6 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
47 | uint32_t residual; | 25 | uint32_t residual; |
48 | uint16_t sensebytecnt; | 26 | uint16_t sensebytecnt; |
49 | 27 | ||
50 | if (sts_entry->completionStatus == SCS_COMPLETE && | ||
51 | sts_entry->scsiStatus == 0) { | ||
52 | qla4xxx_process_completed_request(ha, | ||
53 | le32_to_cpu(sts_entry-> | ||
54 | handle)); | ||
55 | return; | ||
56 | } | ||
57 | |||
58 | srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle)); | 28 | srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle)); |
59 | if (!srb) { | 29 | if (!srb) { |
60 | /* FIXMEdg: Don't we need to reset ISP in this case??? */ | 30 | /* FIXMEdg: Don't we need to reset ISP in this case??? */ |
@@ -62,6 +32,9 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
62 | "handle 0x%x, sp=%p. This cmd may have already " | 32 | "handle 0x%x, sp=%p. This cmd may have already " |
63 | "been completed.\n", ha->host_no, __func__, | 33 | "been completed.\n", ha->host_no, __func__, |
64 | le32_to_cpu(sts_entry->handle), srb)); | 34 | le32_to_cpu(sts_entry->handle), srb)); |
35 | dev_warn(&ha->pdev->dev, "%s invalid status entry:" | ||
36 | " handle=0x%0x\n", __func__, sts_entry->handle); | ||
37 | set_bit(DPC_RESET_HA, &ha->dpc_flags); | ||
65 | return; | 38 | return; |
66 | } | 39 | } |
67 | 40 | ||
@@ -88,10 +61,6 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
88 | scsi_status = sts_entry->scsiStatus; | 61 | scsi_status = sts_entry->scsiStatus; |
89 | switch (sts_entry->completionStatus) { | 62 | switch (sts_entry->completionStatus) { |
90 | case SCS_COMPLETE: | 63 | case SCS_COMPLETE: |
91 | if (scsi_status == 0) { | ||
92 | cmd->result = DID_OK << 16; | ||
93 | break; | ||
94 | } | ||
95 | 64 | ||
96 | if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) { | 65 | if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) { |
97 | cmd->result = DID_ERROR << 16; | 66 | cmd->result = DID_ERROR << 16; |
@@ -100,7 +69,8 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
100 | 69 | ||
101 | if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) { | 70 | if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) { |
102 | scsi_set_resid(cmd, residual); | 71 | scsi_set_resid(cmd, residual); |
103 | if ((scsi_bufflen(cmd) - residual) < cmd->underflow) { | 72 | if (!scsi_status && ((scsi_bufflen(cmd) - residual) < |
73 | cmd->underflow)) { | ||
104 | 74 | ||
105 | cmd->result = DID_ERROR << 16; | 75 | cmd->result = DID_ERROR << 16; |
106 | 76 | ||
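
With the fast-post shortcut removed, every qla4xxx completion now flows through the SCS_COMPLETE case, and a residual under-run is only turned into DID_ERROR when the target returned no SCSI status and fewer bytes than the command's underflow limit were transferred. A toy predicate capturing the new condition (field names invented, not the driver's structures) looks like this:

#include <stdbool.h>
#include <stdio.h>

/* The under-run rule after this change: only a transfer that is both
 * status-free and shorter than the underflow limit fails the command. */
struct io_result {
        unsigned int bufflen;       /* bytes requested */
        unsigned int residual;      /* bytes not transferred */
        unsigned int underflow;     /* minimum acceptable transfer */
        unsigned char scsi_status;  /* 0 = GOOD */
};

static bool underrun_is_error(const struct io_result *r)
{
        return r->scsi_status == 0 &&
               (r->bufflen - r->residual) < r->underflow;
}

int main(void)
{
        struct io_result with_status = { 4096, 3584, 1024, 0x02 };
        struct io_result no_status   = { 4096, 3584, 1024, 0x00 };

        /* CHECK CONDITION case: sense handling decides, not the under-run. */
        printf("status 0x02: error=%d\n", underrun_is_error(&with_status));
        /* No status and only 512 of 4096 bytes moved: fail the command. */
        printf("status 0x00: error=%d\n", underrun_is_error(&no_status));
        return 0;
}
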
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 35cd73c72a68..c577d79bd7e8 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c | |||
@@ -713,6 +713,45 @@ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry, | |||
713 | return status; | 713 | return status; |
714 | } | 714 | } |
715 | 715 | ||
716 | /** | ||
717 | * qla4xxx_reset_target - issues a target warm reset | ||
718 | * @ha: Pointer to host adapter structure. | ||
719 | * @ddb_entry: Pointer to the device database entry | ||
720 | * of the target to be reset | ||
721 | * | ||
722 | * This routine performs a TARGET RESET on the specified target. | ||
723 | * The caller must ensure that the ddb_entry pointer | ||
724 | * is valid before calling this routine. | ||
725 | **/ | ||
726 | int qla4xxx_reset_target(struct scsi_qla_host *ha, | ||
727 | struct ddb_entry *ddb_entry) | ||
728 | { | ||
729 | uint32_t mbox_cmd[MBOX_REG_COUNT]; | ||
730 | uint32_t mbox_sts[MBOX_REG_COUNT]; | ||
731 | int status = QLA_SUCCESS; | ||
732 | |||
733 | DEBUG2(printk("scsi%ld:%d: target reset issued\n", ha->host_no, | ||
734 | ddb_entry->os_target_id)); | ||
735 | |||
736 | /* | ||
737 | * Send target reset command to ISP, so that the ISP will return all | ||
738 | * outstanding requests with RESET status | ||
739 | */ | ||
740 | memset(&mbox_cmd, 0, sizeof(mbox_cmd)); | ||
741 | memset(&mbox_sts, 0, sizeof(mbox_sts)); | ||
742 | |||
743 | mbox_cmd[0] = MBOX_CMD_TARGET_WARM_RESET; | ||
744 | mbox_cmd[1] = ddb_entry->fw_ddb_index; | ||
745 | mbox_cmd[5] = 0x01; /* Immediate Command Enable */ | ||
746 | |||
747 | qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], | ||
748 | &mbox_sts[0]); | ||
749 | if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE && | ||
750 | mbox_sts[0] != MBOX_STS_COMMAND_ERROR) | ||
751 | status = QLA_ERROR; | ||
752 | |||
753 | return status; | ||
754 | } | ||
716 | 755 | ||
717 | int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr, | 756 | int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr, |
718 | uint32_t offset, uint32_t len) | 757 | uint32_t offset, uint32_t len) |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 8b92f348f02c..0c786944d2c2 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -71,6 +71,7 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session); | |||
71 | static int qla4xxx_queuecommand(struct scsi_cmnd *cmd, | 71 | static int qla4xxx_queuecommand(struct scsi_cmnd *cmd, |
72 | void (*done) (struct scsi_cmnd *)); | 72 | void (*done) (struct scsi_cmnd *)); |
73 | static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); | 73 | static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); |
74 | static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); | ||
74 | static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); | 75 | static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); |
75 | static int qla4xxx_slave_alloc(struct scsi_device *device); | 76 | static int qla4xxx_slave_alloc(struct scsi_device *device); |
76 | static int qla4xxx_slave_configure(struct scsi_device *device); | 77 | static int qla4xxx_slave_configure(struct scsi_device *device); |
@@ -84,6 +85,7 @@ static struct scsi_host_template qla4xxx_driver_template = { | |||
84 | .queuecommand = qla4xxx_queuecommand, | 85 | .queuecommand = qla4xxx_queuecommand, |
85 | 86 | ||
86 | .eh_device_reset_handler = qla4xxx_eh_device_reset, | 87 | .eh_device_reset_handler = qla4xxx_eh_device_reset, |
88 | .eh_target_reset_handler = qla4xxx_eh_target_reset, | ||
87 | .eh_host_reset_handler = qla4xxx_eh_host_reset, | 89 | .eh_host_reset_handler = qla4xxx_eh_host_reset, |
88 | 90 | ||
89 | .slave_configure = qla4xxx_slave_configure, | 91 | .slave_configure = qla4xxx_slave_configure, |
@@ -1482,7 +1484,7 @@ static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha) | |||
1482 | } | 1484 | } |
1483 | 1485 | ||
1484 | /** | 1486 | /** |
1485 | * qla4xxx_eh_wait_for_active_target_commands - wait for active cmds to finish. | 1487 | * qla4xxx_eh_wait_for_commands - wait for active cmds to finish. |
1486 | * @ha: pointer to the HBA | 1488 | * @ha: pointer to the HBA |
1487 | * @t: target id | 1489 | * @t: target id |
1488 | * @l: lun id | 1490 | * @l: lun id |
@@ -1490,20 +1492,22 @@ static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha) | |||
1490 | * This function waits for all outstanding commands to a lun to complete. It | 1492 | * This function waits for all outstanding commands to a lun to complete. It |
1491 | * returns 0 if all pending commands are returned and 1 otherwise. | 1493 | * returns 0 if all pending commands are returned and 1 otherwise. |
1492 | **/ | 1494 | **/ |
1493 | static int qla4xxx_eh_wait_for_active_target_commands(struct scsi_qla_host *ha, | 1495 | static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha, |
1494 | int t, int l) | 1496 | struct scsi_target *stgt, |
1497 | struct scsi_device *sdev) | ||
1495 | { | 1498 | { |
1496 | int cnt; | 1499 | int cnt; |
1497 | int status = 0; | 1500 | int status = 0; |
1498 | struct scsi_cmnd *cmd; | 1501 | struct scsi_cmnd *cmd; |
1499 | 1502 | ||
1500 | /* | 1503 | /* |
1501 | * Waiting for all commands for the designated target in the active | 1504 | * Waiting for all commands for the designated target or dev |
1502 | * array | 1505 | * in the active array |
1503 | */ | 1506 | */ |
1504 | for (cnt = 0; cnt < ha->host->can_queue; cnt++) { | 1507 | for (cnt = 0; cnt < ha->host->can_queue; cnt++) { |
1505 | cmd = scsi_host_find_tag(ha->host, cnt); | 1508 | cmd = scsi_host_find_tag(ha->host, cnt); |
1506 | if (cmd && cmd->device->id == t && cmd->device->lun == l) { | 1509 | if (cmd && stgt == scsi_target(cmd->device) && |
1510 | (!sdev || sdev == cmd->device)) { | ||
1507 | if (!qla4xxx_eh_wait_on_command(ha, cmd)) { | 1511 | if (!qla4xxx_eh_wait_on_command(ha, cmd)) { |
1508 | status++; | 1512 | status++; |
1509 | break; | 1513 | break; |
@@ -1548,24 +1552,19 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) | |||
1548 | goto eh_dev_reset_done; | 1552 | goto eh_dev_reset_done; |
1549 | } | 1553 | } |
1550 | 1554 | ||
1551 | /* Send marker. */ | 1555 | if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), |
1552 | ha->marker_needed = 1; | 1556 | cmd->device)) { |
1553 | 1557 | dev_info(&ha->pdev->dev, | |
1554 | /* | 1558 | "DEVICE RESET FAILED - waiting for " |
1555 | * If we are coming down the EH path, wait for all commands to complete | 1559 | "commands.\n"); |
1556 | * for the device. | 1560 | goto eh_dev_reset_done; |
1557 | */ | ||
1558 | if (cmd->device->host->shost_state == SHOST_RECOVERY) { | ||
1559 | if (qla4xxx_eh_wait_for_active_target_commands(ha, | ||
1560 | cmd->device->id, | ||
1561 | cmd->device->lun)){ | ||
1562 | dev_info(&ha->pdev->dev, | ||
1563 | "DEVICE RESET FAILED - waiting for " | ||
1564 | "commands.\n"); | ||
1565 | goto eh_dev_reset_done; | ||
1566 | } | ||
1567 | } | 1561 | } |
1568 | 1562 | ||
1563 | /* Send marker. */ | ||
1564 | if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, | ||
1565 | MM_LUN_RESET) != QLA_SUCCESS) | ||
1566 | goto eh_dev_reset_done; | ||
1567 | |||
1569 | dev_info(&ha->pdev->dev, | 1568 | dev_info(&ha->pdev->dev, |
1570 | "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n", | 1569 | "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n", |
1571 | ha->host_no, cmd->device->channel, cmd->device->id, | 1570 | ha->host_no, cmd->device->channel, cmd->device->id, |
@@ -1579,6 +1578,59 @@ eh_dev_reset_done: | |||
1579 | } | 1578 | } |
1580 | 1579 | ||
1581 | /** | 1580 | /** |
1581 | * qla4xxx_eh_target_reset - callback for target reset. | ||
1582 | * @cmd: Pointer to Linux's SCSI command structure | ||
1583 | * | ||
1584 | * This routine is called by the Linux OS to reset the target. | ||
1585 | **/ | ||
1586 | static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) | ||
1587 | { | ||
1588 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); | ||
1589 | struct ddb_entry *ddb_entry = cmd->device->hostdata; | ||
1590 | int stat; | ||
1591 | |||
1592 | if (!ddb_entry) | ||
1593 | return FAILED; | ||
1594 | |||
1595 | starget_printk(KERN_INFO, scsi_target(cmd->device), | ||
1596 | "WARM TARGET RESET ISSUED.\n"); | ||
1597 | |||
1598 | DEBUG2(printk(KERN_INFO | ||
1599 | "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " | ||
1600 | "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", | ||
1601 | ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ, | ||
1602 | ha->dpc_flags, cmd->result, cmd->allowed)); | ||
1603 | |||
1604 | stat = qla4xxx_reset_target(ha, ddb_entry); | ||
1605 | if (stat != QLA_SUCCESS) { | ||
1606 | starget_printk(KERN_INFO, scsi_target(cmd->device), | ||
1607 | "WARM TARGET RESET FAILED.\n"); | ||
1608 | return FAILED; | ||
1609 | } | ||
1610 | |||
1611 | if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), | ||
1612 | NULL)) { | ||
1613 | starget_printk(KERN_INFO, scsi_target(cmd->device), | ||
1614 | "WARM TARGET DEVICE RESET FAILED - " | ||
1615 | "waiting for commands.\n"); | ||
1616 | return FAILED; | ||
1617 | } | ||
1618 | |||
1619 | /* Send marker. */ | ||
1620 | if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, | ||
1621 | MM_TGT_WARM_RESET) != QLA_SUCCESS) { | ||
1622 | starget_printk(KERN_INFO, scsi_target(cmd->device), | ||
1623 | "WARM TARGET DEVICE RESET FAILED - " | ||
1624 | "marker iocb failed.\n"); | ||
1625 | return FAILED; | ||
1626 | } | ||
1627 | |||
1628 | starget_printk(KERN_INFO, scsi_target(cmd->device), | ||
1629 | "WARM TARGET RESET SUCCEEDED.\n"); | ||
1630 | return SUCCESS; | ||
1631 | } | ||
1632 | |||
1633 | /** | ||
1582 | * qla4xxx_eh_host_reset - kernel callback | 1634 | * qla4xxx_eh_host_reset - kernel callback |
1583 | * @cmd: Pointer to Linux's SCSI command structure | 1635 | * @cmd: Pointer to Linux's SCSI command structure |
1584 | * | 1636 | * |
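
qla4xxx_eh_wait_for_commands() now takes the scsi_target and an optional scsi_device, so the same scan over the host's tag space serves both the device reset path (target and LUN must match) and the new target reset path (any LUN on the target counts). The toy model below illustrates only that matching rule; the structures and the boolean return are invented, and the real routine waits on each matching command via qla4xxx_eh_wait_on_command() rather than counting:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Invented stand-in for an active command slot. */
struct fake_cmd { int target; int lun; bool done; };

static bool ready_for_reset(struct fake_cmd *ring, size_t can_queue,
                            int target, int lun /* -1 = whole target */)
{
        size_t i;
        int pending = 0;

        for (i = 0; i < can_queue; i++) {
                struct fake_cmd *c = &ring[i];

                if (c->done || c->target != target)
                        continue;
                if (lun >= 0 && c->lun != lun)
                        continue;
                pending++;      /* the driver would wait on this command */
        }
        return pending == 0;    /* nothing outstanding: reset may proceed */
}

int main(void)
{
        struct fake_cmd ring[4] = {
                { .target = 1, .lun = 0, .done = true  },
                { .target = 1, .lun = 2, .done = false },
                { .target = 3, .lun = 0, .done = false },
                { .target = 1, .lun = 0, .done = true  },
        };

        printf("device reset (t1,l0) ok: %d\n",
               ready_for_reset(ring, 4, 1, 0));    /* lun 2 cmd ignored */
        printf("target reset (t1) ok:    %d\n",
               ready_for_reset(ring, 4, 1, -1));   /* lun 2 cmd blocks  */
        return 0;
}
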
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c index 86e13183c9ba..52182a744ba6 100644 --- a/drivers/scsi/raid_class.c +++ b/drivers/scsi/raid_class.c | |||
@@ -289,7 +289,7 @@ raid_class_release(struct raid_template *r) | |||
289 | { | 289 | { |
290 | struct raid_internal *i = to_raid_internal(r); | 290 | struct raid_internal *i = to_raid_internal(r); |
291 | 291 | ||
292 | attribute_container_unregister(&i->r.raid_attrs.ac); | 292 | BUG_ON(attribute_container_unregister(&i->r.raid_attrs.ac)); |
293 | 293 | ||
294 | kfree(i); | 294 | kfree(i); |
295 | } | 295 | } |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index c78b836f59dd..f6980bd9d8f9 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -166,6 +166,51 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = { | |||
166 | static DEFINE_MUTEX(host_cmd_pool_mutex); | 166 | static DEFINE_MUTEX(host_cmd_pool_mutex); |
167 | 167 | ||
168 | /** | 168 | /** |
169 | * scsi_pool_alloc_command - internal function to get a fully allocated command | ||
170 | * @pool: slab pool to allocate the command from | ||
171 | * @gfp_mask: mask for the allocation | ||
172 | * | ||
173 | * Returns a fully allocated command (with the associated sense buffer) or | ||
174 | * NULL on failure | ||
175 | */ | ||
176 | static struct scsi_cmnd * | ||
177 | scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask) | ||
178 | { | ||
179 | struct scsi_cmnd *cmd; | ||
180 | |||
181 | cmd = kmem_cache_alloc(pool->cmd_slab, gfp_mask | pool->gfp_mask); | ||
182 | if (!cmd) | ||
183 | return NULL; | ||
184 | |||
185 | memset(cmd, 0, sizeof(*cmd)); | ||
186 | |||
187 | cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab, | ||
188 | gfp_mask | pool->gfp_mask); | ||
189 | if (!cmd->sense_buffer) { | ||
190 | kmem_cache_free(pool->cmd_slab, cmd); | ||
191 | return NULL; | ||
192 | } | ||
193 | |||
194 | return cmd; | ||
195 | } | ||
196 | |||
197 | /** | ||
198 | * scsi_pool_free_command - internal function to release a command | ||
199 | * @pool: slab pool to return the command to | ||
200 | * @cmd: command to release | ||
201 | * | ||
202 | * The command must previously have been allocated by | ||
203 | * scsi_pool_alloc_command. | ||
204 | */ | ||
205 | static void | ||
206 | scsi_pool_free_command(struct scsi_host_cmd_pool *pool, | ||
207 | struct scsi_cmnd *cmd) | ||
208 | { | ||
209 | kmem_cache_free(pool->sense_slab, cmd->sense_buffer); | ||
210 | kmem_cache_free(pool->cmd_slab, cmd); | ||
211 | } | ||
212 | |||
213 | /** | ||
169 | * __scsi_get_command - Allocate a struct scsi_cmnd | 214 | * __scsi_get_command - Allocate a struct scsi_cmnd |
170 | * @shost: host to transmit command | 215 | * @shost: host to transmit command |
171 | * @gfp_mask: allocation mask | 216 | * @gfp_mask: allocation mask |
@@ -178,20 +223,7 @@ struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask) | |||
178 | struct scsi_cmnd *cmd; | 223 | struct scsi_cmnd *cmd; |
179 | unsigned char *buf; | 224 | unsigned char *buf; |
180 | 225 | ||
181 | cmd = kmem_cache_alloc(shost->cmd_pool->cmd_slab, | 226 | cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask); |
182 | gfp_mask | shost->cmd_pool->gfp_mask); | ||
183 | |||
184 | if (likely(cmd)) { | ||
185 | buf = kmem_cache_alloc(shost->cmd_pool->sense_slab, | ||
186 | gfp_mask | shost->cmd_pool->gfp_mask); | ||
187 | if (likely(buf)) { | ||
188 | memset(cmd, 0, sizeof(*cmd)); | ||
189 | cmd->sense_buffer = buf; | ||
190 | } else { | ||
191 | kmem_cache_free(shost->cmd_pool->cmd_slab, cmd); | ||
192 | cmd = NULL; | ||
193 | } | ||
194 | } | ||
195 | 227 | ||
196 | if (unlikely(!cmd)) { | 228 | if (unlikely(!cmd)) { |
197 | unsigned long flags; | 229 | unsigned long flags; |
@@ -268,11 +300,8 @@ void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd, | |||
268 | } | 300 | } |
269 | spin_unlock_irqrestore(&shost->free_list_lock, flags); | 301 | spin_unlock_irqrestore(&shost->free_list_lock, flags); |
270 | 302 | ||
271 | if (likely(cmd != NULL)) { | 303 | if (likely(cmd != NULL)) |
272 | kmem_cache_free(shost->cmd_pool->sense_slab, | 304 | scsi_pool_free_command(shost->cmd_pool, cmd); |
273 | cmd->sense_buffer); | ||
274 | kmem_cache_free(shost->cmd_pool->cmd_slab, cmd); | ||
275 | } | ||
276 | 305 | ||
277 | put_device(dev); | 306 | put_device(dev); |
278 | } | 307 | } |
@@ -301,30 +330,16 @@ void scsi_put_command(struct scsi_cmnd *cmd) | |||
301 | } | 330 | } |
302 | EXPORT_SYMBOL(scsi_put_command); | 331 | EXPORT_SYMBOL(scsi_put_command); |
303 | 332 | ||
304 | /** | 333 | static struct scsi_host_cmd_pool *scsi_get_host_cmd_pool(gfp_t gfp_mask) |
305 | * scsi_setup_command_freelist - Setup the command freelist for a scsi host. | ||
306 | * @shost: host to allocate the freelist for. | ||
307 | * | ||
308 | * Description: The command freelist protects against system-wide out of memory | ||
309 | * deadlock by preallocating one SCSI command structure for each host, so the | ||
310 | * system can always write to a swap file on a device associated with that host. | ||
311 | * | ||
312 | * Returns: Nothing. | ||
313 | */ | ||
314 | int scsi_setup_command_freelist(struct Scsi_Host *shost) | ||
315 | { | 334 | { |
316 | struct scsi_host_cmd_pool *pool; | 335 | struct scsi_host_cmd_pool *retval = NULL, *pool; |
317 | struct scsi_cmnd *cmd; | ||
318 | |||
319 | spin_lock_init(&shost->free_list_lock); | ||
320 | INIT_LIST_HEAD(&shost->free_list); | ||
321 | |||
322 | /* | 336 | /* |
323 | * Select a command slab for this host and create it if not | 337 | * Select a command slab for this host and create it if not |
324 | * yet existent. | 338 | * yet existent. |
325 | */ | 339 | */ |
326 | mutex_lock(&host_cmd_pool_mutex); | 340 | mutex_lock(&host_cmd_pool_mutex); |
327 | pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool); | 341 | pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool : |
342 | &scsi_cmd_pool; | ||
328 | if (!pool->users) { | 343 | if (!pool->users) { |
329 | pool->cmd_slab = kmem_cache_create(pool->cmd_name, | 344 | pool->cmd_slab = kmem_cache_create(pool->cmd_name, |
330 | sizeof(struct scsi_cmnd), 0, | 345 | sizeof(struct scsi_cmnd), 0, |
@@ -342,37 +357,122 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost) | |||
342 | } | 357 | } |
343 | 358 | ||
344 | pool->users++; | 359 | pool->users++; |
345 | shost->cmd_pool = pool; | 360 | retval = pool; |
361 | fail: | ||
346 | mutex_unlock(&host_cmd_pool_mutex); | 362 | mutex_unlock(&host_cmd_pool_mutex); |
363 | return retval; | ||
364 | } | ||
365 | |||
366 | static void scsi_put_host_cmd_pool(gfp_t gfp_mask) | ||
367 | { | ||
368 | struct scsi_host_cmd_pool *pool; | ||
347 | 369 | ||
370 | mutex_lock(&host_cmd_pool_mutex); | ||
371 | pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool : | ||
372 | &scsi_cmd_pool; | ||
348 | /* | 373 | /* |
349 | * Get one backup command for this host. | 374 | * This may happen if a driver has a mismatched get and put |
375 | * of the command pool; the driver should be implicated in | ||
376 | * the stack trace | ||
350 | */ | 377 | */ |
351 | cmd = kmem_cache_alloc(shost->cmd_pool->cmd_slab, | 378 | BUG_ON(pool->users == 0); |
352 | GFP_KERNEL | shost->cmd_pool->gfp_mask); | ||
353 | if (!cmd) | ||
354 | goto fail2; | ||
355 | 379 | ||
356 | cmd->sense_buffer = kmem_cache_alloc(shost->cmd_pool->sense_slab, | ||
357 | GFP_KERNEL | | ||
358 | shost->cmd_pool->gfp_mask); | ||
359 | if (!cmd->sense_buffer) | ||
360 | goto fail2; | ||
361 | |||
362 | list_add(&cmd->list, &shost->free_list); | ||
363 | return 0; | ||
364 | |||
365 | fail2: | ||
366 | if (cmd) | ||
367 | kmem_cache_free(shost->cmd_pool->cmd_slab, cmd); | ||
368 | mutex_lock(&host_cmd_pool_mutex); | ||
369 | if (!--pool->users) { | 380 | if (!--pool->users) { |
370 | kmem_cache_destroy(pool->cmd_slab); | 381 | kmem_cache_destroy(pool->cmd_slab); |
371 | kmem_cache_destroy(pool->sense_slab); | 382 | kmem_cache_destroy(pool->sense_slab); |
372 | } | 383 | } |
373 | fail: | ||
374 | mutex_unlock(&host_cmd_pool_mutex); | 384 | mutex_unlock(&host_cmd_pool_mutex); |
375 | return -ENOMEM; | 385 | } |
386 | |||
387 | /** | ||
388 | * scsi_allocate_command - get a fully allocated SCSI command | ||
389 | * @gfp_mask: allocation mask | ||
390 | * | ||
391 | * This function is for use outside of the normal host based pools. | ||
392 | * It allocates the relevant command and takes an additional reference | ||
393 | * on the pool it used. This function *must* be paired with | ||
394 | * scsi_free_command called with the identical mask; otherwise the | ||
395 | * free pool counts will eventually go wrong and you'll trigger a bug. | ||
396 | * | ||
397 | * This function should *only* be used by drivers that need a static | ||
398 | * command allocation at start of day for internal functions. | ||
399 | */ | ||
400 | struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask) | ||
401 | { | ||
402 | struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask); | ||
403 | |||
404 | if (!pool) | ||
405 | return NULL; | ||
406 | |||
407 | return scsi_pool_alloc_command(pool, gfp_mask); | ||
408 | } | ||
409 | EXPORT_SYMBOL(scsi_allocate_command); | ||
410 | |||
411 | /** | ||
412 | * scsi_free_command - free a command allocated by scsi_allocate_command | ||
413 | * @gfp_mask: mask used in the original allocation | ||
414 | * @cmd: command to free | ||
415 | * | ||
416 | * Note: using the original allocation mask is vital because that's | ||
417 | * what determines which command pool we use to free the command. Any | ||
418 | * mismatch will cause the system to BUG eventually. | ||
419 | */ | ||
420 | void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd) | ||
421 | { | ||
422 | struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask); | ||
423 | |||
424 | /* | ||
425 | * this could trigger if the mask to scsi_allocate_command | ||
426 | * doesn't match this mask. Otherwise we're guaranteed that this | ||
427 | * succeeds because scsi_allocate_command must have taken a reference | ||
428 | * on the pool | ||
429 | */ | ||
430 | BUG_ON(!pool); | ||
431 | |||
432 | scsi_pool_free_command(pool, cmd); | ||
433 | /* | ||
434 | * scsi_put_host_cmd_pool is called twice; once to release the | ||
435 | * reference we took above, and once to release the reference | ||
436 | * originally taken by scsi_allocate_command | ||
437 | */ | ||
438 | scsi_put_host_cmd_pool(gfp_mask); | ||
439 | scsi_put_host_cmd_pool(gfp_mask); | ||
440 | } | ||
441 | EXPORT_SYMBOL(scsi_free_command); | ||
442 | |||
443 | /** | ||
444 | * scsi_setup_command_freelist - Setup the command freelist for a scsi host. | ||
445 | * @shost: host to allocate the freelist for. | ||
446 | * | ||
447 | * Description: The command freelist protects against system-wide out of memory | ||
448 | * deadlock by preallocating one SCSI command structure for each host, so the | ||
449 | * system can always write to a swap file on a device associated with that host. | ||
450 | * | ||
451 | * Returns: Nothing. | ||
452 | */ | ||
453 | int scsi_setup_command_freelist(struct Scsi_Host *shost) | ||
454 | { | ||
455 | struct scsi_cmnd *cmd; | ||
456 | const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL; | ||
457 | |||
458 | spin_lock_init(&shost->free_list_lock); | ||
459 | INIT_LIST_HEAD(&shost->free_list); | ||
460 | |||
461 | shost->cmd_pool = scsi_get_host_cmd_pool(gfp_mask); | ||
462 | |||
463 | if (!shost->cmd_pool) | ||
464 | return -ENOMEM; | ||
465 | |||
466 | /* | ||
467 | * Get one backup command for this host. | ||
468 | */ | ||
469 | cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask); | ||
470 | if (!cmd) { | ||
471 | scsi_put_host_cmd_pool(gfp_mask); | ||
472 | return -ENOMEM; | ||
473 | } | ||
474 | list_add(&cmd->list, &shost->free_list); | ||
475 | return 0; | ||
376 | } | 476 | } |
377 | 477 | ||
378 | /** | 478 | /** |
@@ -386,17 +486,10 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost) | |||
386 | 486 | ||
387 | cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list); | 487 | cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list); |
388 | list_del_init(&cmd->list); | 488 | list_del_init(&cmd->list); |
389 | kmem_cache_free(shost->cmd_pool->sense_slab, | 489 | scsi_pool_free_command(shost->cmd_pool, cmd); |
390 | cmd->sense_buffer); | ||
391 | kmem_cache_free(shost->cmd_pool->cmd_slab, cmd); | ||
392 | } | 490 | } |
393 | 491 | shost->cmd_pool = NULL; | |
394 | mutex_lock(&host_cmd_pool_mutex); | 492 | scsi_put_host_cmd_pool(shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL); |
395 | if (!--shost->cmd_pool->users) { | ||
396 | kmem_cache_destroy(shost->cmd_pool->cmd_slab); | ||
397 | kmem_cache_destroy(shost->cmd_pool->sense_slab); | ||
398 | } | ||
399 | mutex_unlock(&host_cmd_pool_mutex); | ||
400 | } | 493 | } |
401 | 494 | ||
402 | #ifdef CONFIG_SCSI_LOGGING | 495 | #ifdef CONFIG_SCSI_LOGGING |
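
The reworked scsi.c path selects a command pool purely by the allocation mask, so scsi_allocate_command() and scsi_free_command() must be called with the identical mask: the free side looks the pool up again (taking its own reference) and then drops two references, the lookup's and the one taken at allocation time. The userspace toy below models only that refcount shape; the names are invented and it is not the kernel API:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct pool { const char *name; int users; };

static struct pool normal_pool = { "normal", 0 };
static struct pool dma_pool    = { "dma",    0 };

static struct pool *pool_get(int want_dma)
{
        struct pool *p = want_dma ? &dma_pool : &normal_pool;

        p->users++;
        return p;
}

static void pool_put(int want_dma)
{
        struct pool *p = want_dma ? &dma_pool : &normal_pool;

        assert(p->users > 0);       /* mismatched get/put trips here */
        p->users--;
}

static void *cmd_alloc(int want_dma)
{
        pool_get(want_dma);         /* reference held by the command */
        return malloc(16);
}

static void cmd_free(int want_dma, void *cmd)
{
        struct pool *p = pool_get(want_dma);    /* reference for the lookup */

        free(cmd);
        pool_put(want_dma);         /* drop the lookup reference ... */
        pool_put(want_dma);         /* ... and the one from cmd_alloc() */
        printf("%s pool users now %d\n", p->name, p->users);
}

int main(void)
{
        void *c = cmd_alloc(0);

        cmd_free(0, c);             /* must use the same flag as the alloc */
        return 0;
}

Passing a different mask to the free side would decrement the wrong pool's user count, which is the mismatch the BUG_ON() in scsi_put_host_cmd_pool() is meant to surface.
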
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index d1777a9a9625..07103c399fe0 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -39,16 +39,18 @@ | |||
39 | #include <linux/vmalloc.h> | 39 | #include <linux/vmalloc.h> |
40 | #include <linux/moduleparam.h> | 40 | #include <linux/moduleparam.h> |
41 | #include <linux/scatterlist.h> | 41 | #include <linux/scatterlist.h> |
42 | |||
43 | #include <linux/blkdev.h> | 42 | #include <linux/blkdev.h> |
44 | #include "scsi.h" | 43 | |
44 | #include <scsi/scsi.h> | ||
45 | #include <scsi/scsi_cmnd.h> | ||
46 | #include <scsi/scsi_device.h> | ||
45 | #include <scsi/scsi_host.h> | 47 | #include <scsi/scsi_host.h> |
46 | #include <scsi/scsicam.h> | 48 | #include <scsi/scsicam.h> |
49 | #include <scsi/scsi_eh.h> | ||
47 | 50 | ||
48 | #include <linux/stat.h> | 51 | #include <linux/stat.h> |
49 | 52 | ||
50 | #include "scsi_logging.h" | 53 | #include "scsi_logging.h" |
51 | #include "scsi_debug.h" | ||
52 | 54 | ||
53 | #define SCSI_DEBUG_VERSION "1.81" | 55 | #define SCSI_DEBUG_VERSION "1.81" |
54 | static const char * scsi_debug_version_date = "20070104"; | 56 | static const char * scsi_debug_version_date = "20070104"; |
@@ -146,7 +148,6 @@ static int scsi_debug_cmnd_count = 0; | |||
146 | #define DEV_READONLY(TGT) (0) | 148 | #define DEV_READONLY(TGT) (0) |
147 | #define DEV_REMOVEABLE(TGT) (0) | 149 | #define DEV_REMOVEABLE(TGT) (0) |
148 | 150 | ||
149 | static unsigned int sdebug_store_size; /* in bytes */ | ||
150 | static unsigned int sdebug_store_sectors; | 151 | static unsigned int sdebug_store_sectors; |
151 | static sector_t sdebug_capacity; /* in sectors */ | 152 | static sector_t sdebug_capacity; /* in sectors */ |
152 | 153 | ||
@@ -165,6 +166,9 @@ static int sdebug_sectors_per; /* sectors per cylinder */ | |||
165 | 166 | ||
166 | #define SDEBUG_SENSE_LEN 32 | 167 | #define SDEBUG_SENSE_LEN 32 |
167 | 168 | ||
169 | #define SCSI_DEBUG_CANQUEUE 255 | ||
170 | #define SCSI_DEBUG_MAX_CMD_LEN 16 | ||
171 | |||
168 | struct sdebug_dev_info { | 172 | struct sdebug_dev_info { |
169 | struct list_head dev_list; | 173 | struct list_head dev_list; |
170 | unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */ | 174 | unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */ |
@@ -202,30 +206,6 @@ struct sdebug_queued_cmd { | |||
202 | }; | 206 | }; |
203 | static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE]; | 207 | static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE]; |
204 | 208 | ||
205 | static struct scsi_host_template sdebug_driver_template = { | ||
206 | .proc_info = scsi_debug_proc_info, | ||
207 | .name = "SCSI DEBUG", | ||
208 | .info = scsi_debug_info, | ||
209 | .slave_alloc = scsi_debug_slave_alloc, | ||
210 | .slave_configure = scsi_debug_slave_configure, | ||
211 | .slave_destroy = scsi_debug_slave_destroy, | ||
212 | .ioctl = scsi_debug_ioctl, | ||
213 | .queuecommand = scsi_debug_queuecommand, | ||
214 | .eh_abort_handler = scsi_debug_abort, | ||
215 | .eh_bus_reset_handler = scsi_debug_bus_reset, | ||
216 | .eh_device_reset_handler = scsi_debug_device_reset, | ||
217 | .eh_host_reset_handler = scsi_debug_host_reset, | ||
218 | .bios_param = scsi_debug_biosparam, | ||
219 | .can_queue = SCSI_DEBUG_CANQUEUE, | ||
220 | .this_id = 7, | ||
221 | .sg_tablesize = 256, | ||
222 | .cmd_per_lun = 16, | ||
223 | .max_sectors = 0xffff, | ||
224 | .unchecked_isa_dma = 0, | ||
225 | .use_clustering = DISABLE_CLUSTERING, | ||
226 | .module = THIS_MODULE, | ||
227 | }; | ||
228 | |||
229 | static unsigned char * fake_storep; /* ramdisk storage */ | 209 | static unsigned char * fake_storep; /* ramdisk storage */ |
230 | 210 | ||
231 | static int num_aborts = 0; | 211 | static int num_aborts = 0; |
@@ -238,8 +218,6 @@ static DEFINE_RWLOCK(atomic_rw); | |||
238 | 218 | ||
239 | static char sdebug_proc_name[] = "scsi_debug"; | 219 | static char sdebug_proc_name[] = "scsi_debug"; |
240 | 220 | ||
241 | static int sdebug_driver_probe(struct device *); | ||
242 | static int sdebug_driver_remove(struct device *); | ||
243 | static struct bus_type pseudo_lld_bus; | 221 | static struct bus_type pseudo_lld_bus; |
244 | 222 | ||
245 | static struct device_driver sdebug_driverfs_driver = { | 223 | static struct device_driver sdebug_driverfs_driver = { |
@@ -255,94 +233,77 @@ static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, | |||
255 | static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, | 233 | static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, |
256 | 0, 0, 0x0, 0x0}; | 234 | 0, 0, 0x0, 0x0}; |
257 | 235 | ||
258 | /* function declarations */ | ||
259 | static int resp_inquiry(struct scsi_cmnd * SCpnt, int target, | ||
260 | struct sdebug_dev_info * devip); | ||
261 | static int resp_requests(struct scsi_cmnd * SCpnt, | ||
262 | struct sdebug_dev_info * devip); | ||
263 | static int resp_start_stop(struct scsi_cmnd * scp, | ||
264 | struct sdebug_dev_info * devip); | ||
265 | static int resp_report_tgtpgs(struct scsi_cmnd * scp, | ||
266 | struct sdebug_dev_info * devip); | ||
267 | static int resp_readcap(struct scsi_cmnd * SCpnt, | ||
268 | struct sdebug_dev_info * devip); | ||
269 | static int resp_readcap16(struct scsi_cmnd * SCpnt, | ||
270 | struct sdebug_dev_info * devip); | ||
271 | static int resp_mode_sense(struct scsi_cmnd * scp, int target, | ||
272 | struct sdebug_dev_info * devip); | ||
273 | static int resp_mode_select(struct scsi_cmnd * scp, int mselect6, | ||
274 | struct sdebug_dev_info * devip); | ||
275 | static int resp_log_sense(struct scsi_cmnd * scp, | ||
276 | struct sdebug_dev_info * devip); | ||
277 | static int resp_read(struct scsi_cmnd * SCpnt, unsigned long long lba, | ||
278 | unsigned int num, struct sdebug_dev_info * devip); | ||
279 | static int resp_write(struct scsi_cmnd * SCpnt, unsigned long long lba, | ||
280 | unsigned int num, struct sdebug_dev_info * devip); | ||
281 | static int resp_report_luns(struct scsi_cmnd * SCpnt, | ||
282 | struct sdebug_dev_info * devip); | ||
283 | static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, | ||
284 | unsigned int num, struct sdebug_dev_info *devip); | ||
285 | static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr, | ||
286 | int arr_len); | ||
287 | static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr, | ||
288 | int max_arr_len); | ||
289 | static void timer_intr_handler(unsigned long); | ||
290 | static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev); | ||
291 | static void mk_sense_buffer(struct sdebug_dev_info * devip, int key, | ||
292 | int asc, int asq); | ||
293 | static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only, | ||
294 | struct sdebug_dev_info * devip); | ||
295 | static int schedule_resp(struct scsi_cmnd * cmnd, | ||
296 | struct sdebug_dev_info * devip, | ||
297 | done_funct_t done, int scsi_result, int delta_jiff); | ||
298 | static void __init sdebug_build_parts(unsigned char * ramp); | ||
299 | static void __init init_all_queued(void); | ||
300 | static void stop_all_queued(void); | ||
301 | static int stop_queued_cmnd(struct scsi_cmnd * cmnd); | ||
302 | static int inquiry_evpd_83(unsigned char * arr, int port_group_id, | ||
303 | int target_dev_id, int dev_id_num, | ||
304 | const char * dev_id_str, int dev_id_str_len); | ||
305 | static int inquiry_evpd_88(unsigned char * arr, int target_dev_id); | ||
306 | static int do_create_driverfs_files(void); | ||
307 | static void do_remove_driverfs_files(void); | ||
308 | |||
309 | static int sdebug_add_adapter(void); | 236 | static int sdebug_add_adapter(void); |
310 | static void sdebug_remove_adapter(void); | 237 | static void sdebug_remove_adapter(void); |
311 | static void sdebug_max_tgts_luns(void); | ||
312 | 238 | ||
313 | static struct device pseudo_primary; | 239 | static void sdebug_max_tgts_luns(void) |
314 | static struct bus_type pseudo_lld_bus; | 240 | { |
241 | struct sdebug_host_info *sdbg_host; | ||
242 | struct Scsi_Host *hpnt; | ||
243 | |||
244 | spin_lock(&sdebug_host_list_lock); | ||
245 | list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { | ||
246 | hpnt = sdbg_host->shost; | ||
247 | if ((hpnt->this_id >= 0) && | ||
248 | (scsi_debug_num_tgts > hpnt->this_id)) | ||
249 | hpnt->max_id = scsi_debug_num_tgts + 1; | ||
250 | else | ||
251 | hpnt->max_id = scsi_debug_num_tgts; | ||
252 | /* scsi_debug_max_luns; */ | ||
253 | hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; | ||
254 | } | ||
255 | spin_unlock(&sdebug_host_list_lock); | ||
256 | } | ||
257 | |||
258 | static void mk_sense_buffer(struct sdebug_dev_info *devip, int key, | ||
259 | int asc, int asq) | ||
260 | { | ||
261 | unsigned char *sbuff; | ||
262 | |||
263 | sbuff = devip->sense_buff; | ||
264 | memset(sbuff, 0, SDEBUG_SENSE_LEN); | ||
265 | |||
266 | scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq); | ||
267 | |||
268 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | ||
269 | printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: " | ||
270 | "[0x%x,0x%x,0x%x]\n", key, asc, asq); | ||
271 | } | ||
315 | 272 | ||
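For reference, the sense-data layouts involved here: the rewritten mk_sense_buffer() above delegates to scsi_build_sense_buffer(), while the removed open-coded version (deleted further down in this diff) filled the same bytes by hand. A minimal userspace sketch of those two layouts, grounded in the removed code and the usual SPC byte offsets (illustrative only, not part of the patch):

/*
 * Illustrative sketch only: the two sense formats the driver emits.
 * Descriptor format (0x72) carries key/asc/ascq in bytes 1-3; fixed
 * format (0x70) uses bytes 2, 12 and 13, with byte 7 set to 0x0a for
 * an 18-byte sense buffer (as in the removed open-coded version).
 */
#include <stdio.h>
#include <string.h>

#define SENSE_LEN 18

static void build_sense(unsigned char *sbuff, int desc_format,
			int key, int asc, int asq)
{
	memset(sbuff, 0, SENSE_LEN);
	if (desc_format) {
		sbuff[0] = 0x72;	/* descriptor, current */
		sbuff[1] = key;
		sbuff[2] = asc;
		sbuff[3] = asq;
	} else {
		sbuff[0] = 0x70;	/* fixed, current */
		sbuff[2] = key;
		sbuff[7] = 0xa;		/* implies 18 byte sense buffer */
		sbuff[12] = asc;
		sbuff[13] = asq;
	}
}

int main(void)
{
	unsigned char sb[SENSE_LEN];
	int k;

	build_sense(sb, 0, 0x5 /* ILLEGAL REQUEST */, 0x24, 0x0);
	for (k = 0; k < SENSE_LEN; ++k)
		printf("%02x ", sb[k]);
	printf("\n");
	return 0;
}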
316 | static void get_data_transfer_info(unsigned char *cmd, | 273 | static void get_data_transfer_info(unsigned char *cmd, |
317 | unsigned long long *lba, unsigned int *num) | 274 | unsigned long long *lba, unsigned int *num) |
318 | { | 275 | { |
319 | int i; | ||
320 | |||
321 | switch (*cmd) { | 276 | switch (*cmd) { |
322 | case WRITE_16: | 277 | case WRITE_16: |
323 | case READ_16: | 278 | case READ_16: |
324 | for (*lba = 0, i = 0; i < 8; ++i) { | 279 | *lba = (u64)cmd[9] | (u64)cmd[8] << 8 | |
325 | if (i > 0) | 280 | (u64)cmd[7] << 16 | (u64)cmd[6] << 24 | |
326 | *lba <<= 8; | 281 | (u64)cmd[5] << 32 | (u64)cmd[4] << 40 | |
327 | *lba += cmd[2 + i]; | 282 | (u64)cmd[3] << 48 | (u64)cmd[2] << 56; |
328 | } | 283 | |
329 | *num = cmd[13] + (cmd[12] << 8) + | 284 | *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 | |
330 | (cmd[11] << 16) + (cmd[10] << 24); | 285 | (u32)cmd[10] << 24; |
331 | break; | 286 | break; |
332 | case WRITE_12: | 287 | case WRITE_12: |
333 | case READ_12: | 288 | case READ_12: |
334 | *lba = cmd[5] + (cmd[4] << 8) + (cmd[3] << 16) + (cmd[2] << 24); | 289 | *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 | |
335 | *num = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24); | 290 | (u32)cmd[2] << 24; |
291 | |||
292 | *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 | | ||
293 | (u32)cmd[6] << 24; | ||
336 | break; | 294 | break; |
337 | case WRITE_10: | 295 | case WRITE_10: |
338 | case READ_10: | 296 | case READ_10: |
339 | case XDWRITEREAD_10: | 297 | case XDWRITEREAD_10: |
340 | *lba = cmd[5] + (cmd[4] << 8) + (cmd[3] << 16) + (cmd[2] << 24); | 298 | *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 | |
341 | *num = cmd[8] + (cmd[7] << 8); | 299 | (u32)cmd[2] << 24; |
300 | |||
301 | *num = (u32)cmd[8] | (u32)cmd[7] << 8; | ||
342 | break; | 302 | break; |
343 | case WRITE_6: | 303 | case WRITE_6: |
344 | case READ_6: | 304 | case READ_6: |
345 | *lba = cmd[3] + (cmd[2] << 8) + ((cmd[1] & 0x1f) << 16); | 305 | *lba = (u32)cmd[3] | (u32)cmd[2] << 8 | |
306 | (u32)(cmd[1] & 0x1f) << 16; | ||
346 | *num = (0 == cmd[4]) ? 256 : cmd[4]; | 307 | *num = (0 == cmd[4]) ? 256 : cmd[4]; |
347 | break; | 308 | break; |
348 | default: | 309 | default: |
@@ -350,237 +311,6 @@ static void get_data_transfer_info(unsigned char *cmd, | |||
350 | } | 311 | } |
351 | } | 312 | } |
352 | 313 | ||
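The rewritten get_data_transfer_info() above replaces the byte-accumulating loop with explicit big-endian shifts. As a self-contained illustration (not part of the patch), the same decoding applied to a hand-built READ(16) CDB, where bytes 2-9 hold the LBA and bytes 10-13 the transfer length:

/*
 * Illustrative userspace sketch: decode LBA and transfer length from a
 * READ(16) CDB exactly as the rewritten get_data_transfer_info() does.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* READ(16) for LBA 0x123456789a, 8 blocks (example values) */
	unsigned char cmd[16] = {
		0x88, 0,
		0x00, 0x00, 0x00, 0x12, 0x34, 0x56, 0x78, 0x9a,
		0x00, 0x00, 0x00, 0x08,
		0, 0
	};
	uint64_t lba;
	uint32_t num;

	lba = (uint64_t)cmd[9] | (uint64_t)cmd[8] << 8 |
	      (uint64_t)cmd[7] << 16 | (uint64_t)cmd[6] << 24 |
	      (uint64_t)cmd[5] << 32 | (uint64_t)cmd[4] << 40 |
	      (uint64_t)cmd[3] << 48 | (uint64_t)cmd[2] << 56;
	num = (uint32_t)cmd[13] | (uint32_t)cmd[12] << 8 |
	      (uint32_t)cmd[11] << 16 | (uint32_t)cmd[10] << 24;

	printf("lba=0x%llx num=%u\n", (unsigned long long)lba, num);
	return 0;
}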
353 | static | ||
354 | int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done) | ||
355 | { | ||
356 | unsigned char *cmd = (unsigned char *) SCpnt->cmnd; | ||
357 | int len, k; | ||
358 | unsigned int num; | ||
359 | unsigned long long lba; | ||
360 | int errsts = 0; | ||
361 | int target = SCpnt->device->id; | ||
362 | struct sdebug_dev_info * devip = NULL; | ||
363 | int inj_recovered = 0; | ||
364 | int inj_transport = 0; | ||
365 | int delay_override = 0; | ||
366 | |||
367 | if (done == NULL) | ||
368 | return 0; /* assume mid level reprocessing command */ | ||
369 | |||
370 | scsi_set_resid(SCpnt, 0); | ||
371 | if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) { | ||
372 | printk(KERN_INFO "scsi_debug: cmd "); | ||
373 | for (k = 0, len = SCpnt->cmd_len; k < len; ++k) | ||
374 | printk("%02x ", (int)cmd[k]); | ||
375 | printk("\n"); | ||
376 | } | ||
377 | if(target == sdebug_driver_template.this_id) { | ||
378 | printk(KERN_INFO "scsi_debug: initiator's id used as " | ||
379 | "target!\n"); | ||
380 | return schedule_resp(SCpnt, NULL, done, | ||
381 | DID_NO_CONNECT << 16, 0); | ||
382 | } | ||
383 | |||
384 | if ((SCpnt->device->lun >= scsi_debug_max_luns) && | ||
385 | (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS)) | ||
386 | return schedule_resp(SCpnt, NULL, done, | ||
387 | DID_NO_CONNECT << 16, 0); | ||
388 | devip = devInfoReg(SCpnt->device); | ||
389 | if (NULL == devip) | ||
390 | return schedule_resp(SCpnt, NULL, done, | ||
391 | DID_NO_CONNECT << 16, 0); | ||
392 | |||
393 | if ((scsi_debug_every_nth != 0) && | ||
394 | (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) { | ||
395 | scsi_debug_cmnd_count = 0; | ||
396 | if (scsi_debug_every_nth < -1) | ||
397 | scsi_debug_every_nth = -1; | ||
398 | if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts) | ||
399 | return 0; /* ignore command causing timeout */ | ||
400 | else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts) | ||
401 | inj_recovered = 1; /* to reads and writes below */ | ||
402 | else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts) | ||
403 | inj_transport = 1; /* to reads and writes below */ | ||
404 | } | ||
405 | |||
406 | if (devip->wlun) { | ||
407 | switch (*cmd) { | ||
408 | case INQUIRY: | ||
409 | case REQUEST_SENSE: | ||
410 | case TEST_UNIT_READY: | ||
411 | case REPORT_LUNS: | ||
412 | break; /* only allowable wlun commands */ | ||
413 | default: | ||
414 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | ||
415 | printk(KERN_INFO "scsi_debug: Opcode: 0x%x " | ||
416 | "not supported for wlun\n", *cmd); | ||
417 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | ||
418 | INVALID_OPCODE, 0); | ||
419 | errsts = check_condition_result; | ||
420 | return schedule_resp(SCpnt, devip, done, errsts, | ||
421 | 0); | ||
422 | } | ||
423 | } | ||
424 | |||
425 | switch (*cmd) { | ||
426 | case INQUIRY: /* mandatory, ignore unit attention */ | ||
427 | delay_override = 1; | ||
428 | errsts = resp_inquiry(SCpnt, target, devip); | ||
429 | break; | ||
430 | case REQUEST_SENSE: /* mandatory, ignore unit attention */ | ||
431 | delay_override = 1; | ||
432 | errsts = resp_requests(SCpnt, devip); | ||
433 | break; | ||
434 | case REZERO_UNIT: /* actually this is REWIND for SSC */ | ||
435 | case START_STOP: | ||
436 | errsts = resp_start_stop(SCpnt, devip); | ||
437 | break; | ||
438 | case ALLOW_MEDIUM_REMOVAL: | ||
439 | if ((errsts = check_readiness(SCpnt, 1, devip))) | ||
440 | break; | ||
441 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | ||
442 | printk(KERN_INFO "scsi_debug: Medium removal %s\n", | ||
443 | cmd[4] ? "inhibited" : "enabled"); | ||
444 | break; | ||
445 | case SEND_DIAGNOSTIC: /* mandatory */ | ||
446 | errsts = check_readiness(SCpnt, 1, devip); | ||
447 | break; | ||
448 | case TEST_UNIT_READY: /* mandatory */ | ||
449 | delay_override = 1; | ||
450 | errsts = check_readiness(SCpnt, 0, devip); | ||
451 | break; | ||
452 | case RESERVE: | ||
453 | errsts = check_readiness(SCpnt, 1, devip); | ||
454 | break; | ||
455 | case RESERVE_10: | ||
456 | errsts = check_readiness(SCpnt, 1, devip); | ||
457 | break; | ||
458 | case RELEASE: | ||
459 | errsts = check_readiness(SCpnt, 1, devip); | ||
460 | break; | ||
461 | case RELEASE_10: | ||
462 | errsts = check_readiness(SCpnt, 1, devip); | ||
463 | break; | ||
464 | case READ_CAPACITY: | ||
465 | errsts = resp_readcap(SCpnt, devip); | ||
466 | break; | ||
467 | case SERVICE_ACTION_IN: | ||
468 | if (SAI_READ_CAPACITY_16 != cmd[1]) { | ||
469 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | ||
470 | INVALID_OPCODE, 0); | ||
471 | errsts = check_condition_result; | ||
472 | break; | ||
473 | } | ||
474 | errsts = resp_readcap16(SCpnt, devip); | ||
475 | break; | ||
476 | case MAINTENANCE_IN: | ||
477 | if (MI_REPORT_TARGET_PGS != cmd[1]) { | ||
478 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | ||
479 | INVALID_OPCODE, 0); | ||
480 | errsts = check_condition_result; | ||
481 | break; | ||
482 | } | ||
483 | errsts = resp_report_tgtpgs(SCpnt, devip); | ||
484 | break; | ||
485 | case READ_16: | ||
486 | case READ_12: | ||
487 | case READ_10: | ||
488 | case READ_6: | ||
489 | if ((errsts = check_readiness(SCpnt, 0, devip))) | ||
490 | break; | ||
491 | if (scsi_debug_fake_rw) | ||
492 | break; | ||
493 | get_data_transfer_info(cmd, &lba, &num); | ||
494 | errsts = resp_read(SCpnt, lba, num, devip); | ||
495 | if (inj_recovered && (0 == errsts)) { | ||
496 | mk_sense_buffer(devip, RECOVERED_ERROR, | ||
497 | THRESHOLD_EXCEEDED, 0); | ||
498 | errsts = check_condition_result; | ||
499 | } else if (inj_transport && (0 == errsts)) { | ||
500 | mk_sense_buffer(devip, ABORTED_COMMAND, | ||
501 | TRANSPORT_PROBLEM, ACK_NAK_TO); | ||
502 | errsts = check_condition_result; | ||
503 | } | ||
504 | break; | ||
505 | case REPORT_LUNS: /* mandatory, ignore unit attention */ | ||
506 | delay_override = 1; | ||
507 | errsts = resp_report_luns(SCpnt, devip); | ||
508 | break; | ||
509 | case VERIFY: /* 10 byte SBC-2 command */ | ||
510 | errsts = check_readiness(SCpnt, 0, devip); | ||
511 | break; | ||
512 | case WRITE_16: | ||
513 | case WRITE_12: | ||
514 | case WRITE_10: | ||
515 | case WRITE_6: | ||
516 | if ((errsts = check_readiness(SCpnt, 0, devip))) | ||
517 | break; | ||
518 | if (scsi_debug_fake_rw) | ||
519 | break; | ||
520 | get_data_transfer_info(cmd, &lba, &num); | ||
521 | errsts = resp_write(SCpnt, lba, num, devip); | ||
522 | if (inj_recovered && (0 == errsts)) { | ||
523 | mk_sense_buffer(devip, RECOVERED_ERROR, | ||
524 | THRESHOLD_EXCEEDED, 0); | ||
525 | errsts = check_condition_result; | ||
526 | } | ||
527 | break; | ||
528 | case MODE_SENSE: | ||
529 | case MODE_SENSE_10: | ||
530 | errsts = resp_mode_sense(SCpnt, target, devip); | ||
531 | break; | ||
532 | case MODE_SELECT: | ||
533 | errsts = resp_mode_select(SCpnt, 1, devip); | ||
534 | break; | ||
535 | case MODE_SELECT_10: | ||
536 | errsts = resp_mode_select(SCpnt, 0, devip); | ||
537 | break; | ||
538 | case LOG_SENSE: | ||
539 | errsts = resp_log_sense(SCpnt, devip); | ||
540 | break; | ||
541 | case SYNCHRONIZE_CACHE: | ||
542 | delay_override = 1; | ||
543 | errsts = check_readiness(SCpnt, 0, devip); | ||
544 | break; | ||
545 | case WRITE_BUFFER: | ||
546 | errsts = check_readiness(SCpnt, 1, devip); | ||
547 | break; | ||
548 | case XDWRITEREAD_10: | ||
549 | if (!scsi_bidi_cmnd(SCpnt)) { | ||
550 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | ||
551 | INVALID_FIELD_IN_CDB, 0); | ||
552 | errsts = check_condition_result; | ||
553 | break; | ||
554 | } | ||
555 | |||
556 | errsts = check_readiness(SCpnt, 0, devip); | ||
557 | if (errsts) | ||
558 | break; | ||
559 | if (scsi_debug_fake_rw) | ||
560 | break; | ||
561 | get_data_transfer_info(cmd, &lba, &num); | ||
562 | errsts = resp_read(SCpnt, lba, num, devip); | ||
563 | if (errsts) | ||
564 | break; | ||
565 | errsts = resp_write(SCpnt, lba, num, devip); | ||
566 | if (errsts) | ||
567 | break; | ||
568 | errsts = resp_xdwriteread(SCpnt, lba, num, devip); | ||
569 | break; | ||
570 | default: | ||
571 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | ||
572 | printk(KERN_INFO "scsi_debug: Opcode: 0x%x not " | ||
573 | "supported\n", *cmd); | ||
574 | if ((errsts = check_readiness(SCpnt, 1, devip))) | ||
575 | break; /* Unit attention takes precedence */ | ||
576 | mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0); | ||
577 | errsts = check_condition_result; | ||
578 | break; | ||
579 | } | ||
580 | return schedule_resp(SCpnt, devip, done, errsts, | ||
581 | (delay_override ? 0 : scsi_debug_delay)); | ||
582 | } | ||
583 | |||
584 | static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) | 314 | static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) |
585 | { | 315 | { |
586 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) { | 316 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) { |
@@ -613,81 +343,37 @@ static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only, | |||
613 | } | 343 | } |
614 | 344 | ||
615 | /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid. */ | 345 | /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid. */ |
616 | static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr, | 346 | static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, |
617 | int arr_len) | 347 | int arr_len) |
618 | { | 348 | { |
619 | int k, req_len, act_len, len, active; | 349 | int act_len; |
620 | void * kaddr; | ||
621 | void * kaddr_off; | ||
622 | struct scatterlist *sg; | ||
623 | struct scsi_data_buffer *sdb = scsi_in(scp); | 350 | struct scsi_data_buffer *sdb = scsi_in(scp); |
624 | 351 | ||
625 | if (!sdb->length) | 352 | if (!sdb->length) |
626 | return 0; | 353 | return 0; |
627 | if (!sdb->table.sgl) | ||
628 | return (DID_ERROR << 16); | ||
629 | if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE)) | 354 | if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE)) |
630 | return (DID_ERROR << 16); | 355 | return (DID_ERROR << 16); |
631 | active = 1; | 356 | |
632 | req_len = act_len = 0; | 357 | act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents, |
633 | for_each_sg(sdb->table.sgl, sg, sdb->table.nents, k) { | 358 | arr, arr_len); |
634 | if (active) { | ||
635 | kaddr = (unsigned char *) | ||
636 | kmap_atomic(sg_page(sg), KM_USER0); | ||
637 | if (NULL == kaddr) | ||
638 | return (DID_ERROR << 16); | ||
639 | kaddr_off = (unsigned char *)kaddr + sg->offset; | ||
640 | len = sg->length; | ||
641 | if ((req_len + len) > arr_len) { | ||
642 | active = 0; | ||
643 | len = arr_len - req_len; | ||
644 | } | ||
645 | memcpy(kaddr_off, arr + req_len, len); | ||
646 | kunmap_atomic(kaddr, KM_USER0); | ||
647 | act_len += len; | ||
648 | } | ||
649 | req_len += sg->length; | ||
650 | } | ||
651 | if (sdb->resid) | 359 | if (sdb->resid) |
652 | sdb->resid -= act_len; | 360 | sdb->resid -= act_len; |
653 | else | 361 | else |
654 | sdb->resid = req_len - act_len; | 362 | sdb->resid = scsi_bufflen(scp) - act_len; |
363 | |||
655 | return 0; | 364 | return 0; |
656 | } | 365 | } |
657 | 366 | ||
658 | /* Returns number of bytes fetched into 'arr' or -1 if error. */ | 367 | /* Returns number of bytes fetched into 'arr' or -1 if error. */ |
659 | static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr, | 368 | static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, |
660 | int max_arr_len) | 369 | int arr_len) |
661 | { | 370 | { |
662 | int k, req_len, len, fin; | 371 | if (!scsi_bufflen(scp)) |
663 | void * kaddr; | ||
664 | void * kaddr_off; | ||
665 | struct scatterlist * sg; | ||
666 | |||
667 | if (0 == scsi_bufflen(scp)) | ||
668 | return 0; | 372 | return 0; |
669 | if (NULL == scsi_sglist(scp)) | ||
670 | return -1; | ||
671 | if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE)) | 373 | if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE)) |
672 | return -1; | 374 | return -1; |
673 | req_len = fin = 0; | 375 | |
674 | scsi_for_each_sg(scp, sg, scsi_sg_count(scp), k) { | 376 | return scsi_sg_copy_to_buffer(scp, arr, arr_len); |
675 | kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0); | ||
676 | if (NULL == kaddr) | ||
677 | return -1; | ||
678 | kaddr_off = (unsigned char *)kaddr + sg->offset; | ||
679 | len = sg->length; | ||
680 | if ((req_len + len) > max_arr_len) { | ||
681 | len = max_arr_len - req_len; | ||
682 | fin = 1; | ||
683 | } | ||
684 | memcpy(arr + req_len, kaddr_off, len); | ||
685 | kunmap_atomic(kaddr, KM_USER0); | ||
686 | if (fin) | ||
687 | return req_len + len; | ||
688 | req_len += sg->length; | ||
689 | } | ||
690 | return req_len; | ||
691 | } | 377 | } |
692 | 378 | ||
693 | 379 | ||
@@ -1159,6 +845,14 @@ static int resp_start_stop(struct scsi_cmnd * scp, | |||
1159 | return 0; | 845 | return 0; |
1160 | } | 846 | } |
1161 | 847 | ||
848 | static sector_t get_sdebug_capacity(void) | ||
849 | { | ||
850 | if (scsi_debug_virtual_gb > 0) | ||
851 | return 2048 * 1024 * scsi_debug_virtual_gb; | ||
852 | else | ||
853 | return sdebug_store_sectors; | ||
854 | } | ||
855 | |||
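get_sdebug_capacity() centralizes arithmetic that was previously open-coded in several places (the later hunks in this diff remove those copies): assuming the driver's 512-byte SECT_SIZE, one simulated gigabyte is 2048 * 1024 = 2097152 sectors. A trivial, illustrative check of that constant (not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long sectors_per_gb = 2048UL * 1024;	/* 512-byte sectors */

	/* 2097152 sectors * 512 bytes = 1073741824 bytes = 1 GiB */
	printf("%lu sectors = %lu bytes\n",
	       sectors_per_gb, sectors_per_gb * 512);
	return 0;
}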
1162 | #define SDEBUG_READCAP_ARR_SZ 8 | 856 | #define SDEBUG_READCAP_ARR_SZ 8 |
1163 | static int resp_readcap(struct scsi_cmnd * scp, | 857 | static int resp_readcap(struct scsi_cmnd * scp, |
1164 | struct sdebug_dev_info * devip) | 858 | struct sdebug_dev_info * devip) |
@@ -1170,11 +864,7 @@ static int resp_readcap(struct scsi_cmnd * scp, | |||
1170 | if ((errsts = check_readiness(scp, 1, devip))) | 864 | if ((errsts = check_readiness(scp, 1, devip))) |
1171 | return errsts; | 865 | return errsts; |
1172 | /* following just in case virtual_gb changed */ | 866 | /* following just in case virtual_gb changed */ |
1173 | if (scsi_debug_virtual_gb > 0) { | 867 | sdebug_capacity = get_sdebug_capacity(); |
1174 | sdebug_capacity = 2048 * 1024; | ||
1175 | sdebug_capacity *= scsi_debug_virtual_gb; | ||
1176 | } else | ||
1177 | sdebug_capacity = sdebug_store_sectors; | ||
1178 | memset(arr, 0, SDEBUG_READCAP_ARR_SZ); | 868 | memset(arr, 0, SDEBUG_READCAP_ARR_SZ); |
1179 | if (sdebug_capacity < 0xffffffff) { | 869 | if (sdebug_capacity < 0xffffffff) { |
1180 | capac = (unsigned int)sdebug_capacity - 1; | 870 | capac = (unsigned int)sdebug_capacity - 1; |
@@ -1207,11 +897,7 @@ static int resp_readcap16(struct scsi_cmnd * scp, | |||
1207 | alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8) | 897 | alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8) |
1208 | + cmd[13]); | 898 | + cmd[13]); |
1209 | /* following just in case virtual_gb changed */ | 899 | /* following just in case virtual_gb changed */ |
1210 | if (scsi_debug_virtual_gb > 0) { | 900 | sdebug_capacity = get_sdebug_capacity(); |
1211 | sdebug_capacity = 2048 * 1024; | ||
1212 | sdebug_capacity *= scsi_debug_virtual_gb; | ||
1213 | } else | ||
1214 | sdebug_capacity = sdebug_store_sectors; | ||
1215 | memset(arr, 0, SDEBUG_READCAP16_ARR_SZ); | 901 | memset(arr, 0, SDEBUG_READCAP16_ARR_SZ); |
1216 | capac = sdebug_capacity - 1; | 902 | capac = sdebug_capacity - 1; |
1217 | for (k = 0; k < 8; ++k, capac >>= 8) | 903 | for (k = 0; k < 8; ++k, capac >>= 8) |
@@ -1505,13 +1191,9 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target, | |||
1505 | offset = 8; | 1191 | offset = 8; |
1506 | } | 1192 | } |
1507 | ap = arr + offset; | 1193 | ap = arr + offset; |
1508 | if ((bd_len > 0) && (0 == sdebug_capacity)) { | 1194 | if ((bd_len > 0) && (!sdebug_capacity)) |
1509 | if (scsi_debug_virtual_gb > 0) { | 1195 | sdebug_capacity = get_sdebug_capacity(); |
1510 | sdebug_capacity = 2048 * 1024; | 1196 | |
1511 | sdebug_capacity *= scsi_debug_virtual_gb; | ||
1512 | } else | ||
1513 | sdebug_capacity = sdebug_store_sectors; | ||
1514 | } | ||
1515 | if (8 == bd_len) { | 1197 | if (8 == bd_len) { |
1516 | if (sdebug_capacity > 0xfffffffe) { | 1198 | if (sdebug_capacity > 0xfffffffe) { |
1517 | ap[0] = 0xff; | 1199 | ap[0] = 0xff; |
@@ -1808,25 +1490,53 @@ static int resp_log_sense(struct scsi_cmnd * scp, | |||
1808 | min(len, SDEBUG_MAX_INQ_ARR_SZ)); | 1490 | min(len, SDEBUG_MAX_INQ_ARR_SZ)); |
1809 | } | 1491 | } |
1810 | 1492 | ||
1811 | static int resp_read(struct scsi_cmnd * SCpnt, unsigned long long lba, | 1493 | static int check_device_access_params(struct sdebug_dev_info *devi, |
1812 | unsigned int num, struct sdebug_dev_info * devip) | 1494 | unsigned long long lba, unsigned int num) |
1813 | { | 1495 | { |
1814 | unsigned long iflags; | ||
1815 | unsigned int block, from_bottom; | ||
1816 | unsigned long long u; | ||
1817 | int ret; | ||
1818 | |||
1819 | if (lba + num > sdebug_capacity) { | 1496 | if (lba + num > sdebug_capacity) { |
1820 | mk_sense_buffer(devip, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, | 1497 | mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0); |
1821 | 0); | ||
1822 | return check_condition_result; | 1498 | return check_condition_result; |
1823 | } | 1499 | } |
1824 | /* transfer length excessive (tie in to block limits VPD page) */ | 1500 | /* transfer length excessive (tie in to block limits VPD page) */ |
1825 | if (num > sdebug_store_sectors) { | 1501 | if (num > sdebug_store_sectors) { |
1826 | mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, | 1502 | mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); |
1827 | 0); | ||
1828 | return check_condition_result; | 1503 | return check_condition_result; |
1829 | } | 1504 | } |
1505 | return 0; | ||
1506 | } | ||
1507 | |||
1508 | static int do_device_access(struct scsi_cmnd *scmd, | ||
1509 | struct sdebug_dev_info *devi, | ||
1510 | unsigned long long lba, unsigned int num, int write) | ||
1511 | { | ||
1512 | int ret; | ||
1513 | unsigned int block, rest = 0; | ||
1514 | int (*func)(struct scsi_cmnd *, unsigned char *, int); | ||
1515 | |||
1516 | func = write ? fetch_to_dev_buffer : fill_from_dev_buffer; | ||
1517 | |||
1518 | block = do_div(lba, sdebug_store_sectors); | ||
1519 | if (block + num > sdebug_store_sectors) | ||
1520 | rest = block + num - sdebug_store_sectors; | ||
1521 | |||
1522 | ret = func(scmd, fake_storep + (block * SECT_SIZE), | ||
1523 | (num - rest) * SECT_SIZE); | ||
1524 | if (!ret && rest) | ||
1525 | ret = func(scmd, fake_storep, rest * SECT_SIZE); | ||
1526 | |||
1527 | return ret; | ||
1528 | } | ||
1529 | |||
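do_device_access() folds the duplicated read and write paths into one routine: do_div() reduces the starting LBA modulo the size of the ramdisk, and a second copy handles any portion that wraps past the end of the store. A small userspace sketch of that split (illustrative only; the kernel uses do_div() because a plain 64-bit modulo is not available on every architecture):

#include <stdio.h>

int main(void)
{
	unsigned long long lba = 1000, store_sectors = 1004;
	unsigned int num = 10, block, rest = 0;

	block = (unsigned int)(lba % store_sectors);	/* do_div() in kernel */
	if (block + num > store_sectors)
		rest = block + num - store_sectors;

	/* first chunk of (num - rest) sectors at 'block', remainder at 0 */
	printf("first chunk: %u sectors at %u, wrapped: %u at 0\n",
	       num - rest, block, rest);
	return 0;
}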
1530 | static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, | ||
1531 | unsigned int num, struct sdebug_dev_info *devip) | ||
1532 | { | ||
1533 | unsigned long iflags; | ||
1534 | int ret; | ||
1535 | |||
1536 | ret = check_device_access_params(devip, lba, num); | ||
1537 | if (ret) | ||
1538 | return ret; | ||
1539 | |||
1830 | if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) && | 1540 | if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) && |
1831 | (lba <= OPT_MEDIUM_ERR_ADDR) && | 1541 | (lba <= OPT_MEDIUM_ERR_ADDR) && |
1832 | ((lba + num) > OPT_MEDIUM_ERR_ADDR)) { | 1542 | ((lba + num) > OPT_MEDIUM_ERR_ADDR)) { |
@@ -1845,74 +1555,30 @@ static int resp_read(struct scsi_cmnd * SCpnt, unsigned long long lba, | |||
1845 | return check_condition_result; | 1555 | return check_condition_result; |
1846 | } | 1556 | } |
1847 | read_lock_irqsave(&atomic_rw, iflags); | 1557 | read_lock_irqsave(&atomic_rw, iflags); |
1848 | if ((lba + num) <= sdebug_store_sectors) | 1558 | ret = do_device_access(SCpnt, devip, lba, num, 0); |
1849 | ret = fill_from_dev_buffer(SCpnt, | ||
1850 | fake_storep + (lba * SECT_SIZE), | ||
1851 | num * SECT_SIZE); | ||
1852 | else { | ||
1853 | /* modulo when one arg is 64 bits needs do_div() */ | ||
1854 | u = lba; | ||
1855 | block = do_div(u, sdebug_store_sectors); | ||
1856 | from_bottom = 0; | ||
1857 | if ((block + num) > sdebug_store_sectors) | ||
1858 | from_bottom = (block + num) - sdebug_store_sectors; | ||
1859 | ret = fill_from_dev_buffer(SCpnt, | ||
1860 | fake_storep + (block * SECT_SIZE), | ||
1861 | (num - from_bottom) * SECT_SIZE); | ||
1862 | if ((0 == ret) && (from_bottom > 0)) | ||
1863 | ret = fill_from_dev_buffer(SCpnt, fake_storep, | ||
1864 | from_bottom * SECT_SIZE); | ||
1865 | } | ||
1866 | read_unlock_irqrestore(&atomic_rw, iflags); | 1559 | read_unlock_irqrestore(&atomic_rw, iflags); |
1867 | return ret; | 1560 | return ret; |
1868 | } | 1561 | } |
1869 | 1562 | ||
1870 | static int resp_write(struct scsi_cmnd * SCpnt, unsigned long long lba, | 1563 | static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, |
1871 | unsigned int num, struct sdebug_dev_info * devip) | 1564 | unsigned int num, struct sdebug_dev_info *devip) |
1872 | { | 1565 | { |
1873 | unsigned long iflags; | 1566 | unsigned long iflags; |
1874 | unsigned int block, to_bottom; | 1567 | int ret; |
1875 | unsigned long long u; | ||
1876 | int res; | ||
1877 | 1568 | ||
1878 | if (lba + num > sdebug_capacity) { | 1569 | ret = check_device_access_params(devip, lba, num); |
1879 | mk_sense_buffer(devip, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, | 1570 | if (ret) |
1880 | 0); | 1571 | return ret; |
1881 | return check_condition_result; | ||
1882 | } | ||
1883 | /* transfer length excessive (tie in to block limits VPD page) */ | ||
1884 | if (num > sdebug_store_sectors) { | ||
1885 | mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, | ||
1886 | 0); | ||
1887 | return check_condition_result; | ||
1888 | } | ||
1889 | 1572 | ||
1890 | write_lock_irqsave(&atomic_rw, iflags); | 1573 | write_lock_irqsave(&atomic_rw, iflags); |
1891 | if ((lba + num) <= sdebug_store_sectors) | 1574 | ret = do_device_access(SCpnt, devip, lba, num, 1); |
1892 | res = fetch_to_dev_buffer(SCpnt, | ||
1893 | fake_storep + (lba * SECT_SIZE), | ||
1894 | num * SECT_SIZE); | ||
1895 | else { | ||
1896 | /* modulo when one arg is 64 bits needs do_div() */ | ||
1897 | u = lba; | ||
1898 | block = do_div(u, sdebug_store_sectors); | ||
1899 | to_bottom = 0; | ||
1900 | if ((block + num) > sdebug_store_sectors) | ||
1901 | to_bottom = (block + num) - sdebug_store_sectors; | ||
1902 | res = fetch_to_dev_buffer(SCpnt, | ||
1903 | fake_storep + (block * SECT_SIZE), | ||
1904 | (num - to_bottom) * SECT_SIZE); | ||
1905 | if ((0 == res) && (to_bottom > 0)) | ||
1906 | res = fetch_to_dev_buffer(SCpnt, fake_storep, | ||
1907 | to_bottom * SECT_SIZE); | ||
1908 | } | ||
1909 | write_unlock_irqrestore(&atomic_rw, iflags); | 1575 | write_unlock_irqrestore(&atomic_rw, iflags); |
1910 | if (-1 == res) | 1576 | if (-1 == ret) |
1911 | return (DID_ERROR << 16); | 1577 | return (DID_ERROR << 16); |
1912 | else if ((res < (num * SECT_SIZE)) && | 1578 | else if ((ret < (num * SECT_SIZE)) && |
1913 | (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) | 1579 | (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) |
1914 | printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, " | 1580 | printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, " |
1915 | " IO sent=%d bytes\n", num * SECT_SIZE, res); | 1581 | " IO sent=%d bytes\n", num * SECT_SIZE, ret); |
1916 | return 0; | 1582 | return 0; |
1917 | } | 1583 | } |
1918 | 1584 | ||
@@ -1987,16 +1653,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, | |||
1987 | if (!buf) | 1653 | if (!buf) |
1988 | return ret; | 1654 | return ret; |
1989 | 1655 | ||
1990 | offset = 0; | 1656 | scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp)); |
1991 | scsi_for_each_sg(scp, sg, scsi_sg_count(scp), i) { | ||
1992 | kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0); | ||
1993 | if (!kaddr) | ||
1994 | goto out; | ||
1995 | |||
1996 | memcpy(buf + offset, kaddr + sg->offset, sg->length); | ||
1997 | offset += sg->length; | ||
1998 | kunmap_atomic(kaddr, KM_USER0); | ||
1999 | } | ||
2000 | 1657 | ||
2001 | offset = 0; | 1658 | offset = 0; |
2002 | for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) { | 1659 | for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) { |
@@ -2045,7 +1702,73 @@ static void timer_intr_handler(unsigned long indx) | |||
2045 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 1702 | spin_unlock_irqrestore(&queued_arr_lock, iflags); |
2046 | } | 1703 | } |
2047 | 1704 | ||
2048 | static int scsi_debug_slave_alloc(struct scsi_device * sdp) | 1705 | |
1706 | static struct sdebug_dev_info * | ||
1707 | sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags) | ||
1708 | { | ||
1709 | struct sdebug_dev_info *devip; | ||
1710 | |||
1711 | devip = kzalloc(sizeof(*devip), flags); | ||
1712 | if (devip) { | ||
1713 | devip->sdbg_host = sdbg_host; | ||
1714 | list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list); | ||
1715 | } | ||
1716 | return devip; | ||
1717 | } | ||
1718 | |||
1719 | static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev) | ||
1720 | { | ||
1721 | struct sdebug_host_info * sdbg_host; | ||
1722 | struct sdebug_dev_info * open_devip = NULL; | ||
1723 | struct sdebug_dev_info * devip = | ||
1724 | (struct sdebug_dev_info *)sdev->hostdata; | ||
1725 | |||
1726 | if (devip) | ||
1727 | return devip; | ||
1728 | sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host); | ||
1729 | if (!sdbg_host) { | ||
1730 | printk(KERN_ERR "Host info NULL\n"); | ||
1731 | return NULL; | ||
1732 | } | ||
1733 | list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { | ||
1734 | if ((devip->used) && (devip->channel == sdev->channel) && | ||
1735 | (devip->target == sdev->id) && | ||
1736 | (devip->lun == sdev->lun)) | ||
1737 | return devip; | ||
1738 | else { | ||
1739 | if ((!devip->used) && (!open_devip)) | ||
1740 | open_devip = devip; | ||
1741 | } | ||
1742 | } | ||
1743 | if (!open_devip) { /* try and make a new one */ | ||
1744 | open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC); | ||
1745 | if (!open_devip) { | ||
1746 | printk(KERN_ERR "%s: out of memory at line %d\n", | ||
1747 | __FUNCTION__, __LINE__); | ||
1748 | return NULL; | ||
1749 | } | ||
1750 | } | ||
1751 | |||
1752 | open_devip->channel = sdev->channel; | ||
1753 | open_devip->target = sdev->id; | ||
1754 | open_devip->lun = sdev->lun; | ||
1755 | open_devip->sdbg_host = sdbg_host; | ||
1756 | open_devip->reset = 1; | ||
1757 | open_devip->used = 1; | ||
1758 | memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN); | ||
1759 | if (scsi_debug_dsense) | ||
1760 | open_devip->sense_buff[0] = 0x72; | ||
1761 | else { | ||
1762 | open_devip->sense_buff[0] = 0x70; | ||
1763 | open_devip->sense_buff[7] = 0xa; | ||
1764 | } | ||
1765 | if (sdev->lun == SAM2_WLUN_REPORT_LUNS) | ||
1766 | open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff; | ||
1767 | |||
1768 | return open_devip; | ||
1769 | } | ||
1770 | |||
1771 | static int scsi_debug_slave_alloc(struct scsi_device *sdp) | ||
2049 | { | 1772 | { |
2050 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 1773 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) |
2051 | printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n", | 1774 | printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n", |
@@ -2054,9 +1777,9 @@ static int scsi_debug_slave_alloc(struct scsi_device * sdp) | |||
2054 | return 0; | 1777 | return 0; |
2055 | } | 1778 | } |
2056 | 1779 | ||
2057 | static int scsi_debug_slave_configure(struct scsi_device * sdp) | 1780 | static int scsi_debug_slave_configure(struct scsi_device *sdp) |
2058 | { | 1781 | { |
2059 | struct sdebug_dev_info * devip; | 1782 | struct sdebug_dev_info *devip; |
2060 | 1783 | ||
2061 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 1784 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) |
2062 | printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n", | 1785 | printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n", |
@@ -2074,10 +1797,10 @@ static int scsi_debug_slave_configure(struct scsi_device * sdp) | |||
2074 | return 0; | 1797 | return 0; |
2075 | } | 1798 | } |
2076 | 1799 | ||
2077 | static void scsi_debug_slave_destroy(struct scsi_device * sdp) | 1800 | static void scsi_debug_slave_destroy(struct scsi_device *sdp) |
2078 | { | 1801 | { |
2079 | struct sdebug_dev_info * devip = | 1802 | struct sdebug_dev_info *devip = |
2080 | (struct sdebug_dev_info *)sdp->hostdata; | 1803 | (struct sdebug_dev_info *)sdp->hostdata; |
2081 | 1804 | ||
2082 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 1805 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) |
2083 | printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n", | 1806 | printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n", |
@@ -2089,84 +1812,44 @@ static void scsi_debug_slave_destroy(struct scsi_device * sdp) | |||
2089 | } | 1812 | } |
2090 | } | 1813 | } |
2091 | 1814 | ||
2092 | static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev) | 1815 | /* Returns 1 if found 'cmnd' and deleted its timer; else returns 0 */ |
1816 | static int stop_queued_cmnd(struct scsi_cmnd *cmnd) | ||
2093 | { | 1817 | { |
2094 | struct sdebug_host_info * sdbg_host; | 1818 | unsigned long iflags; |
2095 | struct sdebug_dev_info * open_devip = NULL; | 1819 | int k; |
2096 | struct sdebug_dev_info * devip = | 1820 | struct sdebug_queued_cmd *sqcp; |
2097 | (struct sdebug_dev_info *)sdev->hostdata; | ||
2098 | 1821 | ||
2099 | if (devip) | 1822 | spin_lock_irqsave(&queued_arr_lock, iflags); |
2100 | return devip; | 1823 | for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { |
2101 | sdbg_host = *(struct sdebug_host_info **) sdev->host->hostdata; | 1824 | sqcp = &queued_arr[k]; |
2102 | if(! sdbg_host) { | 1825 | if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) { |
2103 | printk(KERN_ERR "Host info NULL\n"); | 1826 | del_timer_sync(&sqcp->cmnd_timer); |
2104 | return NULL; | 1827 | sqcp->in_use = 0; |
2105 | } | 1828 | sqcp->a_cmnd = NULL; |
2106 | list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { | 1829 | break; |
2107 | if ((devip->used) && (devip->channel == sdev->channel) && | ||
2108 | (devip->target == sdev->id) && | ||
2109 | (devip->lun == sdev->lun)) | ||
2110 | return devip; | ||
2111 | else { | ||
2112 | if ((!devip->used) && (!open_devip)) | ||
2113 | open_devip = devip; | ||
2114 | } | 1830 | } |
2115 | } | 1831 | } |
2116 | if (NULL == open_devip) { /* try and make a new one */ | 1832 | spin_unlock_irqrestore(&queued_arr_lock, iflags); |
2117 | open_devip = kzalloc(sizeof(*open_devip),GFP_ATOMIC); | 1833 | return (k < SCSI_DEBUG_CANQUEUE) ? 1 : 0; |
2118 | if (NULL == open_devip) { | ||
2119 | printk(KERN_ERR "%s: out of memory at line %d\n", | ||
2120 | __FUNCTION__, __LINE__); | ||
2121 | return NULL; | ||
2122 | } | ||
2123 | open_devip->sdbg_host = sdbg_host; | ||
2124 | list_add_tail(&open_devip->dev_list, | ||
2125 | &sdbg_host->dev_info_list); | ||
2126 | } | ||
2127 | if (open_devip) { | ||
2128 | open_devip->channel = sdev->channel; | ||
2129 | open_devip->target = sdev->id; | ||
2130 | open_devip->lun = sdev->lun; | ||
2131 | open_devip->sdbg_host = sdbg_host; | ||
2132 | open_devip->reset = 1; | ||
2133 | open_devip->used = 1; | ||
2134 | memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN); | ||
2135 | if (scsi_debug_dsense) | ||
2136 | open_devip->sense_buff[0] = 0x72; | ||
2137 | else { | ||
2138 | open_devip->sense_buff[0] = 0x70; | ||
2139 | open_devip->sense_buff[7] = 0xa; | ||
2140 | } | ||
2141 | if (sdev->lun == SAM2_WLUN_REPORT_LUNS) | ||
2142 | open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff; | ||
2143 | return open_devip; | ||
2144 | } | ||
2145 | return NULL; | ||
2146 | } | 1834 | } |
2147 | 1835 | ||
2148 | static void mk_sense_buffer(struct sdebug_dev_info * devip, int key, | 1836 | /* Deletes (stops) timers of all queued commands */ |
2149 | int asc, int asq) | 1837 | static void stop_all_queued(void) |
2150 | { | 1838 | { |
2151 | unsigned char * sbuff; | 1839 | unsigned long iflags; |
1840 | int k; | ||
1841 | struct sdebug_queued_cmd *sqcp; | ||
2152 | 1842 | ||
2153 | sbuff = devip->sense_buff; | 1843 | spin_lock_irqsave(&queued_arr_lock, iflags); |
2154 | memset(sbuff, 0, SDEBUG_SENSE_LEN); | 1844 | for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { |
2155 | if (scsi_debug_dsense) { | 1845 | sqcp = &queued_arr[k]; |
2156 | sbuff[0] = 0x72; /* descriptor, current */ | 1846 | if (sqcp->in_use && sqcp->a_cmnd) { |
2157 | sbuff[1] = key; | 1847 | del_timer_sync(&sqcp->cmnd_timer); |
2158 | sbuff[2] = asc; | 1848 | sqcp->in_use = 0; |
2159 | sbuff[3] = asq; | 1849 | sqcp->a_cmnd = NULL; |
2160 | } else { | 1850 | } |
2161 | sbuff[0] = 0x70; /* fixed, current */ | ||
2162 | sbuff[2] = key; | ||
2163 | sbuff[7] = 0xa; /* implies 18 byte sense buffer */ | ||
2164 | sbuff[12] = asc; | ||
2165 | sbuff[13] = asq; | ||
2166 | } | 1851 | } |
2167 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 1852 | spin_unlock_irqrestore(&queued_arr_lock, iflags); |
2168 | printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: " | ||
2169 | "[0x%x,0x%x,0x%x]\n", key, asc, asq); | ||
2170 | } | 1853 | } |
2171 | 1854 | ||
2172 | static int scsi_debug_abort(struct scsi_cmnd * SCpnt) | 1855 | static int scsi_debug_abort(struct scsi_cmnd * SCpnt) |
@@ -2226,7 +1909,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt) | |||
2226 | printk(KERN_INFO "scsi_debug: bus_reset\n"); | 1909 | printk(KERN_INFO "scsi_debug: bus_reset\n"); |
2227 | ++num_bus_resets; | 1910 | ++num_bus_resets; |
2228 | if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) { | 1911 | if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) { |
2229 | sdbg_host = *(struct sdebug_host_info **) hp->hostdata; | 1912 | sdbg_host = *(struct sdebug_host_info **)shost_priv(hp); |
2230 | if (sdbg_host) { | 1913 | if (sdbg_host) { |
2231 | list_for_each_entry(dev_info, | 1914 | list_for_each_entry(dev_info, |
2232 | &sdbg_host->dev_info_list, | 1915 | &sdbg_host->dev_info_list, |
@@ -2256,46 +1939,6 @@ static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt) | |||
2256 | return SUCCESS; | 1939 | return SUCCESS; |
2257 | } | 1940 | } |
2258 | 1941 | ||
2259 | /* Returns 1 if found 'cmnd' and deleted its timer; else returns 0 */ | ||
2260 | static int stop_queued_cmnd(struct scsi_cmnd * cmnd) | ||
2261 | { | ||
2262 | unsigned long iflags; | ||
2263 | int k; | ||
2264 | struct sdebug_queued_cmd * sqcp; | ||
2265 | |||
2266 | spin_lock_irqsave(&queued_arr_lock, iflags); | ||
2267 | for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { | ||
2268 | sqcp = &queued_arr[k]; | ||
2269 | if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) { | ||
2270 | del_timer_sync(&sqcp->cmnd_timer); | ||
2271 | sqcp->in_use = 0; | ||
2272 | sqcp->a_cmnd = NULL; | ||
2273 | break; | ||
2274 | } | ||
2275 | } | ||
2276 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
2277 | return (k < SCSI_DEBUG_CANQUEUE) ? 1 : 0; | ||
2278 | } | ||
2279 | |||
2280 | /* Deletes (stops) timers of all queued commands */ | ||
2281 | static void stop_all_queued(void) | ||
2282 | { | ||
2283 | unsigned long iflags; | ||
2284 | int k; | ||
2285 | struct sdebug_queued_cmd * sqcp; | ||
2286 | |||
2287 | spin_lock_irqsave(&queued_arr_lock, iflags); | ||
2288 | for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { | ||
2289 | sqcp = &queued_arr[k]; | ||
2290 | if (sqcp->in_use && sqcp->a_cmnd) { | ||
2291 | del_timer_sync(&sqcp->cmnd_timer); | ||
2292 | sqcp->in_use = 0; | ||
2293 | sqcp->a_cmnd = NULL; | ||
2294 | } | ||
2295 | } | ||
2296 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
2297 | } | ||
2298 | |||
2299 | /* Initializes timers in queued array */ | 1942 | /* Initializes timers in queued array */ |
2300 | static void __init init_all_queued(void) | 1943 | static void __init init_all_queued(void) |
2301 | { | 1944 | { |
@@ -2313,7 +1956,8 @@ static void __init init_all_queued(void) | |||
2313 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 1956 | spin_unlock_irqrestore(&queued_arr_lock, iflags); |
2314 | } | 1957 | } |
2315 | 1958 | ||
2316 | static void __init sdebug_build_parts(unsigned char * ramp) | 1959 | static void __init sdebug_build_parts(unsigned char *ramp, |
1960 | unsigned long store_size) | ||
2317 | { | 1961 | { |
2318 | struct partition * pp; | 1962 | struct partition * pp; |
2319 | int starts[SDEBUG_MAX_PARTS + 2]; | 1963 | int starts[SDEBUG_MAX_PARTS + 2]; |
@@ -2321,7 +1965,7 @@ static void __init sdebug_build_parts(unsigned char * ramp) | |||
2321 | int heads_by_sects, start_sec, end_sec; | 1965 | int heads_by_sects, start_sec, end_sec; |
2322 | 1966 | ||
2323 | /* assume partition table already zeroed */ | 1967 | /* assume partition table already zeroed */ |
2324 | if ((scsi_debug_num_parts < 1) || (sdebug_store_size < 1048576)) | 1968 | if ((scsi_debug_num_parts < 1) || (store_size < 1048576)) |
2325 | return; | 1969 | return; |
2326 | if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) { | 1970 | if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) { |
2327 | scsi_debug_num_parts = SDEBUG_MAX_PARTS; | 1971 | scsi_debug_num_parts = SDEBUG_MAX_PARTS; |
@@ -2419,7 +2063,6 @@ static int schedule_resp(struct scsi_cmnd * cmnd, | |||
2419 | return 0; | 2063 | return 0; |
2420 | } | 2064 | } |
2421 | } | 2065 | } |
2422 | |||
2423 | /* Note: The following macros create attribute files in the | 2066 | /* Note: The following macros create attribute files in the |
2424 | /sys/module/scsi_debug/parameters directory. Unfortunately this | 2067 | /sys/module/scsi_debug/parameters directory. Unfortunately this |
2425 | driver is unaware of a change and cannot trigger auxiliary actions | 2068 | driver is unaware of a change and cannot trigger auxiliary actions |
@@ -2736,11 +2379,9 @@ static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp, | |||
2736 | 2379 | ||
2737 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | 2380 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { |
2738 | scsi_debug_virtual_gb = n; | 2381 | scsi_debug_virtual_gb = n; |
2739 | if (scsi_debug_virtual_gb > 0) { | 2382 | |
2740 | sdebug_capacity = 2048 * 1024; | 2383 | sdebug_capacity = get_sdebug_capacity(); |
2741 | sdebug_capacity *= scsi_debug_virtual_gb; | 2384 | |
2742 | } else | ||
2743 | sdebug_capacity = sdebug_store_sectors; | ||
2744 | return count; | 2385 | return count; |
2745 | } | 2386 | } |
2746 | return -EINVAL; | 2387 | return -EINVAL; |
@@ -2756,21 +2397,10 @@ static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf) | |||
2756 | static ssize_t sdebug_add_host_store(struct device_driver * ddp, | 2397 | static ssize_t sdebug_add_host_store(struct device_driver * ddp, |
2757 | const char * buf, size_t count) | 2398 | const char * buf, size_t count) |
2758 | { | 2399 | { |
2759 | int delta_hosts; | 2400 | int delta_hosts; |
2760 | char work[20]; | ||
2761 | 2401 | ||
2762 | if (1 != sscanf(buf, "%10s", work)) | 2402 | if (sscanf(buf, "%d", &delta_hosts) != 1) |
2763 | return -EINVAL; | 2403 | return -EINVAL; |
2764 | { /* temporary hack around sscanf() problem with -ve nums */ | ||
2765 | int neg = 0; | ||
2766 | |||
2767 | if ('-' == *work) | ||
2768 | neg = 1; | ||
2769 | if (1 != sscanf(work + neg, "%d", &delta_hosts)) | ||
2770 | return -EINVAL; | ||
2771 | if (neg) | ||
2772 | delta_hosts = -delta_hosts; | ||
2773 | } | ||
2774 | if (delta_hosts > 0) { | 2404 | if (delta_hosts > 0) { |
2775 | do { | 2405 | do { |
2776 | sdebug_add_adapter(); | 2406 | sdebug_add_adapter(); |
@@ -2782,7 +2412,7 @@ static ssize_t sdebug_add_host_store(struct device_driver * ddp, | |||
2782 | } | 2412 | } |
2783 | return count; | 2413 | return count; |
2784 | } | 2414 | } |
2785 | DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, | 2415 | DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, |
2786 | sdebug_add_host_store); | 2416 | sdebug_add_host_store); |
2787 | 2417 | ||
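The simplified sdebug_add_host_store() above relies on sscanf("%d") accepting a leading minus sign directly, which is what the removed work-around for negative host counts emulated by hand. A quick illustrative check (not part of the patch):

#include <stdio.h>

int main(void)
{
	int delta_hosts;

	/* "%d" parses an optional sign, so "-2" needs no special casing */
	if (sscanf("-2", "%d", &delta_hosts) == 1)
		printf("parsed %d\n", delta_hosts);	/* prints -2 */
	return 0;
}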
2788 | static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp, | 2418 | static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp, |
@@ -2851,22 +2481,29 @@ static void do_remove_driverfs_files(void) | |||
2851 | driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host); | 2481 | driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host); |
2852 | } | 2482 | } |
2853 | 2483 | ||
2484 | static void pseudo_0_release(struct device *dev) | ||
2485 | { | ||
2486 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | ||
2487 | printk(KERN_INFO "scsi_debug: pseudo_0_release() called\n"); | ||
2488 | } | ||
2489 | |||
2490 | static struct device pseudo_primary = { | ||
2491 | .bus_id = "pseudo_0", | ||
2492 | .release = pseudo_0_release, | ||
2493 | }; | ||
2494 | |||
2854 | static int __init scsi_debug_init(void) | 2495 | static int __init scsi_debug_init(void) |
2855 | { | 2496 | { |
2856 | unsigned int sz; | 2497 | unsigned long sz; |
2857 | int host_to_add; | 2498 | int host_to_add; |
2858 | int k; | 2499 | int k; |
2859 | int ret; | 2500 | int ret; |
2860 | 2501 | ||
2861 | if (scsi_debug_dev_size_mb < 1) | 2502 | if (scsi_debug_dev_size_mb < 1) |
2862 | scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ | 2503 | scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ |
2863 | sdebug_store_size = (unsigned int)scsi_debug_dev_size_mb * 1048576; | 2504 | sz = (unsigned long)scsi_debug_dev_size_mb * 1048576; |
2864 | sdebug_store_sectors = sdebug_store_size / SECT_SIZE; | 2505 | sdebug_store_sectors = sz / SECT_SIZE; |
2865 | if (scsi_debug_virtual_gb > 0) { | 2506 | sdebug_capacity = get_sdebug_capacity(); |
2866 | sdebug_capacity = 2048 * 1024; | ||
2867 | sdebug_capacity *= scsi_debug_virtual_gb; | ||
2868 | } else | ||
2869 | sdebug_capacity = sdebug_store_sectors; | ||
2870 | 2507 | ||
2871 | /* play around with geometry, don't waste too much on track 0 */ | 2508 | /* play around with geometry, don't waste too much on track 0 */ |
2872 | sdebug_heads = 8; | 2509 | sdebug_heads = 8; |
@@ -2885,7 +2522,6 @@ static int __init scsi_debug_init(void) | |||
2885 | (sdebug_sectors_per * sdebug_heads); | 2522 | (sdebug_sectors_per * sdebug_heads); |
2886 | } | 2523 | } |
2887 | 2524 | ||
2888 | sz = sdebug_store_size; | ||
2889 | fake_storep = vmalloc(sz); | 2525 | fake_storep = vmalloc(sz); |
2890 | if (NULL == fake_storep) { | 2526 | if (NULL == fake_storep) { |
2891 | printk(KERN_ERR "scsi_debug_init: out of memory, 1\n"); | 2527 | printk(KERN_ERR "scsi_debug_init: out of memory, 1\n"); |
@@ -2893,7 +2529,7 @@ static int __init scsi_debug_init(void) | |||
2893 | } | 2529 | } |
2894 | memset(fake_storep, 0, sz); | 2530 | memset(fake_storep, 0, sz); |
2895 | if (scsi_debug_num_parts > 0) | 2531 | if (scsi_debug_num_parts > 0) |
2896 | sdebug_build_parts(fake_storep); | 2532 | sdebug_build_parts(fake_storep, sz); |
2897 | 2533 | ||
2898 | ret = device_register(&pseudo_primary); | 2534 | ret = device_register(&pseudo_primary); |
2899 | if (ret < 0) { | 2535 | if (ret < 0) { |
@@ -2922,8 +2558,6 @@ static int __init scsi_debug_init(void) | |||
2922 | 2558 | ||
2923 | init_all_queued(); | 2559 | init_all_queued(); |
2924 | 2560 | ||
2925 | sdebug_driver_template.proc_name = sdebug_proc_name; | ||
2926 | |||
2927 | host_to_add = scsi_debug_add_host; | 2561 | host_to_add = scsi_debug_add_host; |
2928 | scsi_debug_add_host = 0; | 2562 | scsi_debug_add_host = 0; |
2929 | 2563 | ||
@@ -2972,30 +2606,6 @@ static void __exit scsi_debug_exit(void) | |||
2972 | device_initcall(scsi_debug_init); | 2606 | device_initcall(scsi_debug_init); |
2973 | module_exit(scsi_debug_exit); | 2607 | module_exit(scsi_debug_exit); |
2974 | 2608 | ||
2975 | static void pseudo_0_release(struct device * dev) | ||
2976 | { | ||
2977 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | ||
2978 | printk(KERN_INFO "scsi_debug: pseudo_0_release() called\n"); | ||
2979 | } | ||
2980 | |||
2981 | static struct device pseudo_primary = { | ||
2982 | .bus_id = "pseudo_0", | ||
2983 | .release = pseudo_0_release, | ||
2984 | }; | ||
2985 | |||
2986 | static int pseudo_lld_bus_match(struct device *dev, | ||
2987 | struct device_driver *dev_driver) | ||
2988 | { | ||
2989 | return 1; | ||
2990 | } | ||
2991 | |||
2992 | static struct bus_type pseudo_lld_bus = { | ||
2993 | .name = "pseudo", | ||
2994 | .match = pseudo_lld_bus_match, | ||
2995 | .probe = sdebug_driver_probe, | ||
2996 | .remove = sdebug_driver_remove, | ||
2997 | }; | ||
2998 | |||
2999 | static void sdebug_release_adapter(struct device * dev) | 2609 | static void sdebug_release_adapter(struct device * dev) |
3000 | { | 2610 | { |
3001 | struct sdebug_host_info *sdbg_host; | 2611 | struct sdebug_host_info *sdbg_host; |
@@ -3009,8 +2619,7 @@ static int sdebug_add_adapter(void) | |||
3009 | int k, devs_per_host; | 2619 | int k, devs_per_host; |
3010 | int error = 0; | 2620 | int error = 0; |
3011 | struct sdebug_host_info *sdbg_host; | 2621 | struct sdebug_host_info *sdbg_host; |
3012 | struct sdebug_dev_info *sdbg_devinfo; | 2622 | struct sdebug_dev_info *sdbg_devinfo, *tmp; |
3013 | struct list_head *lh, *lh_sf; | ||
3014 | 2623 | ||
3015 | sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL); | 2624 | sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL); |
3016 | if (NULL == sdbg_host) { | 2625 | if (NULL == sdbg_host) { |
@@ -3023,16 +2632,13 @@ static int sdebug_add_adapter(void) | |||
3023 | 2632 | ||
3024 | devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns; | 2633 | devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns; |
3025 | for (k = 0; k < devs_per_host; k++) { | 2634 | for (k = 0; k < devs_per_host; k++) { |
3026 | sdbg_devinfo = kzalloc(sizeof(*sdbg_devinfo),GFP_KERNEL); | 2635 | sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL); |
3027 | if (NULL == sdbg_devinfo) { | 2636 | if (!sdbg_devinfo) { |
3028 | printk(KERN_ERR "%s: out of memory at line %d\n", | 2637 | printk(KERN_ERR "%s: out of memory at line %d\n", |
3029 | __FUNCTION__, __LINE__); | 2638 | __FUNCTION__, __LINE__); |
3030 | error = -ENOMEM; | 2639 | error = -ENOMEM; |
3031 | goto clean; | 2640 | goto clean; |
3032 | } | 2641 | } |
3033 | sdbg_devinfo->sdbg_host = sdbg_host; | ||
3034 | list_add_tail(&sdbg_devinfo->dev_list, | ||
3035 | &sdbg_host->dev_info_list); | ||
3036 | } | 2642 | } |
3037 | 2643 | ||
3038 | spin_lock(&sdebug_host_list_lock); | 2644 | spin_lock(&sdebug_host_list_lock); |
@@ -3053,9 +2659,8 @@ static int sdebug_add_adapter(void) | |||
3053 | return error; | 2659 | return error; |
3054 | 2660 | ||
3055 | clean: | 2661 | clean: |
3056 | list_for_each_safe(lh, lh_sf, &sdbg_host->dev_info_list) { | 2662 | list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list, |
3057 | sdbg_devinfo = list_entry(lh, struct sdebug_dev_info, | 2663 | dev_list) { |
3058 | dev_list); | ||
3059 | list_del(&sdbg_devinfo->dev_list); | 2664 | list_del(&sdbg_devinfo->dev_list); |
3060 | kfree(sdbg_devinfo); | 2665 | kfree(sdbg_devinfo); |
3061 | } | 2666 | } |
@@ -3083,6 +2688,263 @@ static void sdebug_remove_adapter(void) | |||
3083 | --scsi_debug_add_host; | 2688 | --scsi_debug_add_host; |
3084 | } | 2689 | } |
3085 | 2690 | ||
2691 | static | ||
2692 | int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done) | ||
2693 | { | ||
2694 | unsigned char *cmd = (unsigned char *) SCpnt->cmnd; | ||
2695 | int len, k; | ||
2696 | unsigned int num; | ||
2697 | unsigned long long lba; | ||
2698 | int errsts = 0; | ||
2699 | int target = SCpnt->device->id; | ||
2700 | struct sdebug_dev_info *devip = NULL; | ||
2701 | int inj_recovered = 0; | ||
2702 | int inj_transport = 0; | ||
2703 | int delay_override = 0; | ||
2704 | |||
2705 | scsi_set_resid(SCpnt, 0); | ||
2706 | if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) { | ||
2707 | printk(KERN_INFO "scsi_debug: cmd "); | ||
2708 | for (k = 0, len = SCpnt->cmd_len; k < len; ++k) | ||
2709 | printk("%02x ", (int)cmd[k]); | ||
2710 | printk("\n"); | ||
2711 | } | ||
2712 | |||
2713 | if (target == SCpnt->device->host->hostt->this_id) { | ||
2714 | printk(KERN_INFO "scsi_debug: initiator's id used as " | ||
2715 | "target!\n"); | ||
2716 | return schedule_resp(SCpnt, NULL, done, | ||
2717 | DID_NO_CONNECT << 16, 0); | ||
2718 | } | ||
2719 | |||
2720 | if ((SCpnt->device->lun >= scsi_debug_max_luns) && | ||
2721 | (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS)) | ||
2722 | return schedule_resp(SCpnt, NULL, done, | ||
2723 | DID_NO_CONNECT << 16, 0); | ||
2724 | devip = devInfoReg(SCpnt->device); | ||
2725 | if (NULL == devip) | ||
2726 | return schedule_resp(SCpnt, NULL, done, | ||
2727 | DID_NO_CONNECT << 16, 0); | ||
2728 | |||
2729 | if ((scsi_debug_every_nth != 0) && | ||
2730 | (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) { | ||
2731 | scsi_debug_cmnd_count = 0; | ||
2732 | if (scsi_debug_every_nth < -1) | ||
2733 | scsi_debug_every_nth = -1; | ||
2734 | if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts) | ||
2735 | return 0; /* ignore command causing timeout */ | ||
2736 | else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts) | ||
2737 | inj_recovered = 1; /* to reads and writes below */ | ||
2738 | else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts) | ||
2739 | inj_transport = 1; /* to reads and writes below */ | ||
2740 | } | ||
2741 | |||
2742 | if (devip->wlun) { | ||
2743 | switch (*cmd) { | ||
2744 | case INQUIRY: | ||
2745 | case REQUEST_SENSE: | ||
2746 | case TEST_UNIT_READY: | ||
2747 | case REPORT_LUNS: | ||
2748 | break; /* only allowable wlun commands */ | ||
2749 | default: | ||
2750 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | ||
2751 | printk(KERN_INFO "scsi_debug: Opcode: 0x%x " | ||
2752 | "not supported for wlun\n", *cmd); | ||
2753 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | ||
2754 | INVALID_OPCODE, 0); | ||
2755 | errsts = check_condition_result; | ||
2756 | return schedule_resp(SCpnt, devip, done, errsts, | ||
2757 | 0); | ||
2758 | } | ||
2759 | } | ||
2760 | |||
2761 | switch (*cmd) { | ||
2762 | case INQUIRY: /* mandatory, ignore unit attention */ | ||
2763 | delay_override = 1; | ||
2764 | errsts = resp_inquiry(SCpnt, target, devip); | ||
2765 | break; | ||
2766 | case REQUEST_SENSE: /* mandatory, ignore unit attention */ | ||
2767 | delay_override = 1; | ||
2768 | errsts = resp_requests(SCpnt, devip); | ||
2769 | break; | ||
2770 | case REZERO_UNIT: /* actually this is REWIND for SSC */ | ||
2771 | case START_STOP: | ||
2772 | errsts = resp_start_stop(SCpnt, devip); | ||
2773 | break; | ||
2774 | case ALLOW_MEDIUM_REMOVAL: | ||
2775 | errsts = check_readiness(SCpnt, 1, devip); | ||
2776 | if (errsts) | ||
2777 | break; | ||
2778 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | ||
2779 | printk(KERN_INFO "scsi_debug: Medium removal %s\n", | ||
2780 | cmd[4] ? "inhibited" : "enabled"); | ||
2781 | break; | ||
2782 | case SEND_DIAGNOSTIC: /* mandatory */ | ||
2783 | errsts = check_readiness(SCpnt, 1, devip); | ||
2784 | break; | ||
2785 | case TEST_UNIT_READY: /* mandatory */ | ||
2786 | delay_override = 1; | ||
2787 | errsts = check_readiness(SCpnt, 0, devip); | ||
2788 | break; | ||
2789 | case RESERVE: | ||
2790 | errsts = check_readiness(SCpnt, 1, devip); | ||
2791 | break; | ||
2792 | case RESERVE_10: | ||
2793 | errsts = check_readiness(SCpnt, 1, devip); | ||
2794 | break; | ||
2795 | case RELEASE: | ||
2796 | errsts = check_readiness(SCpnt, 1, devip); | ||
2797 | break; | ||
2798 | case RELEASE_10: | ||
2799 | errsts = check_readiness(SCpnt, 1, devip); | ||
2800 | break; | ||
2801 | case READ_CAPACITY: | ||
2802 | errsts = resp_readcap(SCpnt, devip); | ||
2803 | break; | ||
2804 | case SERVICE_ACTION_IN: | ||
2805 | if (SAI_READ_CAPACITY_16 != cmd[1]) { | ||
2806 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | ||
2807 | INVALID_OPCODE, 0); | ||
2808 | errsts = check_condition_result; | ||
2809 | break; | ||
2810 | } | ||
2811 | errsts = resp_readcap16(SCpnt, devip); | ||
2812 | break; | ||
2813 | case MAINTENANCE_IN: | ||
2814 | if (MI_REPORT_TARGET_PGS != cmd[1]) { | ||
2815 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | ||
2816 | INVALID_OPCODE, 0); | ||
2817 | errsts = check_condition_result; | ||
2818 | break; | ||
2819 | } | ||
2820 | errsts = resp_report_tgtpgs(SCpnt, devip); | ||
2821 | break; | ||
2822 | case READ_16: | ||
2823 | case READ_12: | ||
2824 | case READ_10: | ||
2825 | case READ_6: | ||
2826 | errsts = check_readiness(SCpnt, 0, devip); | ||
2827 | if (errsts) | ||
2828 | break; | ||
2829 | if (scsi_debug_fake_rw) | ||
2830 | break; | ||
2831 | get_data_transfer_info(cmd, &lba, &num); | ||
2832 | errsts = resp_read(SCpnt, lba, num, devip); | ||
2833 | if (inj_recovered && (0 == errsts)) { | ||
2834 | mk_sense_buffer(devip, RECOVERED_ERROR, | ||
2835 | THRESHOLD_EXCEEDED, 0); | ||
2836 | errsts = check_condition_result; | ||
2837 | } else if (inj_transport && (0 == errsts)) { | ||
2838 | mk_sense_buffer(devip, ABORTED_COMMAND, | ||
2839 | TRANSPORT_PROBLEM, ACK_NAK_TO); | ||
2840 | errsts = check_condition_result; | ||
2841 | } | ||
2842 | break; | ||
2843 | case REPORT_LUNS: /* mandatory, ignore unit attention */ | ||
2844 | delay_override = 1; | ||
2845 | errsts = resp_report_luns(SCpnt, devip); | ||
2846 | break; | ||
2847 | case VERIFY: /* 10 byte SBC-2 command */ | ||
2848 | errsts = check_readiness(SCpnt, 0, devip); | ||
2849 | break; | ||
2850 | case WRITE_16: | ||
2851 | case WRITE_12: | ||
2852 | case WRITE_10: | ||
2853 | case WRITE_6: | ||
2854 | errsts = check_readiness(SCpnt, 0, devip); | ||
2855 | if (errsts) | ||
2856 | break; | ||
2857 | if (scsi_debug_fake_rw) | ||
2858 | break; | ||
2859 | get_data_transfer_info(cmd, &lba, &num); | ||
2860 | errsts = resp_write(SCpnt, lba, num, devip); | ||
2861 | if (inj_recovered && (0 == errsts)) { | ||
2862 | mk_sense_buffer(devip, RECOVERED_ERROR, | ||
2863 | THRESHOLD_EXCEEDED, 0); | ||
2864 | errsts = check_condition_result; | ||
2865 | } | ||
2866 | break; | ||
2867 | case MODE_SENSE: | ||
2868 | case MODE_SENSE_10: | ||
2869 | errsts = resp_mode_sense(SCpnt, target, devip); | ||
2870 | break; | ||
2871 | case MODE_SELECT: | ||
2872 | errsts = resp_mode_select(SCpnt, 1, devip); | ||
2873 | break; | ||
2874 | case MODE_SELECT_10: | ||
2875 | errsts = resp_mode_select(SCpnt, 0, devip); | ||
2876 | break; | ||
2877 | case LOG_SENSE: | ||
2878 | errsts = resp_log_sense(SCpnt, devip); | ||
2879 | break; | ||
2880 | case SYNCHRONIZE_CACHE: | ||
2881 | delay_override = 1; | ||
2882 | errsts = check_readiness(SCpnt, 0, devip); | ||
2883 | break; | ||
2884 | case WRITE_BUFFER: | ||
2885 | errsts = check_readiness(SCpnt, 1, devip); | ||
2886 | break; | ||
2887 | case XDWRITEREAD_10: | ||
2888 | if (!scsi_bidi_cmnd(SCpnt)) { | ||
2889 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | ||
2890 | INVALID_FIELD_IN_CDB, 0); | ||
2891 | errsts = check_condition_result; | ||
2892 | break; | ||
2893 | } | ||
2894 | |||
2895 | errsts = check_readiness(SCpnt, 0, devip); | ||
2896 | if (errsts) | ||
2897 | break; | ||
2898 | if (scsi_debug_fake_rw) | ||
2899 | break; | ||
2900 | get_data_transfer_info(cmd, &lba, &num); | ||
2901 | errsts = resp_read(SCpnt, lba, num, devip); | ||
2902 | if (errsts) | ||
2903 | break; | ||
2904 | errsts = resp_write(SCpnt, lba, num, devip); | ||
2905 | if (errsts) | ||
2906 | break; | ||
2907 | errsts = resp_xdwriteread(SCpnt, lba, num, devip); | ||
2908 | break; | ||
2909 | default: | ||
2910 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | ||
2911 | printk(KERN_INFO "scsi_debug: Opcode: 0x%x not " | ||
2912 | "supported\n", *cmd); | ||
2913 | errsts = check_readiness(SCpnt, 1, devip); | ||
2914 | if (errsts) | ||
2915 | break; /* Unit attention takes precedence */ | ||
2916 | mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0); | ||
2917 | errsts = check_condition_result; | ||
2918 | break; | ||
2919 | } | ||
2920 | return schedule_resp(SCpnt, devip, done, errsts, | ||
2921 | (delay_override ? 0 : scsi_debug_delay)); | ||
2922 | } | ||
2923 | |||
2924 | static struct scsi_host_template sdebug_driver_template = { | ||
2925 | .proc_info = scsi_debug_proc_info, | ||
2926 | .proc_name = sdebug_proc_name, | ||
2927 | .name = "SCSI DEBUG", | ||
2928 | .info = scsi_debug_info, | ||
2929 | .slave_alloc = scsi_debug_slave_alloc, | ||
2930 | .slave_configure = scsi_debug_slave_configure, | ||
2931 | .slave_destroy = scsi_debug_slave_destroy, | ||
2932 | .ioctl = scsi_debug_ioctl, | ||
2933 | .queuecommand = scsi_debug_queuecommand, | ||
2934 | .eh_abort_handler = scsi_debug_abort, | ||
2935 | .eh_bus_reset_handler = scsi_debug_bus_reset, | ||
2936 | .eh_device_reset_handler = scsi_debug_device_reset, | ||
2937 | .eh_host_reset_handler = scsi_debug_host_reset, | ||
2938 | .bios_param = scsi_debug_biosparam, | ||
2939 | .can_queue = SCSI_DEBUG_CANQUEUE, | ||
2940 | .this_id = 7, | ||
2941 | .sg_tablesize = 256, | ||
2942 | .cmd_per_lun = 16, | ||
2943 | .max_sectors = 0xffff, | ||
2944 | .use_clustering = DISABLE_CLUSTERING, | ||
2945 | .module = THIS_MODULE, | ||
2946 | }; | ||
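The template above only describes the host's capabilities; it takes effect once a Scsi_Host is allocated against it and registered. The sketch below is illustrative only (it is not the elided sdebug_driver_probe() body) and assumes the standard mid-layer calls scsi_host_alloc(), scsi_add_host() and scsi_scan_host(), with error paths trimmed to the minimum.

/* Illustrative sketch: binding a scsi_host_template such as the one above
 * to a host.  Not the driver's actual probe routine. */
static int example_bind_template(struct device *parent)
{
	struct Scsi_Host *shost;
	int err;

	shost = scsi_host_alloc(&sdebug_driver_template, 0);
	if (!shost)
		return -ENOMEM;

	err = scsi_add_host(shost, parent);	/* attach below the parent device */
	if (err) {
		scsi_host_put(shost);
		return err;
	}

	scsi_scan_host(shost);		/* mid-layer scans targets and LUNs */
	return 0;
}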
2947 | |||
3086 | static int sdebug_driver_probe(struct device * dev) | 2948 | static int sdebug_driver_probe(struct device * dev) |
3087 | { | 2949 | { |
3088 | int error = 0; | 2950 | int error = 0; |
@@ -3120,9 +2982,8 @@ static int sdebug_driver_probe(struct device * dev) | |||
3120 | 2982 | ||
3121 | static int sdebug_driver_remove(struct device * dev) | 2983 | static int sdebug_driver_remove(struct device * dev) |
3122 | { | 2984 | { |
3123 | struct list_head *lh, *lh_sf; | ||
3124 | struct sdebug_host_info *sdbg_host; | 2985 | struct sdebug_host_info *sdbg_host; |
3125 | struct sdebug_dev_info *sdbg_devinfo; | 2986 | struct sdebug_dev_info *sdbg_devinfo, *tmp; |
3126 | 2987 | ||
3127 | sdbg_host = to_sdebug_host(dev); | 2988 | sdbg_host = to_sdebug_host(dev); |
3128 | 2989 | ||
@@ -3134,9 +2995,8 @@ static int sdebug_driver_remove(struct device * dev) | |||
3134 | 2995 | ||
3135 | scsi_remove_host(sdbg_host->shost); | 2996 | scsi_remove_host(sdbg_host->shost); |
3136 | 2997 | ||
3137 | list_for_each_safe(lh, lh_sf, &sdbg_host->dev_info_list) { | 2998 | list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list, |
3138 | sdbg_devinfo = list_entry(lh, struct sdebug_dev_info, | 2999 | dev_list) { |
3139 | dev_list); | ||
3140 | list_del(&sdbg_devinfo->dev_list); | 3000 | list_del(&sdbg_devinfo->dev_list); |
3141 | kfree(sdbg_devinfo); | 3001 | kfree(sdbg_devinfo); |
3142 | } | 3002 | } |
@@ -3145,20 +3005,15 @@ static int sdebug_driver_remove(struct device * dev) | |||
3145 | return 0; | 3005 | return 0; |
3146 | } | 3006 | } |
3147 | 3007 | ||
3148 | static void sdebug_max_tgts_luns(void) | 3008 | static int pseudo_lld_bus_match(struct device *dev, |
3009 | struct device_driver *dev_driver) | ||
3149 | { | 3010 | { |
3150 | struct sdebug_host_info * sdbg_host; | 3011 | return 1; |
3151 | struct Scsi_Host *hpnt; | ||
3152 | |||
3153 | spin_lock(&sdebug_host_list_lock); | ||
3154 | list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { | ||
3155 | hpnt = sdbg_host->shost; | ||
3156 | if ((hpnt->this_id >= 0) && | ||
3157 | (scsi_debug_num_tgts > hpnt->this_id)) | ||
3158 | hpnt->max_id = scsi_debug_num_tgts + 1; | ||
3159 | else | ||
3160 | hpnt->max_id = scsi_debug_num_tgts; | ||
3161 | hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* scsi_debug_max_luns; */ | ||
3162 | } | ||
3163 | spin_unlock(&sdebug_host_list_lock); | ||
3164 | } | 3012 | } |
3013 | |||
3014 | static struct bus_type pseudo_lld_bus = { | ||
3015 | .name = "pseudo", | ||
3016 | .match = pseudo_lld_bus_match, | ||
3017 | .probe = sdebug_driver_probe, | ||
3018 | .remove = sdebug_driver_remove, | ||
3019 | }; | ||
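Because pseudo_lld_bus_match() always returns 1, every device placed on the "pseudo" bus binds to any driver registered on it, which is what routes the probe and remove callbacks above to each simulated adapter. A minimal sketch of wiring such a bus/driver pair together follows; the example_* names are hypothetical and error handling is trimmed.

/* Hedged sketch of registering the pseudo bus and a driver on it. */
static struct device_driver example_driver = {
	.name	= "scsi_debug",
	.bus	= &pseudo_lld_bus,
};

static int __init example_init(void)
{
	int ret;

	ret = bus_register(&pseudo_lld_bus);
	if (ret)
		return ret;

	ret = driver_register(&example_driver);	/* probe() runs for each matching device */
	if (ret)
		bus_unregister(&pseudo_lld_bus);
	return ret;
}

static void __exit example_exit(void)
{
	driver_unregister(&example_driver);
	bus_unregister(&pseudo_lld_bus);
}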
diff --git a/drivers/scsi/scsi_debug.h b/drivers/scsi/scsi_debug.h deleted file mode 100644 index 965dd5e760c1..000000000000 --- a/drivers/scsi/scsi_debug.h +++ /dev/null | |||
@@ -1,24 +0,0 @@ | |||
1 | #ifndef _SCSI_DEBUG_H | ||
2 | |||
3 | #include <linux/types.h> | ||
4 | |||
5 | static int scsi_debug_slave_alloc(struct scsi_device *); | ||
6 | static int scsi_debug_slave_configure(struct scsi_device *); | ||
7 | static void scsi_debug_slave_destroy(struct scsi_device *); | ||
8 | static int scsi_debug_queuecommand(struct scsi_cmnd *, | ||
9 | void (*done) (struct scsi_cmnd *)); | ||
10 | static int scsi_debug_ioctl(struct scsi_device *, int, void __user *); | ||
11 | static int scsi_debug_biosparam(struct scsi_device *, struct block_device *, | ||
12 | sector_t, int[]); | ||
13 | static int scsi_debug_abort(struct scsi_cmnd *); | ||
14 | static int scsi_debug_bus_reset(struct scsi_cmnd *); | ||
15 | static int scsi_debug_device_reset(struct scsi_cmnd *); | ||
16 | static int scsi_debug_host_reset(struct scsi_cmnd *); | ||
17 | static int scsi_debug_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int); | ||
18 | static const char * scsi_debug_info(struct Scsi_Host *); | ||
19 | |||
20 | #define SCSI_DEBUG_CANQUEUE 255 /* needs to be >= 1 */ | ||
21 | |||
22 | #define SCSI_DEBUG_MAX_CMD_LEN 16 | ||
23 | |||
24 | #endif | ||
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 045a0868fc7b..221f31e36d26 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -524,6 +524,41 @@ static int scsi_try_bus_reset(struct scsi_cmnd *scmd) | |||
524 | return rtn; | 524 | return rtn; |
525 | } | 525 | } |
526 | 526 | ||
527 | static void __scsi_report_device_reset(struct scsi_device *sdev, void *data) | ||
528 | { | ||
529 | sdev->was_reset = 1; | ||
530 | sdev->expecting_cc_ua = 1; | ||
531 | } | ||
532 | |||
533 | /** | ||
534 | * scsi_try_target_reset - Ask host to perform a target reset | ||
535 | * @scmd: SCSI cmd used to send a target reset | ||
536 | * | ||
537 | * Notes: | ||
538 | * There is no timeout for this operation. If this operation is | ||
539 | * unreliable for a given host, then the host itself needs to put a | ||
540 | * timer on it, and set the host back to a consistent state prior to | ||
541 | * returning. | ||
542 | */ | ||
543 | static int scsi_try_target_reset(struct scsi_cmnd *scmd) | ||
544 | { | ||
545 | unsigned long flags; | ||
546 | int rtn; | ||
547 | |||
548 | if (!scmd->device->host->hostt->eh_target_reset_handler) | ||
549 | return FAILED; | ||
550 | |||
551 | rtn = scmd->device->host->hostt->eh_target_reset_handler(scmd); | ||
552 | if (rtn == SUCCESS) { | ||
553 | spin_lock_irqsave(scmd->device->host->host_lock, flags); | ||
554 | __starget_for_each_device(scsi_target(scmd->device), NULL, | ||
555 | __scsi_report_device_reset); | ||
556 | spin_unlock_irqrestore(scmd->device->host->host_lock, flags); | ||
557 | } | ||
558 | |||
559 | return rtn; | ||
560 | } | ||
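scsi_try_target_reset() is a no-op unless the low-level driver supplies an eh_target_reset_handler in its scsi_host_template; on SUCCESS every device behind the target is flagged as reset and as expecting a unit attention. What such a handler might look like is sketched below; my_hw_reset_target() is a hypothetical hardware-specific call, not an existing API.

/* Hypothetical LLDD callback for scsi_host_template.eh_target_reset_handler. */
static int example_eh_target_reset_handler(struct scsi_cmnd *scmd)
{
	struct scsi_target *starget = scsi_target(scmd->device);

	if (my_hw_reset_target(scmd->device->host, starget->id))
		return FAILED;

	return SUCCESS;	/* the mid-layer then marks all LUNs of this target */
}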
561 | |||
527 | /** | 562 | /** |
528 | * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev | 563 | * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev |
529 | * @scmd: SCSI cmd used to send BDR | 564 | * @scmd: SCSI cmd used to send BDR |
@@ -542,11 +577,8 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd) | |||
542 | return FAILED; | 577 | return FAILED; |
543 | 578 | ||
544 | rtn = scmd->device->host->hostt->eh_device_reset_handler(scmd); | 579 | rtn = scmd->device->host->hostt->eh_device_reset_handler(scmd); |
545 | if (rtn == SUCCESS) { | 580 | if (rtn == SUCCESS) |
546 | scmd->device->was_reset = 1; | 581 | __scsi_report_device_reset(scmd->device, NULL); |
547 | scmd->device->expecting_cc_ua = 1; | ||
548 | } | ||
549 | |||
550 | return rtn; | 582 | return rtn; |
551 | } | 583 | } |
552 | 584 | ||
@@ -584,8 +616,9 @@ static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd) | |||
584 | { | 616 | { |
585 | if (__scsi_try_to_abort_cmd(scmd) != SUCCESS) | 617 | if (__scsi_try_to_abort_cmd(scmd) != SUCCESS) |
586 | if (scsi_try_bus_device_reset(scmd) != SUCCESS) | 618 | if (scsi_try_bus_device_reset(scmd) != SUCCESS) |
587 | if (scsi_try_bus_reset(scmd) != SUCCESS) | 619 | if (scsi_try_target_reset(scmd) != SUCCESS) |
588 | scsi_try_host_reset(scmd); | 620 | if (scsi_try_bus_reset(scmd) != SUCCESS) |
621 | scsi_try_host_reset(scmd); | ||
589 | } | 622 | } |
590 | 623 | ||
591 | /** | 624 | /** |
@@ -1060,6 +1093,56 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, | |||
1060 | } | 1093 | } |
1061 | 1094 | ||
1062 | /** | 1095 | /** |
1096 | * scsi_eh_target_reset - send target reset if needed | ||
1097 | * @shost: scsi host being recovered. | ||
1098 | * @work_q: &list_head for pending commands. | ||
1099 | * @done_q: &list_head for processed commands. | ||
1100 | * | ||
1101 | * Notes: | ||
1102 | * Try a target reset. | ||
1103 | */ | ||
1104 | static int scsi_eh_target_reset(struct Scsi_Host *shost, | ||
1105 | struct list_head *work_q, | ||
1106 | struct list_head *done_q) | ||
1107 | { | ||
1108 | struct scsi_cmnd *scmd, *tgtr_scmd, *next; | ||
1109 | unsigned int id; | ||
1110 | int rtn; | ||
1111 | |||
1112 | for (id = 0; id <= shost->max_id; id++) { | ||
1113 | tgtr_scmd = NULL; | ||
1114 | list_for_each_entry(scmd, work_q, eh_entry) { | ||
1115 | if (id == scmd_id(scmd)) { | ||
1116 | tgtr_scmd = scmd; | ||
1117 | break; | ||
1118 | } | ||
1119 | } | ||
1120 | if (!tgtr_scmd) | ||
1121 | continue; | ||
1122 | |||
1123 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset " | ||
1124 | "to target %d\n", | ||
1125 | current->comm, id)); | ||
1126 | rtn = scsi_try_target_reset(tgtr_scmd); | ||
1127 | if (rtn == SUCCESS) { | ||
1128 | list_for_each_entry_safe(scmd, next, work_q, eh_entry) { | ||
1129 | if (id == scmd_id(scmd)) | ||
1130 | if (!scsi_device_online(scmd->device) || | ||
1131 | !scsi_eh_tur(tgtr_scmd)) | ||
1132 | scsi_eh_finish_cmd(scmd, | ||
1133 | done_q); | ||
1134 | } | ||
1135 | } else | ||
1136 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Target reset" | ||
1137 | " failed target: " | ||
1138 | "%d\n", | ||
1139 | current->comm, id)); | ||
1140 | } | ||
1141 | |||
1142 | return list_empty(work_q); | ||
1143 | } | ||
1144 | |||
1145 | /** | ||
1063 | * scsi_eh_bus_reset - send a bus reset | 1146 | * scsi_eh_bus_reset - send a bus reset |
1064 | * @shost: &scsi host being recovered. | 1147 | * @shost: &scsi host being recovered. |
1065 | * @work_q: &list_head for pending commands. | 1148 | * @work_q: &list_head for pending commands. |
@@ -1447,9 +1530,11 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost, | |||
1447 | { | 1530 | { |
1448 | if (!scsi_eh_stu(shost, work_q, done_q)) | 1531 | if (!scsi_eh_stu(shost, work_q, done_q)) |
1449 | if (!scsi_eh_bus_device_reset(shost, work_q, done_q)) | 1532 | if (!scsi_eh_bus_device_reset(shost, work_q, done_q)) |
1450 | if (!scsi_eh_bus_reset(shost, work_q, done_q)) | 1533 | if (!scsi_eh_target_reset(shost, work_q, done_q)) |
1451 | if (!scsi_eh_host_reset(work_q, done_q)) | 1534 | if (!scsi_eh_bus_reset(shost, work_q, done_q)) |
1452 | scsi_eh_offline_sdevs(work_q, done_q); | 1535 | if (!scsi_eh_host_reset(work_q, done_q)) |
1536 | scsi_eh_offline_sdevs(work_q, | ||
1537 | done_q); | ||
1453 | } | 1538 | } |
1454 | EXPORT_SYMBOL_GPL(scsi_eh_ready_devs); | 1539 | EXPORT_SYMBOL_GPL(scsi_eh_ready_devs); |
1455 | 1540 | ||
@@ -1619,10 +1704,8 @@ void scsi_report_bus_reset(struct Scsi_Host *shost, int channel) | |||
1619 | struct scsi_device *sdev; | 1704 | struct scsi_device *sdev; |
1620 | 1705 | ||
1621 | __shost_for_each_device(sdev, shost) { | 1706 | __shost_for_each_device(sdev, shost) { |
1622 | if (channel == sdev_channel(sdev)) { | 1707 | if (channel == sdev_channel(sdev)) |
1623 | sdev->was_reset = 1; | 1708 | __scsi_report_device_reset(sdev, NULL); |
1624 | sdev->expecting_cc_ua = 1; | ||
1625 | } | ||
1626 | } | 1709 | } |
1627 | } | 1710 | } |
1628 | EXPORT_SYMBOL(scsi_report_bus_reset); | 1711 | EXPORT_SYMBOL(scsi_report_bus_reset); |
@@ -1655,10 +1738,8 @@ void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target) | |||
1655 | 1738 | ||
1656 | __shost_for_each_device(sdev, shost) { | 1739 | __shost_for_each_device(sdev, shost) { |
1657 | if (channel == sdev_channel(sdev) && | 1740 | if (channel == sdev_channel(sdev) && |
1658 | target == sdev_id(sdev)) { | 1741 | target == sdev_id(sdev)) |
1659 | sdev->was_reset = 1; | 1742 | __scsi_report_device_reset(sdev, NULL); |
1660 | sdev->expecting_cc_ua = 1; | ||
1661 | } | ||
1662 | } | 1743 | } |
1663 | } | 1744 | } |
1664 | EXPORT_SYMBOL(scsi_report_device_reset); | 1745 | EXPORT_SYMBOL(scsi_report_device_reset); |
@@ -1714,6 +1795,11 @@ scsi_reset_provider(struct scsi_device *dev, int flag) | |||
1714 | if (rtn == SUCCESS) | 1795 | if (rtn == SUCCESS) |
1715 | break; | 1796 | break; |
1716 | /* FALLTHROUGH */ | 1797 | /* FALLTHROUGH */ |
1798 | case SCSI_TRY_RESET_TARGET: | ||
1799 | rtn = scsi_try_target_reset(scmd); | ||
1800 | if (rtn == SUCCESS) | ||
1801 | break; | ||
1802 | /* FALLTHROUGH */ | ||
1717 | case SCSI_TRY_RESET_BUS: | 1803 | case SCSI_TRY_RESET_BUS: |
1718 | rtn = scsi_try_bus_reset(scmd); | 1804 | rtn = scsi_try_bus_reset(scmd); |
1719 | if (rtn == SUCCESS) | 1805 | if (rtn == SUCCESS) |
@@ -1907,3 +1993,31 @@ int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len, | |||
1907 | } | 1993 | } |
1908 | } | 1994 | } |
1909 | EXPORT_SYMBOL(scsi_get_sense_info_fld); | 1995 | EXPORT_SYMBOL(scsi_get_sense_info_fld); |
1996 | |||
1997 | /** | ||
1998 | * scsi_build_sense_buffer - build sense data in a buffer | ||
1999 | * @desc: Sense format (non zero == descriptor format, | ||
2000 | * 0 == fixed format) | ||
2001 | * @buf: Where to build sense data | ||
2002 | * @key: Sense key | ||
2003 | * @asc: Additional sense code | ||
2004 | * @ascq: Additional sense code qualifier | ||
2005 | * | ||
2006 | **/ | ||
2007 | void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq) | ||
2008 | { | ||
2009 | if (desc) { | ||
2010 | buf[0] = 0x72; /* descriptor, current */ | ||
2011 | buf[1] = key; | ||
2012 | buf[2] = asc; | ||
2013 | buf[3] = ascq; | ||
2014 | buf[7] = 0; | ||
2015 | } else { | ||
2016 | buf[0] = 0x70; /* fixed, current */ | ||
2017 | buf[2] = key; | ||
2018 | buf[7] = 0xa; | ||
2019 | buf[12] = asc; | ||
2020 | buf[13] = ascq; | ||
2021 | } | ||
2022 | } | ||
2023 | EXPORT_SYMBOL(scsi_build_sense_buffer); | ||
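The helper fills in only the response code, sense key, ASC/ASCQ and the additional-length byte; callers still set cmd->result themselves. The stex conversion later in this patch is the intended usage pattern; condensed, it looks like this (values taken from that call site):

/* Sketch of the expected usage, mirroring the stex change below. */
static void example_report_invalid_field(struct scsi_cmnd *cmd)
{
	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

	/* fixed format (desc == 0): ILLEGAL REQUEST, "Invalid field in CDB" */
	scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24, 0x0);
}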
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index f40898dc2d14..67f412bb4974 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -784,7 +784,7 @@ EXPORT_SYMBOL(scsi_release_buffers); | |||
784 | * in req->data_len and req->next_rq->data_len. The upper-layer driver can | 784 | * in req->data_len and req->next_rq->data_len. The upper-layer driver can |
785 | * decide what to do with this information. | 785 | * decide what to do with this information. |
786 | */ | 786 | */ |
787 | void scsi_end_bidi_request(struct scsi_cmnd *cmd) | 787 | static void scsi_end_bidi_request(struct scsi_cmnd *cmd) |
788 | { | 788 | { |
789 | struct request *req = cmd->request; | 789 | struct request *req = cmd->request; |
790 | unsigned int dlen = req->data_len; | 790 | unsigned int dlen = req->data_len; |
@@ -839,7 +839,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
839 | int this_count = scsi_bufflen(cmd); | 839 | int this_count = scsi_bufflen(cmd); |
840 | struct request_queue *q = cmd->device->request_queue; | 840 | struct request_queue *q = cmd->device->request_queue; |
841 | struct request *req = cmd->request; | 841 | struct request *req = cmd->request; |
842 | int clear_errors = 1; | 842 | int error = 0; |
843 | struct scsi_sense_hdr sshdr; | 843 | struct scsi_sense_hdr sshdr; |
844 | int sense_valid = 0; | 844 | int sense_valid = 0; |
845 | int sense_deferred = 0; | 845 | int sense_deferred = 0; |
@@ -853,7 +853,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
853 | if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ | 853 | if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ |
854 | req->errors = result; | 854 | req->errors = result; |
855 | if (result) { | 855 | if (result) { |
856 | clear_errors = 0; | ||
857 | if (sense_valid && req->sense) { | 856 | if (sense_valid && req->sense) { |
858 | /* | 857 | /* |
859 | * SG_IO wants current and deferred errors | 858 | * SG_IO wants current and deferred errors |
@@ -865,6 +864,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
865 | memcpy(req->sense, cmd->sense_buffer, len); | 864 | memcpy(req->sense, cmd->sense_buffer, len); |
866 | req->sense_len = len; | 865 | req->sense_len = len; |
867 | } | 866 | } |
867 | if (!sense_deferred) | ||
868 | error = -EIO; | ||
868 | } | 869 | } |
869 | if (scsi_bidi_cmnd(cmd)) { | 870 | if (scsi_bidi_cmnd(cmd)) { |
870 | /* will also release_buffers */ | 871 | /* will also release_buffers */ |
@@ -885,14 +886,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
885 | "%d bytes done.\n", | 886 | "%d bytes done.\n", |
886 | req->nr_sectors, good_bytes)); | 887 | req->nr_sectors, good_bytes)); |
887 | 888 | ||
888 | if (clear_errors) | ||
889 | req->errors = 0; | ||
890 | |||
891 | /* A number of bytes were successfully read. If there | 889 | /* A number of bytes were successfully read. If there |
892 | * are leftovers and there is some kind of error | 890 | * are leftovers and there is some kind of error |
893 | * (result != 0), retry the rest. | 891 | * (result != 0), retry the rest. |
894 | */ | 892 | */ |
895 | if (scsi_end_request(cmd, 0, good_bytes, result == 0) == NULL) | 893 | if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) |
896 | return; | 894 | return; |
897 | 895 | ||
898 | /* good_bytes = 0, or (inclusive) there were leftovers and | 896 | /* good_bytes = 0, or (inclusive) there were leftovers and |
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c index a0f308bd145b..ee8496aa0336 100644 --- a/drivers/scsi/scsi_tgt_lib.c +++ b/drivers/scsi/scsi_tgt_lib.c | |||
@@ -621,9 +621,7 @@ static int __init scsi_tgt_init(void) | |||
621 | { | 621 | { |
622 | int err; | 622 | int err; |
623 | 623 | ||
624 | scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd", | 624 | scsi_tgt_cmd_cache = KMEM_CACHE(scsi_tgt_cmd, 0); |
625 | sizeof(struct scsi_tgt_cmd), | ||
626 | 0, 0, NULL); | ||
627 | if (!scsi_tgt_cmd_cache) | 625 | if (!scsi_tgt_cmd_cache) |
628 | return -ENOMEM; | 626 | return -ENOMEM; |
629 | 627 | ||
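KMEM_CACHE() is shorthand for kmem_cache_create() with the cache named after the structure and aligned to it, so the conversion above is behaviour-preserving apart from the added alignment. Approximately (shown for illustration, simplified from the slab header):

/* Rough expansion of KMEM_CACHE(scsi_tgt_cmd, 0) -- illustration only. */
scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd",
				       sizeof(struct scsi_tgt_cmd),
				       __alignof__(struct scsi_tgt_cmd),
				       0, NULL);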
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c index 26cfc56c7091..03e359670506 100644 --- a/drivers/scsi/sgiwd93.c +++ b/drivers/scsi/sgiwd93.c | |||
@@ -263,10 +263,11 @@ static int __init sgiwd93_probe(struct platform_device *pdev) | |||
263 | regs.SASR = wdregs + 3; | 263 | regs.SASR = wdregs + 3; |
264 | regs.SCMD = wdregs + 7; | 264 | regs.SCMD = wdregs + 7; |
265 | 265 | ||
266 | wd33c93_init(host, regs, dma_setup, dma_stop, WD33C93_FS_MHZ(20)); | 266 | hdata->wh.no_sync = 0; |
267 | hdata->wh.fast = 1; | ||
268 | hdata->wh.dma_mode = CTRL_BURST; | ||
267 | 269 | ||
268 | if (hdata->wh.no_sync == 0xff) | 270 | wd33c93_init(host, regs, dma_setup, dma_stop, WD33C93_FS_MHZ(20)); |
269 | hdata->wh.no_sync = 0; | ||
270 | 271 | ||
271 | err = request_irq(irq, sgiwd93_intr, 0, "SGI WD93", host); | 272 | err = request_irq(irq, sgiwd93_intr, 0, "SGI WD93", host); |
272 | if (err) { | 273 | if (err) { |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 0a52d9d2da2c..df83bea2c620 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -17,7 +17,7 @@ | |||
17 | Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support | 17 | Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support |
18 | */ | 18 | */ |
19 | 19 | ||
20 | static const char *verstr = "20080221"; | 20 | static const char *verstr = "20080224"; |
21 | 21 | ||
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | 23 | ||
@@ -183,6 +183,7 @@ static int modes_defined; | |||
183 | 183 | ||
184 | static struct st_buffer *new_tape_buffer(int, int, int); | 184 | static struct st_buffer *new_tape_buffer(int, int, int); |
185 | static int enlarge_buffer(struct st_buffer *, int, int); | 185 | static int enlarge_buffer(struct st_buffer *, int, int); |
186 | static void clear_buffer(struct st_buffer *); | ||
186 | static void normalize_buffer(struct st_buffer *); | 187 | static void normalize_buffer(struct st_buffer *); |
187 | static int append_to_buffer(const char __user *, struct st_buffer *, int); | 188 | static int append_to_buffer(const char __user *, struct st_buffer *, int); |
188 | static int from_buffer(struct st_buffer *, char __user *, int); | 189 | static int from_buffer(struct st_buffer *, char __user *, int); |
@@ -442,6 +443,7 @@ static void st_sleep_done(void *data, char *sense, int result, int resid) | |||
442 | 443 | ||
443 | memcpy(SRpnt->sense, sense, SCSI_SENSE_BUFFERSIZE); | 444 | memcpy(SRpnt->sense, sense, SCSI_SENSE_BUFFERSIZE); |
444 | (STp->buffer)->cmdstat.midlevel_result = SRpnt->result = result; | 445 | (STp->buffer)->cmdstat.midlevel_result = SRpnt->result = result; |
446 | (STp->buffer)->cmdstat.residual = resid; | ||
445 | DEB( STp->write_pending = 0; ) | 447 | DEB( STp->write_pending = 0; ) |
446 | 448 | ||
447 | if (SRpnt->waiting) | 449 | if (SRpnt->waiting) |
@@ -626,7 +628,7 @@ static int cross_eof(struct scsi_tape * STp, int forward) | |||
626 | 628 | ||
627 | 629 | ||
628 | /* Flush the write buffer (never need to write if variable blocksize). */ | 630 | /* Flush the write buffer (never need to write if variable blocksize). */ |
629 | static int flush_write_buffer(struct scsi_tape * STp) | 631 | static int st_flush_write_buffer(struct scsi_tape * STp) |
630 | { | 632 | { |
631 | int offset, transfer, blks; | 633 | int offset, transfer, blks; |
632 | int result; | 634 | int result; |
@@ -717,7 +719,7 @@ static int flush_buffer(struct scsi_tape *STp, int seek_next) | |||
717 | return 0; | 719 | return 0; |
718 | STps = &(STp->ps[STp->partition]); | 720 | STps = &(STp->ps[STp->partition]); |
719 | if (STps->rw == ST_WRITING) /* Writing */ | 721 | if (STps->rw == ST_WRITING) /* Writing */ |
720 | return flush_write_buffer(STp); | 722 | return st_flush_write_buffer(STp); |
721 | 723 | ||
722 | if (STp->block_size == 0) | 724 | if (STp->block_size == 0) |
723 | return 0; | 725 | return 0; |
@@ -1159,6 +1161,7 @@ static int st_open(struct inode *inode, struct file *filp) | |||
1159 | goto err_out; | 1161 | goto err_out; |
1160 | } | 1162 | } |
1161 | 1163 | ||
1164 | (STp->buffer)->cleared = 0; | ||
1162 | (STp->buffer)->writing = 0; | 1165 | (STp->buffer)->writing = 0; |
1163 | (STp->buffer)->syscall_result = 0; | 1166 | (STp->buffer)->syscall_result = 0; |
1164 | 1167 | ||
@@ -1211,7 +1214,7 @@ static int st_flush(struct file *filp, fl_owner_t id) | |||
1211 | return 0; | 1214 | return 0; |
1212 | 1215 | ||
1213 | if (STps->rw == ST_WRITING && !STp->pos_unknown) { | 1216 | if (STps->rw == ST_WRITING && !STp->pos_unknown) { |
1214 | result = flush_write_buffer(STp); | 1217 | result = st_flush_write_buffer(STp); |
1215 | if (result != 0 && result != (-ENOSPC)) | 1218 | if (result != 0 && result != (-ENOSPC)) |
1216 | goto out; | 1219 | goto out; |
1217 | } | 1220 | } |
@@ -1432,8 +1435,14 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf, | |||
1432 | if (STp->block_size) | 1435 | if (STp->block_size) |
1433 | bufsize = STp->block_size > st_fixed_buffer_size ? | 1436 | bufsize = STp->block_size > st_fixed_buffer_size ? |
1434 | STp->block_size : st_fixed_buffer_size; | 1437 | STp->block_size : st_fixed_buffer_size; |
1435 | else | 1438 | else { |
1436 | bufsize = count; | 1439 | bufsize = count; |
1440 | /* Make sure that data from a previous user is not leaked even if | ||
1441 | the HBA does not return a correct residual */ | ||
1442 | if (is_read && STp->sili && !STbp->cleared) | ||
1443 | clear_buffer(STbp); | ||
1444 | } | ||
1445 | |||
1437 | if (bufsize > STbp->buffer_size && | 1446 | if (bufsize > STbp->buffer_size && |
1438 | !enlarge_buffer(STbp, bufsize, STp->restr_dma)) { | 1447 | !enlarge_buffer(STbp, bufsize, STp->restr_dma)) { |
1439 | printk(KERN_WARNING "%s: Can't allocate %d byte tape buffer.\n", | 1448 | printk(KERN_WARNING "%s: Can't allocate %d byte tape buffer.\n", |
@@ -1783,6 +1792,8 @@ static long read_tape(struct scsi_tape *STp, long count, | |||
1783 | memset(cmd, 0, MAX_COMMAND_SIZE); | 1792 | memset(cmd, 0, MAX_COMMAND_SIZE); |
1784 | cmd[0] = READ_6; | 1793 | cmd[0] = READ_6; |
1785 | cmd[1] = (STp->block_size != 0); | 1794 | cmd[1] = (STp->block_size != 0); |
1795 | if (!cmd[1] && STp->sili) | ||
1796 | cmd[1] |= 2; | ||
1786 | cmd[2] = blks >> 16; | 1797 | cmd[2] = blks >> 16; |
1787 | cmd[3] = blks >> 8; | 1798 | cmd[3] = blks >> 8; |
1788 | cmd[4] = blks; | 1799 | cmd[4] = blks; |
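In variable block mode the FIXED bit (cmd[1] bit 0) stays clear, and with MT_ST_SILI set the SILI bit (cmd[1] bit 1) suppresses the incorrect-length indication for short reads; the real length is then taken from the transfer residual below. For reference, the READ(6) CDB built above then looks like this (a sketch, not driver code):

/* READ(6) CDB for a variable-block read with SILI enabled.
 * 'length' corresponds to blks, the requested byte count in variable mode. */
static void example_build_read6_sili(unsigned char cmd[6], unsigned int length)
{
	cmd[0] = READ_6;	/* 0x08 */
	cmd[1] = 0x02;		/* FIXED = 0 (variable blocks), SILI = 1 */
	cmd[2] = length >> 16;
	cmd[3] = length >> 8;
	cmd[4] = length;
	cmd[5] = 0;		/* control byte */
}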
@@ -1911,8 +1922,11 @@ static long read_tape(struct scsi_tape *STp, long count, | |||
1911 | 1922 | ||
1912 | } | 1923 | } |
1913 | /* End of error handling */ | 1924 | /* End of error handling */ |
1914 | else /* Read successful */ | 1925 | else { /* Read successful */ |
1915 | STbp->buffer_bytes = bytes; | 1926 | STbp->buffer_bytes = bytes; |
1927 | if (STp->sili) /* In fixed block mode residual is always zero here */ | ||
1928 | STbp->buffer_bytes -= STp->buffer->cmdstat.residual; | ||
1929 | } | ||
1916 | 1930 | ||
1917 | if (STps->drv_block >= 0) { | 1931 | if (STps->drv_block >= 0) { |
1918 | if (STp->block_size == 0) | 1932 | if (STp->block_size == 0) |
@@ -2090,7 +2104,8 @@ static void st_log_options(struct scsi_tape * STp, struct st_modedef * STm, char | |||
2090 | name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions, | 2104 | name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions, |
2091 | STp->scsi2_logical); | 2105 | STp->scsi2_logical); |
2092 | printk(KERN_INFO | 2106 | printk(KERN_INFO |
2093 | "%s: sysv: %d nowait: %d\n", name, STm->sysv, STp->immediate); | 2107 | "%s: sysv: %d nowait: %d sili: %d\n", name, STm->sysv, STp->immediate, |
2108 | STp->sili); | ||
2094 | printk(KERN_INFO "%s: debugging: %d\n", | 2109 | printk(KERN_INFO "%s: debugging: %d\n", |
2095 | name, debugging); | 2110 | name, debugging); |
2096 | } | 2111 | } |
@@ -2133,6 +2148,7 @@ static int st_set_options(struct scsi_tape *STp, long options) | |||
2133 | STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0; | 2148 | STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0; |
2134 | STp->immediate = (options & MT_ST_NOWAIT) != 0; | 2149 | STp->immediate = (options & MT_ST_NOWAIT) != 0; |
2135 | STm->sysv = (options & MT_ST_SYSV) != 0; | 2150 | STm->sysv = (options & MT_ST_SYSV) != 0; |
2151 | STp->sili = (options & MT_ST_SILI) != 0; | ||
2136 | DEB( debugging = (options & MT_ST_DEBUGGING) != 0; | 2152 | DEB( debugging = (options & MT_ST_DEBUGGING) != 0; |
2137 | st_log_options(STp, STm, name); ) | 2153 | st_log_options(STp, STm, name); ) |
2138 | } else if (code == MT_ST_SETBOOLEANS || code == MT_ST_CLEARBOOLEANS) { | 2154 | } else if (code == MT_ST_SETBOOLEANS || code == MT_ST_CLEARBOOLEANS) { |
@@ -2164,6 +2180,8 @@ static int st_set_options(struct scsi_tape *STp, long options) | |||
2164 | STp->immediate = value; | 2180 | STp->immediate = value; |
2165 | if ((options & MT_ST_SYSV) != 0) | 2181 | if ((options & MT_ST_SYSV) != 0) |
2166 | STm->sysv = value; | 2182 | STm->sysv = value; |
2183 | if ((options & MT_ST_SILI) != 0) | ||
2184 | STp->sili = value; | ||
2167 | DEB( | 2185 | DEB( |
2168 | if ((options & MT_ST_DEBUGGING) != 0) | 2186 | if ((options & MT_ST_DEBUGGING) != 0) |
2169 | debugging = value; | 2187 | debugging = value; |
@@ -3655,6 +3673,8 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm | |||
3655 | STbuffer->frp_segs += 1; | 3673 | STbuffer->frp_segs += 1; |
3656 | got += b_size; | 3674 | got += b_size; |
3657 | STbuffer->buffer_size = got; | 3675 | STbuffer->buffer_size = got; |
3676 | if (STbuffer->cleared) | ||
3677 | memset(page_address(STbuffer->frp[segs].page), 0, b_size); | ||
3658 | segs++; | 3678 | segs++; |
3659 | } | 3679 | } |
3660 | STbuffer->b_data = page_address(STbuffer->frp[0].page); | 3680 | STbuffer->b_data = page_address(STbuffer->frp[0].page); |
@@ -3663,6 +3683,17 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm | |||
3663 | } | 3683 | } |
3664 | 3684 | ||
3665 | 3685 | ||
3686 | /* Make sure that no data from a previous user is left in the internal buffer */ | ||
3687 | static void clear_buffer(struct st_buffer * st_bp) | ||
3688 | { | ||
3689 | int i; | ||
3690 | |||
3691 | for (i=0; i < st_bp->frp_segs; i++) | ||
3692 | memset(page_address(st_bp->frp[i].page), 0, st_bp->frp[i].length); | ||
3693 | st_bp->cleared = 1; | ||
3694 | } | ||
3695 | |||
3696 | |||
3666 | /* Release the extra buffer */ | 3697 | /* Release the extra buffer */ |
3667 | static void normalize_buffer(struct st_buffer * STbuffer) | 3698 | static void normalize_buffer(struct st_buffer * STbuffer) |
3668 | { | 3699 | { |
@@ -3987,6 +4018,7 @@ static int st_probe(struct device *dev) | |||
3987 | tpnt->two_fm = ST_TWO_FM; | 4018 | tpnt->two_fm = ST_TWO_FM; |
3988 | tpnt->fast_mteom = ST_FAST_MTEOM; | 4019 | tpnt->fast_mteom = ST_FAST_MTEOM; |
3989 | tpnt->scsi2_logical = ST_SCSI2LOGICAL; | 4020 | tpnt->scsi2_logical = ST_SCSI2LOGICAL; |
4021 | tpnt->sili = ST_SILI; | ||
3990 | tpnt->immediate = ST_NOWAIT; | 4022 | tpnt->immediate = ST_NOWAIT; |
3991 | tpnt->default_drvbuffer = 0xff; /* No forced buffering */ | 4023 | tpnt->default_drvbuffer = 0xff; /* No forced buffering */ |
3992 | tpnt->partition = 0; | 4024 | tpnt->partition = 0; |
@@ -4333,6 +4365,46 @@ static ssize_t st_defcompression_show(struct class_device *class_dev, char *buf) | |||
4333 | 4365 | ||
4334 | CLASS_DEVICE_ATTR(default_compression, S_IRUGO, st_defcompression_show, NULL); | 4366 | CLASS_DEVICE_ATTR(default_compression, S_IRUGO, st_defcompression_show, NULL); |
4335 | 4367 | ||
4368 | static ssize_t st_options_show(struct class_device *class_dev, char *buf) | ||
4369 | { | ||
4370 | struct st_modedef *STm = (struct st_modedef *)class_get_devdata(class_dev); | ||
4371 | struct scsi_tape *STp; | ||
4372 | int i, j, options; | ||
4373 | ssize_t l = 0; | ||
4374 | |||
4375 | for (i=0; i < st_dev_max; i++) { | ||
4376 | for (j=0; j < ST_NBR_MODES; j++) | ||
4377 | if (&scsi_tapes[i]->modes[j] == STm) | ||
4378 | break; | ||
4379 | if (j < ST_NBR_MODES) | ||
4380 | break; | ||
4381 | } | ||
4382 | if (i == st_dev_max) | ||
4383 | return 0; /* should never happen */ | ||
4384 | |||
4385 | STp = scsi_tapes[i]; | ||
4386 | |||
4387 | options = STm->do_buffer_writes ? MT_ST_BUFFER_WRITES : 0; | ||
4388 | options |= STm->do_async_writes ? MT_ST_ASYNC_WRITES : 0; | ||
4389 | options |= STm->do_read_ahead ? MT_ST_READ_AHEAD : 0; | ||
4390 | DEB( options |= debugging ? MT_ST_DEBUGGING : 0 ); | ||
4391 | options |= STp->two_fm ? MT_ST_TWO_FM : 0; | ||
4392 | options |= STp->fast_mteom ? MT_ST_FAST_MTEOM : 0; | ||
4393 | options |= STm->defaults_for_writes ? MT_ST_DEF_WRITES : 0; | ||
4394 | options |= STp->can_bsr ? MT_ST_CAN_BSR : 0; | ||
4395 | options |= STp->omit_blklims ? MT_ST_NO_BLKLIMS : 0; | ||
4396 | options |= STp->can_partitions ? MT_ST_CAN_PARTITIONS : 0; | ||
4397 | options |= STp->scsi2_logical ? MT_ST_SCSI2LOGICAL : 0; | ||
4398 | options |= STm->sysv ? MT_ST_SYSV : 0; | ||
4399 | options |= STp->immediate ? MT_ST_NOWAIT : 0; | ||
4400 | options |= STp->sili ? MT_ST_SILI : 0; | ||
4401 | |||
4402 | l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options); | ||
4403 | return l; | ||
4404 | } | ||
4405 | |||
4406 | CLASS_DEVICE_ATTR(options, S_IRUGO, st_options_show, NULL); | ||
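The attribute reports the same bit mask that MTSETDRVBUFFER accepts, so user space can test an individual option by AND-ing with the MT_ST_* constants. A small illustrative reader follows; the sysfs path shown is the typical location for the first tape's mode 0 device and may differ on other systems.

/* Illustrative user-space check of the 'options' attribute added above.
 * The path and header availability are assumptions, not guarantees. */
#include <stdio.h>
#include <linux/mtio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/scsi_tape/st0/options", "r");
	unsigned int options = 0;

	if (!f)
		return 1;
	if (fscanf(f, "%i", &options) != 1)	/* the file prints "0x%08x\n" */
		options = 0;
	fclose(f);

	printf("SILI is %s\n", (options & MT_ST_SILI) ? "enabled" : "disabled");
	return 0;
}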
4407 | |||
4336 | static int do_create_class_files(struct scsi_tape *STp, int dev_num, int mode) | 4408 | static int do_create_class_files(struct scsi_tape *STp, int dev_num, int mode) |
4337 | { | 4409 | { |
4338 | int i, rew, error; | 4410 | int i, rew, error; |
@@ -4370,6 +4442,9 @@ static int do_create_class_files(struct scsi_tape *STp, int dev_num, int mode) | |||
4370 | error = class_device_create_file(st_class_member, | 4442 | error = class_device_create_file(st_class_member, |
4371 | &class_device_attr_default_compression); | 4443 | &class_device_attr_default_compression); |
4372 | if (error) goto out; | 4444 | if (error) goto out; |
4445 | error = class_device_create_file(st_class_member, | ||
4446 | &class_device_attr_options); | ||
4447 | if (error) goto out; | ||
4373 | 4448 | ||
4374 | if (mode == 0 && rew == 0) { | 4449 | if (mode == 0 && rew == 0) { |
4375 | error = sysfs_create_link(&STp->device->sdev_gendev.kobj, | 4450 | error = sysfs_create_link(&STp->device->sdev_gendev.kobj, |
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h index 5931726fcf93..b92712f95931 100644 --- a/drivers/scsi/st.h +++ b/drivers/scsi/st.h | |||
@@ -12,6 +12,7 @@ struct st_cmdstatus { | |||
12 | int midlevel_result; | 12 | int midlevel_result; |
13 | struct scsi_sense_hdr sense_hdr; | 13 | struct scsi_sense_hdr sense_hdr; |
14 | int have_sense; | 14 | int have_sense; |
15 | int residual; | ||
15 | u64 uremainder64; | 16 | u64 uremainder64; |
16 | u8 flags; | 17 | u8 flags; |
17 | u8 remainder_valid; | 18 | u8 remainder_valid; |
@@ -34,6 +35,7 @@ struct st_request { | |||
34 | struct st_buffer { | 35 | struct st_buffer { |
35 | unsigned char dma; /* DMA-able buffer */ | 36 | unsigned char dma; /* DMA-able buffer */ |
36 | unsigned char do_dio; /* direct i/o set up? */ | 37 | unsigned char do_dio; /* direct i/o set up? */ |
38 | unsigned char cleared; /* internal buffer cleared after open? */ | ||
37 | int buffer_size; | 39 | int buffer_size; |
38 | int buffer_blocks; | 40 | int buffer_blocks; |
39 | int buffer_bytes; | 41 | int buffer_bytes; |
@@ -122,6 +124,7 @@ struct scsi_tape { | |||
122 | unsigned char try_dio_now; /* try direct i/o before next close? */ | 124 | unsigned char try_dio_now; /* try direct i/o before next close? */ |
123 | unsigned char c_algo; /* compression algorithm */ | 125 | unsigned char c_algo; /* compression algorithm */ |
124 | unsigned char pos_unknown; /* after reset position unknown */ | 126 | unsigned char pos_unknown; /* after reset position unknown */ |
127 | unsigned char sili; /* use SILI when reading in variable b mode */ | ||
125 | int tape_type; | 128 | int tape_type; |
126 | int long_timeout; /* timeout for commands known to take long time */ | 129 | int long_timeout; /* timeout for commands known to take long time */ |
127 | 130 | ||
diff --git a/drivers/scsi/st_options.h b/drivers/scsi/st_options.h index b6b5c9c37677..d2f947935554 100644 --- a/drivers/scsi/st_options.h +++ b/drivers/scsi/st_options.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | Copyright 1995-2003 Kai Makisara. | 4 | Copyright 1995-2003 Kai Makisara. |
5 | 5 | ||
6 | Last modified: Mon Apr 7 22:49:18 2003 by makisara | 6 | Last modified: Thu Feb 21 21:47:07 2008 by kai.makisara |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #ifndef _ST_OPTIONS_H | 9 | #ifndef _ST_OPTIONS_H |
@@ -94,6 +94,10 @@ | |||
94 | The default is BSD semantics. */ | 94 | The default is BSD semantics. */ |
95 | #define ST_SYSV 0 | 95 | #define ST_SYSV 0 |
96 | 96 | ||
97 | /* If ST_SILI is non-zero, the SILI bit is set when reading in variable block | ||
98 | mode and the block size is determined using the residual returned by the HBA. */ | ||
99 | #define ST_SILI 0 | ||
100 | |||
97 | /* Time to wait for the drive to become ready if blocking open */ | 101 | /* Time to wait for the drive to become ready if blocking open */ |
98 | #define ST_BLOCK_SECONDS 120 | 102 | #define ST_BLOCK_SECONDS 120 |
99 | 103 | ||
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 654430edf74d..f308a0308829 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <scsi/scsi_host.h> | 33 | #include <scsi/scsi_host.h> |
34 | #include <scsi/scsi_tcq.h> | 34 | #include <scsi/scsi_tcq.h> |
35 | #include <scsi/scsi_dbg.h> | 35 | #include <scsi/scsi_dbg.h> |
36 | #include <scsi/scsi_eh.h> | ||
36 | 37 | ||
37 | #define DRV_NAME "stex" | 38 | #define DRV_NAME "stex" |
38 | #define ST_DRIVER_VERSION "3.6.0000.1" | 39 | #define ST_DRIVER_VERSION "3.6.0000.1" |
@@ -362,22 +363,14 @@ static struct status_msg *stex_get_status(struct st_hba *hba) | |||
362 | return status; | 363 | return status; |
363 | } | 364 | } |
364 | 365 | ||
365 | static void stex_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) | ||
366 | { | ||
367 | cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; | ||
368 | |||
369 | cmd->sense_buffer[0] = 0x70; /* fixed format, current */ | ||
370 | cmd->sense_buffer[2] = sk; | ||
371 | cmd->sense_buffer[7] = 18 - 8; /* additional sense length */ | ||
372 | cmd->sense_buffer[12] = asc; | ||
373 | cmd->sense_buffer[13] = ascq; | ||
374 | } | ||
375 | |||
376 | static void stex_invalid_field(struct scsi_cmnd *cmd, | 366 | static void stex_invalid_field(struct scsi_cmnd *cmd, |
377 | void (*done)(struct scsi_cmnd *)) | 367 | void (*done)(struct scsi_cmnd *)) |
378 | { | 368 | { |
369 | cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; | ||
370 | |||
379 | /* "Invalid field in cbd" */ | 371 | /* "Invalid field in cbd" */ |
380 | stex_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0); | 372 | scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24, |
373 | 0x0); | ||
381 | done(cmd); | 374 | done(cmd); |
382 | } | 375 | } |
383 | 376 | ||
@@ -426,49 +419,13 @@ static int stex_map_sg(struct st_hba *hba, | |||
426 | return 0; | 419 | return 0; |
427 | } | 420 | } |
428 | 421 | ||
429 | static void stex_internal_copy(struct scsi_cmnd *cmd, | ||
430 | const void *src, size_t *count, int sg_count, int direction) | ||
431 | { | ||
432 | size_t lcount; | ||
433 | size_t len; | ||
434 | void *s, *d, *base = NULL; | ||
435 | size_t offset; | ||
436 | |||
437 | if (*count > scsi_bufflen(cmd)) | ||
438 | *count = scsi_bufflen(cmd); | ||
439 | lcount = *count; | ||
440 | while (lcount) { | ||
441 | len = lcount; | ||
442 | s = (void *)src; | ||
443 | |||
444 | offset = *count - lcount; | ||
445 | s += offset; | ||
446 | base = scsi_kmap_atomic_sg(scsi_sglist(cmd), | ||
447 | sg_count, &offset, &len); | ||
448 | if (!base) { | ||
449 | *count -= lcount; | ||
450 | return; | ||
451 | } | ||
452 | d = base + offset; | ||
453 | |||
454 | if (direction == ST_TO_CMD) | ||
455 | memcpy(d, s, len); | ||
456 | else | ||
457 | memcpy(s, d, len); | ||
458 | |||
459 | lcount -= len; | ||
460 | scsi_kunmap_atomic_sg(base); | ||
461 | } | ||
462 | } | ||
463 | |||
464 | static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) | 422 | static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) |
465 | { | 423 | { |
466 | struct st_frame *p; | 424 | struct st_frame *p; |
467 | size_t count = sizeof(struct st_frame); | 425 | size_t count = sizeof(struct st_frame); |
468 | 426 | ||
469 | p = hba->copy_buffer; | 427 | p = hba->copy_buffer; |
470 | stex_internal_copy(ccb->cmd, p, &count, scsi_sg_count(ccb->cmd), | 428 | count = scsi_sg_copy_to_buffer(ccb->cmd, p, count); |
471 | ST_FROM_CMD); | ||
472 | memset(p->base, 0, sizeof(u32)*6); | 429 | memset(p->base, 0, sizeof(u32)*6); |
473 | *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); | 430 | *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); |
474 | p->rom_addr = 0; | 431 | p->rom_addr = 0; |
@@ -486,8 +443,7 @@ static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) | |||
486 | p->subid = | 443 | p->subid = |
487 | hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; | 444 | hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; |
488 | 445 | ||
489 | stex_internal_copy(ccb->cmd, p, &count, scsi_sg_count(ccb->cmd), | 446 | count = scsi_sg_copy_from_buffer(ccb->cmd, p, count); |
490 | ST_TO_CMD); | ||
491 | } | 447 | } |
492 | 448 | ||
493 | static void | 449 | static void |
@@ -554,10 +510,8 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | |||
554 | unsigned char page; | 510 | unsigned char page; |
555 | page = cmd->cmnd[2] & 0x3f; | 511 | page = cmd->cmnd[2] & 0x3f; |
556 | if (page == 0x8 || page == 0x3f) { | 512 | if (page == 0x8 || page == 0x3f) { |
557 | size_t cp_len = sizeof(ms10_caching_page); | 513 | scsi_sg_copy_from_buffer(cmd, ms10_caching_page, |
558 | stex_internal_copy(cmd, ms10_caching_page, | 514 | sizeof(ms10_caching_page)); |
559 | &cp_len, scsi_sg_count(cmd), | ||
560 | ST_TO_CMD); | ||
561 | cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; | 515 | cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; |
562 | done(cmd); | 516 | done(cmd); |
563 | } else | 517 | } else |
@@ -586,10 +540,8 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | |||
586 | if (id != host->max_id - 1) | 540 | if (id != host->max_id - 1) |
587 | break; | 541 | break; |
588 | if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) { | 542 | if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) { |
589 | size_t cp_len = sizeof(console_inq_page); | 543 | scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page, |
590 | stex_internal_copy(cmd, console_inq_page, | 544 | sizeof(console_inq_page)); |
591 | &cp_len, scsi_sg_count(cmd), | ||
592 | ST_TO_CMD); | ||
593 | cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; | 545 | cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; |
594 | done(cmd); | 546 | done(cmd); |
595 | } else | 547 | } else |
@@ -606,8 +558,7 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | |||
606 | ver.signature[0] = PASSTHRU_SIGNATURE; | 558 | ver.signature[0] = PASSTHRU_SIGNATURE; |
607 | ver.console_id = host->max_id - 1; | 559 | ver.console_id = host->max_id - 1; |
608 | ver.host_no = hba->host->host_no; | 560 | ver.host_no = hba->host->host_no; |
609 | stex_internal_copy(cmd, &ver, &cp_len, | 561 | cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len); |
610 | scsi_sg_count(cmd), ST_TO_CMD); | ||
611 | cmd->result = sizeof(ver) == cp_len ? | 562 | cmd->result = sizeof(ver) == cp_len ? |
612 | DID_OK << 16 | COMMAND_COMPLETE << 8 : | 563 | DID_OK << 16 | COMMAND_COMPLETE << 8 : |
613 | DID_ERROR << 16 | COMMAND_COMPLETE << 8; | 564 | DID_ERROR << 16 | COMMAND_COMPLETE << 8; |
@@ -700,15 +651,12 @@ static void stex_copy_data(struct st_ccb *ccb, | |||
700 | 651 | ||
701 | if (ccb->cmd == NULL) | 652 | if (ccb->cmd == NULL) |
702 | return; | 653 | return; |
703 | stex_internal_copy(ccb->cmd, | 654 | count = scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, count); |
704 | resp->variable, &count, scsi_sg_count(ccb->cmd), ST_TO_CMD); | ||
705 | } | 655 | } |
706 | 656 | ||
707 | static void stex_ys_commands(struct st_hba *hba, | 657 | static void stex_ys_commands(struct st_hba *hba, |
708 | struct st_ccb *ccb, struct status_msg *resp) | 658 | struct st_ccb *ccb, struct status_msg *resp) |
709 | { | 659 | { |
710 | size_t count; | ||
711 | |||
712 | if (ccb->cmd->cmnd[0] == MGT_CMD && | 660 | if (ccb->cmd->cmnd[0] == MGT_CMD && |
713 | resp->scsi_status != SAM_STAT_CHECK_CONDITION) { | 661 | resp->scsi_status != SAM_STAT_CHECK_CONDITION) { |
714 | scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) - | 662 | scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) - |
@@ -724,9 +672,8 @@ static void stex_ys_commands(struct st_hba *hba, | |||
724 | resp->scsi_status == SAM_STAT_GOOD) { | 672 | resp->scsi_status == SAM_STAT_GOOD) { |
725 | ST_INQ *inq_data; | 673 | ST_INQ *inq_data; |
726 | 674 | ||
727 | count = STEX_EXTRA_SIZE; | 675 | scsi_sg_copy_to_buffer(ccb->cmd, hba->copy_buffer, |
728 | stex_internal_copy(ccb->cmd, hba->copy_buffer, | 676 | STEX_EXTRA_SIZE); |
729 | &count, scsi_sg_count(ccb->cmd), ST_FROM_CMD); | ||
730 | inq_data = (ST_INQ *)hba->copy_buffer; | 677 | inq_data = (ST_INQ *)hba->copy_buffer; |
731 | if (inq_data->DeviceTypeQualifier != 0) | 678 | if (inq_data->DeviceTypeQualifier != 0) |
732 | ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT; | 679 | ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT; |
diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c index 02d9727f017a..aaa4fd0dd1b9 100644 --- a/drivers/scsi/sun3_scsi_vme.c +++ b/drivers/scsi/sun3_scsi_vme.c | |||
@@ -582,3 +582,4 @@ static struct scsi_host_template driver_template = { | |||
582 | 582 | ||
583 | #include "scsi_module.c" | 583 | #include "scsi_module.c" |
584 | 584 | ||
585 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c index f286c37da7e0..5fda881c2470 100644 --- a/drivers/scsi/wd33c93.c +++ b/drivers/scsi/wd33c93.c | |||
@@ -1973,10 +1973,7 @@ wd33c93_init(struct Scsi_Host *instance, const wd33c93_regs regs, | |||
1973 | hostdata->incoming_ptr = 0; | 1973 | hostdata->incoming_ptr = 0; |
1974 | hostdata->outgoing_len = 0; | 1974 | hostdata->outgoing_len = 0; |
1975 | hostdata->default_sx_per = DEFAULT_SX_PER; | 1975 | hostdata->default_sx_per = DEFAULT_SX_PER; |
1976 | hostdata->no_sync = 0xff; /* sync defaults to off */ | ||
1977 | hostdata->no_dma = 0; /* default is DMA enabled */ | 1976 | hostdata->no_dma = 0; /* default is DMA enabled */ |
1978 | hostdata->fast = 0; /* default is Fast SCSI transfers disabled */ | ||
1979 | hostdata->dma_mode = CTRL_DMA; /* default is Single Byte DMA */ | ||
1980 | 1977 | ||
1981 | #ifdef PROC_INTERFACE | 1978 | #ifdef PROC_INTERFACE |
1982 | hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS | | 1979 | hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS | |
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h index f5582332af04..574b201b99d8 100644 --- a/include/linux/attribute_container.h +++ b/include/linux/attribute_container.h | |||
@@ -37,7 +37,7 @@ attribute_container_set_no_classdevs(struct attribute_container *atc) | |||
37 | } | 37 | } |
38 | 38 | ||
39 | int attribute_container_register(struct attribute_container *cont); | 39 | int attribute_container_register(struct attribute_container *cont); |
40 | int attribute_container_unregister(struct attribute_container *cont); | 40 | int __must_check attribute_container_unregister(struct attribute_container *cont); |
41 | void attribute_container_create_device(struct device *dev, | 41 | void attribute_container_create_device(struct device *dev, |
42 | int (*fn)(struct attribute_container *, | 42 | int (*fn)(struct attribute_container *, |
43 | struct device *, | 43 | struct device *, |
diff --git a/include/linux/mtio.h b/include/linux/mtio.h index 6f8d2d45a8fb..ef01d6aa5934 100644 --- a/include/linux/mtio.h +++ b/include/linux/mtio.h | |||
@@ -192,6 +192,7 @@ struct mtpos { | |||
192 | #define MT_ST_SCSI2LOGICAL 0x800 | 192 | #define MT_ST_SCSI2LOGICAL 0x800 |
193 | #define MT_ST_SYSV 0x1000 | 193 | #define MT_ST_SYSV 0x1000 |
194 | #define MT_ST_NOWAIT 0x2000 | 194 | #define MT_ST_NOWAIT 0x2000 |
195 | #define MT_ST_SILI 0x4000 | ||
195 | 196 | ||
196 | /* The mode parameters to be controlled. Parameter chosen with bits 20-28 */ | 197 | /* The mode parameters to be controlled. Parameter chosen with bits 20-28 */ |
197 | #define MT_ST_CLEAR_DEFAULT 0xfffff | 198 | #define MT_ST_CLEAR_DEFAULT 0xfffff |
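With the flag defined, user space can enable SILI for the current mode through the existing MTSETDRVBUFFER operation, either as part of MT_ST_BOOLEANS or selectively with MT_ST_SETBOOLEANS. A minimal example of the latter (assumes a header that already carries MT_ST_SILI; fd is an open st device node):

/* Example: turn on SILI for the currently selected tape mode. */
#include <sys/ioctl.h>
#include <linux/mtio.h>

static int enable_sili(int fd)
{
	struct mtop op = {
		.mt_op    = MTSETDRVBUFFER,
		.mt_count = MT_ST_SETBOOLEANS | MT_ST_SILI,
	};

	return ioctl(fd, MTIOCTOP, &op);	/* 0 on success, -1 with errno on error */
}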
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index a3d567a974e8..71fc81360048 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h | |||
@@ -213,6 +213,11 @@ int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t, | |||
213 | sg_alloc_fn *); | 213 | sg_alloc_fn *); |
214 | int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); | 214 | int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); |
215 | 215 | ||
216 | size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, | ||
217 | void *buf, size_t buflen); | ||
218 | size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, | ||
219 | void *buf, size_t buflen); | ||
220 | |||
216 | /* | 221 | /* |
217 | * Maximum number of entries that will be allocated in one piece, if | 222 | * Maximum number of entries that will be allocated in one piece, if |
218 | * a list larger than this is required then chaining will be utilized. | 223 | * a list larger than this is required then chaining will be utilized. |
diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h index 1d6cc22e5f42..6696cf79c4f7 100644 --- a/include/linux/transport_class.h +++ b/include/linux/transport_class.h | |||
@@ -86,9 +86,10 @@ static inline int transport_container_register(struct transport_container *tc) | |||
86 | return attribute_container_register(&tc->ac); | 86 | return attribute_container_register(&tc->ac); |
87 | } | 87 | } |
88 | 88 | ||
89 | static inline int transport_container_unregister(struct transport_container *tc) | 89 | static inline void transport_container_unregister(struct transport_container *tc) |
90 | { | 90 | { |
91 | return attribute_container_unregister(&tc->ac); | 91 | if (unlikely(attribute_container_unregister(&tc->ac))) |
92 | BUG(); | ||
92 | } | 93 | } |
93 | 94 | ||
94 | int transport_class_register(struct transport_class *); | 95 | int transport_class_register(struct transport_class *); |
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h index 5ffec8ad6964..e0593bfae622 100644 --- a/include/scsi/iscsi_proto.h +++ b/include/scsi/iscsi_proto.h | |||
@@ -112,6 +112,7 @@ struct iscsi_ahs_hdr { | |||
112 | 112 | ||
113 | #define ISCSI_AHSTYPE_CDB 1 | 113 | #define ISCSI_AHSTYPE_CDB 1 |
114 | #define ISCSI_AHSTYPE_RLENGTH 2 | 114 | #define ISCSI_AHSTYPE_RLENGTH 2 |
115 | #define ISCSI_CDB_SIZE 16 | ||
115 | 116 | ||
116 | /* iSCSI PDU Header */ | 117 | /* iSCSI PDU Header */ |
117 | struct iscsi_cmd { | 118 | struct iscsi_cmd { |
@@ -125,7 +126,7 @@ struct iscsi_cmd { | |||
125 | __be32 data_length; | 126 | __be32 data_length; |
126 | __be32 cmdsn; | 127 | __be32 cmdsn; |
127 | __be32 exp_statsn; | 128 | __be32 exp_statsn; |
128 | uint8_t cdb[16]; /* SCSI Command Block */ | 129 | uint8_t cdb[ISCSI_CDB_SIZE]; /* SCSI Command Block */ |
129 | /* Additional Data (Command Dependent) */ | 130 | /* Additional Data (Command Dependent) */ |
130 | }; | 131 | }; |
131 | 132 | ||
@@ -154,7 +155,8 @@ struct iscsi_ecdb_ahdr { | |||
154 | __be16 ahslength; /* CDB length - 15, including reserved byte */ | 155 | __be16 ahslength; /* CDB length - 15, including reserved byte */ |
155 | uint8_t ahstype; | 156 | uint8_t ahstype; |
156 | uint8_t reserved; | 157 | uint8_t reserved; |
157 | uint8_t ecdb[260 - 16]; /* 4-byte aligned extended CDB spillover */ | 158 | /* 4-byte aligned extended CDB spillover */ |
159 | uint8_t ecdb[260 - ISCSI_CDB_SIZE]; | ||
158 | }; | 160 | }; |
159 | 161 | ||
160 | /* SCSI Response Header */ | 162 | /* SCSI Response Header */ |
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 39e1cac24bb7..98724ba65a79 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h | |||
@@ -677,4 +677,6 @@ extern void sas_ssp_task_response(struct device *dev, struct sas_task *task, | |||
677 | struct ssp_response_iu *iu); | 677 | struct ssp_response_iu *iu); |
678 | struct sas_phy *sas_find_local_phy(struct domain_device *dev); | 678 | struct sas_phy *sas_find_local_phy(struct domain_device *dev); |
679 | 679 | ||
680 | int sas_request_addr(struct Scsi_Host *shost, u8 *addr); | ||
681 | |||
680 | #endif /* _SASLIB_H_ */ | 682 | #endif /* _SASLIB_H_ */ |
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h index dd5edc915417..c583193ae929 100644 --- a/include/scsi/sas_ata.h +++ b/include/scsi/sas_ata.h | |||
@@ -47,12 +47,12 @@ static inline int dev_is_sata(struct domain_device *dev) | |||
47 | { | 47 | { |
48 | return 0; | 48 | return 0; |
49 | } | 49 | } |
50 | int sas_ata_init_host_and_port(struct domain_device *found_dev, | 50 | static inline int sas_ata_init_host_and_port(struct domain_device *found_dev, |
51 | struct scsi_target *starget) | 51 | struct scsi_target *starget) |
52 | { | 52 | { |
53 | return 0; | 53 | return 0; |
54 | } | 54 | } |
55 | void sas_ata_task_abort(struct sas_task *task) | 55 | static inline void sas_ata_task_abort(struct sas_task *task) |
56 | { | 56 | { |
57 | } | 57 | } |
58 | #endif | 58 | #endif |
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index de28aab820b0..8d20e60a94b7 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h | |||
@@ -130,6 +130,9 @@ extern void scsi_release_buffers(struct scsi_cmnd *cmd); | |||
130 | extern int scsi_dma_map(struct scsi_cmnd *cmd); | 130 | extern int scsi_dma_map(struct scsi_cmnd *cmd); |
131 | extern void scsi_dma_unmap(struct scsi_cmnd *cmd); | 131 | extern void scsi_dma_unmap(struct scsi_cmnd *cmd); |
132 | 132 | ||
133 | struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask); | ||
134 | void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd); | ||
135 | |||
133 | static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd) | 136 | static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd) |
134 | { | 137 | { |
135 | return cmd->sdb.table.nents; | 138 | return cmd->sdb.table.nents; |
@@ -175,4 +178,18 @@ static inline struct scsi_data_buffer *scsi_out(struct scsi_cmnd *cmd) | |||
175 | return &cmd->sdb; | 178 | return &cmd->sdb; |
176 | } | 179 | } |
177 | 180 | ||
181 | static inline int scsi_sg_copy_from_buffer(struct scsi_cmnd *cmd, | ||
182 | void *buf, int buflen) | ||
183 | { | ||
184 | return sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), | ||
185 | buf, buflen); | ||
186 | } | ||
187 | |||
188 | static inline int scsi_sg_copy_to_buffer(struct scsi_cmnd *cmd, | ||
189 | void *buf, int buflen) | ||
190 | { | ||
191 | return sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), | ||
192 | buf, buflen); | ||
193 | } | ||
194 | |||
178 | #endif /* _SCSI_SCSI_CMND_H */ | 195 | #endif /* _SCSI_SCSI_CMND_H */ |
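These wrappers let a driver move data between a flat buffer and a command's scatter-gather list without open-coding the page walk; the stex conversions above are the first users. A hedged sketch of the common pattern for answering a data-in command from a prepared response buffer (the example_* name and 'done' callback wiring are illustrative):

/* Sketch: copy a prepared response into the command's SG list and complete it. */
static void example_complete_data_in(struct scsi_cmnd *cmd,
				     void (*done)(struct scsi_cmnd *),
				     const void *response, int resp_len)
{
	/* returns the number of bytes actually copied, bounded by the SG list */
	int copied = scsi_sg_copy_from_buffer(cmd, (void *)response, resp_len);

	cmd->result = (copied == resp_len) ?
		DID_OK << 16 | COMMAND_COMPLETE << 8 :
		DID_ERROR << 16 | COMMAND_COMPLETE << 8;
	done(cmd);
}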
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h index 25071d5d9bf8..d3a133b4a072 100644 --- a/include/scsi/scsi_eh.h +++ b/include/scsi/scsi_eh.h | |||
@@ -57,13 +57,16 @@ extern const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len, | |||
57 | 57 | ||
58 | extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len, | 58 | extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len, |
59 | u64 * info_out); | 59 | u64 * info_out); |
60 | 60 | ||
61 | extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq); | ||
62 | |||
61 | /* | 63 | /* |
62 | * Reset request from external source | 64 | * Reset request from external source |
63 | */ | 65 | */ |
64 | #define SCSI_TRY_RESET_DEVICE 1 | 66 | #define SCSI_TRY_RESET_DEVICE 1 |
65 | #define SCSI_TRY_RESET_BUS 2 | 67 | #define SCSI_TRY_RESET_BUS 2 |
66 | #define SCSI_TRY_RESET_HOST 3 | 68 | #define SCSI_TRY_RESET_HOST 3 |
69 | #define SCSI_TRY_RESET_TARGET 4 | ||
67 | 70 | ||
68 | extern int scsi_reset_provider(struct scsi_device *, int); | 71 | extern int scsi_reset_provider(struct scsi_device *, int); |
69 | 72 | ||
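A minimal sketch of how a driver might use the new scsi_build_sense_buffer() helper (demo_reject_command is an illustrative name; the helper itself is the only symbol taken from the patch): fail a command with fixed-format (desc == 0) sense data of ILLEGAL REQUEST / INVALID FIELD IN CDB.

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

static void demo_reject_command(struct scsi_cmnd *cmd)
{
	scsi_build_sense_buffer(0, cmd->sense_buffer,
				ILLEGAL_REQUEST, 0x24, 0x00);
	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
}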
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 530ff4c553f8..49132862bfaa 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h | |||
@@ -172,6 +172,7 @@ struct scsi_host_template { | |||
172 | */ | 172 | */ |
173 | int (* eh_abort_handler)(struct scsi_cmnd *); | 173 | int (* eh_abort_handler)(struct scsi_cmnd *); |
174 | int (* eh_device_reset_handler)(struct scsi_cmnd *); | 174 | int (* eh_device_reset_handler)(struct scsi_cmnd *); |
175 | int (* eh_target_reset_handler)(struct scsi_cmnd *); | ||
175 | int (* eh_bus_reset_handler)(struct scsi_cmnd *); | 176 | int (* eh_bus_reset_handler)(struct scsi_cmnd *); |
176 | int (* eh_host_reset_handler)(struct scsi_cmnd *); | 177 | int (* eh_host_reset_handler)(struct scsi_cmnd *); |
177 | 178 | ||
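A hypothetical sketch of how a low-level driver could wire up the new eh_target_reset_handler hook (all demo_* names are illustrative, not from the patch): the error handler passes in a command addressed to the target to be reset, and the handler returns SUCCESS or FAILED.

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* Placeholder for a driver's hardware-specific target reset. */
static int demo_hw_reset_target(unsigned int target_id)
{
	return 0;		/* pretend the reset succeeded */
}

static int demo_eh_target_reset(struct scsi_cmnd *cmd)
{
	/* Reset the whole target that the failed command was sent to. */
	if (demo_hw_reset_target(cmd->device->id))
		return FAILED;
	return SUCCESS;
}

static struct scsi_host_template demo_sht = {
	.name			 = "demo",
	.eh_target_reset_handler = demo_eh_target_reset,	/* new hook */
};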
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index acca4901046c..b80c21100d78 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/scatterlist.h> | 10 | #include <linux/scatterlist.h> |
11 | #include <linux/highmem.h> | ||
11 | 12 | ||
12 | /** | 13 | /** |
13 | * sg_next - return the next scatterlist entry in a list | 14 | * sg_next - return the next scatterlist entry in a list |
@@ -292,3 +293,104 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) | |||
292 | return ret; | 293 | return ret; |
293 | } | 294 | } |
294 | EXPORT_SYMBOL(sg_alloc_table); | 295 | EXPORT_SYMBOL(sg_alloc_table); |
296 | |||
297 | /** | ||
298 | * sg_copy_buffer - Copy data between a linear buffer and an SG list | ||
299 | * @sgl: The SG list | ||
300 | * @nents: Number of SG entries | ||
301 | * @buf: Where to copy from | ||
302 | * @buflen: The number of bytes to copy | ||
303 | * @to_buffer: transfer direction (non-zero == from an sg list to a | ||
304 | * buffer, 0 == from a buffer to an sg list) | ||
305 | * | ||
306 | * Returns the number of copied bytes. | ||
307 | * | ||
308 | **/ | ||
309 | static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, | ||
310 | void *buf, size_t buflen, int to_buffer) | ||
311 | { | ||
312 | struct scatterlist *sg; | ||
313 | size_t buf_off = 0; | ||
314 | int i; | ||
315 | |||
316 | WARN_ON(!irqs_disabled()); | ||
317 | |||
318 | for_each_sg(sgl, sg, nents, i) { | ||
319 | struct page *page; | ||
320 | int n = 0; | ||
321 | unsigned int sg_off = sg->offset; | ||
322 | unsigned int sg_copy = sg->length; | ||
323 | |||
324 | if (sg_copy > buflen) | ||
325 | sg_copy = buflen; | ||
326 | buflen -= sg_copy; | ||
327 | |||
328 | while (sg_copy > 0) { | ||
329 | unsigned int page_copy; | ||
330 | void *p; | ||
331 | |||
332 | page_copy = PAGE_SIZE - sg_off; | ||
333 | if (page_copy > sg_copy) | ||
334 | page_copy = sg_copy; | ||
335 | |||
336 | page = nth_page(sg_page(sg), n); | ||
337 | p = kmap_atomic(page, KM_BIO_SRC_IRQ); | ||
338 | |||
339 | if (to_buffer) | ||
340 | memcpy(buf + buf_off, p + sg_off, page_copy); | ||
341 | else { | ||
342 | memcpy(p + sg_off, buf + buf_off, page_copy); | ||
343 | flush_kernel_dcache_page(page); | ||
344 | } | ||
345 | |||
346 | kunmap_atomic(p, KM_BIO_SRC_IRQ); | ||
347 | |||
348 | buf_off += page_copy; | ||
349 | sg_off += page_copy; | ||
350 | if (sg_off == PAGE_SIZE) { | ||
351 | sg_off = 0; | ||
352 | n++; | ||
353 | } | ||
354 | sg_copy -= page_copy; | ||
355 | } | ||
356 | |||
357 | if (!buflen) | ||
358 | break; | ||
359 | } | ||
360 | |||
361 | return buf_off; | ||
362 | } | ||
363 | |||
364 | /** | ||
365 | * sg_copy_from_buffer - Copy from a linear buffer to an SG list | ||
366 | * @sgl: The SG list | ||
367 | * @nents: Number of SG entries | ||
368 | * @buf: Where to copy from | ||
369 | * @buflen: The number of bytes to copy | ||
370 | * | ||
371 | * Returns the number of copied bytes. | ||
372 | * | ||
373 | **/ | ||
374 | size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, | ||
375 | void *buf, size_t buflen) | ||
376 | { | ||
377 | return sg_copy_buffer(sgl, nents, buf, buflen, 0); | ||
378 | } | ||
379 | EXPORT_SYMBOL(sg_copy_from_buffer); | ||
380 | |||
381 | /** | ||
382 | * sg_copy_to_buffer - Copy from an SG list to a linear buffer | ||
383 | * @sgl: The SG list | ||
384 | * @nents: Number of SG entries | ||
385 | * @buf: Where to copy to | ||
386 | * @buflen: The number of bytes to copy | ||
387 | * | ||
388 | * Returns the number of copied bytes. | ||
389 | * | ||
390 | **/ | ||
391 | size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, | ||
392 | void *buf, size_t buflen) | ||
393 | { | ||
394 | return sg_copy_buffer(sgl, nents, buf, buflen, 1); | ||
395 | } | ||
396 | EXPORT_SYMBOL(sg_copy_to_buffer); | ||
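An illustrative round-trip through the two exported helpers (demo_sg_copy_roundtrip is not part of the patch): build a two-entry SG list over kmalloc'ed buffers and push data through it in both directions. Interrupts are disabled around the calls because sg_copy_buffer() warns otherwise.

#include <linux/errno.h>
#include <linux/irqflags.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

static int demo_sg_copy_roundtrip(void)
{
	struct scatterlist sgl[2];
	char src[128], dst[128];
	char *a = kmalloc(64, GFP_KERNEL);
	char *b = kmalloc(64, GFP_KERNEL);
	unsigned long flags;
	size_t copied;

	if (!a || !b) {
		kfree(a);
		kfree(b);
		return -ENOMEM;
	}

	sg_init_table(sgl, 2);
	sg_set_buf(&sgl[0], a, 64);
	sg_set_buf(&sgl[1], b, 64);

	memset(src, 0xaa, sizeof(src));

	local_irq_save(flags);
	sg_copy_from_buffer(sgl, 2, src, sizeof(src));		/* flat buffer -> SG list */
	copied = sg_copy_to_buffer(sgl, 2, dst, sizeof(dst));	/* SG list -> flat buffer */
	local_irq_restore(flags);

	kfree(a);
	kfree(b);
	return copied == sizeof(dst) ? 0 : -EIO;
}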