Diffstat (limited to 'drivers')
41 files changed, 1174 insertions, 588 deletions
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 20e0b447e8e8..55ff25244af4 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -3518,7 +3518,7 @@ retry_page: | |||
3518 | } else | 3518 | } else |
3519 | mptsas_volume_delete(ioc, sas_info->fw.id); | 3519 | mptsas_volume_delete(ioc, sas_info->fw.id); |
3520 | } | 3520 | } |
3521 | mutex_lock(&ioc->sas_device_info_mutex); | 3521 | mutex_unlock(&ioc->sas_device_info_mutex); |
3522 | 3522 | ||
3523 | /* expanders */ | 3523 | /* expanders */ |
3524 | mutex_lock(&ioc->sas_topology_mutex); | 3524 | mutex_lock(&ioc->sas_topology_mutex); |
@@ -3549,7 +3549,7 @@ retry_page: | |||
3549 | goto redo_expander_scan; | 3549 | goto redo_expander_scan; |
3550 | } | 3550 | } |
3551 | } | 3551 | } |
3552 | mutex_lock(&ioc->sas_topology_mutex); | 3552 | mutex_unlock(&ioc->sas_topology_mutex); |
3553 | } | 3553 | } |
3554 | 3554 | ||
3555 | /** | 3555 | /** |
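The two mptsas.c hunks above replace what look like copy-and-pasted mutex_lock() calls at the end of their critical sections with the intended mutex_unlock(). A minimal sketch of the corrected pairing, using a hypothetical lock name rather than the driver's:

#include <linux/mutex.h>

/* Hypothetical lock, for illustration only. */
static DEFINE_MUTEX(example_info_mutex);

static void example_scan_list(void)
{
        mutex_lock(&example_info_mutex);
        /* ... walk the list protected by the mutex ... */
        mutex_unlock(&example_info_mutex);      /* unlock on exit, not a second lock */
}
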
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 44f77eb1180f..4d1515f45ba2 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -25,8 +25,6 @@ | |||
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/ethtool.h> | 26 | #include <linux/ethtool.h> |
27 | #include <linux/if_vlan.h> | 27 | #include <linux/if_vlan.h> |
28 | #include <linux/module.h> | ||
29 | |||
30 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 28 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
31 | #define BCM_VLAN 1 | 29 | #define BCM_VLAN 1 |
32 | #endif | 30 | #endif |
@@ -2521,9 +2519,9 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) | |||
2521 | struct cnic_dev *cdev; | 2519 | struct cnic_dev *cdev; |
2522 | struct cnic_local *cp; | 2520 | struct cnic_local *cp; |
2523 | struct cnic_eth_dev *ethdev = NULL; | 2521 | struct cnic_eth_dev *ethdev = NULL; |
2524 | struct cnic_eth_dev *(*probe)(void *) = NULL; | 2522 | struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; |
2525 | 2523 | ||
2526 | probe = __symbol_get("bnx2_cnic_probe"); | 2524 | probe = symbol_get(bnx2_cnic_probe); |
2527 | if (probe) { | 2525 | if (probe) { |
2528 | ethdev = (*probe)(dev); | 2526 | ethdev = (*probe)(dev); |
2529 | symbol_put_addr(probe); | 2527 | symbol_put_addr(probe); |
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 06380963a34e..d1bce27ee99e 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -296,4 +296,6 @@ extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); | |||
296 | 296 | ||
297 | extern int cnic_unregister_driver(int ulp_type); | 297 | extern int cnic_unregister_driver(int ulp_type); |
298 | 298 | ||
299 | extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev); | ||
300 | |||
299 | #endif | 301 | #endif |
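The cnic.c change above stops looking the probe routine up by string with __symbol_get("bnx2_cnic_probe") and instead uses the type-checked symbol_get(bnx2_cnic_probe), which is why the extern prototype is added to cnic_if.h. A hedged sketch of the same inter-module lookup pattern with hypothetical names (the driver itself releases the reference with symbol_put_addr() on the returned pointer):

#include <linux/module.h>
#include <linux/netdevice.h>

struct example_eth_dev;                 /* opaque here; only a pointer is needed */

/* The prototype must be visible so symbol_get() can type check the result. */
extern struct example_eth_dev *example_cnic_probe(struct net_device *dev);

static struct example_eth_dev *example_get_ethdev(struct net_device *dev)
{
        struct example_eth_dev *(*probe)(struct net_device *);
        struct example_eth_dev *ethdev = NULL;

        /* Returns NULL if the module providing the symbol is not loaded. */
        probe = symbol_get(example_cnic_probe);
        if (probe) {
                ethdev = probe(dev);
                /* Drop the module reference taken by symbol_get(). */
                symbol_put(example_cnic_probe);
        }
        return ethdev;
}
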
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 6a19ed9a1194..9c23122f755f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -258,10 +258,21 @@ config SCSI_SCAN_ASYNC | |||
258 | or async on the kernel's command line. | 258 | or async on the kernel's command line. |
259 | 259 | ||
260 | config SCSI_WAIT_SCAN | 260 | config SCSI_WAIT_SCAN |
261 | tristate | 261 | tristate # No prompt here, this is an invisible symbol. |
262 | default m | 262 | default m |
263 | depends on SCSI | 263 | depends on SCSI |
264 | depends on MODULES | 264 | depends on MODULES |
265 | # scsi_wait_scan is a loadable module which waits until all the async scans are | ||
266 | # complete. The idea is to use it in initrd/ initramfs scripts. You modprobe | ||
267 | # it after all the modprobes of the root SCSI drivers and it will wait until | ||
268 | # they have all finished scanning their buses before allowing the boot to | ||
269 | # proceed. (This method is not applicable if targets boot independently in | ||
270 | # parallel with the initiator, or with transports with non-deterministic target | ||
271 | # discovery schemes, or if a transport driver does not support scsi_wait_scan.) | ||
272 | # | ||
273 | # This symbol is not exposed as a prompt because little is to be gained by | ||
274 | # disabling it, whereas people who accidentally switch it off may wonder why | ||
275 | # their mkinitrd gets into trouble. | ||
265 | 276 | ||
266 | menu "SCSI Transports" | 277 | menu "SCSI Transports" |
267 | depends on SCSI | 278 | depends on SCSI |
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
index b62b482e55e7..1e9f7141102b 100644
--- a/drivers/scsi/bnx2i/Kconfig
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -1,6 +1,8 @@ | |||
1 | config SCSI_BNX2_ISCSI | 1 | config SCSI_BNX2_ISCSI |
2 | tristate "Broadcom NetXtreme II iSCSI support" | 2 | tristate "Broadcom NetXtreme II iSCSI support" |
3 | select SCSI_ISCSI_ATTRS | 3 | select SCSI_ISCSI_ATTRS |
4 | select NETDEVICES | ||
5 | select NETDEV_1000 | ||
4 | select CNIC | 6 | select CNIC |
5 | depends on PCI | 7 | depends on PCI |
6 | ---help--- | 8 | ---help--- |
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
index 99c912547902..344fd53b9954 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -206,6 +206,31 @@ int cxgb3i_ddp_find_page_index(unsigned long pgsz) | |||
206 | return DDP_PGIDX_MAX; | 206 | return DDP_PGIDX_MAX; |
207 | } | 207 | } |
208 | 208 | ||
209 | /** | ||
210 | * cxgb3i_ddp_adjust_page_table - adjust page table with PAGE_SIZE | ||
211 | * return the ddp page index, if no match is found return DDP_PGIDX_MAX. | ||
212 | */ | ||
213 | int cxgb3i_ddp_adjust_page_table(void) | ||
214 | { | ||
215 | int i; | ||
216 | unsigned int base_order, order; | ||
217 | |||
218 | if (PAGE_SIZE < (1UL << ddp_page_shift[0])) { | ||
219 | ddp_log_info("PAGE_SIZE 0x%lx too small, min. 0x%lx.\n", | ||
220 | PAGE_SIZE, 1UL << ddp_page_shift[0]); | ||
221 | return -EINVAL; | ||
222 | } | ||
223 | |||
224 | base_order = get_order(1UL << ddp_page_shift[0]); | ||
225 | order = get_order(1 << PAGE_SHIFT); | ||
226 | for (i = 0; i < DDP_PGIDX_MAX; i++) { | ||
227 | /* first is the kernel page size, then just doubling the size */ | ||
228 | ddp_page_order[i] = order - base_order + i; | ||
229 | ddp_page_shift[i] = PAGE_SHIFT + i; | ||
230 | } | ||
231 | return 0; | ||
232 | } | ||
233 | |||
209 | static inline void ddp_gl_unmap(struct pci_dev *pdev, | 234 | static inline void ddp_gl_unmap(struct pci_dev *pdev, |
210 | struct cxgb3i_gather_list *gl) | 235 | struct cxgb3i_gather_list *gl) |
211 | { | 236 | { |
@@ -598,30 +623,40 @@ int cxgb3i_adapter_ddp_info(struct t3cdev *tdev, | |||
598 | * release all the resource held by the ddp pagepod manager for a given | 623 | * release all the resource held by the ddp pagepod manager for a given |
599 | * adapter if needed | 624 | * adapter if needed |
600 | */ | 625 | */ |
601 | void cxgb3i_ddp_cleanup(struct t3cdev *tdev) | 626 | |
627 | static void ddp_cleanup(struct kref *kref) | ||
602 | { | 628 | { |
629 | struct cxgb3i_ddp_info *ddp = container_of(kref, | ||
630 | struct cxgb3i_ddp_info, | ||
631 | refcnt); | ||
603 | int i = 0; | 632 | int i = 0; |
633 | |||
634 | ddp_log_info("kref release ddp 0x%p, t3dev 0x%p.\n", ddp, ddp->tdev); | ||
635 | |||
636 | ddp->tdev->ulp_iscsi = NULL; | ||
637 | while (i < ddp->nppods) { | ||
638 | struct cxgb3i_gather_list *gl = ddp->gl_map[i]; | ||
639 | if (gl) { | ||
640 | int npods = (gl->nelem + PPOD_PAGES_MAX - 1) | ||
641 | >> PPOD_PAGES_SHIFT; | ||
642 | ddp_log_info("t3dev 0x%p, ddp %d + %d.\n", | ||
643 | ddp->tdev, i, npods); | ||
644 | kfree(gl); | ||
645 | ddp_free_gl_skb(ddp, i, npods); | ||
646 | i += npods; | ||
647 | } else | ||
648 | i++; | ||
649 | } | ||
650 | cxgb3i_free_big_mem(ddp); | ||
651 | } | ||
652 | |||
653 | void cxgb3i_ddp_cleanup(struct t3cdev *tdev) | ||
654 | { | ||
604 | struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi; | 655 | struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi; |
605 | 656 | ||
606 | ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp); | 657 | ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp); |
607 | 658 | if (ddp) | |
608 | if (ddp) { | 659 | kref_put(&ddp->refcnt, ddp_cleanup); |
609 | tdev->ulp_iscsi = NULL; | ||
610 | while (i < ddp->nppods) { | ||
611 | struct cxgb3i_gather_list *gl = ddp->gl_map[i]; | ||
612 | if (gl) { | ||
613 | int npods = (gl->nelem + PPOD_PAGES_MAX - 1) | ||
614 | >> PPOD_PAGES_SHIFT; | ||
615 | ddp_log_info("t3dev 0x%p, ddp %d + %d.\n", | ||
616 | tdev, i, npods); | ||
617 | kfree(gl); | ||
618 | ddp_free_gl_skb(ddp, i, npods); | ||
619 | i += npods; | ||
620 | } else | ||
621 | i++; | ||
622 | } | ||
623 | cxgb3i_free_big_mem(ddp); | ||
624 | } | ||
625 | } | 660 | } |
626 | 661 | ||
627 | /** | 662 | /** |
@@ -631,12 +666,13 @@ void cxgb3i_ddp_cleanup(struct t3cdev *tdev) | |||
631 | */ | 666 | */ |
632 | static void ddp_init(struct t3cdev *tdev) | 667 | static void ddp_init(struct t3cdev *tdev) |
633 | { | 668 | { |
634 | struct cxgb3i_ddp_info *ddp; | 669 | struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi; |
635 | struct ulp_iscsi_info uinfo; | 670 | struct ulp_iscsi_info uinfo; |
636 | unsigned int ppmax, bits; | 671 | unsigned int ppmax, bits; |
637 | int i, err; | 672 | int i, err; |
638 | 673 | ||
639 | if (tdev->ulp_iscsi) { | 674 | if (ddp) { |
675 | kref_get(&ddp->refcnt); | ||
640 | ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n", | 676 | ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n", |
641 | tdev, tdev->ulp_iscsi); | 677 | tdev, tdev->ulp_iscsi); |
642 | return; | 678 | return; |
@@ -670,6 +706,7 @@ static void ddp_init(struct t3cdev *tdev) | |||
670 | ppmax * | 706 | ppmax * |
671 | sizeof(struct cxgb3i_gather_list *)); | 707 | sizeof(struct cxgb3i_gather_list *)); |
672 | spin_lock_init(&ddp->map_lock); | 708 | spin_lock_init(&ddp->map_lock); |
709 | kref_init(&ddp->refcnt); | ||
673 | 710 | ||
674 | ddp->tdev = tdev; | 711 | ddp->tdev = tdev; |
675 | ddp->pdev = uinfo.pdev; | 712 | ddp->pdev = uinfo.pdev; |
@@ -715,6 +752,17 @@ void cxgb3i_ddp_init(struct t3cdev *tdev) | |||
715 | { | 752 | { |
716 | if (page_idx == DDP_PGIDX_MAX) { | 753 | if (page_idx == DDP_PGIDX_MAX) { |
717 | page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE); | 754 | page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE); |
755 | |||
756 | if (page_idx == DDP_PGIDX_MAX) { | ||
757 | ddp_log_info("system PAGE_SIZE %lu, update hw.\n", | ||
758 | PAGE_SIZE); | ||
759 | if (cxgb3i_ddp_adjust_page_table() < 0) { | ||
760 | ddp_log_info("PAGE_SIZE %lu, ddp disabled.\n", | ||
761 | PAGE_SIZE); | ||
762 | return; | ||
763 | } | ||
764 | page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE); | ||
765 | } | ||
718 | ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n", | 766 | ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n", |
719 | PAGE_SIZE, page_idx); | 767 | PAGE_SIZE, page_idx); |
720 | } | 768 | } |
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
index 0d296de7cf32..87dd56b422bf 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -54,6 +54,7 @@ struct cxgb3i_gather_list { | |||
54 | * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload | 54 | * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload |
55 | * | 55 | * |
56 | * @list: list head to link elements | 56 | * @list: list head to link elements |
57 | * @refcnt: ref. count | ||
57 | * @tdev: pointer to t3cdev used by cxgb3 driver | 58 | * @tdev: pointer to t3cdev used by cxgb3 driver |
58 | * @max_txsz: max tx packet size for ddp | 59 | * @max_txsz: max tx packet size for ddp |
59 | * @max_rxsz: max rx packet size for ddp | 60 | * @max_rxsz: max rx packet size for ddp |
@@ -70,6 +71,7 @@ struct cxgb3i_gather_list { | |||
70 | */ | 71 | */ |
71 | struct cxgb3i_ddp_info { | 72 | struct cxgb3i_ddp_info { |
72 | struct list_head list; | 73 | struct list_head list; |
74 | struct kref refcnt; | ||
73 | struct t3cdev *tdev; | 75 | struct t3cdev *tdev; |
74 | struct pci_dev *pdev; | 76 | struct pci_dev *pdev; |
75 | unsigned int max_txsz; | 77 | unsigned int max_txsz; |
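The cxgb3i hunks above convert the per-adapter ddp structure to reference counting: kref_init() when it is first allocated, kref_get() when ddp_init() finds it already set up, and kref_put() with a release callback in cxgb3i_ddp_cleanup(). A minimal, self-contained sketch of that kref pattern, with hypothetical names rather than the driver's:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_ddp {
        struct kref refcnt;
        /* ... gather lists, pagepod state, etc. ... */
};

/* Runs only when the last reference is dropped. */
static void example_ddp_release(struct kref *kref)
{
        struct example_ddp *ddp = container_of(kref, struct example_ddp, refcnt);

        /* Free whatever the object owns, then the object itself. */
        kfree(ddp);
}

static struct example_ddp *example_ddp_create(void)
{
        struct example_ddp *ddp = kzalloc(sizeof(*ddp), GFP_KERNEL);

        if (ddp)
                kref_init(&ddp->refcnt);        /* count starts at 1 for the creator */
        return ddp;
}

static void example_ddp_get(struct example_ddp *ddp)
{
        kref_get(&ddp->refcnt);                 /* each additional user takes a reference */
}

static void example_ddp_put(struct example_ddp *ddp)
{
        kref_put(&ddp->refcnt, example_ddp_release);
}
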
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index c15878e88157..0a5609bb5817 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -45,8 +45,6 @@ | |||
45 | 45 | ||
46 | #include "fcoe.h" | 46 | #include "fcoe.h" |
47 | 47 | ||
48 | static int debug_fcoe; | ||
49 | |||
50 | MODULE_AUTHOR("Open-FCoE.org"); | 48 | MODULE_AUTHOR("Open-FCoE.org"); |
51 | MODULE_DESCRIPTION("FCoE"); | 49 | MODULE_DESCRIPTION("FCoE"); |
52 | MODULE_LICENSE("GPL v2"); | 50 | MODULE_LICENSE("GPL v2"); |
@@ -305,23 +303,22 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) | |||
305 | #ifdef NETIF_F_FCOE_CRC | 303 | #ifdef NETIF_F_FCOE_CRC |
306 | if (netdev->features & NETIF_F_FCOE_CRC) { | 304 | if (netdev->features & NETIF_F_FCOE_CRC) { |
307 | lp->crc_offload = 1; | 305 | lp->crc_offload = 1; |
308 | printk(KERN_DEBUG "fcoe:%s supports FCCRC offload\n", | 306 | FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n"); |
309 | netdev->name); | ||
310 | } | 307 | } |
311 | #endif | 308 | #endif |
312 | #ifdef NETIF_F_FSO | 309 | #ifdef NETIF_F_FSO |
313 | if (netdev->features & NETIF_F_FSO) { | 310 | if (netdev->features & NETIF_F_FSO) { |
314 | lp->seq_offload = 1; | 311 | lp->seq_offload = 1; |
315 | lp->lso_max = netdev->gso_max_size; | 312 | lp->lso_max = netdev->gso_max_size; |
316 | printk(KERN_DEBUG "fcoe:%s supports LSO for max len 0x%x\n", | 313 | FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n", |
317 | netdev->name, lp->lso_max); | 314 | lp->lso_max); |
318 | } | 315 | } |
319 | #endif | 316 | #endif |
320 | if (netdev->fcoe_ddp_xid) { | 317 | if (netdev->fcoe_ddp_xid) { |
321 | lp->lro_enabled = 1; | 318 | lp->lro_enabled = 1; |
322 | lp->lro_xid = netdev->fcoe_ddp_xid; | 319 | lp->lro_xid = netdev->fcoe_ddp_xid; |
323 | printk(KERN_DEBUG "fcoe:%s supports LRO for max xid 0x%x\n", | 320 | FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n", |
324 | netdev->name, lp->lro_xid); | 321 | lp->lro_xid); |
325 | } | 322 | } |
326 | skb_queue_head_init(&fc->fcoe_pending_queue); | 323 | skb_queue_head_init(&fc->fcoe_pending_queue); |
327 | fc->fcoe_pending_queue_active = 0; | 324 | fc->fcoe_pending_queue_active = 0; |
@@ -407,7 +404,8 @@ static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, | |||
407 | /* add the new host to the SCSI-ml */ | 404 | /* add the new host to the SCSI-ml */ |
408 | rc = scsi_add_host(lp->host, dev); | 405 | rc = scsi_add_host(lp->host, dev); |
409 | if (rc) { | 406 | if (rc) { |
410 | FC_DBG("fcoe_shost_config:error on scsi_add_host\n"); | 407 | FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: " |
408 | "error on scsi_add_host\n"); | ||
411 | return rc; | 409 | return rc; |
412 | } | 410 | } |
413 | sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s", | 411 | sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s", |
@@ -448,8 +446,7 @@ static int fcoe_if_destroy(struct net_device *netdev) | |||
448 | 446 | ||
449 | BUG_ON(!netdev); | 447 | BUG_ON(!netdev); |
450 | 448 | ||
451 | printk(KERN_DEBUG "fcoe_if_destroy:interface on %s\n", | 449 | FCOE_NETDEV_DBG(netdev, "Destroying interface\n"); |
452 | netdev->name); | ||
453 | 450 | ||
454 | lp = fcoe_hostlist_lookup(netdev); | 451 | lp = fcoe_hostlist_lookup(netdev); |
455 | if (!lp) | 452 | if (!lp) |
@@ -560,8 +557,7 @@ static int fcoe_if_create(struct net_device *netdev) | |||
560 | 557 | ||
561 | BUG_ON(!netdev); | 558 | BUG_ON(!netdev); |
562 | 559 | ||
563 | printk(KERN_DEBUG "fcoe_if_create:interface on %s\n", | 560 | FCOE_NETDEV_DBG(netdev, "Create Interface\n"); |
564 | netdev->name); | ||
565 | 561 | ||
566 | lp = fcoe_hostlist_lookup(netdev); | 562 | lp = fcoe_hostlist_lookup(netdev); |
567 | if (lp) | 563 | if (lp) |
@@ -570,7 +566,7 @@ static int fcoe_if_create(struct net_device *netdev) | |||
570 | shost = libfc_host_alloc(&fcoe_shost_template, | 566 | shost = libfc_host_alloc(&fcoe_shost_template, |
571 | sizeof(struct fcoe_softc)); | 567 | sizeof(struct fcoe_softc)); |
572 | if (!shost) { | 568 | if (!shost) { |
573 | FC_DBG("Could not allocate host structure\n"); | 569 | FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); |
574 | return -ENOMEM; | 570 | return -ENOMEM; |
575 | } | 571 | } |
576 | lp = shost_priv(shost); | 572 | lp = shost_priv(shost); |
@@ -579,7 +575,8 @@ static int fcoe_if_create(struct net_device *netdev) | |||
579 | /* configure fc_lport, e.g., em */ | 575 | /* configure fc_lport, e.g., em */ |
580 | rc = fcoe_lport_config(lp); | 576 | rc = fcoe_lport_config(lp); |
581 | if (rc) { | 577 | if (rc) { |
582 | FC_DBG("Could not configure lport\n"); | 578 | FCOE_NETDEV_DBG(netdev, "Could not configure lport for the " |
579 | "interface\n"); | ||
583 | goto out_host_put; | 580 | goto out_host_put; |
584 | } | 581 | } |
585 | 582 | ||
@@ -593,28 +590,32 @@ static int fcoe_if_create(struct net_device *netdev) | |||
593 | /* configure lport network properties */ | 590 | /* configure lport network properties */ |
594 | rc = fcoe_netdev_config(lp, netdev); | 591 | rc = fcoe_netdev_config(lp, netdev); |
595 | if (rc) { | 592 | if (rc) { |
596 | FC_DBG("Could not configure netdev for the interface\n"); | 593 | FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the " |
594 | "interface\n"); | ||
597 | goto out_netdev_cleanup; | 595 | goto out_netdev_cleanup; |
598 | } | 596 | } |
599 | 597 | ||
600 | /* configure lport scsi host properties */ | 598 | /* configure lport scsi host properties */ |
601 | rc = fcoe_shost_config(lp, shost, &netdev->dev); | 599 | rc = fcoe_shost_config(lp, shost, &netdev->dev); |
602 | if (rc) { | 600 | if (rc) { |
603 | FC_DBG("Could not configure shost for lport\n"); | 601 | FCOE_NETDEV_DBG(netdev, "Could not configure shost for the " |
602 | "interface\n"); | ||
604 | goto out_netdev_cleanup; | 603 | goto out_netdev_cleanup; |
605 | } | 604 | } |
606 | 605 | ||
607 | /* lport exch manager allocation */ | 606 | /* lport exch manager allocation */ |
608 | rc = fcoe_em_config(lp); | 607 | rc = fcoe_em_config(lp); |
609 | if (rc) { | 608 | if (rc) { |
610 | FC_DBG("Could not configure em for lport\n"); | 609 | FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the " |
610 | "interface\n"); | ||
611 | goto out_netdev_cleanup; | 611 | goto out_netdev_cleanup; |
612 | } | 612 | } |
613 | 613 | ||
614 | /* Initialize the library */ | 614 | /* Initialize the library */ |
615 | rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ); | 615 | rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ); |
616 | if (rc) { | 616 | if (rc) { |
617 | FC_DBG("Could not configure libfc for lport!\n"); | 617 | FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the " |
618 | "interface\n"); | ||
618 | goto out_lp_destroy; | 619 | goto out_lp_destroy; |
619 | } | 620 | } |
620 | 621 | ||
@@ -653,7 +654,7 @@ static int __init fcoe_if_init(void) | |||
653 | fc_attach_transport(&fcoe_transport_function); | 654 | fc_attach_transport(&fcoe_transport_function); |
654 | 655 | ||
655 | if (!scsi_transport_fcoe_sw) { | 656 | if (!scsi_transport_fcoe_sw) { |
656 | printk(KERN_ERR "fcoe_init:fc_attach_transport() failed\n"); | 657 | printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n"); |
657 | return -ENODEV; | 658 | return -ENODEV; |
658 | } | 659 | } |
659 | 660 | ||
@@ -714,7 +715,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu) | |||
714 | unsigned targ_cpu = smp_processor_id(); | 715 | unsigned targ_cpu = smp_processor_id(); |
715 | #endif /* CONFIG_SMP */ | 716 | #endif /* CONFIG_SMP */ |
716 | 717 | ||
717 | printk(KERN_DEBUG "fcoe: Destroying receive thread for CPU %d\n", cpu); | 718 | FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); |
718 | 719 | ||
719 | /* Prevent any new skbs from being queued for this CPU. */ | 720 | /* Prevent any new skbs from being queued for this CPU. */ |
720 | p = &per_cpu(fcoe_percpu, cpu); | 721 | p = &per_cpu(fcoe_percpu, cpu); |
@@ -736,8 +737,8 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu) | |||
736 | p0 = &per_cpu(fcoe_percpu, targ_cpu); | 737 | p0 = &per_cpu(fcoe_percpu, targ_cpu); |
737 | spin_lock_bh(&p0->fcoe_rx_list.lock); | 738 | spin_lock_bh(&p0->fcoe_rx_list.lock); |
738 | if (p0->thread) { | 739 | if (p0->thread) { |
739 | FC_DBG("Moving frames from CPU %d to CPU %d\n", | 740 | FCOE_DBG("Moving frames from CPU %d to CPU %d\n", |
740 | cpu, targ_cpu); | 741 | cpu, targ_cpu); |
741 | 742 | ||
742 | while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) | 743 | while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) |
743 | __skb_queue_tail(&p0->fcoe_rx_list, skb); | 744 | __skb_queue_tail(&p0->fcoe_rx_list, skb); |
@@ -803,12 +804,12 @@ static int fcoe_cpu_callback(struct notifier_block *nfb, | |||
803 | switch (action) { | 804 | switch (action) { |
804 | case CPU_ONLINE: | 805 | case CPU_ONLINE: |
805 | case CPU_ONLINE_FROZEN: | 806 | case CPU_ONLINE_FROZEN: |
806 | FC_DBG("CPU %x online: Create Rx thread\n", cpu); | 807 | FCOE_DBG("CPU %x online: Create Rx thread\n", cpu); |
807 | fcoe_percpu_thread_create(cpu); | 808 | fcoe_percpu_thread_create(cpu); |
808 | break; | 809 | break; |
809 | case CPU_DEAD: | 810 | case CPU_DEAD: |
810 | case CPU_DEAD_FROZEN: | 811 | case CPU_DEAD_FROZEN: |
811 | FC_DBG("CPU %x offline: Remove Rx thread\n", cpu); | 812 | FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu); |
812 | fcoe_percpu_thread_destroy(cpu); | 813 | fcoe_percpu_thread_destroy(cpu); |
813 | break; | 814 | break; |
814 | default: | 815 | default: |
@@ -846,24 +847,21 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, | |||
846 | fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type); | 847 | fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type); |
847 | lp = fc->ctlr.lp; | 848 | lp = fc->ctlr.lp; |
848 | if (unlikely(lp == NULL)) { | 849 | if (unlikely(lp == NULL)) { |
849 | FC_DBG("cannot find hba structure"); | 850 | FCOE_NETDEV_DBG(dev, "Cannot find hba structure"); |
850 | goto err2; | 851 | goto err2; |
851 | } | 852 | } |
852 | if (!lp->link_up) | 853 | if (!lp->link_up) |
853 | goto err2; | 854 | goto err2; |
854 | 855 | ||
855 | if (unlikely(debug_fcoe)) { | 856 | FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p " |
856 | FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p " | 857 | "data:%p tail:%p end:%p sum:%d dev:%s", |
857 | "end:%p sum:%d dev:%s", skb->len, skb->data_len, | 858 | skb->len, skb->data_len, skb->head, skb->data, |
858 | skb->head, skb->data, skb_tail_pointer(skb), | 859 | skb_tail_pointer(skb), skb_end_pointer(skb), |
859 | skb_end_pointer(skb), skb->csum, | 860 | skb->csum, skb->dev ? skb->dev->name : "<NULL>"); |
860 | skb->dev ? skb->dev->name : "<NULL>"); | ||
861 | |||
862 | } | ||
863 | 861 | ||
864 | /* check for FCOE packet type */ | 862 | /* check for FCOE packet type */ |
865 | if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { | 863 | if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { |
866 | FC_DBG("wrong FC type frame"); | 864 | FCOE_NETDEV_DBG(dev, "Wrong FC type frame"); |
867 | goto err; | 865 | goto err; |
868 | } | 866 | } |
869 | 867 | ||
@@ -901,8 +899,9 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, | |||
901 | * the first CPU now. For non-SMP systems this | 899 | * the first CPU now. For non-SMP systems this |
902 | * will check the same CPU twice. | 900 | * will check the same CPU twice. |
903 | */ | 901 | */ |
904 | FC_DBG("CPU is online, but no receive thread ready " | 902 | FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread " |
905 | "for incoming skb- using first online CPU.\n"); | 903 | "ready for incoming skb- using first online " |
904 | "CPU.\n"); | ||
906 | 905 | ||
907 | spin_unlock_bh(&fps->fcoe_rx_list.lock); | 906 | spin_unlock_bh(&fps->fcoe_rx_list.lock); |
908 | cpu = first_cpu(cpu_online_map); | 907 | cpu = first_cpu(cpu_online_map); |
@@ -1201,19 +1200,17 @@ int fcoe_percpu_receive_thread(void *arg) | |||
1201 | fr = fcoe_dev_from_skb(skb); | 1200 | fr = fcoe_dev_from_skb(skb); |
1202 | lp = fr->fr_dev; | 1201 | lp = fr->fr_dev; |
1203 | if (unlikely(lp == NULL)) { | 1202 | if (unlikely(lp == NULL)) { |
1204 | FC_DBG("invalid HBA Structure"); | 1203 | FCOE_NETDEV_DBG(skb->dev, "Invalid HBA Structure"); |
1205 | kfree_skb(skb); | 1204 | kfree_skb(skb); |
1206 | continue; | 1205 | continue; |
1207 | } | 1206 | } |
1208 | 1207 | ||
1209 | if (unlikely(debug_fcoe)) { | 1208 | FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d " |
1210 | FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p " | 1209 | "head:%p data:%p tail:%p end:%p sum:%d dev:%s", |
1211 | "tail:%p end:%p sum:%d dev:%s", | 1210 | skb->len, skb->data_len, |
1212 | skb->len, skb->data_len, | 1211 | skb->head, skb->data, skb_tail_pointer(skb), |
1213 | skb->head, skb->data, skb_tail_pointer(skb), | 1212 | skb_end_pointer(skb), skb->csum, |
1214 | skb_end_pointer(skb), skb->csum, | 1213 | skb->dev ? skb->dev->name : "<NULL>"); |
1215 | skb->dev ? skb->dev->name : "<NULL>"); | ||
1216 | } | ||
1217 | 1214 | ||
1218 | /* | 1215 | /* |
1219 | * Save source MAC address before discarding header. | 1216 | * Save source MAC address before discarding header. |
@@ -1233,7 +1230,7 @@ int fcoe_percpu_receive_thread(void *arg) | |||
1233 | stats = fc_lport_get_stats(lp); | 1230 | stats = fc_lport_get_stats(lp); |
1234 | if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { | 1231 | if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { |
1235 | if (stats->ErrorFrames < 5) | 1232 | if (stats->ErrorFrames < 5) |
1236 | printk(KERN_WARNING "FCoE version " | 1233 | printk(KERN_WARNING "fcoe: FCoE version " |
1237 | "mismatch: The frame has " | 1234 | "mismatch: The frame has " |
1238 | "version %x, but the " | 1235 | "version %x, but the " |
1239 | "initiator supports version " | 1236 | "initiator supports version " |
@@ -1286,7 +1283,7 @@ int fcoe_percpu_receive_thread(void *arg) | |||
1286 | if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { | 1283 | if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { |
1287 | if (le32_to_cpu(fr_crc(fp)) != | 1284 | if (le32_to_cpu(fr_crc(fp)) != |
1288 | ~crc32(~0, skb->data, fr_len)) { | 1285 | ~crc32(~0, skb->data, fr_len)) { |
1289 | if (debug_fcoe || stats->InvalidCRCCount < 5) | 1286 | if (stats->InvalidCRCCount < 5) |
1290 | printk(KERN_WARNING "fcoe: dropping " | 1287 | printk(KERN_WARNING "fcoe: dropping " |
1291 | "frame with CRC error\n"); | 1288 | "frame with CRC error\n"); |
1292 | stats->InvalidCRCCount++; | 1289 | stats->InvalidCRCCount++; |
@@ -1432,7 +1429,8 @@ static int fcoe_device_notification(struct notifier_block *notifier, | |||
1432 | case NETDEV_REGISTER: | 1429 | case NETDEV_REGISTER: |
1433 | break; | 1430 | break; |
1434 | default: | 1431 | default: |
1435 | FC_DBG("Unknown event %ld from netdev netlink\n", event); | 1432 | FCOE_NETDEV_DBG(real_dev, "Unknown event %ld " |
1433 | "from netdev netlink\n", event); | ||
1436 | } | 1434 | } |
1437 | if (link_possible && !fcoe_link_ok(lp)) | 1435 | if (link_possible && !fcoe_link_ok(lp)) |
1438 | fcoe_ctlr_link_up(&fc->ctlr); | 1436 | fcoe_ctlr_link_up(&fc->ctlr); |
@@ -1505,8 +1503,8 @@ static int fcoe_ethdrv_get(const struct net_device *netdev) | |||
1505 | 1503 | ||
1506 | owner = fcoe_netdev_to_module_owner(netdev); | 1504 | owner = fcoe_netdev_to_module_owner(netdev); |
1507 | if (owner) { | 1505 | if (owner) { |
1508 | printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n", | 1506 | FCOE_NETDEV_DBG(netdev, "Hold driver module %s\n", |
1509 | module_name(owner), netdev->name); | 1507 | module_name(owner)); |
1510 | return try_module_get(owner); | 1508 | return try_module_get(owner); |
1511 | } | 1509 | } |
1512 | return -ENODEV; | 1510 | return -ENODEV; |
@@ -1527,8 +1525,8 @@ static int fcoe_ethdrv_put(const struct net_device *netdev) | |||
1527 | 1525 | ||
1528 | owner = fcoe_netdev_to_module_owner(netdev); | 1526 | owner = fcoe_netdev_to_module_owner(netdev); |
1529 | if (owner) { | 1527 | if (owner) { |
1530 | printk(KERN_DEBUG "fcoe:release driver module %s for %s\n", | 1528 | FCOE_NETDEV_DBG(netdev, "Release driver module %s\n", |
1531 | module_name(owner), netdev->name); | 1529 | module_name(owner)); |
1532 | module_put(owner); | 1530 | module_put(owner); |
1533 | return 0; | 1531 | return 0; |
1534 | } | 1532 | } |
@@ -1559,7 +1557,7 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) | |||
1559 | } | 1557 | } |
1560 | rc = fcoe_if_destroy(netdev); | 1558 | rc = fcoe_if_destroy(netdev); |
1561 | if (rc) { | 1559 | if (rc) { |
1562 | printk(KERN_ERR "fcoe: fcoe_if_destroy(%s) failed\n", | 1560 | printk(KERN_ERR "fcoe: Failed to destroy interface (%s)\n", |
1563 | netdev->name); | 1561 | netdev->name); |
1564 | rc = -EIO; | 1562 | rc = -EIO; |
1565 | goto out_putdev; | 1563 | goto out_putdev; |
@@ -1598,7 +1596,7 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) | |||
1598 | 1596 | ||
1599 | rc = fcoe_if_create(netdev); | 1597 | rc = fcoe_if_create(netdev); |
1600 | if (rc) { | 1598 | if (rc) { |
1601 | printk(KERN_ERR "fcoe: fcoe_if_create(%s) failed\n", | 1599 | printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", |
1602 | netdev->name); | 1600 | netdev->name); |
1603 | fcoe_ethdrv_put(netdev); | 1601 | fcoe_ethdrv_put(netdev); |
1604 | rc = -EIO; | 1602 | rc = -EIO; |
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index a1eb8c1988b0..0d724fa0898f 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -40,6 +40,30 @@ | |||
40 | #define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */ | 40 | #define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */ |
41 | #define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */ | 41 | #define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */ |
42 | 42 | ||
43 | unsigned int fcoe_debug_logging; | ||
44 | module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); | ||
45 | MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); | ||
46 | |||
47 | #define FCOE_LOGGING 0x01 /* General logging, not categorized */ | ||
48 | #define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */ | ||
49 | |||
50 | #define FCOE_CHECK_LOGGING(LEVEL, CMD) \ | ||
51 | do { \ | ||
52 | if (unlikely(fcoe_debug_logging & LEVEL)) \ | ||
53 | do { \ | ||
54 | CMD; \ | ||
55 | } while (0); \ | ||
56 | } while (0); | ||
57 | |||
58 | #define FCOE_DBG(fmt, args...) \ | ||
59 | FCOE_CHECK_LOGGING(FCOE_LOGGING, \ | ||
60 | printk(KERN_INFO "fcoe: " fmt, ##args);) | ||
61 | |||
62 | #define FCOE_NETDEV_DBG(netdev, fmt, args...) \ | ||
63 | FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \ | ||
64 | printk(KERN_INFO "fcoe: %s" fmt, \ | ||
65 | netdev->name, ##args);) | ||
66 | |||
43 | /* | 67 | /* |
44 | * this percpu struct for fcoe | 68 | * this percpu struct for fcoe |
45 | */ | 69 | */ |
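The fcoe.h hunk above replaces the old static debug_fcoe flag with a debug_logging bitmask module parameter (readable and writable through sysfs, per the S_IRUGO|S_IWUSR flags) and routes printk() through FCOE_DBG()/FCOE_NETDEV_DBG(). A short usage sketch, assuming fcoe.h and <linux/netdevice.h> are included; the function itself is hypothetical:

static void example_check_link(struct net_device *netdev)
{
        /* Printed only when bit 0x01 (FCOE_LOGGING) is set in the
         * debug_logging module parameter. */
        FCOE_DBG("checking link state\n");

        /* Printed only when bit 0x02 (FCOE_NETDEV_LOGGING) is set; the
         * macro prefixes the message with the netdev's name. */
        FCOE_NETDEV_DBG(netdev, "carrier %s\n",
                        netif_carrier_ok(netdev) ? "up" : "down");
}
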
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 2f5bc7fd3fa9..f544340d318b 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -56,15 +56,28 @@ static void fcoe_ctlr_recv_work(struct work_struct *); | |||
56 | 56 | ||
57 | static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; | 57 | static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; |
58 | 58 | ||
59 | static u32 fcoe_ctlr_debug; /* 1 for basic, 2 for noisy debug */ | 59 | unsigned int libfcoe_debug_logging; |
60 | module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); | ||
61 | MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); | ||
60 | 62 | ||
61 | #define FIP_DBG_LVL(level, fmt, args...) \ | 63 | #define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ |
64 | #define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */ | ||
65 | |||
66 | #define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ | ||
67 | do { \ | ||
68 | if (unlikely(libfcoe_debug_logging & LEVEL)) \ | ||
62 | do { \ | 69 | do { \ |
63 | if (fcoe_ctlr_debug >= (level)) \ | 70 | CMD; \ |
64 | FC_DBG(fmt, ##args); \ | 71 | } while (0); \ |
65 | } while (0) | 72 | } while (0); |
73 | |||
74 | #define LIBFCOE_DBG(fmt, args...) \ | ||
75 | LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ | ||
76 | printk(KERN_INFO "libfcoe: " fmt, ##args);) | ||
66 | 77 | ||
67 | #define FIP_DBG(fmt, args...) FIP_DBG_LVL(1, fmt, ##args) | 78 | #define LIBFCOE_FIP_DBG(fmt, args...) \ |
79 | LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \ | ||
80 | printk(KERN_INFO "fip: " fmt, ##args);) | ||
68 | 81 | ||
69 | /* | 82 | /* |
70 | * Return non-zero if FCF fcoe_size has been validated. | 83 | * Return non-zero if FCF fcoe_size has been validated. |
@@ -243,7 +256,7 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) | |||
243 | fip->last_link = 1; | 256 | fip->last_link = 1; |
244 | fip->link = 1; | 257 | fip->link = 1; |
245 | spin_unlock_bh(&fip->lock); | 258 | spin_unlock_bh(&fip->lock); |
246 | FIP_DBG("%s", "setting AUTO mode.\n"); | 259 | LIBFCOE_FIP_DBG("%s", "setting AUTO mode.\n"); |
247 | fc_linkup(fip->lp); | 260 | fc_linkup(fip->lp); |
248 | fcoe_ctlr_solicit(fip, NULL); | 261 | fcoe_ctlr_solicit(fip, NULL); |
249 | } else | 262 | } else |
@@ -614,7 +627,8 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) | |||
614 | ((struct fip_mac_desc *)desc)->fd_mac, | 627 | ((struct fip_mac_desc *)desc)->fd_mac, |
615 | ETH_ALEN); | 628 | ETH_ALEN); |
616 | if (!is_valid_ether_addr(fcf->fcf_mac)) { | 629 | if (!is_valid_ether_addr(fcf->fcf_mac)) { |
617 | FIP_DBG("invalid MAC addr in FIP adv\n"); | 630 | LIBFCOE_FIP_DBG("Invalid MAC address " |
631 | "in FIP adv\n"); | ||
618 | return -EINVAL; | 632 | return -EINVAL; |
619 | } | 633 | } |
620 | break; | 634 | break; |
@@ -647,8 +661,8 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) | |||
647 | case FIP_DT_LOGO: | 661 | case FIP_DT_LOGO: |
648 | case FIP_DT_ELP: | 662 | case FIP_DT_ELP: |
649 | default: | 663 | default: |
650 | FIP_DBG("unexpected descriptor type %x in FIP adv\n", | 664 | LIBFCOE_FIP_DBG("unexpected descriptor type %x " |
651 | desc->fip_dtype); | 665 | "in FIP adv\n", desc->fip_dtype); |
652 | /* standard says ignore unknown descriptors >= 128 */ | 666 | /* standard says ignore unknown descriptors >= 128 */ |
653 | if (desc->fip_dtype < FIP_DT_VENDOR_BASE) | 667 | if (desc->fip_dtype < FIP_DT_VENDOR_BASE) |
654 | return -EINVAL; | 668 | return -EINVAL; |
@@ -664,8 +678,8 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) | |||
664 | return 0; | 678 | return 0; |
665 | 679 | ||
666 | len_err: | 680 | len_err: |
667 | FIP_DBG("FIP length error in descriptor type %x len %zu\n", | 681 | LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n", |
668 | desc->fip_dtype, dlen); | 682 | desc->fip_dtype, dlen); |
669 | return -EINVAL; | 683 | return -EINVAL; |
670 | } | 684 | } |
671 | 685 | ||
@@ -728,9 +742,10 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
728 | } | 742 | } |
729 | mtu_valid = fcoe_ctlr_mtu_valid(fcf); | 743 | mtu_valid = fcoe_ctlr_mtu_valid(fcf); |
730 | fcf->time = jiffies; | 744 | fcf->time = jiffies; |
731 | FIP_DBG_LVL(found ? 2 : 1, "%s FCF for fab %llx map %x val %d\n", | 745 | if (!found) { |
732 | found ? "old" : "new", | 746 | LIBFCOE_FIP_DBG("New FCF for fab %llx map %x val %d\n", |
733 | fcf->fabric_name, fcf->fc_map, mtu_valid); | 747 | fcf->fabric_name, fcf->fc_map, mtu_valid); |
748 | } | ||
734 | 749 | ||
735 | /* | 750 | /* |
736 | * If this advertisement is not solicited and our max receive size | 751 | * If this advertisement is not solicited and our max receive size |
@@ -807,7 +822,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
807 | ((struct fip_mac_desc *)desc)->fd_mac, | 822 | ((struct fip_mac_desc *)desc)->fd_mac, |
808 | ETH_ALEN); | 823 | ETH_ALEN); |
809 | if (!is_valid_ether_addr(granted_mac)) { | 824 | if (!is_valid_ether_addr(granted_mac)) { |
810 | FIP_DBG("invalid MAC addrs in FIP ELS\n"); | 825 | LIBFCOE_FIP_DBG("Invalid MAC address " |
826 | "in FIP ELS\n"); | ||
811 | goto drop; | 827 | goto drop; |
812 | } | 828 | } |
813 | break; | 829 | break; |
@@ -825,8 +841,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
825 | els_dtype = desc->fip_dtype; | 841 | els_dtype = desc->fip_dtype; |
826 | break; | 842 | break; |
827 | default: | 843 | default: |
828 | FIP_DBG("unexpected descriptor type %x " | 844 | LIBFCOE_FIP_DBG("unexpected descriptor type %x " |
829 | "in FIP adv\n", desc->fip_dtype); | 845 | "in FIP adv\n", desc->fip_dtype); |
830 | /* standard says ignore unknown descriptors >= 128 */ | 846 | /* standard says ignore unknown descriptors >= 128 */ |
831 | if (desc->fip_dtype < FIP_DT_VENDOR_BASE) | 847 | if (desc->fip_dtype < FIP_DT_VENDOR_BASE) |
832 | goto drop; | 848 | goto drop; |
@@ -867,8 +883,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
867 | return; | 883 | return; |
868 | 884 | ||
869 | len_err: | 885 | len_err: |
870 | FIP_DBG("FIP length error in descriptor type %x len %zu\n", | 886 | LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n", |
871 | desc->fip_dtype, dlen); | 887 | desc->fip_dtype, dlen); |
872 | drop: | 888 | drop: |
873 | kfree_skb(skb); | 889 | kfree_skb(skb); |
874 | } | 890 | } |
@@ -894,7 +910,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, | |||
894 | struct fc_lport *lp = fip->lp; | 910 | struct fc_lport *lp = fip->lp; |
895 | u32 desc_mask; | 911 | u32 desc_mask; |
896 | 912 | ||
897 | FIP_DBG("Clear Virtual Link received\n"); | 913 | LIBFCOE_FIP_DBG("Clear Virtual Link received\n"); |
898 | if (!fcf) | 914 | if (!fcf) |
899 | return; | 915 | return; |
900 | if (!fcf || !fc_host_port_id(lp->host)) | 916 | if (!fcf || !fc_host_port_id(lp->host)) |
@@ -952,9 +968,9 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, | |||
952 | * reset only if all required descriptors were present and valid. | 968 | * reset only if all required descriptors were present and valid. |
953 | */ | 969 | */ |
954 | if (desc_mask) { | 970 | if (desc_mask) { |
955 | FIP_DBG("missing descriptors mask %x\n", desc_mask); | 971 | LIBFCOE_FIP_DBG("missing descriptors mask %x\n", desc_mask); |
956 | } else { | 972 | } else { |
957 | FIP_DBG("performing Clear Virtual Link\n"); | 973 | LIBFCOE_FIP_DBG("performing Clear Virtual Link\n"); |
958 | fcoe_ctlr_reset(fip, FIP_ST_ENABLED); | 974 | fcoe_ctlr_reset(fip, FIP_ST_ENABLED); |
959 | } | 975 | } |
960 | } | 976 | } |
@@ -1002,10 +1018,6 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
1002 | op = ntohs(fiph->fip_op); | 1018 | op = ntohs(fiph->fip_op); |
1003 | sub = fiph->fip_subcode; | 1019 | sub = fiph->fip_subcode; |
1004 | 1020 | ||
1005 | FIP_DBG_LVL(2, "ver %x op %x/%x dl %x fl %x\n", | ||
1006 | FIP_VER_DECAPS(fiph->fip_ver), op, sub, | ||
1007 | ntohs(fiph->fip_dl_len), ntohs(fiph->fip_flags)); | ||
1008 | |||
1009 | if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) | 1021 | if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) |
1010 | goto drop; | 1022 | goto drop; |
1011 | if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) | 1023 | if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) |
@@ -1017,7 +1029,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
1017 | fip->map_dest = 0; | 1029 | fip->map_dest = 0; |
1018 | fip->state = FIP_ST_ENABLED; | 1030 | fip->state = FIP_ST_ENABLED; |
1019 | state = FIP_ST_ENABLED; | 1031 | state = FIP_ST_ENABLED; |
1020 | FIP_DBG("using FIP mode\n"); | 1032 | LIBFCOE_FIP_DBG("Using FIP mode\n"); |
1021 | } | 1033 | } |
1022 | spin_unlock_bh(&fip->lock); | 1034 | spin_unlock_bh(&fip->lock); |
1023 | if (state != FIP_ST_ENABLED) | 1035 | if (state != FIP_ST_ENABLED) |
@@ -1052,14 +1064,15 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip) | |||
1052 | struct fcoe_fcf *best = NULL; | 1064 | struct fcoe_fcf *best = NULL; |
1053 | 1065 | ||
1054 | list_for_each_entry(fcf, &fip->fcfs, list) { | 1066 | list_for_each_entry(fcf, &fip->fcfs, list) { |
1055 | FIP_DBG("consider FCF for fab %llx VFID %d map %x val %d\n", | 1067 | LIBFCOE_FIP_DBG("consider FCF for fab %llx VFID %d map %x " |
1056 | fcf->fabric_name, fcf->vfid, | 1068 | "val %d\n", fcf->fabric_name, fcf->vfid, |
1057 | fcf->fc_map, fcoe_ctlr_mtu_valid(fcf)); | 1069 | fcf->fc_map, fcoe_ctlr_mtu_valid(fcf)); |
1058 | if (!fcoe_ctlr_fcf_usable(fcf)) { | 1070 | if (!fcoe_ctlr_fcf_usable(fcf)) { |
1059 | FIP_DBG("FCF for fab %llx map %x %svalid %savailable\n", | 1071 | LIBFCOE_FIP_DBG("FCF for fab %llx map %x %svalid " |
1060 | fcf->fabric_name, fcf->fc_map, | 1072 | "%savailable\n", fcf->fabric_name, |
1061 | (fcf->flags & FIP_FL_SOL) ? "" : "in", | 1073 | fcf->fc_map, (fcf->flags & FIP_FL_SOL) |
1062 | (fcf->flags & FIP_FL_AVAIL) ? "" : "un"); | 1074 | ? "" : "in", (fcf->flags & FIP_FL_AVAIL) |
1075 | ? "" : "un"); | ||
1063 | continue; | 1076 | continue; |
1064 | } | 1077 | } |
1065 | if (!best) { | 1078 | if (!best) { |
@@ -1069,7 +1082,8 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip) | |||
1069 | if (fcf->fabric_name != best->fabric_name || | 1082 | if (fcf->fabric_name != best->fabric_name || |
1070 | fcf->vfid != best->vfid || | 1083 | fcf->vfid != best->vfid || |
1071 | fcf->fc_map != best->fc_map) { | 1084 | fcf->fc_map != best->fc_map) { |
1072 | FIP_DBG("conflicting fabric, VFID, or FC-MAP\n"); | 1085 | LIBFCOE_FIP_DBG("Conflicting fabric, VFID, " |
1086 | "or FC-MAP\n"); | ||
1073 | return; | 1087 | return; |
1074 | } | 1088 | } |
1075 | if (fcf->pri < best->pri) | 1089 | if (fcf->pri < best->pri) |
@@ -1113,7 +1127,7 @@ static void fcoe_ctlr_timeout(unsigned long arg) | |||
1113 | if (sel != fcf) { | 1127 | if (sel != fcf) { |
1114 | fcf = sel; /* the old FCF may have been freed */ | 1128 | fcf = sel; /* the old FCF may have been freed */ |
1115 | if (sel) { | 1129 | if (sel) { |
1116 | printk(KERN_INFO "host%d: FIP selected " | 1130 | printk(KERN_INFO "libfcoe: host%d: FIP selected " |
1117 | "Fibre-Channel Forwarder MAC %s\n", | 1131 | "Fibre-Channel Forwarder MAC %s\n", |
1118 | fip->lp->host->host_no, | 1132 | fip->lp->host->host_no, |
1119 | print_mac(buf, sel->fcf_mac)); | 1133 | print_mac(buf, sel->fcf_mac)); |
@@ -1123,7 +1137,7 @@ static void fcoe_ctlr_timeout(unsigned long arg) | |||
1123 | fip->ctlr_ka_time = jiffies + sel->fka_period; | 1137 | fip->ctlr_ka_time = jiffies + sel->fka_period; |
1124 | fip->link = 1; | 1138 | fip->link = 1; |
1125 | } else { | 1139 | } else { |
1126 | printk(KERN_NOTICE "host%d: " | 1140 | printk(KERN_NOTICE "libfcoe: host%d: " |
1127 | "FIP Fibre-Channel Forwarder timed out. " | 1141 | "FIP Fibre-Channel Forwarder timed out. " |
1128 | "Starting FCF discovery.\n", | 1142 | "Starting FCF discovery.\n", |
1129 | fip->lp->host->host_no); | 1143 | fip->lp->host->host_no); |
@@ -1247,7 +1261,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa) | |||
1247 | return -EINVAL; | 1261 | return -EINVAL; |
1248 | } | 1262 | } |
1249 | fip->state = FIP_ST_NON_FIP; | 1263 | fip->state = FIP_ST_NON_FIP; |
1250 | FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n"); | 1264 | LIBFCOE_FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n"); |
1251 | 1265 | ||
1252 | /* | 1266 | /* |
1253 | * FLOGI accepted. | 1267 | * FLOGI accepted. |
@@ -1276,7 +1290,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa) | |||
1276 | memcpy(fip->dest_addr, sa, ETH_ALEN); | 1290 | memcpy(fip->dest_addr, sa, ETH_ALEN); |
1277 | fip->map_dest = 0; | 1291 | fip->map_dest = 0; |
1278 | if (fip->state == FIP_ST_NON_FIP) | 1292 | if (fip->state == FIP_ST_NON_FIP) |
1279 | FIP_DBG("received FLOGI REQ, " | 1293 | LIBFCOE_FIP_DBG("received FLOGI REQ, " |
1280 | "using non-FIP mode\n"); | 1294 | "using non-FIP mode\n"); |
1281 | fip->state = FIP_ST_NON_FIP; | 1295 | fip->state = FIP_ST_NON_FIP; |
1282 | } | 1296 | } |
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 89d41a424b33..5fd2da494d08 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -40,7 +40,7 @@ | |||
40 | #include "scsi_logging.h" | 40 | #include "scsi_logging.h" |
41 | 41 | ||
42 | 42 | ||
43 | static int scsi_host_next_hn; /* host_no for next new host */ | 43 | static atomic_t scsi_host_next_hn; /* host_no for next new host */ |
44 | 44 | ||
45 | 45 | ||
46 | static void scsi_host_cls_release(struct device *dev) | 46 | static void scsi_host_cls_release(struct device *dev) |
@@ -333,7 +333,11 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) | |||
333 | 333 | ||
334 | mutex_init(&shost->scan_mutex); | 334 | mutex_init(&shost->scan_mutex); |
335 | 335 | ||
336 | shost->host_no = scsi_host_next_hn++; /* XXX(hch): still racy */ | 336 | /* |
337 | * subtract one because we increment first then return, but we need to | ||
338 | * know what the next host number was before increment | ||
339 | */ | ||
340 | shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1; | ||
337 | shost->dma_channel = 0xff; | 341 | shost->dma_channel = 0xff; |
338 | 342 | ||
339 | /* These three are default values which can be overridden */ | 343 | /* These three are default values which can be overridden */ |
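The hosts.c change above closes the long-standing race on scsi_host_next_hn (the old line even carried an "XXX(hch): still racy" note) by switching the counter to an atomic_t; the added comment explains the trailing "- 1". A minimal sketch of the same ID-allocation idiom with a hypothetical counter:

#include <linux/atomic.h>

static atomic_t example_next_id = ATOMIC_INIT(0);       /* hypothetical ID counter */

static unsigned int example_alloc_id(void)
{
        /*
         * atomic_inc_return() increments and returns the new value in one
         * atomic operation, so concurrent callers can never observe the
         * same number; subtracting 1 yields the pre-increment value, which
         * keeps the sequence starting at 0.
         */
        return atomic_inc_return(&example_next_id) - 1;
}
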
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b4b805e8d7db..166d96450a0e 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2254,10 +2254,13 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, | |||
2254 | continue; | 2254 | continue; |
2255 | if (crq->node_name && tgt->ids.node_name != crq->node_name) | 2255 | if (crq->node_name && tgt->ids.node_name != crq->node_name) |
2256 | continue; | 2256 | continue; |
2257 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 2257 | if (tgt->need_login && crq->event == IBMVFC_AE_ELS_LOGO) |
2258 | tgt->logo_rcvd = 1; | ||
2259 | if (!tgt->need_login || crq->event == IBMVFC_AE_ELS_PLOGI) { | ||
2260 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | ||
2261 | ibmvfc_reinit_host(vhost); | ||
2262 | } | ||
2258 | } | 2263 | } |
2259 | |||
2260 | ibmvfc_reinit_host(vhost); | ||
2261 | break; | 2264 | break; |
2262 | case IBMVFC_AE_LINK_DOWN: | 2265 | case IBMVFC_AE_LINK_DOWN: |
2263 | case IBMVFC_AE_ADAPTER_FAILED: | 2266 | case IBMVFC_AE_ADAPTER_FAILED: |
@@ -2783,27 +2786,27 @@ static void ibmvfc_tasklet(void *data) | |||
2783 | 2786 | ||
2784 | spin_lock_irqsave(vhost->host->host_lock, flags); | 2787 | spin_lock_irqsave(vhost->host->host_lock, flags); |
2785 | while (!done) { | 2788 | while (!done) { |
2786 | /* Pull all the valid messages off the CRQ */ | ||
2787 | while ((crq = ibmvfc_next_crq(vhost)) != NULL) { | ||
2788 | ibmvfc_handle_crq(crq, vhost); | ||
2789 | crq->valid = 0; | ||
2790 | } | ||
2791 | |||
2792 | /* Pull all the valid messages off the async CRQ */ | 2789 | /* Pull all the valid messages off the async CRQ */ |
2793 | while ((async = ibmvfc_next_async_crq(vhost)) != NULL) { | 2790 | while ((async = ibmvfc_next_async_crq(vhost)) != NULL) { |
2794 | ibmvfc_handle_async(async, vhost); | 2791 | ibmvfc_handle_async(async, vhost); |
2795 | async->valid = 0; | 2792 | async->valid = 0; |
2796 | } | 2793 | } |
2797 | 2794 | ||
2798 | vio_enable_interrupts(vdev); | 2795 | /* Pull all the valid messages off the CRQ */ |
2799 | if ((crq = ibmvfc_next_crq(vhost)) != NULL) { | 2796 | while ((crq = ibmvfc_next_crq(vhost)) != NULL) { |
2800 | vio_disable_interrupts(vdev); | ||
2801 | ibmvfc_handle_crq(crq, vhost); | 2797 | ibmvfc_handle_crq(crq, vhost); |
2802 | crq->valid = 0; | 2798 | crq->valid = 0; |
2803 | } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) { | 2799 | } |
2800 | |||
2801 | vio_enable_interrupts(vdev); | ||
2802 | if ((async = ibmvfc_next_async_crq(vhost)) != NULL) { | ||
2804 | vio_disable_interrupts(vdev); | 2803 | vio_disable_interrupts(vdev); |
2805 | ibmvfc_handle_async(async, vhost); | 2804 | ibmvfc_handle_async(async, vhost); |
2806 | async->valid = 0; | 2805 | async->valid = 0; |
2806 | } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) { | ||
2807 | vio_disable_interrupts(vdev); | ||
2808 | ibmvfc_handle_crq(crq, vhost); | ||
2809 | crq->valid = 0; | ||
2807 | } else | 2810 | } else |
2808 | done = 1; | 2811 | done = 1; |
2809 | } | 2812 | } |
@@ -2927,7 +2930,11 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) | |||
2927 | break; | 2930 | break; |
2928 | case IBMVFC_MAD_FAILED: | 2931 | case IBMVFC_MAD_FAILED: |
2929 | default: | 2932 | default: |
2930 | if (ibmvfc_retry_cmd(rsp->status, rsp->error)) | 2933 | if ((rsp->status & IBMVFC_VIOS_FAILURE) && rsp->error == IBMVFC_PLOGI_REQUIRED) |
2934 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); | ||
2935 | else if (tgt->logo_rcvd) | ||
2936 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); | ||
2937 | else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) | ||
2931 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); | 2938 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); |
2932 | else | 2939 | else |
2933 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 2940 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
@@ -3054,6 +3061,7 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt) | |||
3054 | return; | 3061 | return; |
3055 | 3062 | ||
3056 | kref_get(&tgt->kref); | 3063 | kref_get(&tgt->kref); |
3064 | tgt->logo_rcvd = 0; | ||
3057 | evt = ibmvfc_get_event(vhost); | 3065 | evt = ibmvfc_get_event(vhost); |
3058 | vhost->discovery_threads++; | 3066 | vhost->discovery_threads++; |
3059 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); | 3067 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index c2668d7d67f5..007fa1c9ef14 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -605,6 +605,7 @@ struct ibmvfc_target { | |||
605 | int need_login; | 605 | int need_login; |
606 | int add_rport; | 606 | int add_rport; |
607 | int init_retries; | 607 | int init_retries; |
608 | int logo_rcvd; | ||
608 | u32 cancel_key; | 609 | u32 cancel_key; |
609 | struct ibmvfc_service_parms service_parms; | 610 | struct ibmvfc_service_parms service_parms; |
610 | struct ibmvfc_service_parms service_parms_change; | 611 | struct ibmvfc_service_parms service_parms_change; |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0f8bc772b112..5f045505a1f4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -131,13 +131,13 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { | |||
131 | }; | 131 | }; |
132 | 132 | ||
133 | static const struct ipr_chip_t ipr_chip[] = { | 133 | static const struct ipr_chip_t ipr_chip[] = { |
134 | { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] }, | 134 | { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, &ipr_chip_cfg[0] }, |
135 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] }, | 135 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, &ipr_chip_cfg[0] }, |
136 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] }, | 136 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] }, |
137 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] }, | 137 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] }, |
138 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] }, | 138 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, &ipr_chip_cfg[0] }, |
139 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] }, | 139 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, &ipr_chip_cfg[1] }, |
140 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] } | 140 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, &ipr_chip_cfg[1] } |
141 | }; | 141 | }; |
142 | 142 | ||
143 | static int ipr_max_bus_speeds [] = { | 143 | static int ipr_max_bus_speeds [] = { |
@@ -7367,6 +7367,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, | |||
7367 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); | 7367 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); |
7368 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); | 7368 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); |
7369 | init_waitqueue_head(&ioa_cfg->reset_wait_q); | 7369 | init_waitqueue_head(&ioa_cfg->reset_wait_q); |
7370 | init_waitqueue_head(&ioa_cfg->msi_wait_q); | ||
7370 | ioa_cfg->sdt_state = INACTIVE; | 7371 | ioa_cfg->sdt_state = INACTIVE; |
7371 | if (ipr_enable_cache) | 7372 | if (ipr_enable_cache) |
7372 | ioa_cfg->cache_state = CACHE_ENABLED; | 7373 | ioa_cfg->cache_state = CACHE_ENABLED; |
@@ -7398,25 +7399,108 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, | |||
7398 | } | 7399 | } |
7399 | 7400 | ||
7400 | /** | 7401 | /** |
7401 | * ipr_get_chip_cfg - Find adapter chip configuration | 7402 | * ipr_get_chip_info - Find adapter chip information |
7402 | * @dev_id: PCI device id struct | 7403 | * @dev_id: PCI device id struct |
7403 | * | 7404 | * |
7404 | * Return value: | 7405 | * Return value: |
7405 | * ptr to chip config on success / NULL on failure | 7406 | * ptr to chip information on success / NULL on failure |
7406 | **/ | 7407 | **/ |
7407 | static const struct ipr_chip_cfg_t * __devinit | 7408 | static const struct ipr_chip_t * __devinit |
7408 | ipr_get_chip_cfg(const struct pci_device_id *dev_id) | 7409 | ipr_get_chip_info(const struct pci_device_id *dev_id) |
7409 | { | 7410 | { |
7410 | int i; | 7411 | int i; |
7411 | 7412 | ||
7412 | for (i = 0; i < ARRAY_SIZE(ipr_chip); i++) | 7413 | for (i = 0; i < ARRAY_SIZE(ipr_chip); i++) |
7413 | if (ipr_chip[i].vendor == dev_id->vendor && | 7414 | if (ipr_chip[i].vendor == dev_id->vendor && |
7414 | ipr_chip[i].device == dev_id->device) | 7415 | ipr_chip[i].device == dev_id->device) |
7415 | return ipr_chip[i].cfg; | 7416 | return &ipr_chip[i]; |
7416 | return NULL; | 7417 | return NULL; |
7417 | } | 7418 | } |
7418 | 7419 | ||
7419 | /** | 7420 | /** |
7421 | * ipr_test_intr - Handle the interrupt generated in ipr_test_msi(). | ||
7422 | * @pdev: PCI device struct | ||
7423 | * | ||
7424 | * Description: Simply set the msi_received flag to 1 indicating that | ||
7425 | * Message Signaled Interrupts are supported. | ||
7426 | * | ||
7427 | * Return value: | ||
7428 | * 0 on success / non-zero on failure | ||
7429 | **/ | ||
7430 | static irqreturn_t __devinit ipr_test_intr(int irq, void *devp) | ||
7431 | { | ||
7432 | struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; | ||
7433 | unsigned long lock_flags = 0; | ||
7434 | irqreturn_t rc = IRQ_HANDLED; | ||
7435 | |||
7436 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
7437 | |||
7438 | ioa_cfg->msi_received = 1; | ||
7439 | wake_up(&ioa_cfg->msi_wait_q); | ||
7440 | |||
7441 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
7442 | return rc; | ||
7443 | } | ||
7444 | |||
7445 | /** | ||
7446 | * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support. | ||
7447 | * @pdev: PCI device struct | ||
7448 | * | ||
7449 | * Description: The return value from pci_enable_msi() can not always be | ||
7450 | * trusted. This routine sets up and initiates a test interrupt to determine | ||
7451 | * if the interrupt is received via the ipr_test_intr() service routine. | ||
7452 | * If the tests fails, the driver will fall back to LSI. | ||
7453 | * | ||
7454 | * Return value: | ||
7455 | * 0 on success / non-zero on failure | ||
7456 | **/ | ||
7457 | static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, | ||
7458 | struct pci_dev *pdev) | ||
7459 | { | ||
7460 | int rc; | ||
7461 | volatile u32 int_reg; | ||
7462 | unsigned long lock_flags = 0; | ||
7463 | |||
7464 | ENTER; | ||
7465 | |||
7466 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
7467 | init_waitqueue_head(&ioa_cfg->msi_wait_q); | ||
7468 | ioa_cfg->msi_received = 0; | ||
7469 | ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); | ||
7470 | writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg); | ||
7471 | int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); | ||
7472 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
7473 | |||
7474 | rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); | ||
7475 | if (rc) { | ||
7476 | dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); | ||
7477 | return rc; | ||
7478 | } else if (ipr_debug) | ||
7479 | dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq); | ||
7480 | |||
7481 | writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg); | ||
7482 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); | ||
7483 | wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); | ||
7484 | ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); | ||
7485 | |||
7486 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
7487 | if (!ioa_cfg->msi_received) { | ||
7488 | /* MSI test failed */ | ||
7489 | dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n"); | ||
7490 | rc = -EOPNOTSUPP; | ||
7491 | } else if (ipr_debug) | ||
7492 | dev_info(&pdev->dev, "MSI test succeeded.\n"); | ||
7493 | |||
7494 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
7495 | |||
7496 | free_irq(pdev->irq, ioa_cfg); | ||
7497 | |||
7498 | LEAVE; | ||
7499 | |||
7500 | return rc; | ||
7501 | } | ||
7502 | |||
7503 | /** | ||
7420 | * ipr_probe_ioa - Allocates memory and does first stage of initialization | 7504 | * ipr_probe_ioa - Allocates memory and does first stage of initialization |
7421 | * @pdev: PCI device struct | 7505 | * @pdev: PCI device struct |
7422 | * @dev_id: PCI device id struct | 7506 | * @dev_id: PCI device id struct |
@@ -7441,11 +7525,6 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, | |||
7441 | goto out; | 7525 | goto out; |
7442 | } | 7526 | } |
7443 | 7527 | ||
7444 | if (!(rc = pci_enable_msi(pdev))) | ||
7445 | dev_info(&pdev->dev, "MSI enabled\n"); | ||
7446 | else if (ipr_debug) | ||
7447 | dev_info(&pdev->dev, "Cannot enable MSI\n"); | ||
7448 | |||
7449 | dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); | 7528 | dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); |
7450 | 7529 | ||
7451 | host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); | 7530 | host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); |
@@ -7461,14 +7540,16 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, | |||
7461 | ata_host_init(&ioa_cfg->ata_host, &pdev->dev, | 7540 | ata_host_init(&ioa_cfg->ata_host, &pdev->dev, |
7462 | sata_port_info.flags, &ipr_sata_ops); | 7541 | sata_port_info.flags, &ipr_sata_ops); |
7463 | 7542 | ||
7464 | ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id); | 7543 | ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); |
7465 | 7544 | ||
7466 | if (!ioa_cfg->chip_cfg) { | 7545 | if (!ioa_cfg->ipr_chip) { |
7467 | dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", | 7546 | dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", |
7468 | dev_id->vendor, dev_id->device); | 7547 | dev_id->vendor, dev_id->device); |
7469 | goto out_scsi_host_put; | 7548 | goto out_scsi_host_put; |
7470 | } | 7549 | } |
7471 | 7550 | ||
7551 | ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; | ||
7552 | |||
7472 | if (ipr_transop_timeout) | 7553 | if (ipr_transop_timeout) |
7473 | ioa_cfg->transop_timeout = ipr_transop_timeout; | 7554 | ioa_cfg->transop_timeout = ipr_transop_timeout; |
7474 | else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT) | 7555 | else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT) |
@@ -7519,6 +7600,18 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, | |||
7519 | goto cleanup_nomem; | 7600 | goto cleanup_nomem; |
7520 | } | 7601 | } |
7521 | 7602 | ||
7603 | /* Enable MSI style interrupts if they are supported. */ | ||
7604 | if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) { | ||
7605 | rc = ipr_test_msi(ioa_cfg, pdev); | ||
7606 | if (rc == -EOPNOTSUPP) | ||
7607 | pci_disable_msi(pdev); | ||
7608 | else if (rc) | ||
7609 | goto out_msi_disable; | ||
7610 | else | ||
7611 | dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq); | ||
7612 | } else if (ipr_debug) | ||
7613 | dev_info(&pdev->dev, "Cannot enable MSI.\n"); | ||
7614 | |||
7522 | /* Save away PCI config space for use following IOA reset */ | 7615 | /* Save away PCI config space for use following IOA reset */ |
7523 | rc = pci_save_state(pdev); | 7616 | rc = pci_save_state(pdev); |
7524 | 7617 | ||
@@ -7556,7 +7649,9 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, | |||
7556 | ioa_cfg->ioa_unit_checked = 1; | 7649 | ioa_cfg->ioa_unit_checked = 1; |
7557 | 7650 | ||
7558 | ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); | 7651 | ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); |
7559 | rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg); | 7652 | rc = request_irq(pdev->irq, ipr_isr, |
7653 | ioa_cfg->msi_received ? 0 : IRQF_SHARED, | ||
7654 | IPR_NAME, ioa_cfg); | ||
7560 | 7655 | ||
7561 | if (rc) { | 7656 | if (rc) { |
7562 | dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", | 7657 | dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", |
@@ -7583,12 +7678,13 @@ cleanup_nolog: | |||
7583 | ipr_free_mem(ioa_cfg); | 7678 | ipr_free_mem(ioa_cfg); |
7584 | cleanup_nomem: | 7679 | cleanup_nomem: |
7585 | iounmap(ipr_regs); | 7680 | iounmap(ipr_regs); |
7681 | out_msi_disable: | ||
7682 | pci_disable_msi(pdev); | ||
7586 | out_release_regions: | 7683 | out_release_regions: |
7587 | pci_release_regions(pdev); | 7684 | pci_release_regions(pdev); |
7588 | out_scsi_host_put: | 7685 | out_scsi_host_put: |
7589 | scsi_host_put(host); | 7686 | scsi_host_put(host); |
7590 | out_disable: | 7687 | out_disable: |
7591 | pci_disable_msi(pdev); | ||
7592 | pci_disable_device(pdev); | 7688 | pci_disable_device(pdev); |
7593 | goto out; | 7689 | goto out; |
7594 | } | 7690 | } |
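
Taken together, the ipr.c hunks above implement a probe-time MSI self-test: a throwaway handler (ipr_test_intr) is registered, the adapter is told to raise a benign interrupt through its IO debug acknowledge register, and ipr_probe_ioa keeps MSI only if the flag set by that handler shows up within a second; otherwise it disables MSI and falls back to legacy (LSI) interrupts, which is also why the final request_irq() drops IRQF_SHARED when MSI is in use. The sketch below distills that pattern in generic form; it is not the ipr code, and trigger_test_interrupt() is a hypothetical stand-in for whatever register write raises a test interrupt on a particular device.

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/wait.h>

struct msi_test {
	wait_queue_head_t wait_q;
	int received;
};

static irqreturn_t msi_test_isr(int irq, void *devp)
{
	struct msi_test *t = devp;

	/* Record that the test interrupt actually arrived. */
	t->received = 1;
	wake_up(&t->wait_q);
	return IRQ_HANDLED;
}

static int probe_msi(struct pci_dev *pdev, struct msi_test *t,
		     void (*trigger_test_interrupt)(struct pci_dev *))
{
	int rc;

	init_waitqueue_head(&t->wait_q);
	t->received = 0;

	if (pci_enable_msi(pdev))
		return -EOPNOTSUPP;		/* MSI not available at all */

	rc = request_irq(pdev->irq, msi_test_isr, 0, "msi-test", t);
	if (rc) {
		pci_disable_msi(pdev);
		return rc;
	}

	trigger_test_interrupt(pdev);		/* hypothetical hardware poke */
	wait_event_timeout(t->wait_q, t->received, HZ);

	free_irq(pdev->irq, t);
	if (!t->received) {
		/* Interrupt never arrived: fall back to legacy (LSI/INTx). */
		pci_disable_msi(pdev);
		return -EOPNOTSUPP;
	}
	return 0;				/* keep MSI enabled */
}

A caller would then register its real handler with request_irq(), omitting IRQF_SHARED when MSI stayed enabled, mirroring the ipr_probe_ioa change above.
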
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 79a3ae4fb2c7..4b63dd6b1c81 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h | |||
@@ -37,8 +37,8 @@ | |||
37 | /* | 37 | /* |
38 | * Literals | 38 | * Literals |
39 | */ | 39 | */ |
40 | #define IPR_DRIVER_VERSION "2.4.2" | 40 | #define IPR_DRIVER_VERSION "2.4.3" |
41 | #define IPR_DRIVER_DATE "(January 21, 2009)" | 41 | #define IPR_DRIVER_DATE "(June 10, 2009)" |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding | 44 | * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding |
@@ -1025,6 +1025,9 @@ struct ipr_chip_cfg_t { | |||
1025 | struct ipr_chip_t { | 1025 | struct ipr_chip_t { |
1026 | u16 vendor; | 1026 | u16 vendor; |
1027 | u16 device; | 1027 | u16 device; |
1028 | u16 intr_type; | ||
1029 | #define IPR_USE_LSI 0x00 | ||
1030 | #define IPR_USE_MSI 0x01 | ||
1028 | const struct ipr_chip_cfg_t *cfg; | 1031 | const struct ipr_chip_cfg_t *cfg; |
1029 | }; | 1032 | }; |
1030 | 1033 | ||
@@ -1094,6 +1097,7 @@ struct ipr_ioa_cfg { | |||
1094 | u8 needs_hard_reset:1; | 1097 | u8 needs_hard_reset:1; |
1095 | u8 dual_raid:1; | 1098 | u8 dual_raid:1; |
1096 | u8 needs_warm_reset:1; | 1099 | u8 needs_warm_reset:1; |
1100 | u8 msi_received:1; | ||
1097 | 1101 | ||
1098 | u8 revid; | 1102 | u8 revid; |
1099 | 1103 | ||
@@ -1159,6 +1163,7 @@ struct ipr_ioa_cfg { | |||
1159 | 1163 | ||
1160 | unsigned int transop_timeout; | 1164 | unsigned int transop_timeout; |
1161 | const struct ipr_chip_cfg_t *chip_cfg; | 1165 | const struct ipr_chip_cfg_t *chip_cfg; |
1166 | const struct ipr_chip_t *ipr_chip; | ||
1162 | 1167 | ||
1163 | void __iomem *hdw_dma_regs; /* iomapped PCI memory space */ | 1168 | void __iomem *hdw_dma_regs; /* iomapped PCI memory space */ |
1164 | unsigned long hdw_dma_regs_pci; /* raw PCI memory space */ | 1169 | unsigned long hdw_dma_regs_pci; /* raw PCI memory space */ |
@@ -1179,6 +1184,7 @@ struct ipr_ioa_cfg { | |||
1179 | struct work_struct work_q; | 1184 | struct work_struct work_q; |
1180 | 1185 | ||
1181 | wait_queue_head_t reset_wait_q; | 1186 | wait_queue_head_t reset_wait_q; |
1187 | wait_queue_head_t msi_wait_q; | ||
1182 | 1188 | ||
1183 | struct ipr_dump *dump; | 1189 | struct ipr_dump *dump; |
1184 | enum ipr_sdt_state sdt_state; | 1190 | enum ipr_sdt_state sdt_state; |
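
With the new intr_type field, MSI is opted into per chip via the ipr_chip[] table rather than attempted unconditionally, so enabling it for a future adapter is a one-line table change instead of new probe logic. The entry below is illustrative only: the device IDs are placeholders and not taken from ipr.c, while ipr_chip_cfg refers to the per-chip config array the table already points at.

static const struct ipr_chip_t ipr_chip[] = {
	/* vendor,           device (placeholder), intr_type,   cfg */
	{ PCI_VENDOR_ID_IBM, 0x1234,               IPR_USE_MSI, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, 0x5678,               IPR_USE_LSI, &ipr_chip_cfg[1] },
};
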
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index b7c092d63bbe..518dbd91df85 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
@@ -253,8 +253,6 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn, | |||
253 | 253 | ||
254 | if (r < 0) { | 254 | if (r < 0) { |
255 | iscsi_tcp_segment_unmap(segment); | 255 | iscsi_tcp_segment_unmap(segment); |
256 | if (copied || r == -EAGAIN) | ||
257 | break; | ||
258 | return r; | 256 | return r; |
259 | } | 257 | } |
260 | copied += r; | 258 | copied += r; |
@@ -275,11 +273,17 @@ static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn) | |||
275 | 273 | ||
276 | while (1) { | 274 | while (1) { |
277 | rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment); | 275 | rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment); |
278 | if (rc < 0) { | 276 | /* |
277 | * We may not have been able to send data because the conn | ||
278 | * is getting stopped. libiscsi will know so propogate err | ||
279 | * for it to do the right thing. | ||
280 | */ | ||
281 | if (rc == -EAGAIN) | ||
282 | return rc; | ||
283 | else if (rc < 0) { | ||
279 | rc = ISCSI_ERR_XMIT_FAILED; | 284 | rc = ISCSI_ERR_XMIT_FAILED; |
280 | goto error; | 285 | goto error; |
281 | } | 286 | } else if (rc == 0) |
282 | if (rc == 0) | ||
283 | break; | 287 | break; |
284 | 288 | ||
285 | consumed += rc; | 289 | consumed += rc; |
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 4c880656990b..6fabf66972b9 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c | |||
@@ -45,14 +45,6 @@ | |||
45 | 45 | ||
46 | #define FC_DISC_DELAY 3 | 46 | #define FC_DISC_DELAY 3 |
47 | 47 | ||
48 | static int fc_disc_debug; | ||
49 | |||
50 | #define FC_DEBUG_DISC(fmt...) \ | ||
51 | do { \ | ||
52 | if (fc_disc_debug) \ | ||
53 | FC_DBG(fmt); \ | ||
54 | } while (0) | ||
55 | |||
56 | static void fc_disc_gpn_ft_req(struct fc_disc *); | 48 | static void fc_disc_gpn_ft_req(struct fc_disc *); |
57 | static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); | 49 | static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); |
58 | static int fc_disc_new_target(struct fc_disc *, struct fc_rport *, | 50 | static int fc_disc_new_target(struct fc_disc *, struct fc_rport *, |
@@ -137,8 +129,8 @@ static void fc_disc_rport_callback(struct fc_lport *lport, | |||
137 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | 129 | struct fc_rport_libfc_priv *rdata = rport->dd_data; |
138 | struct fc_disc *disc = &lport->disc; | 130 | struct fc_disc *disc = &lport->disc; |
139 | 131 | ||
140 | FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event, | 132 | FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event, |
141 | rport->port_id); | 133 | rport->port_id); |
142 | 134 | ||
143 | switch (event) { | 135 | switch (event) { |
144 | case RPORT_EV_CREATED: | 136 | case RPORT_EV_CREATED: |
@@ -191,8 +183,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, | |||
191 | 183 | ||
192 | lport = disc->lport; | 184 | lport = disc->lport; |
193 | 185 | ||
194 | FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n", | 186 | FC_DISC_DBG(disc, "Received an RSCN event\n"); |
195 | fc_host_port_id(lport->host)); | ||
196 | 187 | ||
197 | /* make sure the frame contains an RSCN message */ | 188 | /* make sure the frame contains an RSCN message */ |
198 | rp = fc_frame_payload_get(fp, sizeof(*rp)); | 189 | rp = fc_frame_payload_get(fp, sizeof(*rp)); |
@@ -225,8 +216,8 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, | |||
225 | */ | 216 | */ |
226 | switch (fmt) { | 217 | switch (fmt) { |
227 | case ELS_ADDR_FMT_PORT: | 218 | case ELS_ADDR_FMT_PORT: |
228 | FC_DEBUG_DISC("Port address format for port (%6x)\n", | 219 | FC_DISC_DBG(disc, "Port address format for port " |
229 | ntoh24(pp->rscn_fid)); | 220 | "(%6x)\n", ntoh24(pp->rscn_fid)); |
230 | dp = kzalloc(sizeof(*dp), GFP_KERNEL); | 221 | dp = kzalloc(sizeof(*dp), GFP_KERNEL); |
231 | if (!dp) { | 222 | if (!dp) { |
232 | redisc = 1; | 223 | redisc = 1; |
@@ -243,19 +234,19 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, | |||
243 | case ELS_ADDR_FMT_DOM: | 234 | case ELS_ADDR_FMT_DOM: |
244 | case ELS_ADDR_FMT_FAB: | 235 | case ELS_ADDR_FMT_FAB: |
245 | default: | 236 | default: |
246 | FC_DEBUG_DISC("Address format is (%d)\n", fmt); | 237 | FC_DISC_DBG(disc, "Address format is (%d)\n", fmt); |
247 | redisc = 1; | 238 | redisc = 1; |
248 | break; | 239 | break; |
249 | } | 240 | } |
250 | } | 241 | } |
251 | lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); | 242 | lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); |
252 | if (redisc) { | 243 | if (redisc) { |
253 | FC_DEBUG_DISC("RSCN received: rediscovering\n"); | 244 | FC_DISC_DBG(disc, "RSCN received: rediscovering\n"); |
254 | fc_disc_restart(disc); | 245 | fc_disc_restart(disc); |
255 | } else { | 246 | } else { |
256 | FC_DEBUG_DISC("RSCN received: not rediscovering. " | 247 | FC_DISC_DBG(disc, "RSCN received: not rediscovering. " |
257 | "redisc %d state %d in_prog %d\n", | 248 | "redisc %d state %d in_prog %d\n", |
258 | redisc, lport->state, disc->pending); | 249 | redisc, lport->state, disc->pending); |
259 | list_for_each_entry_safe(dp, next, &disc_ports, peers) { | 250 | list_for_each_entry_safe(dp, next, &disc_ports, peers) { |
260 | list_del(&dp->peers); | 251 | list_del(&dp->peers); |
261 | rport = lport->tt.rport_lookup(lport, dp->ids.port_id); | 252 | rport = lport->tt.rport_lookup(lport, dp->ids.port_id); |
@@ -270,7 +261,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, | |||
270 | fc_frame_free(fp); | 261 | fc_frame_free(fp); |
271 | return; | 262 | return; |
272 | reject: | 263 | reject: |
273 | FC_DEBUG_DISC("Received a bad RSCN frame\n"); | 264 | FC_DISC_DBG(disc, "Received a bad RSCN frame\n"); |
274 | rjt_data.fp = NULL; | 265 | rjt_data.fp = NULL; |
275 | rjt_data.reason = ELS_RJT_LOGIC; | 266 | rjt_data.reason = ELS_RJT_LOGIC; |
276 | rjt_data.explan = ELS_EXPL_NONE; | 267 | rjt_data.explan = ELS_EXPL_NONE; |
@@ -302,7 +293,8 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, | |||
302 | mutex_unlock(&disc->disc_mutex); | 293 | mutex_unlock(&disc->disc_mutex); |
303 | break; | 294 | break; |
304 | default: | 295 | default: |
305 | FC_DBG("Received an unsupported request. opcode (%x)\n", op); | 296 | FC_DISC_DBG(disc, "Received an unsupported request, " |
297 | "the opcode is (%x)\n", op); | ||
306 | break; | 298 | break; |
307 | } | 299 | } |
308 | } | 300 | } |
@@ -320,12 +312,10 @@ static void fc_disc_restart(struct fc_disc *disc) | |||
320 | struct fc_rport_libfc_priv *rdata, *next; | 312 | struct fc_rport_libfc_priv *rdata, *next; |
321 | struct fc_lport *lport = disc->lport; | 313 | struct fc_lport *lport = disc->lport; |
322 | 314 | ||
323 | FC_DEBUG_DISC("Restarting discovery for port (%6x)\n", | 315 | FC_DISC_DBG(disc, "Restarting discovery\n"); |
324 | fc_host_port_id(lport->host)); | ||
325 | 316 | ||
326 | list_for_each_entry_safe(rdata, next, &disc->rports, peers) { | 317 | list_for_each_entry_safe(rdata, next, &disc->rports, peers) { |
327 | rport = PRIV_TO_RPORT(rdata); | 318 | rport = PRIV_TO_RPORT(rdata); |
328 | FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id); | ||
329 | list_del(&rdata->peers); | 319 | list_del(&rdata->peers); |
330 | lport->tt.rport_logoff(rport); | 320 | lport->tt.rport_logoff(rport); |
331 | } | 321 | } |
@@ -485,8 +475,7 @@ static void fc_disc_done(struct fc_disc *disc) | |||
485 | struct fc_lport *lport = disc->lport; | 475 | struct fc_lport *lport = disc->lport; |
486 | enum fc_disc_event event; | 476 | enum fc_disc_event event; |
487 | 477 | ||
488 | FC_DEBUG_DISC("Discovery complete for port (%6x)\n", | 478 | FC_DISC_DBG(disc, "Discovery complete\n"); |
489 | fc_host_port_id(lport->host)); | ||
490 | 479 | ||
491 | event = disc->event; | 480 | event = disc->event; |
492 | disc->event = DISC_EV_NONE; | 481 | disc->event = DISC_EV_NONE; |
@@ -510,10 +499,10 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) | |||
510 | { | 499 | { |
511 | struct fc_lport *lport = disc->lport; | 500 | struct fc_lport *lport = disc->lport; |
512 | unsigned long delay = 0; | 501 | unsigned long delay = 0; |
513 | if (fc_disc_debug) | 502 | |
514 | FC_DBG("Error %ld, retries %d/%d\n", | 503 | FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n", |
515 | PTR_ERR(fp), disc->retry_count, | 504 | PTR_ERR(fp), disc->retry_count, |
516 | FC_DISC_RETRY_LIMIT); | 505 | FC_DISC_RETRY_LIMIT); |
517 | 506 | ||
518 | if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { | 507 | if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { |
519 | /* | 508 | /* |
@@ -649,9 +638,9 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) | |||
649 | &disc->rogue_rports); | 638 | &disc->rogue_rports); |
650 | lport->tt.rport_login(rport); | 639 | lport->tt.rport_login(rport); |
651 | } else | 640 | } else |
652 | FC_DBG("Failed to allocate memory for " | 641 | printk(KERN_WARNING "libfc: Failed to allocate " |
653 | "the newly discovered port (%6x)\n", | 642 | "memory for the newly discovered port " |
654 | dp.ids.port_id); | 643 | "(%6x)\n", dp.ids.port_id); |
655 | } | 644 | } |
656 | 645 | ||
657 | if (np->fp_flags & FC_NS_FID_LAST) { | 646 | if (np->fp_flags & FC_NS_FID_LAST) { |
@@ -671,9 +660,8 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) | |||
671 | */ | 660 | */ |
672 | if (error == 0 && len > 0 && len < sizeof(*np)) { | 661 | if (error == 0 && len > 0 && len < sizeof(*np)) { |
673 | if (np != &disc->partial_buf) { | 662 | if (np != &disc->partial_buf) { |
674 | FC_DEBUG_DISC("Partial buffer remains " | 663 | FC_DISC_DBG(disc, "Partial buffer remains " |
675 | "for discovery by (%6x)\n", | 664 | "for discovery\n"); |
676 | fc_host_port_id(lport->host)); | ||
677 | memcpy(&disc->partial_buf, np, len); | 665 | memcpy(&disc->partial_buf, np, len); |
678 | } | 666 | } |
679 | disc->buf_len = (unsigned char) len; | 667 | disc->buf_len = (unsigned char) len; |
@@ -721,8 +709,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
721 | int error; | 709 | int error; |
722 | 710 | ||
723 | mutex_lock(&disc->disc_mutex); | 711 | mutex_lock(&disc->disc_mutex); |
724 | FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n", | 712 | FC_DISC_DBG(disc, "Received a GPN_FT response\n"); |
725 | fc_host_port_id(disc->lport->host)); | ||
726 | 713 | ||
727 | if (IS_ERR(fp)) { | 714 | if (IS_ERR(fp)) { |
728 | fc_disc_error(disc, fp); | 715 | fc_disc_error(disc, fp); |
@@ -738,30 +725,30 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
738 | disc->seq_count == 0) { | 725 | disc->seq_count == 0) { |
739 | cp = fc_frame_payload_get(fp, sizeof(*cp)); | 726 | cp = fc_frame_payload_get(fp, sizeof(*cp)); |
740 | if (!cp) { | 727 | if (!cp) { |
741 | FC_DBG("GPN_FT response too short, len %d\n", | 728 | FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n", |
742 | fr_len(fp)); | 729 | fr_len(fp)); |
743 | } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) { | 730 | } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) { |
744 | 731 | ||
745 | /* Accepted, parse the response. */ | 732 | /* Accepted, parse the response. */ |
746 | buf = cp + 1; | 733 | buf = cp + 1; |
747 | len -= sizeof(*cp); | 734 | len -= sizeof(*cp); |
748 | } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { | 735 | } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { |
749 | FC_DBG("GPN_FT rejected reason %x exp %x " | 736 | FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x " |
750 | "(check zoning)\n", cp->ct_reason, | 737 | "(check zoning)\n", cp->ct_reason, |
751 | cp->ct_explan); | 738 | cp->ct_explan); |
752 | disc->event = DISC_EV_FAILED; | 739 | disc->event = DISC_EV_FAILED; |
753 | fc_disc_done(disc); | 740 | fc_disc_done(disc); |
754 | } else { | 741 | } else { |
755 | FC_DBG("GPN_FT unexpected response code %x\n", | 742 | FC_DISC_DBG(disc, "GPN_FT unexpected response code " |
756 | ntohs(cp->ct_cmd)); | 743 | "%x\n", ntohs(cp->ct_cmd)); |
757 | } | 744 | } |
758 | } else if (fr_sof(fp) == FC_SOF_N3 && | 745 | } else if (fr_sof(fp) == FC_SOF_N3 && |
759 | seq_cnt == disc->seq_count) { | 746 | seq_cnt == disc->seq_count) { |
760 | buf = fh + 1; | 747 | buf = fh + 1; |
761 | } else { | 748 | } else { |
762 | FC_DBG("GPN_FT unexpected frame - out of sequence? " | 749 | FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? " |
763 | "seq_cnt %x expected %x sof %x eof %x\n", | 750 | "seq_cnt %x expected %x sof %x eof %x\n", |
764 | seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp)); | 751 | seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp)); |
765 | } | 752 | } |
766 | if (buf) { | 753 | if (buf) { |
767 | error = fc_disc_gpn_ft_parse(disc, buf, len); | 754 | error = fc_disc_gpn_ft_parse(disc, buf, len); |
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 7af9bceb8aa9..2bc22be5f849 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c | |||
@@ -32,18 +32,7 @@ | |||
32 | #include <scsi/libfc.h> | 32 | #include <scsi/libfc.h> |
33 | #include <scsi/fc_encode.h> | 33 | #include <scsi/fc_encode.h> |
34 | 34 | ||
35 | /* | 35 | static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ |
36 | * fc_exch_debug can be set in debugger or at compile time to get more logs. | ||
37 | */ | ||
38 | static int fc_exch_debug; | ||
39 | |||
40 | #define FC_DEBUG_EXCH(fmt...) \ | ||
41 | do { \ | ||
42 | if (fc_exch_debug) \ | ||
43 | FC_DBG(fmt); \ | ||
44 | } while (0) | ||
45 | |||
46 | static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ | ||
47 | 36 | ||
48 | /* | 37 | /* |
49 | * Structure and function definitions for managing Fibre Channel Exchanges | 38 | * Structure and function definitions for managing Fibre Channel Exchanges |
@@ -333,8 +322,8 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep, | |||
333 | if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) | 322 | if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) |
334 | return; | 323 | return; |
335 | 324 | ||
336 | FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n", | 325 | FC_EXCH_DBG(ep, "Exchange timed out, notifying the upper layer\n"); |
337 | ep->xid); | 326 | |
338 | if (schedule_delayed_work(&ep->timeout_work, | 327 | if (schedule_delayed_work(&ep->timeout_work, |
339 | msecs_to_jiffies(timer_msec))) | 328 | msecs_to_jiffies(timer_msec))) |
340 | fc_exch_hold(ep); /* hold for timer */ | 329 | fc_exch_hold(ep); /* hold for timer */ |
@@ -545,7 +534,7 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, | |||
545 | /* alloc a new xid */ | 534 | /* alloc a new xid */ |
546 | xid = fc_em_alloc_xid(mp, fp); | 535 | xid = fc_em_alloc_xid(mp, fp); |
547 | if (!xid) { | 536 | if (!xid) { |
548 | printk(KERN_ERR "fc_em_alloc_xid() failed\n"); | 537 | printk(KERN_WARNING "libfc: Failed to allocate an exchange\n"); |
549 | goto err; | 538 | goto err; |
550 | } | 539 | } |
551 | } | 540 | } |
@@ -820,8 +809,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp) | |||
820 | struct fc_exch *ep = fc_seq_exch(sp); | 809 | struct fc_exch *ep = fc_seq_exch(sp); |
821 | 810 | ||
822 | sp = fc_seq_alloc(ep, ep->seq_id++); | 811 | sp = fc_seq_alloc(ep, ep->seq_id++); |
823 | FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x\n", | 812 | FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n", |
824 | ep->xid, ep->f_ctl, sp->id); | 813 | ep->f_ctl, sp->id); |
825 | return sp; | 814 | return sp; |
826 | } | 815 | } |
827 | /* | 816 | /* |
@@ -901,7 +890,7 @@ void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, | |||
901 | fc_exch_els_rec(sp, els_data->fp); | 890 | fc_exch_els_rec(sp, els_data->fp); |
902 | break; | 891 | break; |
903 | default: | 892 | default: |
904 | FC_DBG("Invalid ELS CMD:%x\n", els_cmd); | 893 | FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd); |
905 | } | 894 | } |
906 | } | 895 | } |
907 | EXPORT_SYMBOL(fc_seq_els_rsp_send); | 896 | EXPORT_SYMBOL(fc_seq_els_rsp_send); |
@@ -1134,7 +1123,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, | |||
1134 | lp->tt.lport_recv(lp, sp, fp); | 1123 | lp->tt.lport_recv(lp, sp, fp); |
1135 | fc_exch_release(ep); /* release from lookup */ | 1124 | fc_exch_release(ep); /* release from lookup */ |
1136 | } else { | 1125 | } else { |
1137 | FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject); | 1126 | FC_EM_DBG(mp, "exch/seq lookup failed: reject %x\n", reject); |
1138 | fc_frame_free(fp); | 1127 | fc_frame_free(fp); |
1139 | } | 1128 | } |
1140 | } | 1129 | } |
@@ -1242,10 +1231,10 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) | |||
1242 | sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ | 1231 | sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ |
1243 | if (!sp) { | 1232 | if (!sp) { |
1244 | atomic_inc(&mp->stats.xid_not_found); | 1233 | atomic_inc(&mp->stats.xid_not_found); |
1245 | FC_DEBUG_EXCH("seq lookup failed\n"); | 1234 | FC_EM_DBG(mp, "seq lookup failed\n"); |
1246 | } else { | 1235 | } else { |
1247 | atomic_inc(&mp->stats.non_bls_resp); | 1236 | atomic_inc(&mp->stats.non_bls_resp); |
1248 | FC_DEBUG_EXCH("non-BLS response to sequence"); | 1237 | FC_EM_DBG(mp, "non-BLS response to sequence"); |
1249 | } | 1238 | } |
1250 | fc_frame_free(fp); | 1239 | fc_frame_free(fp); |
1251 | } | 1240 | } |
@@ -1266,8 +1255,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) | |||
1266 | int rc = 1, has_rec = 0; | 1255 | int rc = 1, has_rec = 0; |
1267 | 1256 | ||
1268 | fh = fc_frame_header_get(fp); | 1257 | fh = fc_frame_header_get(fp); |
1269 | FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n", | 1258 | FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl, |
1270 | fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl)); | 1259 | fc_exch_rctl_name(fh->fh_r_ctl)); |
1271 | 1260 | ||
1272 | if (cancel_delayed_work_sync(&ep->timeout_work)) | 1261 | if (cancel_delayed_work_sync(&ep->timeout_work)) |
1273 | fc_exch_release(ep); /* release from pending timer hold */ | 1262 | fc_exch_release(ep); /* release from pending timer hold */ |
@@ -1359,9 +1348,9 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) | |||
1359 | case FC_RCTL_ACK_0: | 1348 | case FC_RCTL_ACK_0: |
1360 | break; | 1349 | break; |
1361 | default: | 1350 | default: |
1362 | FC_DEBUG_EXCH("BLS rctl %x - %s received", | 1351 | FC_EXCH_DBG(ep, "BLS rctl %x - %s received", |
1363 | fh->fh_r_ctl, | 1352 | fh->fh_r_ctl, |
1364 | fc_exch_rctl_name(fh->fh_r_ctl)); | 1353 | fc_exch_rctl_name(fh->fh_r_ctl)); |
1365 | break; | 1354 | break; |
1366 | } | 1355 | } |
1367 | fc_frame_free(fp); | 1356 | fc_frame_free(fp); |
@@ -1599,7 +1588,8 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg) | |||
1599 | 1588 | ||
1600 | if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT) | 1589 | if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT) |
1601 | goto cleanup; | 1590 | goto cleanup; |
1602 | FC_DBG("Cannot process RRQ, because of frame error %d\n", err); | 1591 | FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, " |
1592 | "frame error %d\n", err); | ||
1603 | return; | 1593 | return; |
1604 | } | 1594 | } |
1605 | 1595 | ||
@@ -1608,12 +1598,13 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg) | |||
1608 | 1598 | ||
1609 | switch (op) { | 1599 | switch (op) { |
1610 | case ELS_LS_RJT: | 1600 | case ELS_LS_RJT: |
1611 | FC_DBG("LS_RJT for RRQ"); | 1601 | FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ"); |
1612 | /* fall through */ | 1602 | /* fall through */ |
1613 | case ELS_LS_ACC: | 1603 | case ELS_LS_ACC: |
1614 | goto cleanup; | 1604 | goto cleanup; |
1615 | default: | 1605 | default: |
1616 | FC_DBG("unexpected response op %x for RRQ", op); | 1606 | FC_EXCH_DBG(aborted_ep, "unexpected response op %x " |
1607 | "for RRQ", op); | ||
1617 | return; | 1608 | return; |
1618 | } | 1609 | } |
1619 | 1610 | ||
@@ -1740,8 +1731,8 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, | |||
1740 | size_t len; | 1731 | size_t len; |
1741 | 1732 | ||
1742 | if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) { | 1733 | if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) { |
1743 | FC_DBG("Invalid min_xid 0x:%x and max_xid 0x:%x\n", | 1734 | FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", |
1744 | min_xid, max_xid); | 1735 | min_xid, max_xid); |
1745 | return NULL; | 1736 | return NULL; |
1746 | } | 1737 | } |
1747 | 1738 | ||
@@ -1878,7 +1869,8 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp, | |||
1878 | 1869 | ||
1879 | /* lport lock ? */ | 1870 | /* lport lock ? */ |
1880 | if (!lp || !mp || (lp->state == LPORT_ST_NONE)) { | 1871 | if (!lp || !mp || (lp->state == LPORT_ST_NONE)) { |
1881 | FC_DBG("fc_lport or EM is not allocated and configured"); | 1872 | FC_LPORT_DBG(lp, "Receiving frames for an lport that " |
1873 | "has not been initialized correctly\n"); | ||
1882 | fc_frame_free(fp); | 1874 | fc_frame_free(fp); |
1883 | return; | 1875 | return; |
1884 | } | 1876 | } |
@@ -1904,7 +1896,7 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp, | |||
1904 | fc_exch_recv_req(lp, mp, fp); | 1896 | fc_exch_recv_req(lp, mp, fp); |
1905 | break; | 1897 | break; |
1906 | default: | 1898 | default: |
1907 | FC_DBG("dropping invalid frame (eof %x)", fr_eof(fp)); | 1899 | FC_EM_DBG(mp, "dropping invalid frame (eof %x)", fr_eof(fp)); |
1908 | fc_frame_free(fp); | 1900 | fc_frame_free(fp); |
1909 | break; | 1901 | break; |
1910 | } | 1902 | } |
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index ad8b747837b0..e303e0d12c4b 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c | |||
@@ -43,13 +43,9 @@ MODULE_AUTHOR("Open-FCoE.org"); | |||
43 | MODULE_DESCRIPTION("libfc"); | 43 | MODULE_DESCRIPTION("libfc"); |
44 | MODULE_LICENSE("GPL v2"); | 44 | MODULE_LICENSE("GPL v2"); |
45 | 45 | ||
46 | static int fc_fcp_debug; | 46 | unsigned int fc_debug_logging; |
47 | 47 | module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); | |
48 | #define FC_DEBUG_FCP(fmt...) \ | 48 | MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); |
49 | do { \ | ||
50 | if (fc_fcp_debug) \ | ||
51 | FC_DBG(fmt); \ | ||
52 | } while (0) | ||
53 | 49 | ||
54 | static struct kmem_cache *scsi_pkt_cachep; | 50 | static struct kmem_cache *scsi_pkt_cachep; |
55 | 51 | ||
@@ -347,8 +343,8 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
347 | if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && | 343 | if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && |
348 | fc_frame_crc_check(fp)) | 344 | fc_frame_crc_check(fp)) |
349 | goto crc_err; | 345 | goto crc_err; |
350 | FC_DEBUG_FCP("data received past end. len %zx offset %zx " | 346 | FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx " |
351 | "data_len %x\n", len, offset, fsp->data_len); | 347 | "data_len %x\n", len, offset, fsp->data_len); |
352 | fc_fcp_retry_cmd(fsp); | 348 | fc_fcp_retry_cmd(fsp); |
353 | return; | 349 | return; |
354 | } | 350 | } |
@@ -411,7 +407,8 @@ crc_err: | |||
411 | stats->ErrorFrames++; | 407 | stats->ErrorFrames++; |
412 | /* FIXME - per cpu count, not total count! */ | 408 | /* FIXME - per cpu count, not total count! */ |
413 | if (stats->InvalidCRCCount++ < 5) | 409 | if (stats->InvalidCRCCount++ < 5) |
414 | printk(KERN_WARNING "CRC error on data frame for port (%6x)\n", | 410 | printk(KERN_WARNING "libfc: CRC error on data " |
411 | "frame for port (%6x)\n", | ||
415 | fc_host_port_id(lp->host)); | 412 | fc_host_port_id(lp->host)); |
416 | /* | 413 | /* |
417 | * Assume the frame is total garbage. | 414 | * Assume the frame is total garbage. |
@@ -475,14 +472,14 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
475 | WARN_ON(seq_blen <= 0); | 472 | WARN_ON(seq_blen <= 0); |
476 | if (unlikely(offset + seq_blen > fsp->data_len)) { | 473 | if (unlikely(offset + seq_blen > fsp->data_len)) { |
477 | /* this should never happen */ | 474 | /* this should never happen */ |
478 | FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n", | 475 | FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx " |
479 | seq_blen, offset); | 476 | "offset %zx\n", seq_blen, offset); |
480 | fc_fcp_send_abort(fsp); | 477 | fc_fcp_send_abort(fsp); |
481 | return 0; | 478 | return 0; |
482 | } else if (offset != fsp->xfer_len) { | 479 | } else if (offset != fsp->xfer_len) { |
483 | /* Out of Order Data Request - no problem, but unexpected. */ | 480 | /* Out of Order Data Request - no problem, but unexpected. */ |
484 | FC_DEBUG_FCP("xfer-ready non-contiguous. " | 481 | FC_FCP_DBG(fsp, "xfer-ready non-contiguous. " |
485 | "seq_blen %zx offset %zx\n", seq_blen, offset); | 482 | "seq_blen %zx offset %zx\n", seq_blen, offset); |
486 | } | 483 | } |
487 | 484 | ||
488 | /* | 485 | /* |
@@ -493,7 +490,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
493 | t_blen = fsp->max_payload; | 490 | t_blen = fsp->max_payload; |
494 | if (lp->seq_offload) { | 491 | if (lp->seq_offload) { |
495 | t_blen = min(seq_blen, (size_t)lp->lso_max); | 492 | t_blen = min(seq_blen, (size_t)lp->lso_max); |
496 | FC_DEBUG_FCP("fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", | 493 | FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", |
497 | fsp, seq_blen, lp->lso_max, t_blen); | 494 | fsp, seq_blen, lp->lso_max, t_blen); |
498 | } | 495 | } |
499 | 496 | ||
@@ -694,7 +691,7 @@ static void fc_fcp_reduce_can_queue(struct fc_lport *lp) | |||
694 | if (!can_queue) | 691 | if (!can_queue) |
695 | can_queue = 1; | 692 | can_queue = 1; |
696 | lp->host->can_queue = can_queue; | 693 | lp->host->can_queue = can_queue; |
697 | shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n" | 694 | shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n" |
698 | "Reducing can_queue to %d.\n", can_queue); | 695 | "Reducing can_queue to %d.\n", can_queue); |
699 | done: | 696 | done: |
700 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 697 | spin_unlock_irqrestore(lp->host->host_lock, flags); |
@@ -768,7 +765,7 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
768 | 765 | ||
769 | fc_fcp_resp(fsp, fp); | 766 | fc_fcp_resp(fsp, fp); |
770 | } else { | 767 | } else { |
771 | FC_DBG("unexpected frame. r_ctl %x\n", r_ctl); | 768 | FC_FCP_DBG(fsp, "unexpected frame. r_ctl %x\n", r_ctl); |
772 | } | 769 | } |
773 | unlock: | 770 | unlock: |
774 | fc_fcp_unlock_pkt(fsp); | 771 | fc_fcp_unlock_pkt(fsp); |
@@ -877,17 +874,17 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
877 | return; | 874 | return; |
878 | } | 875 | } |
879 | fsp->status_code = FC_DATA_OVRRUN; | 876 | fsp->status_code = FC_DATA_OVRRUN; |
880 | FC_DBG("tgt %6x xfer len %zx greater than expected len %x. " | 877 | FC_FCP_DBG(fsp, "tgt %6x xfer len %zx greater than expected, " |
881 | "data len %x\n", | 878 | "len %x, data len %x\n", |
882 | fsp->rport->port_id, | 879 | fsp->rport->port_id, |
883 | fsp->xfer_len, expected_len, fsp->data_len); | 880 | fsp->xfer_len, expected_len, fsp->data_len); |
884 | } | 881 | } |
885 | fc_fcp_complete_locked(fsp); | 882 | fc_fcp_complete_locked(fsp); |
886 | return; | 883 | return; |
887 | 884 | ||
888 | len_err: | 885 | len_err: |
889 | FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n", | 886 | FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u " |
890 | flags, fr_len(fp), respl, snsl); | 887 | "snsl %u\n", flags, fr_len(fp), respl, snsl); |
891 | err: | 888 | err: |
892 | fsp->status_code = FC_ERROR; | 889 | fsp->status_code = FC_ERROR; |
893 | fc_fcp_complete_locked(fsp); | 890 | fc_fcp_complete_locked(fsp); |
@@ -1107,13 +1104,11 @@ static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
1107 | if (fc_fcp_lock_pkt(fsp)) | 1104 | if (fc_fcp_lock_pkt(fsp)) |
1108 | return; | 1105 | return; |
1109 | 1106 | ||
1110 | switch (error) { | 1107 | if (error == -FC_EX_CLOSED) { |
1111 | case -FC_EX_CLOSED: | ||
1112 | fc_fcp_retry_cmd(fsp); | 1108 | fc_fcp_retry_cmd(fsp); |
1113 | goto unlock; | 1109 | goto unlock; |
1114 | default: | ||
1115 | FC_DBG("unknown error %ld\n", PTR_ERR(fp)); | ||
1116 | } | 1110 | } |
1111 | |||
1117 | /* | 1112 | /* |
1118 | * clear abort pending, because the lower layer | 1113 | * clear abort pending, because the lower layer |
1119 | * decided to force completion. | 1114 | * decided to force completion. |
@@ -1145,10 +1140,10 @@ static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp) | |||
1145 | fsp->wait_for_comp = 0; | 1140 | fsp->wait_for_comp = 0; |
1146 | 1141 | ||
1147 | if (!rc) { | 1142 | if (!rc) { |
1148 | FC_DBG("target abort cmd failed\n"); | 1143 | FC_FCP_DBG(fsp, "target abort cmd failed\n"); |
1149 | rc = FAILED; | 1144 | rc = FAILED; |
1150 | } else if (fsp->state & FC_SRB_ABORTED) { | 1145 | } else if (fsp->state & FC_SRB_ABORTED) { |
1151 | FC_DBG("target abort cmd passed\n"); | 1146 | FC_FCP_DBG(fsp, "target abort cmd passed\n"); |
1152 | rc = SUCCESS; | 1147 | rc = SUCCESS; |
1153 | fc_fcp_complete_locked(fsp); | 1148 | fc_fcp_complete_locked(fsp); |
1154 | } | 1149 | } |
@@ -1213,7 +1208,7 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | |||
1213 | spin_unlock_bh(&fsp->scsi_pkt_lock); | 1208 | spin_unlock_bh(&fsp->scsi_pkt_lock); |
1214 | 1209 | ||
1215 | if (!rc) { | 1210 | if (!rc) { |
1216 | FC_DBG("lun reset failed\n"); | 1211 | FC_SCSI_DBG(lp, "lun reset failed\n"); |
1217 | return FAILED; | 1212 | return FAILED; |
1218 | } | 1213 | } |
1219 | 1214 | ||
@@ -1221,7 +1216,7 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | |||
1221 | if (fsp->cdb_status != FCP_TMF_CMPL) | 1216 | if (fsp->cdb_status != FCP_TMF_CMPL) |
1222 | return FAILED; | 1217 | return FAILED; |
1223 | 1218 | ||
1224 | FC_DBG("lun reset to lun %u completed\n", lun); | 1219 | FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun); |
1225 | fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); | 1220 | fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); |
1226 | return SUCCESS; | 1221 | return SUCCESS; |
1227 | } | 1222 | } |
@@ -1388,13 +1383,13 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
1388 | rjt = fc_frame_payload_get(fp, sizeof(*rjt)); | 1383 | rjt = fc_frame_payload_get(fp, sizeof(*rjt)); |
1389 | switch (rjt->er_reason) { | 1384 | switch (rjt->er_reason) { |
1390 | default: | 1385 | default: |
1391 | FC_DEBUG_FCP("device %x unexpected REC reject " | 1386 | FC_FCP_DBG(fsp, "device %x unexpected REC reject " |
1392 | "reason %d expl %d\n", | 1387 | "reason %d expl %d\n", |
1393 | fsp->rport->port_id, rjt->er_reason, | 1388 | fsp->rport->port_id, rjt->er_reason, |
1394 | rjt->er_explan); | 1389 | rjt->er_explan); |
1395 | /* fall through */ | 1390 | /* fall through */ |
1396 | case ELS_RJT_UNSUP: | 1391 | case ELS_RJT_UNSUP: |
1397 | FC_DEBUG_FCP("device does not support REC\n"); | 1392 | FC_FCP_DBG(fsp, "device does not support REC\n"); |
1398 | rp = fsp->rport->dd_data; | 1393 | rp = fsp->rport->dd_data; |
1399 | /* | 1394 | /* |
1400 | * if we do not support RECs or got some bogus | 1395 | * if we do not support RECs or got some bogus |
@@ -1514,8 +1509,8 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
1514 | break; | 1509 | break; |
1515 | 1510 | ||
1516 | default: | 1511 | default: |
1517 | FC_DBG("REC %p fid %x error unexpected error %d\n", | 1512 | FC_FCP_DBG(fsp, "REC %p fid %x error unexpected error %d\n", |
1518 | fsp, fsp->rport->port_id, error); | 1513 | fsp, fsp->rport->port_id, error); |
1519 | fsp->status_code = FC_CMD_PLOGO; | 1514 | fsp->status_code = FC_CMD_PLOGO; |
1520 | /* fall through */ | 1515 | /* fall through */ |
1521 | 1516 | ||
@@ -1524,9 +1519,9 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
1524 | * Assume REC or LS_ACC was lost. | 1519 | * Assume REC or LS_ACC was lost. |
1525 | * The exchange manager will have aborted REC, so retry. | 1520 | * The exchange manager will have aborted REC, so retry. |
1526 | */ | 1521 | */ |
1527 | FC_DBG("REC fid %x error error %d retry %d/%d\n", | 1522 | FC_FCP_DBG(fsp, "REC fid %x error error %d retry %d/%d\n", |
1528 | fsp->rport->port_id, error, fsp->recov_retry, | 1523 | fsp->rport->port_id, error, fsp->recov_retry, |
1529 | FC_MAX_RECOV_RETRY); | 1524 | FC_MAX_RECOV_RETRY); |
1530 | if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) | 1525 | if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) |
1531 | fc_fcp_rec(fsp); | 1526 | fc_fcp_rec(fsp); |
1532 | else | 1527 | else |
@@ -2011,9 +2006,11 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) | |||
2011 | if (lp->state != LPORT_ST_READY) | 2006 | if (lp->state != LPORT_ST_READY) |
2012 | return rc; | 2007 | return rc; |
2013 | 2008 | ||
2009 | FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id); | ||
2010 | |||
2014 | fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO); | 2011 | fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO); |
2015 | if (fsp == NULL) { | 2012 | if (fsp == NULL) { |
2016 | FC_DBG("could not allocate scsi_pkt\n"); | 2013 | printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); |
2017 | sc_cmd->result = DID_NO_CONNECT << 16; | 2014 | sc_cmd->result = DID_NO_CONNECT << 16; |
2018 | goto out; | 2015 | goto out; |
2019 | } | 2016 | } |
@@ -2048,17 +2045,21 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) | |||
2048 | struct fc_lport *lp = shost_priv(shost); | 2045 | struct fc_lport *lp = shost_priv(shost); |
2049 | unsigned long wait_tmo; | 2046 | unsigned long wait_tmo; |
2050 | 2047 | ||
2048 | FC_SCSI_DBG(lp, "Resetting host\n"); | ||
2049 | |||
2051 | lp->tt.lport_reset(lp); | 2050 | lp->tt.lport_reset(lp); |
2052 | wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; | 2051 | wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; |
2053 | while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo)) | 2052 | while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo)) |
2054 | msleep(1000); | 2053 | msleep(1000); |
2055 | 2054 | ||
2056 | if (fc_fcp_lport_queue_ready(lp)) { | 2055 | if (fc_fcp_lport_queue_ready(lp)) { |
2057 | shost_printk(KERN_INFO, shost, "Host reset succeeded.\n"); | 2056 | shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " |
2057 | "on port (%6x)\n", fc_host_port_id(lp->host)); | ||
2058 | return SUCCESS; | 2058 | return SUCCESS; |
2059 | } else { | 2059 | } else { |
2060 | shost_printk(KERN_INFO, shost, "Host reset failed. " | 2060 | shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " |
2061 | "lport not ready.\n"); | 2061 | "port (%6x) is not ready.\n", |
2062 | fc_host_port_id(lp->host)); | ||
2062 | return FAILED; | 2063 | return FAILED; |
2063 | } | 2064 | } |
2064 | } | 2065 | } |
@@ -2117,7 +2118,8 @@ void fc_fcp_destroy(struct fc_lport *lp) | |||
2117 | struct fc_fcp_internal *si = fc_get_scsi_internal(lp); | 2118 | struct fc_fcp_internal *si = fc_get_scsi_internal(lp); |
2118 | 2119 | ||
2119 | if (!list_empty(&si->scsi_pkt_queue)) | 2120 | if (!list_empty(&si->scsi_pkt_queue)) |
2120 | printk(KERN_ERR "Leaked scsi packets.\n"); | 2121 | printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " |
2122 | "port (%6x)\n", fc_host_port_id(lp->host)); | ||
2121 | 2123 | ||
2122 | mempool_destroy(si->scsi_pkt_pool); | 2124 | mempool_destroy(si->scsi_pkt_pool); |
2123 | kfree(si); | 2125 | kfree(si); |
@@ -2166,7 +2168,8 @@ static int __init libfc_init(void) | |||
2166 | sizeof(struct fc_fcp_pkt), | 2168 | sizeof(struct fc_fcp_pkt), |
2167 | 0, SLAB_HWCACHE_ALIGN, NULL); | 2169 | 0, SLAB_HWCACHE_ALIGN, NULL); |
2168 | if (scsi_pkt_cachep == NULL) { | 2170 | if (scsi_pkt_cachep == NULL) { |
2169 | FC_DBG("Unable to allocate SRB cache...module load failed!"); | 2171 | printk(KERN_ERR "libfc: Unable to allocate SRB cache, " |
2172 | "module load failed!"); | ||
2170 | return -ENOMEM; | 2173 | return -ENOMEM; |
2171 | } | 2174 | } |
2172 | 2175 | ||
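
The libfc hunks above replace the per-file FC_DEBUG_* wrappers, each gated by its own static flag, with FC_LPORT_DBG/FC_DISC_DBG/FC_EXCH_DBG/FC_EM_DBG/FC_FCP_DBG/FC_SCSI_DBG macros driven by the single fc_debug_logging bitmask exported as the debug_logging module parameter. The macro definitions themselves live in the libfc headers and are not part of this diff; the sketch below only shows the general shape such a bitmask-gated macro can take, with assumed bit values and an assumed message prefix.

/* Sketch only -- not the actual libfc header definitions. */
extern unsigned int fc_debug_logging;		/* set via debug_logging= */

#define FC_LPORT_LOGGING	0x01		/* assumed bit assignment */
#define FC_DISC_LOGGING		0x02		/* assumed bit assignment */
#define FC_FCP_LOGGING		0x04		/* assumed bit assignment */

#define FC_CHECK_LOGGING(LEVEL, CMD)				\
do {								\
	if (unlikely(fc_debug_logging & (LEVEL)))		\
		CMD;						\
} while (0)

#define FC_LPORT_DBG(lport, fmt, args...)				\
	FC_CHECK_LOGGING(FC_LPORT_LOGGING,				\
			 printk(KERN_INFO "host%u: lport %6x: " fmt,	\
				(lport)->host->host_no,			\
				fc_host_port_id((lport)->host), ##args))

With definitions along these lines, something like "modprobe libfc debug_logging=3" would enable the lport and discovery messages under the assumed bit values above, while the compiled-out-by-default static flags disappear entirely.
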
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index e0c247724d2b..745fa5555d6a 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c | |||
@@ -101,14 +101,6 @@ | |||
101 | 101 | ||
102 | #define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/ | 102 | #define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/ |
103 | 103 | ||
104 | static int fc_lport_debug; | ||
105 | |||
106 | #define FC_DEBUG_LPORT(fmt...) \ | ||
107 | do { \ | ||
108 | if (fc_lport_debug) \ | ||
109 | FC_DBG(fmt); \ | ||
110 | } while (0) | ||
111 | |||
112 | static void fc_lport_error(struct fc_lport *, struct fc_frame *); | 104 | static void fc_lport_error(struct fc_lport *, struct fc_frame *); |
113 | 105 | ||
114 | static void fc_lport_enter_reset(struct fc_lport *); | 106 | static void fc_lport_enter_reset(struct fc_lport *); |
@@ -151,8 +143,8 @@ static void fc_lport_rport_callback(struct fc_lport *lport, | |||
151 | struct fc_rport *rport, | 143 | struct fc_rport *rport, |
152 | enum fc_rport_event event) | 144 | enum fc_rport_event event) |
153 | { | 145 | { |
154 | FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event, | 146 | FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event, |
155 | rport->port_id); | 147 | rport->port_id); |
156 | 148 | ||
157 | switch (event) { | 149 | switch (event) { |
158 | case RPORT_EV_CREATED: | 150 | case RPORT_EV_CREATED: |
@@ -162,19 +154,19 @@ static void fc_lport_rport_callback(struct fc_lport *lport, | |||
162 | lport->dns_rp = rport; | 154 | lport->dns_rp = rport; |
163 | fc_lport_enter_rpn_id(lport); | 155 | fc_lport_enter_rpn_id(lport); |
164 | } else { | 156 | } else { |
165 | FC_DEBUG_LPORT("Received an CREATED event on " | 157 | FC_LPORT_DBG(lport, "Received an CREATED event " |
166 | "port (%6x) for the directory " | 158 | "on port (%6x) for the directory " |
167 | "server, but the lport is not " | 159 | "server, but the lport is not " |
168 | "in the DNS state, it's in the " | 160 | "in the DNS state, it's in the " |
169 | "%d state", rport->port_id, | 161 | "%d state", rport->port_id, |
170 | lport->state); | 162 | lport->state); |
171 | lport->tt.rport_logoff(rport); | 163 | lport->tt.rport_logoff(rport); |
172 | } | 164 | } |
173 | mutex_unlock(&lport->lp_mutex); | 165 | mutex_unlock(&lport->lp_mutex); |
174 | } else | 166 | } else |
175 | FC_DEBUG_LPORT("Received an event for port (%6x) " | 167 | FC_LPORT_DBG(lport, "Received an event for port (%6x) " |
176 | "which is not the directory server\n", | 168 | "which is not the directory server\n", |
177 | rport->port_id); | 169 | rport->port_id); |
178 | break; | 170 | break; |
179 | case RPORT_EV_LOGO: | 171 | case RPORT_EV_LOGO: |
180 | case RPORT_EV_FAILED: | 172 | case RPORT_EV_FAILED: |
@@ -185,9 +177,9 @@ static void fc_lport_rport_callback(struct fc_lport *lport, | |||
185 | mutex_unlock(&lport->lp_mutex); | 177 | mutex_unlock(&lport->lp_mutex); |
186 | 178 | ||
187 | } else | 179 | } else |
188 | FC_DEBUG_LPORT("Received an event for port (%6x) " | 180 | FC_LPORT_DBG(lport, "Received an event for port (%6x) " |
189 | "which is not the directory server\n", | 181 | "which is not the directory server\n", |
190 | rport->port_id); | 182 | rport->port_id); |
191 | break; | 183 | break; |
192 | case RPORT_EV_NONE: | 184 | case RPORT_EV_NONE: |
193 | break; | 185 | break; |
@@ -363,8 +355,8 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) | |||
363 | static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, | 355 | static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, |
364 | struct fc_lport *lport) | 356 | struct fc_lport *lport) |
365 | { | 357 | { |
366 | FC_DEBUG_LPORT("Received RLIR request while in state %s\n", | 358 | FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", |
367 | fc_lport_state(lport)); | 359 | fc_lport_state(lport)); |
368 | 360 | ||
369 | lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); | 361 | lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); |
370 | fc_frame_free(fp); | 362 | fc_frame_free(fp); |
@@ -389,8 +381,8 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, | |||
389 | void *dp; | 381 | void *dp; |
390 | u32 f_ctl; | 382 | u32 f_ctl; |
391 | 383 | ||
392 | FC_DEBUG_LPORT("Received RLIR request while in state %s\n", | 384 | FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", |
393 | fc_lport_state(lport)); | 385 | fc_lport_state(lport)); |
394 | 386 | ||
395 | len = fr_len(in_fp) - sizeof(struct fc_frame_header); | 387 | len = fr_len(in_fp) - sizeof(struct fc_frame_header); |
396 | pp = fc_frame_payload_get(in_fp, len); | 388 | pp = fc_frame_payload_get(in_fp, len); |
@@ -437,8 +429,8 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, | |||
437 | size_t len; | 429 | size_t len; |
438 | u32 f_ctl; | 430 | u32 f_ctl; |
439 | 431 | ||
440 | FC_DEBUG_LPORT("Received RNID request while in state %s\n", | 432 | FC_LPORT_DBG(lport, "Received RNID request while in state %s\n", |
441 | fc_lport_state(lport)); | 433 | fc_lport_state(lport)); |
442 | 434 | ||
443 | req = fc_frame_payload_get(in_fp, sizeof(*req)); | 435 | req = fc_frame_payload_get(in_fp, sizeof(*req)); |
444 | if (!req) { | 436 | if (!req) { |
@@ -498,8 +490,8 @@ static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp, | |||
498 | size_t len; | 490 | size_t len; |
499 | u32 f_ctl; | 491 | u32 f_ctl; |
500 | 492 | ||
501 | FC_DEBUG_LPORT("Received ADISC request while in state %s\n", | 493 | FC_LPORT_DBG(lport, "Received ADISC request while in state %s\n", |
502 | fc_lport_state(lport)); | 494 | fc_lport_state(lport)); |
503 | 495 | ||
504 | req = fc_frame_payload_get(in_fp, sizeof(*req)); | 496 | req = fc_frame_payload_get(in_fp, sizeof(*req)); |
505 | if (!req) { | 497 | if (!req) { |
@@ -574,8 +566,8 @@ EXPORT_SYMBOL(fc_fabric_login); | |||
574 | */ | 566 | */ |
575 | void fc_linkup(struct fc_lport *lport) | 567 | void fc_linkup(struct fc_lport *lport) |
576 | { | 568 | { |
577 | FC_DEBUG_LPORT("Link is up for port (%6x)\n", | 569 | printk(KERN_INFO "libfc: Link up on port (%6x)\n", |
578 | fc_host_port_id(lport->host)); | 570 | fc_host_port_id(lport->host)); |
579 | 571 | ||
580 | mutex_lock(&lport->lp_mutex); | 572 | mutex_lock(&lport->lp_mutex); |
581 | if (!lport->link_up) { | 573 | if (!lport->link_up) { |
@@ -595,8 +587,8 @@ EXPORT_SYMBOL(fc_linkup); | |||
595 | void fc_linkdown(struct fc_lport *lport) | 587 | void fc_linkdown(struct fc_lport *lport) |
596 | { | 588 | { |
597 | mutex_lock(&lport->lp_mutex); | 589 | mutex_lock(&lport->lp_mutex); |
598 | FC_DEBUG_LPORT("Link is down for port (%6x)\n", | 590 | printk(KERN_INFO "libfc: Link down on port (%6x)\n", |
599 | fc_host_port_id(lport->host)); | 591 | fc_host_port_id(lport->host)); |
600 | 592 | ||
601 | if (lport->link_up) { | 593 | if (lport->link_up) { |
602 | lport->link_up = 0; | 594 | lport->link_up = 0; |
@@ -701,12 +693,11 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) | |||
701 | { | 693 | { |
702 | switch (event) { | 694 | switch (event) { |
703 | case DISC_EV_SUCCESS: | 695 | case DISC_EV_SUCCESS: |
704 | FC_DEBUG_LPORT("Got a SUCCESS event for port (%6x)\n", | 696 | FC_LPORT_DBG(lport, "Discovery succeeded\n"); |
705 | fc_host_port_id(lport->host)); | ||
706 | break; | 697 | break; |
707 | case DISC_EV_FAILED: | 698 | case DISC_EV_FAILED: |
708 | FC_DEBUG_LPORT("Got a FAILED event for port (%6x)\n", | 699 | printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n", |
709 | fc_host_port_id(lport->host)); | 700 | fc_host_port_id(lport->host)); |
710 | mutex_lock(&lport->lp_mutex); | 701 | mutex_lock(&lport->lp_mutex); |
711 | fc_lport_enter_reset(lport); | 702 | fc_lport_enter_reset(lport); |
712 | mutex_unlock(&lport->lp_mutex); | 703 | mutex_unlock(&lport->lp_mutex); |
@@ -726,8 +717,8 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) | |||
726 | */ | 717 | */ |
727 | static void fc_lport_enter_ready(struct fc_lport *lport) | 718 | static void fc_lport_enter_ready(struct fc_lport *lport) |
728 | { | 719 | { |
729 | FC_DEBUG_LPORT("Port (%6x) entered Ready from state %s\n", | 720 | FC_LPORT_DBG(lport, "Entered READY from state %s\n", |
730 | fc_host_port_id(lport->host), fc_lport_state(lport)); | 721 | fc_lport_state(lport)); |
731 | 722 | ||
732 | fc_lport_state_enter(lport, LPORT_ST_READY); | 723 | fc_lport_state_enter(lport, LPORT_ST_READY); |
733 | 724 | ||
@@ -762,8 +753,8 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, | |||
762 | u32 local_fid; | 753 | u32 local_fid; |
763 | u32 f_ctl; | 754 | u32 f_ctl; |
764 | 755 | ||
765 | FC_DEBUG_LPORT("Received FLOGI request while in state %s\n", | 756 | FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n", |
766 | fc_lport_state(lport)); | 757 | fc_lport_state(lport)); |
767 | 758 | ||
768 | fh = fc_frame_header_get(rx_fp); | 759 | fh = fc_frame_header_get(rx_fp); |
769 | remote_fid = ntoh24(fh->fh_s_id); | 760 | remote_fid = ntoh24(fh->fh_s_id); |
@@ -772,12 +763,11 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, | |||
772 | goto out; | 763 | goto out; |
773 | remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); | 764 | remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); |
774 | if (remote_wwpn == lport->wwpn) { | 765 | if (remote_wwpn == lport->wwpn) { |
775 | FC_DBG("FLOGI from port with same WWPN %llx " | 766 | printk(KERN_WARNING "libfc: Received FLOGI from port " |
776 | "possible configuration error\n", | 767 | "with same WWPN %llx\n", remote_wwpn); |
777 | (unsigned long long)remote_wwpn); | ||
778 | goto out; | 768 | goto out; |
779 | } | 769 | } |
780 | FC_DBG("FLOGI from port WWPN %llx\n", (unsigned long long)remote_wwpn); | 770 | FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn); |
781 | 771 | ||
782 | /* | 772 | /* |
783 | * XXX what is the right thing to do for FIDs? | 773 | * XXX what is the right thing to do for FIDs? |
@@ -909,7 +899,8 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, | |||
909 | } | 899 | } |
910 | } | 900 | } |
911 | } else { | 901 | } else { |
912 | FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp)); | 902 | FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n", |
903 | fr_eof(fp)); | ||
913 | fc_frame_free(fp); | 904 | fc_frame_free(fp); |
914 | } | 905 | } |
915 | mutex_unlock(&lport->lp_mutex); | 906 | mutex_unlock(&lport->lp_mutex); |
@@ -947,8 +938,8 @@ EXPORT_SYMBOL(fc_lport_reset); | |||
947 | */ | 938 | */ |
948 | static void fc_lport_enter_reset(struct fc_lport *lport) | 939 | static void fc_lport_enter_reset(struct fc_lport *lport) |
949 | { | 940 | { |
950 | FC_DEBUG_LPORT("Port (%6x) entered RESET state from %s state\n", | 941 | FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", |
951 | fc_host_port_id(lport->host), fc_lport_state(lport)); | 942 | fc_lport_state(lport)); |
952 | 943 | ||
953 | fc_lport_state_enter(lport, LPORT_ST_RESET); | 944 | fc_lport_state_enter(lport, LPORT_ST_RESET); |
954 | 945 | ||
@@ -982,9 +973,9 @@ static void fc_lport_enter_reset(struct fc_lport *lport) | |||
982 | static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) | 973 | static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) |
983 | { | 974 | { |
984 | unsigned long delay = 0; | 975 | unsigned long delay = 0; |
985 | FC_DEBUG_LPORT("Error %ld in state %s, retries %d\n", | 976 | FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n", |
986 | PTR_ERR(fp), fc_lport_state(lport), | 977 | PTR_ERR(fp), fc_lport_state(lport), |
987 | lport->retry_count); | 978 | lport->retry_count); |
988 | 979 | ||
989 | if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { | 980 | if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { |
990 | /* | 981 | /* |
@@ -1040,11 +1031,11 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1040 | 1031 | ||
1041 | mutex_lock(&lport->lp_mutex); | 1032 | mutex_lock(&lport->lp_mutex); |
1042 | 1033 | ||
1043 | FC_DEBUG_LPORT("Received a RFT_ID response\n"); | 1034 | FC_LPORT_DBG(lport, "Received a RFT_ID response\n"); |
1044 | 1035 | ||
1045 | if (lport->state != LPORT_ST_RFT_ID) { | 1036 | if (lport->state != LPORT_ST_RFT_ID) { |
1046 | FC_DBG("Received a RFT_ID response, but in state %s\n", | 1037 | FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state " |
1047 | fc_lport_state(lport)); | 1038 | "%s\n", fc_lport_state(lport)); |
1048 | if (IS_ERR(fp)) | 1039 | if (IS_ERR(fp)) |
1049 | goto err; | 1040 | goto err; |
1050 | goto out; | 1041 | goto out; |
@@ -1094,11 +1085,11 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1094 | 1085 | ||
1095 | mutex_lock(&lport->lp_mutex); | 1086 | mutex_lock(&lport->lp_mutex); |
1096 | 1087 | ||
1097 | FC_DEBUG_LPORT("Received a RPN_ID response\n"); | 1088 | FC_LPORT_DBG(lport, "Received a RPN_ID response\n"); |
1098 | 1089 | ||
1099 | if (lport->state != LPORT_ST_RPN_ID) { | 1090 | if (lport->state != LPORT_ST_RPN_ID) { |
1100 | FC_DBG("Received a RPN_ID response, but in state %s\n", | 1091 | FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state " |
1101 | fc_lport_state(lport)); | 1092 | "%s\n", fc_lport_state(lport)); |
1102 | if (IS_ERR(fp)) | 1093 | if (IS_ERR(fp)) |
1103 | goto err; | 1094 | goto err; |
1104 | goto out; | 1095 | goto out; |
@@ -1146,11 +1137,11 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1146 | 1137 | ||
1147 | mutex_lock(&lport->lp_mutex); | 1138 | mutex_lock(&lport->lp_mutex); |
1148 | 1139 | ||
1149 | FC_DEBUG_LPORT("Received a SCR response\n"); | 1140 | FC_LPORT_DBG(lport, "Received a SCR response\n"); |
1150 | 1141 | ||
1151 | if (lport->state != LPORT_ST_SCR) { | 1142 | if (lport->state != LPORT_ST_SCR) { |
1152 | FC_DBG("Received a SCR response, but in state %s\n", | 1143 | FC_LPORT_DBG(lport, "Received a SCR response, but in state " |
1153 | fc_lport_state(lport)); | 1144 | "%s\n", fc_lport_state(lport)); |
1154 | if (IS_ERR(fp)) | 1145 | if (IS_ERR(fp)) |
1155 | goto err; | 1146 | goto err; |
1156 | goto out; | 1147 | goto out; |
@@ -1184,8 +1175,8 @@ static void fc_lport_enter_scr(struct fc_lport *lport) | |||
1184 | { | 1175 | { |
1185 | struct fc_frame *fp; | 1176 | struct fc_frame *fp; |
1186 | 1177 | ||
1187 | FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n", | 1178 | FC_LPORT_DBG(lport, "Entered SCR state from %s state\n", |
1188 | fc_host_port_id(lport->host), fc_lport_state(lport)); | 1179 | fc_lport_state(lport)); |
1189 | 1180 | ||
1190 | fc_lport_state_enter(lport, LPORT_ST_SCR); | 1181 | fc_lport_state_enter(lport, LPORT_ST_SCR); |
1191 | 1182 | ||
@@ -1213,8 +1204,8 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport) | |||
1213 | struct fc_ns_fts *lps; | 1204 | struct fc_ns_fts *lps; |
1214 | int i; | 1205 | int i; |
1215 | 1206 | ||
1216 | FC_DEBUG_LPORT("Port (%6x) entered RFT_ID state from %s state\n", | 1207 | FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n", |
1217 | fc_host_port_id(lport->host), fc_lport_state(lport)); | 1208 | fc_lport_state(lport)); |
1218 | 1209 | ||
1219 | fc_lport_state_enter(lport, LPORT_ST_RFT_ID); | 1210 | fc_lport_state_enter(lport, LPORT_ST_RFT_ID); |
1220 | 1211 | ||
@@ -1253,8 +1244,8 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport) | |||
1253 | { | 1244 | { |
1254 | struct fc_frame *fp; | 1245 | struct fc_frame *fp; |
1255 | 1246 | ||
1256 | FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n", | 1247 | FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n", |
1257 | fc_host_port_id(lport->host), fc_lport_state(lport)); | 1248 | fc_lport_state(lport)); |
1258 | 1249 | ||
1259 | fc_lport_state_enter(lport, LPORT_ST_RPN_ID); | 1250 | fc_lport_state_enter(lport, LPORT_ST_RPN_ID); |
1260 | 1251 | ||
@@ -1294,8 +1285,8 @@ static void fc_lport_enter_dns(struct fc_lport *lport) | |||
1294 | dp.ids.roles = FC_RPORT_ROLE_UNKNOWN; | 1285 | dp.ids.roles = FC_RPORT_ROLE_UNKNOWN; |
1295 | dp.lp = lport; | 1286 | dp.lp = lport; |
1296 | 1287 | ||
1297 | FC_DEBUG_LPORT("Port (%6x) entered DNS state from %s state\n", | 1288 | FC_LPORT_DBG(lport, "Entered DNS state from %s state\n", |
1298 | fc_host_port_id(lport->host), fc_lport_state(lport)); | 1289 | fc_lport_state(lport)); |
1299 | 1290 | ||
1300 | fc_lport_state_enter(lport, LPORT_ST_DNS); | 1291 | fc_lport_state_enter(lport, LPORT_ST_DNS); |
1301 | 1292 | ||
@@ -1374,11 +1365,11 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1374 | 1365 | ||
1375 | mutex_lock(&lport->lp_mutex); | 1366 | mutex_lock(&lport->lp_mutex); |
1376 | 1367 | ||
1377 | FC_DEBUG_LPORT("Received a LOGO response\n"); | 1368 | FC_LPORT_DBG(lport, "Received a LOGO response\n"); |
1378 | 1369 | ||
1379 | if (lport->state != LPORT_ST_LOGO) { | 1370 | if (lport->state != LPORT_ST_LOGO) { |
1380 | FC_DBG("Received a LOGO response, but in state %s\n", | 1371 | FC_LPORT_DBG(lport, "Received a LOGO response, but in state " |
1381 | fc_lport_state(lport)); | 1372 | "%s\n", fc_lport_state(lport)); |
1382 | if (IS_ERR(fp)) | 1373 | if (IS_ERR(fp)) |
1383 | goto err; | 1374 | goto err; |
1384 | goto out; | 1375 | goto out; |
@@ -1413,8 +1404,8 @@ static void fc_lport_enter_logo(struct fc_lport *lport) | |||
1413 | struct fc_frame *fp; | 1404 | struct fc_frame *fp; |
1414 | struct fc_els_logo *logo; | 1405 | struct fc_els_logo *logo; |
1415 | 1406 | ||
1416 | FC_DEBUG_LPORT("Port (%6x) entered LOGO state from %s state\n", | 1407 | FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n", |
1417 | fc_host_port_id(lport->host), fc_lport_state(lport)); | 1408 | fc_lport_state(lport)); |
1418 | 1409 | ||
1419 | fc_lport_state_enter(lport, LPORT_ST_LOGO); | 1410 | fc_lport_state_enter(lport, LPORT_ST_LOGO); |
1420 | 1411 | ||
@@ -1456,11 +1447,11 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1456 | 1447 | ||
1457 | mutex_lock(&lport->lp_mutex); | 1448 | mutex_lock(&lport->lp_mutex); |
1458 | 1449 | ||
1459 | FC_DEBUG_LPORT("Received a FLOGI response\n"); | 1450 | FC_LPORT_DBG(lport, "Received a FLOGI response\n"); |
1460 | 1451 | ||
1461 | if (lport->state != LPORT_ST_FLOGI) { | 1452 | if (lport->state != LPORT_ST_FLOGI) { |
1462 | FC_DBG("Received a FLOGI response, but in state %s\n", | 1453 | FC_LPORT_DBG(lport, "Received a FLOGI response, but in state " |
1463 | fc_lport_state(lport)); | 1454 | "%s\n", fc_lport_state(lport)); |
1464 | if (IS_ERR(fp)) | 1455 | if (IS_ERR(fp)) |
1465 | goto err; | 1456 | goto err; |
1466 | goto out; | 1457 | goto out; |
@@ -1475,7 +1466,8 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1475 | did = ntoh24(fh->fh_d_id); | 1466 | did = ntoh24(fh->fh_d_id); |
1476 | if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { | 1467 | if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { |
1477 | 1468 | ||
1478 | FC_DEBUG_LPORT("Assigned fid %x\n", did); | 1469 | printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n", |
1470 | did); | ||
1479 | fc_host_port_id(lport->host) = did; | 1471 | fc_host_port_id(lport->host) = did; |
1480 | 1472 | ||
1481 | flp = fc_frame_payload_get(fp, sizeof(*flp)); | 1473 | flp = fc_frame_payload_get(fp, sizeof(*flp)); |
@@ -1494,7 +1486,8 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1494 | if (e_d_tov > lport->e_d_tov) | 1486 | if (e_d_tov > lport->e_d_tov) |
1495 | lport->e_d_tov = e_d_tov; | 1487 | lport->e_d_tov = e_d_tov; |
1496 | lport->r_a_tov = 2 * e_d_tov; | 1488 | lport->r_a_tov = 2 * e_d_tov; |
1497 | FC_DBG("Point-to-Point mode\n"); | 1489 | printk(KERN_INFO "libfc: Port (%6x) entered " |
1490 | "point to point mode\n", did); | ||
1498 | fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), | 1491 | fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), |
1499 | get_unaligned_be64( | 1492 | get_unaligned_be64( |
1500 | &flp->fl_wwpn), | 1493 | &flp->fl_wwpn), |
@@ -1517,7 +1510,7 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1517 | } | 1510 | } |
1518 | } | 1511 | } |
1519 | } else { | 1512 | } else { |
1520 | FC_DBG("bad FLOGI response\n"); | 1513 | FC_LPORT_DBG(lport, "Bad FLOGI response\n"); |
1521 | } | 1514 | } |
1522 | 1515 | ||
1523 | out: | 1516 | out: |
@@ -1537,7 +1530,8 @@ void fc_lport_enter_flogi(struct fc_lport *lport) | |||
1537 | { | 1530 | { |
1538 | struct fc_frame *fp; | 1531 | struct fc_frame *fp; |
1539 | 1532 | ||
1540 | FC_DEBUG_LPORT("Processing FLOGI state\n"); | 1533 | FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n", |
1534 | fc_lport_state(lport)); | ||
1541 | 1535 | ||
1542 | fc_lport_state_enter(lport, LPORT_ST_FLOGI); | 1536 | fc_lport_state_enter(lport, LPORT_ST_FLOGI); |
1543 | 1537 | ||
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 7bfbff7e0efb..7162385f52eb 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
@@ -55,14 +55,6 @@ | |||
55 | #include <scsi/libfc.h> | 55 | #include <scsi/libfc.h> |
56 | #include <scsi/fc_encode.h> | 56 | #include <scsi/fc_encode.h> |
57 | 57 | ||
58 | static int fc_rport_debug; | ||
59 | |||
60 | #define FC_DEBUG_RPORT(fmt...) \ | ||
61 | do { \ | ||
62 | if (fc_rport_debug) \ | ||
63 | FC_DBG(fmt); \ | ||
64 | } while (0) | ||
65 | |||
66 | struct workqueue_struct *rport_event_queue; | 58 | struct workqueue_struct *rport_event_queue; |
67 | 59 | ||
68 | static void fc_rport_enter_plogi(struct fc_rport *); | 60 | static void fc_rport_enter_plogi(struct fc_rport *); |
@@ -97,7 +89,7 @@ static const char *fc_rport_state_names[] = { | |||
97 | static void fc_rport_rogue_destroy(struct device *dev) | 89 | static void fc_rport_rogue_destroy(struct device *dev) |
98 | { | 90 | { |
99 | struct fc_rport *rport = dev_to_rport(dev); | 91 | struct fc_rport *rport = dev_to_rport(dev); |
100 | FC_DEBUG_RPORT("Destroying rogue rport (%6x)\n", rport->port_id); | 92 | FC_RPORT_DBG(rport, "Destroying rogue rport\n"); |
101 | kfree(rport); | 93 | kfree(rport); |
102 | } | 94 | } |
103 | 95 | ||
@@ -263,8 +255,8 @@ static void fc_rport_work(struct work_struct *work) | |||
263 | 255 | ||
264 | fc_rport_state_enter(new_rport, RPORT_ST_READY); | 256 | fc_rport_state_enter(new_rport, RPORT_ST_READY); |
265 | } else { | 257 | } else { |
266 | FC_DBG("Failed to create the rport for port " | 258 | printk(KERN_WARNING "libfc: Failed to allocate " |
267 | "(%6x).\n", ids.port_id); | 259 | " memory for rport (%6x)\n", ids.port_id); |
268 | event = RPORT_EV_FAILED; | 260 | event = RPORT_EV_FAILED; |
269 | } | 261 | } |
270 | if (rport->port_id != FC_FID_DIR_SERV) | 262 | if (rport->port_id != FC_FID_DIR_SERV) |
@@ -309,7 +301,7 @@ int fc_rport_login(struct fc_rport *rport) | |||
309 | 301 | ||
310 | mutex_lock(&rdata->rp_mutex); | 302 | mutex_lock(&rdata->rp_mutex); |
311 | 303 | ||
312 | FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id); | 304 | FC_RPORT_DBG(rport, "Login to port\n"); |
313 | 305 | ||
314 | fc_rport_enter_plogi(rport); | 306 | fc_rport_enter_plogi(rport); |
315 | 307 | ||
@@ -329,16 +321,13 @@ int fc_rport_login(struct fc_rport *rport) | |||
329 | int fc_rport_logoff(struct fc_rport *rport) | 321 | int fc_rport_logoff(struct fc_rport *rport) |
330 | { | 322 | { |
331 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | 323 | struct fc_rport_libfc_priv *rdata = rport->dd_data; |
332 | struct fc_lport *lport = rdata->local_port; | ||
333 | 324 | ||
334 | mutex_lock(&rdata->rp_mutex); | 325 | mutex_lock(&rdata->rp_mutex); |
335 | 326 | ||
336 | FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id); | 327 | FC_RPORT_DBG(rport, "Remove port\n"); |
337 | 328 | ||
338 | if (rdata->rp_state == RPORT_ST_NONE) { | 329 | if (rdata->rp_state == RPORT_ST_NONE) { |
339 | FC_DEBUG_RPORT("(%6x): Port (%6x) in NONE state," | 330 | FC_RPORT_DBG(rport, "Port in NONE state, not removing\n"); |
340 | " not removing", fc_host_port_id(lport->host), | ||
341 | rport->port_id); | ||
342 | mutex_unlock(&rdata->rp_mutex); | 331 | mutex_unlock(&rdata->rp_mutex); |
343 | goto out; | 332 | goto out; |
344 | } | 333 | } |
@@ -379,7 +368,7 @@ static void fc_rport_enter_ready(struct fc_rport *rport) | |||
379 | 368 | ||
380 | fc_rport_state_enter(rport, RPORT_ST_READY); | 369 | fc_rport_state_enter(rport, RPORT_ST_READY); |
381 | 370 | ||
382 | FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id); | 371 | FC_RPORT_DBG(rport, "Port is Ready\n"); |
383 | 372 | ||
384 | rdata->event = RPORT_EV_CREATED; | 373 | rdata->event = RPORT_EV_CREATED; |
385 | queue_work(rport_event_queue, &rdata->event_work); | 374 | queue_work(rport_event_queue, &rdata->event_work); |
@@ -436,8 +425,8 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) | |||
436 | { | 425 | { |
437 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | 426 | struct fc_rport_libfc_priv *rdata = rport->dd_data; |
438 | 427 | ||
439 | FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n", | 428 | FC_RPORT_DBG(rport, "Error %ld in state %s, retries %d\n", |
440 | PTR_ERR(fp), fc_rport_state(rport), rdata->retries); | 429 | PTR_ERR(fp), fc_rport_state(rport), rdata->retries); |
441 | 430 | ||
442 | switch (rdata->rp_state) { | 431 | switch (rdata->rp_state) { |
443 | case RPORT_ST_PLOGI: | 432 | case RPORT_ST_PLOGI: |
@@ -479,8 +468,8 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp) | |||
479 | return fc_rport_error(rport, fp); | 468 | return fc_rport_error(rport, fp); |
480 | 469 | ||
481 | if (rdata->retries < rdata->local_port->max_rport_retry_count) { | 470 | if (rdata->retries < rdata->local_port->max_rport_retry_count) { |
482 | FC_DEBUG_RPORT("Error %ld in state %s, retrying\n", | 471 | FC_RPORT_DBG(rport, "Error %ld in state %s, retrying\n", |
483 | PTR_ERR(fp), fc_rport_state(rport)); | 472 | PTR_ERR(fp), fc_rport_state(rport)); |
484 | rdata->retries++; | 473 | rdata->retries++; |
485 | /* no additional delay on exchange timeouts */ | 474 | /* no additional delay on exchange timeouts */ |
486 | if (PTR_ERR(fp) == -FC_EX_TIMEOUT) | 475 | if (PTR_ERR(fp) == -FC_EX_TIMEOUT) |
@@ -517,12 +506,11 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
517 | 506 | ||
518 | mutex_lock(&rdata->rp_mutex); | 507 | mutex_lock(&rdata->rp_mutex); |
519 | 508 | ||
520 | FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n", | 509 | FC_RPORT_DBG(rport, "Received a PLOGI response\n"); |
521 | rport->port_id); | ||
522 | 510 | ||
523 | if (rdata->rp_state != RPORT_ST_PLOGI) { | 511 | if (rdata->rp_state != RPORT_ST_PLOGI) { |
524 | FC_DBG("Received a PLOGI response, but in state %s\n", | 512 | FC_RPORT_DBG(rport, "Received a PLOGI response, but in state " |
525 | fc_rport_state(rport)); | 513 | "%s\n", fc_rport_state(rport)); |
526 | if (IS_ERR(fp)) | 514 | if (IS_ERR(fp)) |
527 | goto err; | 515 | goto err; |
528 | goto out; | 516 | goto out; |
@@ -583,8 +571,8 @@ static void fc_rport_enter_plogi(struct fc_rport *rport) | |||
583 | struct fc_lport *lport = rdata->local_port; | 571 | struct fc_lport *lport = rdata->local_port; |
584 | struct fc_frame *fp; | 572 | struct fc_frame *fp; |
585 | 573 | ||
586 | FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n", | 574 | FC_RPORT_DBG(rport, "Port entered PLOGI state from %s state\n", |
587 | rport->port_id, fc_rport_state(rport)); | 575 | fc_rport_state(rport)); |
588 | 576 | ||
589 | fc_rport_state_enter(rport, RPORT_ST_PLOGI); | 577 | fc_rport_state_enter(rport, RPORT_ST_PLOGI); |
590 | 578 | ||
@@ -628,12 +616,11 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
628 | 616 | ||
629 | mutex_lock(&rdata->rp_mutex); | 617 | mutex_lock(&rdata->rp_mutex); |
630 | 618 | ||
631 | FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n", | 619 | FC_RPORT_DBG(rport, "Received a PRLI response\n"); |
632 | rport->port_id); | ||
633 | 620 | ||
634 | if (rdata->rp_state != RPORT_ST_PRLI) { | 621 | if (rdata->rp_state != RPORT_ST_PRLI) { |
635 | FC_DBG("Received a PRLI response, but in state %s\n", | 622 | FC_RPORT_DBG(rport, "Received a PRLI response, but in state " |
636 | fc_rport_state(rport)); | 623 | "%s\n", fc_rport_state(rport)); |
637 | if (IS_ERR(fp)) | 624 | if (IS_ERR(fp)) |
638 | goto err; | 625 | goto err; |
639 | goto out; | 626 | goto out; |
@@ -663,7 +650,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
663 | fc_rport_enter_rtv(rport); | 650 | fc_rport_enter_rtv(rport); |
664 | 651 | ||
665 | } else { | 652 | } else { |
666 | FC_DBG("Bad ELS response\n"); | 653 | FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n"); |
667 | rdata->event = RPORT_EV_FAILED; | 654 | rdata->event = RPORT_EV_FAILED; |
668 | fc_rport_state_enter(rport, RPORT_ST_NONE); | 655 | fc_rport_state_enter(rport, RPORT_ST_NONE); |
669 | queue_work(rport_event_queue, &rdata->event_work); | 656 | queue_work(rport_event_queue, &rdata->event_work); |
@@ -695,12 +682,11 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
695 | 682 | ||
696 | mutex_lock(&rdata->rp_mutex); | 683 | mutex_lock(&rdata->rp_mutex); |
697 | 684 | ||
698 | FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n", | 685 | FC_RPORT_DBG(rport, "Received a LOGO response\n"); |
699 | rport->port_id); | ||
700 | 686 | ||
701 | if (rdata->rp_state != RPORT_ST_LOGO) { | 687 | if (rdata->rp_state != RPORT_ST_LOGO) { |
702 | FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n", | 688 | FC_RPORT_DBG(rport, "Received a LOGO response, but in state " |
703 | fc_rport_state(rport)); | 689 | "%s\n", fc_rport_state(rport)); |
704 | if (IS_ERR(fp)) | 690 | if (IS_ERR(fp)) |
705 | goto err; | 691 | goto err; |
706 | goto out; | 692 | goto out; |
@@ -715,7 +701,7 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
715 | if (op == ELS_LS_ACC) { | 701 | if (op == ELS_LS_ACC) { |
716 | fc_rport_enter_rtv(rport); | 702 | fc_rport_enter_rtv(rport); |
717 | } else { | 703 | } else { |
718 | FC_DBG("Bad ELS response\n"); | 704 | FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n"); |
719 | rdata->event = RPORT_EV_LOGO; | 705 | rdata->event = RPORT_EV_LOGO; |
720 | fc_rport_state_enter(rport, RPORT_ST_NONE); | 706 | fc_rport_state_enter(rport, RPORT_ST_NONE); |
721 | queue_work(rport_event_queue, &rdata->event_work); | 707 | queue_work(rport_event_queue, &rdata->event_work); |
@@ -745,8 +731,8 @@ static void fc_rport_enter_prli(struct fc_rport *rport) | |||
745 | } *pp; | 731 | } *pp; |
746 | struct fc_frame *fp; | 732 | struct fc_frame *fp; |
747 | 733 | ||
748 | FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n", | 734 | FC_RPORT_DBG(rport, "Port entered PRLI state from %s state\n", |
749 | rport->port_id, fc_rport_state(rport)); | 735 | fc_rport_state(rport)); |
750 | 736 | ||
751 | fc_rport_state_enter(rport, RPORT_ST_PRLI); | 737 | fc_rport_state_enter(rport, RPORT_ST_PRLI); |
752 | 738 | ||
@@ -784,12 +770,11 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
784 | 770 | ||
785 | mutex_lock(&rdata->rp_mutex); | 771 | mutex_lock(&rdata->rp_mutex); |
786 | 772 | ||
787 | FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n", | 773 | FC_RPORT_DBG(rport, "Received a RTV response\n"); |
788 | rport->port_id); | ||
789 | 774 | ||
790 | if (rdata->rp_state != RPORT_ST_RTV) { | 775 | if (rdata->rp_state != RPORT_ST_RTV) { |
791 | FC_DBG("Received a RTV response, but in state %s\n", | 776 | FC_RPORT_DBG(rport, "Received a RTV response, but in state " |
792 | fc_rport_state(rport)); | 777 | "%s\n", fc_rport_state(rport)); |
793 | if (IS_ERR(fp)) | 778 | if (IS_ERR(fp)) |
794 | goto err; | 779 | goto err; |
795 | goto out; | 780 | goto out; |
@@ -844,8 +829,8 @@ static void fc_rport_enter_rtv(struct fc_rport *rport) | |||
844 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | 829 | struct fc_rport_libfc_priv *rdata = rport->dd_data; |
845 | struct fc_lport *lport = rdata->local_port; | 830 | struct fc_lport *lport = rdata->local_port; |
846 | 831 | ||
847 | FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n", | 832 | FC_RPORT_DBG(rport, "Port entered RTV state from %s state\n", |
848 | rport->port_id, fc_rport_state(rport)); | 833 | fc_rport_state(rport)); |
849 | 834 | ||
850 | fc_rport_state_enter(rport, RPORT_ST_RTV); | 835 | fc_rport_state_enter(rport, RPORT_ST_RTV); |
851 | 836 | ||
@@ -875,8 +860,8 @@ static void fc_rport_enter_logo(struct fc_rport *rport) | |||
875 | struct fc_lport *lport = rdata->local_port; | 860 | struct fc_lport *lport = rdata->local_port; |
876 | struct fc_frame *fp; | 861 | struct fc_frame *fp; |
877 | 862 | ||
878 | FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n", | 863 | FC_RPORT_DBG(rport, "Port entered LOGO state from %s state\n", |
879 | rport->port_id, fc_rport_state(rport)); | 864 | fc_rport_state(rport)); |
880 | 865 | ||
881 | fc_rport_state_enter(rport, RPORT_ST_LOGO); | 866 | fc_rport_state_enter(rport, RPORT_ST_LOGO); |
882 | 867 | ||
@@ -983,14 +968,13 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, | |||
983 | 968 | ||
984 | fh = fc_frame_header_get(fp); | 969 | fh = fc_frame_header_get(fp); |
985 | 970 | ||
986 | FC_DEBUG_RPORT("Received PLOGI request from port (%6x) " | 971 | FC_RPORT_DBG(rport, "Received PLOGI request while in state %s\n", |
987 | "while in state %s\n", ntoh24(fh->fh_s_id), | 972 | fc_rport_state(rport)); |
988 | fc_rport_state(rport)); | ||
989 | 973 | ||
990 | sid = ntoh24(fh->fh_s_id); | 974 | sid = ntoh24(fh->fh_s_id); |
991 | pl = fc_frame_payload_get(fp, sizeof(*pl)); | 975 | pl = fc_frame_payload_get(fp, sizeof(*pl)); |
992 | if (!pl) { | 976 | if (!pl) { |
993 | FC_DBG("incoming PLOGI from %x too short\n", sid); | 977 | FC_RPORT_DBG(rport, "Received PLOGI too short\n"); |
994 | WARN_ON(1); | 978 | WARN_ON(1); |
995 | /* XXX TBD: send reject? */ | 979 | /* XXX TBD: send reject? */ |
996 | fc_frame_free(fp); | 980 | fc_frame_free(fp); |
@@ -1012,26 +996,26 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, | |||
1012 | */ | 996 | */ |
1013 | switch (rdata->rp_state) { | 997 | switch (rdata->rp_state) { |
1014 | case RPORT_ST_INIT: | 998 | case RPORT_ST_INIT: |
1015 | FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT " | 999 | FC_RPORT_DBG(rport, "Received PLOGI, wwpn %llx state INIT " |
1016 | "- reject\n", sid, (unsigned long long)wwpn); | 1000 | "- reject\n", (unsigned long long)wwpn); |
1017 | reject = ELS_RJT_UNSUP; | 1001 | reject = ELS_RJT_UNSUP; |
1018 | break; | 1002 | break; |
1019 | case RPORT_ST_PLOGI: | 1003 | case RPORT_ST_PLOGI: |
1020 | FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n", | 1004 | FC_RPORT_DBG(rport, "Received PLOGI in PLOGI state %d\n", |
1021 | sid, rdata->rp_state); | 1005 | rdata->rp_state); |
1022 | if (wwpn < lport->wwpn) | 1006 | if (wwpn < lport->wwpn) |
1023 | reject = ELS_RJT_INPROG; | 1007 | reject = ELS_RJT_INPROG; |
1024 | break; | 1008 | break; |
1025 | case RPORT_ST_PRLI: | 1009 | case RPORT_ST_PRLI: |
1026 | case RPORT_ST_READY: | 1010 | case RPORT_ST_READY: |
1027 | FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d " | 1011 | FC_RPORT_DBG(rport, "Received PLOGI in logged-in state %d " |
1028 | "- ignored for now\n", sid, rdata->rp_state); | 1012 | "- ignored for now\n", rdata->rp_state); |
1029 | /* XXX TBD - should reset */ | 1013 | /* XXX TBD - should reset */ |
1030 | break; | 1014 | break; |
1031 | case RPORT_ST_NONE: | 1015 | case RPORT_ST_NONE: |
1032 | default: | 1016 | default: |
1033 | FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected " | 1017 | FC_RPORT_DBG(rport, "Received PLOGI in unexpected " |
1034 | "state %d\n", sid, rdata->rp_state); | 1018 | "state %d\n", rdata->rp_state); |
1035 | fc_frame_free(fp); | 1019 | fc_frame_free(fp); |
1036 | return; | 1020 | return; |
1037 | break; | 1021 | break; |
@@ -1115,9 +1099,8 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport, | |||
1115 | 1099 | ||
1116 | fh = fc_frame_header_get(rx_fp); | 1100 | fh = fc_frame_header_get(rx_fp); |
1117 | 1101 | ||
1118 | FC_DEBUG_RPORT("Received PRLI request from port (%6x) " | 1102 | FC_RPORT_DBG(rport, "Received PRLI request while in state %s\n", |
1119 | "while in state %s\n", ntoh24(fh->fh_s_id), | 1103 | fc_rport_state(rport)); |
1120 | fc_rport_state(rport)); | ||
1121 | 1104 | ||
1122 | switch (rdata->rp_state) { | 1105 | switch (rdata->rp_state) { |
1123 | case RPORT_ST_PRLI: | 1106 | case RPORT_ST_PRLI: |
@@ -1252,9 +1235,8 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp, | |||
1252 | 1235 | ||
1253 | fh = fc_frame_header_get(fp); | 1236 | fh = fc_frame_header_get(fp); |
1254 | 1237 | ||
1255 | FC_DEBUG_RPORT("Received PRLO request from port (%6x) " | 1238 | FC_RPORT_DBG(rport, "Received PRLO request while in state %s\n", |
1256 | "while in state %s\n", ntoh24(fh->fh_s_id), | 1239 | fc_rport_state(rport)); |
1257 | fc_rport_state(rport)); | ||
1258 | 1240 | ||
1259 | if (rdata->rp_state == RPORT_ST_NONE) { | 1241 | if (rdata->rp_state == RPORT_ST_NONE) { |
1260 | fc_frame_free(fp); | 1242 | fc_frame_free(fp); |
@@ -1286,9 +1268,8 @@ static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp, | |||
1286 | 1268 | ||
1287 | fh = fc_frame_header_get(fp); | 1269 | fh = fc_frame_header_get(fp); |
1288 | 1270 | ||
1289 | FC_DEBUG_RPORT("Received LOGO request from port (%6x) " | 1271 | FC_RPORT_DBG(rport, "Received LOGO request while in state %s\n", |
1290 | "while in state %s\n", ntoh24(fh->fh_s_id), | 1272 | fc_rport_state(rport)); |
1291 | fc_rport_state(rport)); | ||
1292 | 1273 | ||
1293 | if (rdata->rp_state == RPORT_ST_NONE) { | 1274 | if (rdata->rp_state == RPORT_ST_NONE) { |
1294 | fc_frame_free(fp); | 1275 | fc_frame_free(fp); |
@@ -1308,7 +1289,6 @@ static void fc_rport_flush_queue(void) | |||
1308 | flush_workqueue(rport_event_queue); | 1289 | flush_workqueue(rport_event_queue); |
1309 | } | 1290 | } |
1310 | 1291 | ||
1311 | |||
1312 | int fc_rport_init(struct fc_lport *lport) | 1292 | int fc_rport_init(struct fc_lport *lport) |
1313 | { | 1293 | { |
1314 | if (!lport->tt.rport_create) | 1294 | if (!lport->tt.rport_create) |
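
Note on the libfc hunks above: the change is mostly mechanical. The per-file FC_DEBUG_LPORT()/FC_DEBUG_RPORT() wrappers and their static enable flags are dropped in favour of FC_LPORT_DBG(lport, ...) and FC_RPORT_DBG(rport, ...), which take the port object and prefix every message with its identity, so call sites no longer pass fc_host_port_id() or rport->port_id by hand; messages aimed at operators (duplicate WWPN, assigned FID, point-to-point mode, rport allocation failure) are promoted to unconditional printk(). The macro definitions themselves live in the libfc headers and are not part of these hunks, so the fragment below is only a userspace analogue of the pattern; the names fc_debug and lport_dbg are invented for illustration.

#include <stdio.h>

static int fc_debug = 1;                /* stands in for a module parameter */

struct lport {
        unsigned int port_id;           /* stands in for fc_host_port_id() */
};

/* Context-prefixed, flag-gated debug macro; names are illustrative only. */
#define lport_dbg(lp, fmt, ...)                                         \
        do {                                                            \
                if (fc_debug)                                           \
                        fprintf(stderr, "lport %6.6x: " fmt,            \
                                (lp)->port_id, ##__VA_ARGS__);          \
        } while (0)

int main(void)
{
        struct lport lp = { .port_id = 0x010203 };

        /* the port id no longer needs repeating in every format string */
        lport_dbg(&lp, "Entered FLOGI state from %s state\n", "reset");
        return 0;
}
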
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 59908aead531..716cc344c5df 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -38,15 +38,30 @@ | |||
38 | #include <scsi/scsi_transport_iscsi.h> | 38 | #include <scsi/scsi_transport_iscsi.h> |
39 | #include <scsi/libiscsi.h> | 39 | #include <scsi/libiscsi.h> |
40 | 40 | ||
41 | static int iscsi_dbg_lib; | 41 | static int iscsi_dbg_lib_conn; |
42 | module_param_named(debug_libiscsi, iscsi_dbg_lib, int, S_IRUGO | S_IWUSR); | 42 | module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int, |
43 | MODULE_PARM_DESC(debug_libiscsi, "Turn on debugging for libiscsi module. " | 43 | S_IRUGO | S_IWUSR); |
44 | "Set to 1 to turn on, and zero to turn off. Default " | 44 | MODULE_PARM_DESC(debug_libiscsi_conn, |
45 | "is off."); | 45 | "Turn on debugging for connections in libiscsi module. " |
46 | "Set to 1 to turn on, and zero to turn off. Default is off."); | ||
47 | |||
48 | static int iscsi_dbg_lib_session; | ||
49 | module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int, | ||
50 | S_IRUGO | S_IWUSR); | ||
51 | MODULE_PARM_DESC(debug_libiscsi_session, | ||
52 | "Turn on debugging for sessions in libiscsi module. " | ||
53 | "Set to 1 to turn on, and zero to turn off. Default is off."); | ||
54 | |||
55 | static int iscsi_dbg_lib_eh; | ||
56 | module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int, | ||
57 | S_IRUGO | S_IWUSR); | ||
58 | MODULE_PARM_DESC(debug_libiscsi_eh, | ||
59 | "Turn on debugging for error handling in libiscsi module. " | ||
60 | "Set to 1 to turn on, and zero to turn off. Default is off."); | ||
46 | 61 | ||
47 | #define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \ | 62 | #define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \ |
48 | do { \ | 63 | do { \ |
49 | if (iscsi_dbg_lib) \ | 64 | if (iscsi_dbg_lib_conn) \ |
50 | iscsi_conn_printk(KERN_INFO, _conn, \ | 65 | iscsi_conn_printk(KERN_INFO, _conn, \ |
51 | "%s " dbg_fmt, \ | 66 | "%s " dbg_fmt, \ |
52 | __func__, ##arg); \ | 67 | __func__, ##arg); \ |
@@ -54,7 +69,15 @@ MODULE_PARM_DESC(debug_libiscsi, "Turn on debugging for libiscsi module. " | |||
54 | 69 | ||
55 | #define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \ | 70 | #define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \ |
56 | do { \ | 71 | do { \ |
57 | if (iscsi_dbg_lib) \ | 72 | if (iscsi_dbg_lib_session) \ |
73 | iscsi_session_printk(KERN_INFO, _session, \ | ||
74 | "%s " dbg_fmt, \ | ||
75 | __func__, ##arg); \ | ||
76 | } while (0); | ||
77 | |||
78 | #define ISCSI_DBG_EH(_session, dbg_fmt, arg...) \ | ||
79 | do { \ | ||
80 | if (iscsi_dbg_lib_eh) \ | ||
58 | iscsi_session_printk(KERN_INFO, _session, \ | 81 | iscsi_session_printk(KERN_INFO, _session, \ |
59 | "%s " dbg_fmt, \ | 82 | "%s " dbg_fmt, \ |
60 | __func__, ##arg); \ | 83 | __func__, ##arg); \ |
@@ -954,6 +977,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
954 | task = iscsi_itt_to_ctask(conn, hdr->itt); | 977 | task = iscsi_itt_to_ctask(conn, hdr->itt); |
955 | if (!task) | 978 | if (!task) |
956 | return ISCSI_ERR_BAD_ITT; | 979 | return ISCSI_ERR_BAD_ITT; |
980 | task->last_xfer = jiffies; | ||
957 | break; | 981 | break; |
958 | case ISCSI_OP_R2T: | 982 | case ISCSI_OP_R2T: |
959 | /* | 983 | /* |
@@ -1192,10 +1216,12 @@ static int iscsi_xmit_task(struct iscsi_conn *conn) | |||
1192 | spin_unlock_bh(&conn->session->lock); | 1216 | spin_unlock_bh(&conn->session->lock); |
1193 | rc = conn->session->tt->xmit_task(task); | 1217 | rc = conn->session->tt->xmit_task(task); |
1194 | spin_lock_bh(&conn->session->lock); | 1218 | spin_lock_bh(&conn->session->lock); |
1195 | __iscsi_put_task(task); | 1219 | if (!rc) { |
1196 | if (!rc) | ||
1197 | /* done with this task */ | 1220 | /* done with this task */ |
1221 | task->last_xfer = jiffies; | ||
1198 | conn->task = NULL; | 1222 | conn->task = NULL; |
1223 | } | ||
1224 | __iscsi_put_task(task); | ||
1199 | return rc; | 1225 | return rc; |
1200 | } | 1226 | } |
1201 | 1227 | ||
@@ -1361,6 +1387,9 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn, | |||
1361 | task->state = ISCSI_TASK_PENDING; | 1387 | task->state = ISCSI_TASK_PENDING; |
1362 | task->conn = conn; | 1388 | task->conn = conn; |
1363 | task->sc = sc; | 1389 | task->sc = sc; |
1390 | task->have_checked_conn = false; | ||
1391 | task->last_timeout = jiffies; | ||
1392 | task->last_xfer = jiffies; | ||
1364 | INIT_LIST_HEAD(&task->running); | 1393 | INIT_LIST_HEAD(&task->running); |
1365 | return task; | 1394 | return task; |
1366 | } | 1395 | } |
@@ -1555,10 +1584,10 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc) | |||
1555 | spin_lock_bh(&session->lock); | 1584 | spin_lock_bh(&session->lock); |
1556 | if (session->state == ISCSI_STATE_TERMINATE) { | 1585 | if (session->state == ISCSI_STATE_TERMINATE) { |
1557 | failed: | 1586 | failed: |
1558 | iscsi_session_printk(KERN_INFO, session, | 1587 | ISCSI_DBG_EH(session, |
1559 | "failing target reset: Could not log " | 1588 | "failing target reset: Could not log back into " |
1560 | "back into target [age %d]\n", | 1589 | "target [age %d]\n", |
1561 | session->age); | 1590 | session->age); |
1562 | spin_unlock_bh(&session->lock); | 1591 | spin_unlock_bh(&session->lock); |
1563 | mutex_unlock(&session->eh_mutex); | 1592 | mutex_unlock(&session->eh_mutex); |
1564 | return FAILED; | 1593 | return FAILED; |
@@ -1572,7 +1601,7 @@ failed: | |||
1572 | */ | 1601 | */ |
1573 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 1602 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); |
1574 | 1603 | ||
1575 | ISCSI_DBG_SESSION(session, "wait for relogin\n"); | 1604 | ISCSI_DBG_EH(session, "wait for relogin\n"); |
1576 | wait_event_interruptible(conn->ehwait, | 1605 | wait_event_interruptible(conn->ehwait, |
1577 | session->state == ISCSI_STATE_TERMINATE || | 1606 | session->state == ISCSI_STATE_TERMINATE || |
1578 | session->state == ISCSI_STATE_LOGGED_IN || | 1607 | session->state == ISCSI_STATE_LOGGED_IN || |
@@ -1582,10 +1611,10 @@ failed: | |||
1582 | 1611 | ||
1583 | mutex_lock(&session->eh_mutex); | 1612 | mutex_lock(&session->eh_mutex); |
1584 | spin_lock_bh(&session->lock); | 1613 | spin_lock_bh(&session->lock); |
1585 | if (session->state == ISCSI_STATE_LOGGED_IN) | 1614 | if (session->state == ISCSI_STATE_LOGGED_IN) { |
1586 | iscsi_session_printk(KERN_INFO, session, | 1615 | ISCSI_DBG_EH(session, |
1587 | "target reset succeeded\n"); | 1616 | "target reset succeeded\n"); |
1588 | else | 1617 | } else |
1589 | goto failed; | 1618 | goto failed; |
1590 | spin_unlock_bh(&session->lock); | 1619 | spin_unlock_bh(&session->lock); |
1591 | mutex_unlock(&session->eh_mutex); | 1620 | mutex_unlock(&session->eh_mutex); |
@@ -1601,7 +1630,7 @@ static void iscsi_tmf_timedout(unsigned long data) | |||
1601 | spin_lock(&session->lock); | 1630 | spin_lock(&session->lock); |
1602 | if (conn->tmf_state == TMF_QUEUED) { | 1631 | if (conn->tmf_state == TMF_QUEUED) { |
1603 | conn->tmf_state = TMF_TIMEDOUT; | 1632 | conn->tmf_state = TMF_TIMEDOUT; |
1604 | ISCSI_DBG_SESSION(session, "tmf timedout\n"); | 1633 | ISCSI_DBG_EH(session, "tmf timedout\n"); |
1605 | /* unblock eh_abort() */ | 1634 | /* unblock eh_abort() */ |
1606 | wake_up(&conn->ehwait); | 1635 | wake_up(&conn->ehwait); |
1607 | } | 1636 | } |
@@ -1621,7 +1650,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, | |||
1621 | spin_unlock_bh(&session->lock); | 1650 | spin_unlock_bh(&session->lock); |
1622 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 1651 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); |
1623 | spin_lock_bh(&session->lock); | 1652 | spin_lock_bh(&session->lock); |
1624 | ISCSI_DBG_SESSION(session, "tmf exec failure\n"); | 1653 | ISCSI_DBG_EH(session, "tmf exec failure\n"); |
1625 | return -EPERM; | 1654 | return -EPERM; |
1626 | } | 1655 | } |
1627 | conn->tmfcmd_pdus_cnt++; | 1656 | conn->tmfcmd_pdus_cnt++; |
@@ -1629,7 +1658,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, | |||
1629 | conn->tmf_timer.function = iscsi_tmf_timedout; | 1658 | conn->tmf_timer.function = iscsi_tmf_timedout; |
1630 | conn->tmf_timer.data = (unsigned long)conn; | 1659 | conn->tmf_timer.data = (unsigned long)conn; |
1631 | add_timer(&conn->tmf_timer); | 1660 | add_timer(&conn->tmf_timer); |
1632 | ISCSI_DBG_SESSION(session, "tmf set timeout\n"); | 1661 | ISCSI_DBG_EH(session, "tmf set timeout\n"); |
1633 | 1662 | ||
1634 | spin_unlock_bh(&session->lock); | 1663 | spin_unlock_bh(&session->lock); |
1635 | mutex_unlock(&session->eh_mutex); | 1664 | mutex_unlock(&session->eh_mutex); |
@@ -1716,17 +1745,18 @@ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn) | |||
1716 | return 0; | 1745 | return 0; |
1717 | } | 1746 | } |
1718 | 1747 | ||
1719 | static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) | 1748 | static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) |
1720 | { | 1749 | { |
1750 | enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED; | ||
1751 | struct iscsi_task *task = NULL; | ||
1721 | struct iscsi_cls_session *cls_session; | 1752 | struct iscsi_cls_session *cls_session; |
1722 | struct iscsi_session *session; | 1753 | struct iscsi_session *session; |
1723 | struct iscsi_conn *conn; | 1754 | struct iscsi_conn *conn; |
1724 | enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED; | ||
1725 | 1755 | ||
1726 | cls_session = starget_to_session(scsi_target(scmd->device)); | 1756 | cls_session = starget_to_session(scsi_target(sc->device)); |
1727 | session = cls_session->dd_data; | 1757 | session = cls_session->dd_data; |
1728 | 1758 | ||
1729 | ISCSI_DBG_SESSION(session, "scsi cmd %p timedout\n", scmd); | 1759 | ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc); |
1730 | 1760 | ||
1731 | spin_lock(&session->lock); | 1761 | spin_lock(&session->lock); |
1732 | if (session->state != ISCSI_STATE_LOGGED_IN) { | 1762 | if (session->state != ISCSI_STATE_LOGGED_IN) { |
@@ -1745,6 +1775,26 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) | |||
1745 | goto done; | 1775 | goto done; |
1746 | } | 1776 | } |
1747 | 1777 | ||
1778 | task = (struct iscsi_task *)sc->SCp.ptr; | ||
1779 | if (!task) | ||
1780 | goto done; | ||
1781 | /* | ||
1782 | * If we have sent (at least queued to the network layer) a pdu or | ||
1783 | * recvd one for the task since the last timeout ask for | ||
1784 | * more time. If on the next timeout we have not made progress | ||
1785 | * we can check if it is the task or connection when we send the | ||
1786 | * nop as a ping. | ||
1787 | */ | ||
1788 | if (time_after_eq(task->last_xfer, task->last_timeout)) { | ||
1789 | ISCSI_DBG_EH(session, "Command making progress. Asking " | ||
1790 | "scsi-ml for more time to complete. " | ||
1791 | "Last data recv at %lu. Last timeout was at " | ||
1792 | "%lu\n.", task->last_xfer, task->last_timeout); | ||
1793 | task->have_checked_conn = false; | ||
1794 | rc = BLK_EH_RESET_TIMER; | ||
1795 | goto done; | ||
1796 | } | ||
1797 | |||
1748 | if (!conn->recv_timeout && !conn->ping_timeout) | 1798 | if (!conn->recv_timeout && !conn->ping_timeout) |
1749 | goto done; | 1799 | goto done; |
1750 | /* | 1800 | /* |
@@ -1755,23 +1805,32 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) | |||
1755 | rc = BLK_EH_RESET_TIMER; | 1805 | rc = BLK_EH_RESET_TIMER; |
1756 | goto done; | 1806 | goto done; |
1757 | } | 1807 | } |
1808 | |||
1809 | /* Assumes nop timeout is shorter than scsi cmd timeout */ | ||
1810 | if (task->have_checked_conn) | ||
1811 | goto done; | ||
1812 | |||
1758 | /* | 1813 | /* |
1759 | * if we are about to check the transport then give the command | 1814 | * Checking the transport already or nop from a cmd timeout still |
1760 | * more time | 1815 | * running |
1761 | */ | 1816 | */ |
1762 | if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), | 1817 | if (conn->ping_task) { |
1763 | jiffies)) { | 1818 | task->have_checked_conn = true; |
1764 | rc = BLK_EH_RESET_TIMER; | 1819 | rc = BLK_EH_RESET_TIMER; |
1765 | goto done; | 1820 | goto done; |
1766 | } | 1821 | } |
1767 | 1822 | ||
1768 | /* if in the middle of checking the transport then give us more time */ | 1823 | /* Make sure there is a transport check done */ |
1769 | if (conn->ping_task) | 1824 | iscsi_send_nopout(conn, NULL); |
1770 | rc = BLK_EH_RESET_TIMER; | 1825 | task->have_checked_conn = true; |
1826 | rc = BLK_EH_RESET_TIMER; | ||
1827 | |||
1771 | done: | 1828 | done: |
1829 | if (task) | ||
1830 | task->last_timeout = jiffies; | ||
1772 | spin_unlock(&session->lock); | 1831 | spin_unlock(&session->lock); |
1773 | ISCSI_DBG_SESSION(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? | 1832 | ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? |
1774 | "timer reset" : "nh"); | 1833 | "timer reset" : "nh"); |
1775 | return rc; | 1834 | return rc; |
1776 | } | 1835 | } |
1777 | 1836 | ||
@@ -1841,7 +1900,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1841 | cls_session = starget_to_session(scsi_target(sc->device)); | 1900 | cls_session = starget_to_session(scsi_target(sc->device)); |
1842 | session = cls_session->dd_data; | 1901 | session = cls_session->dd_data; |
1843 | 1902 | ||
1844 | ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc); | 1903 | ISCSI_DBG_EH(session, "aborting sc %p\n", sc); |
1845 | 1904 | ||
1846 | mutex_lock(&session->eh_mutex); | 1905 | mutex_lock(&session->eh_mutex); |
1847 | spin_lock_bh(&session->lock); | 1906 | spin_lock_bh(&session->lock); |
@@ -1850,8 +1909,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1850 | * got the command. | 1909 | * got the command. |
1851 | */ | 1910 | */ |
1852 | if (!sc->SCp.ptr) { | 1911 | if (!sc->SCp.ptr) { |
1853 | ISCSI_DBG_SESSION(session, "sc never reached iscsi layer or " | 1912 | ISCSI_DBG_EH(session, "sc never reached iscsi layer or " |
1854 | "it completed.\n"); | 1913 | "it completed.\n"); |
1855 | spin_unlock_bh(&session->lock); | 1914 | spin_unlock_bh(&session->lock); |
1856 | mutex_unlock(&session->eh_mutex); | 1915 | mutex_unlock(&session->eh_mutex); |
1857 | return SUCCESS; | 1916 | return SUCCESS; |
@@ -1865,7 +1924,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1865 | sc->SCp.phase != session->age) { | 1924 | sc->SCp.phase != session->age) { |
1866 | spin_unlock_bh(&session->lock); | 1925 | spin_unlock_bh(&session->lock); |
1867 | mutex_unlock(&session->eh_mutex); | 1926 | mutex_unlock(&session->eh_mutex); |
1868 | ISCSI_DBG_SESSION(session, "failing abort due to dropped " | 1927 | ISCSI_DBG_EH(session, "failing abort due to dropped " |
1869 | "session.\n"); | 1928 | "session.\n"); |
1870 | return FAILED; | 1929 | return FAILED; |
1871 | } | 1930 | } |
@@ -1875,13 +1934,12 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1875 | age = session->age; | 1934 | age = session->age; |
1876 | 1935 | ||
1877 | task = (struct iscsi_task *)sc->SCp.ptr; | 1936 | task = (struct iscsi_task *)sc->SCp.ptr; |
1878 | ISCSI_DBG_SESSION(session, "aborting [sc %p itt 0x%x]\n", | 1937 | ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", |
1879 | sc, task->itt); | 1938 | sc, task->itt); |
1880 | 1939 | ||
1881 | /* task completed before time out */ | 1940 | /* task completed before time out */ |
1882 | if (!task->sc) { | 1941 | if (!task->sc) { |
1883 | ISCSI_DBG_SESSION(session, "sc completed while abort in " | 1942 | ISCSI_DBG_EH(session, "sc completed while abort in progress\n"); |
1884 | "progress\n"); | ||
1885 | goto success; | 1943 | goto success; |
1886 | } | 1944 | } |
1887 | 1945 | ||
@@ -1930,8 +1988,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1930 | if (!sc->SCp.ptr) { | 1988 | if (!sc->SCp.ptr) { |
1931 | conn->tmf_state = TMF_INITIAL; | 1989 | conn->tmf_state = TMF_INITIAL; |
1932 | /* task completed before tmf abort response */ | 1990 | /* task completed before tmf abort response */ |
1933 | ISCSI_DBG_SESSION(session, "sc completed while abort " | 1991 | ISCSI_DBG_EH(session, "sc completed while abort in " |
1934 | "in progress\n"); | 1992 | "progress\n"); |
1935 | goto success; | 1993 | goto success; |
1936 | } | 1994 | } |
1937 | /* fall through */ | 1995 | /* fall through */ |
@@ -1943,16 +2001,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1943 | success: | 2001 | success: |
1944 | spin_unlock_bh(&session->lock); | 2002 | spin_unlock_bh(&session->lock); |
1945 | success_unlocked: | 2003 | success_unlocked: |
1946 | ISCSI_DBG_SESSION(session, "abort success [sc %p itt 0x%x]\n", | 2004 | ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n", |
1947 | sc, task->itt); | 2005 | sc, task->itt); |
1948 | mutex_unlock(&session->eh_mutex); | 2006 | mutex_unlock(&session->eh_mutex); |
1949 | return SUCCESS; | 2007 | return SUCCESS; |
1950 | 2008 | ||
1951 | failed: | 2009 | failed: |
1952 | spin_unlock_bh(&session->lock); | 2010 | spin_unlock_bh(&session->lock); |
1953 | failed_unlocked: | 2011 | failed_unlocked: |
1954 | ISCSI_DBG_SESSION(session, "abort failed [sc %p itt 0x%x]\n", sc, | 2012 | ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc, |
1955 | task ? task->itt : 0); | 2013 | task ? task->itt : 0); |
1956 | mutex_unlock(&session->eh_mutex); | 2014 | mutex_unlock(&session->eh_mutex); |
1957 | return FAILED; | 2015 | return FAILED; |
1958 | } | 2016 | } |
@@ -1979,8 +2037,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) | |||
1979 | cls_session = starget_to_session(scsi_target(sc->device)); | 2037 | cls_session = starget_to_session(scsi_target(sc->device)); |
1980 | session = cls_session->dd_data; | 2038 | session = cls_session->dd_data; |
1981 | 2039 | ||
1982 | ISCSI_DBG_SESSION(session, "LU Reset [sc %p lun %u]\n", | 2040 | ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun); |
1983 | sc, sc->device->lun); | ||
1984 | 2041 | ||
1985 | mutex_lock(&session->eh_mutex); | 2042 | mutex_lock(&session->eh_mutex); |
1986 | spin_lock_bh(&session->lock); | 2043 | spin_lock_bh(&session->lock); |
@@ -2034,8 +2091,8 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) | |||
2034 | unlock: | 2091 | unlock: |
2035 | spin_unlock_bh(&session->lock); | 2092 | spin_unlock_bh(&session->lock); |
2036 | done: | 2093 | done: |
2037 | ISCSI_DBG_SESSION(session, "dev reset result = %s\n", | 2094 | ISCSI_DBG_EH(session, "dev reset result = %s\n", |
2038 | rc == SUCCESS ? "SUCCESS" : "FAILED"); | 2095 | rc == SUCCESS ? "SUCCESS" : "FAILED"); |
2039 | mutex_unlock(&session->eh_mutex); | 2096 | mutex_unlock(&session->eh_mutex); |
2040 | return rc; | 2097 | return rc; |
2041 | } | 2098 | } |
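
The iscsi_eh_cmd_timed_out() rework above moves the timeout policy from connection-only checks to per-command progress tracking: each task now carries last_xfer, last_timeout and have_checked_conn (initialised in iscsi_alloc_task() and stamped on completions and, in libiscsi_tcp.c below, on data-in and R2T PDUs). If the command has moved data since the previous timeout, the handler simply asks scsi-ml for more time; otherwise it sends at most one NOP-Out per timeout cycle to test the connection before letting error handling escalate. The following is a simplified userspace model of that decision order, with plain counters in place of jiffies and the locking and ping plumbing left out; it is a sketch of the policy, not the kernel function.

#include <stdbool.h>
#include <stdio.h>

enum eh_ret { EH_NOT_HANDLED, EH_RESET_TIMER };

struct model_task {
        unsigned long last_xfer;        /* last time a PDU moved for this cmd */
        unsigned long last_timeout;     /* last time the EH timer fired */
        bool have_checked_conn;         /* a ping was already issued for it */
};

/* Simplified model of the new timeout policy; not the kernel function. */
static enum eh_ret cmd_timed_out(struct model_task *task,
                                 bool ping_outstanding, unsigned long now)
{
        enum eh_ret rc = EH_NOT_HANDLED;

        if (task->last_xfer >= task->last_timeout) {
                /* progress since the last timeout: just extend the timer */
                task->have_checked_conn = false;
                rc = EH_RESET_TIMER;
        } else if (task->have_checked_conn) {
                /* the connection was already probed once; let EH escalate */
                rc = EH_NOT_HANDLED;
        } else {
                /* no progress: probe the connection (NOP-Out), once per cmd */
                if (!ping_outstanding)
                        printf("would send NOP-Out ping\n");
                task->have_checked_conn = true;
                rc = EH_RESET_TIMER;
        }

        task->last_timeout = now;
        return rc;
}

int main(void)
{
        struct model_task t = { .last_xfer = 90, .last_timeout = 50 };

        printf("1st timeout: %d\n", cmd_timed_out(&t, false, 100)); /* progress -> 1 (reset) */
        printf("2nd timeout: %d\n", cmd_timed_out(&t, false, 150)); /* stalled  -> ping, 1 */
        printf("3rd timeout: %d\n", cmd_timed_out(&t, false, 200)); /* stalled  -> 0 (escalate) */
        return 0;
}
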
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 2bc07090321d..2e0746d70303 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c | |||
@@ -686,6 +686,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) | |||
686 | "offset=%d, datalen=%d)\n", | 686 | "offset=%d, datalen=%d)\n", |
687 | tcp_task->data_offset, | 687 | tcp_task->data_offset, |
688 | tcp_conn->in.datalen); | 688 | tcp_conn->in.datalen); |
689 | task->last_xfer = jiffies; | ||
689 | rc = iscsi_segment_seek_sg(&tcp_conn->in.segment, | 690 | rc = iscsi_segment_seek_sg(&tcp_conn->in.segment, |
690 | sdb->table.sgl, | 691 | sdb->table.sgl, |
691 | sdb->table.nents, | 692 | sdb->table.nents, |
@@ -713,9 +714,10 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) | |||
713 | rc = ISCSI_ERR_BAD_ITT; | 714 | rc = ISCSI_ERR_BAD_ITT; |
714 | else if (ahslen) | 715 | else if (ahslen) |
715 | rc = ISCSI_ERR_AHSLEN; | 716 | rc = ISCSI_ERR_AHSLEN; |
716 | else if (task->sc->sc_data_direction == DMA_TO_DEVICE) | 717 | else if (task->sc->sc_data_direction == DMA_TO_DEVICE) { |
718 | task->last_xfer = jiffies; | ||
717 | rc = iscsi_tcp_r2t_rsp(conn, task); | 719 | rc = iscsi_tcp_r2t_rsp(conn, task); |
718 | else | 720 | } else |
719 | rc = ISCSI_ERR_PROTO; | 721 | rc = ISCSI_ERR_PROTO; |
720 | spin_unlock(&conn->session->lock); | 722 | spin_unlock(&conn->session->lock); |
721 | break; | 723 | break; |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 4a990f4da4ea..cca8e4ab0372 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c | |||
@@ -216,7 +216,7 @@ qla24xx_soft_reset(struct qla_hw_data *ha) | |||
216 | 216 | ||
217 | static int | 217 | static int |
218 | qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram, | 218 | qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram, |
219 | uint16_t ram_words, void **nxt) | 219 | uint32_t ram_words, void **nxt) |
220 | { | 220 | { |
221 | int rval; | 221 | int rval; |
222 | uint32_t cnt, stat, timer, words, idx; | 222 | uint32_t cnt, stat, timer, words, idx; |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 262026129325..f2ce8e3cc91b 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -2301,7 +2301,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
2301 | static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; | 2301 | static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; |
2302 | char *link_speed; | 2302 | char *link_speed; |
2303 | int rval; | 2303 | int rval; |
2304 | uint16_t mb[6]; | 2304 | uint16_t mb[4]; |
2305 | struct qla_hw_data *ha = vha->hw; | 2305 | struct qla_hw_data *ha = vha->hw; |
2306 | 2306 | ||
2307 | if (!IS_IIDMA_CAPABLE(ha)) | 2307 | if (!IS_IIDMA_CAPABLE(ha)) |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 451ece0760b0..fe69f3057671 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -1267,17 +1267,22 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) | |||
1267 | 1267 | ||
1268 | mcp->mb[0] = MBC_GET_FIRMWARE_STATE; | 1268 | mcp->mb[0] = MBC_GET_FIRMWARE_STATE; |
1269 | mcp->out_mb = MBX_0; | 1269 | mcp->out_mb = MBX_0; |
1270 | mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | 1270 | if (IS_FWI2_CAPABLE(vha->hw)) |
1271 | mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | ||
1272 | else | ||
1273 | mcp->in_mb = MBX_1|MBX_0; | ||
1271 | mcp->tov = MBX_TOV_SECONDS; | 1274 | mcp->tov = MBX_TOV_SECONDS; |
1272 | mcp->flags = 0; | 1275 | mcp->flags = 0; |
1273 | rval = qla2x00_mailbox_command(vha, mcp); | 1276 | rval = qla2x00_mailbox_command(vha, mcp); |
1274 | 1277 | ||
1275 | /* Return firmware states. */ | 1278 | /* Return firmware states. */ |
1276 | states[0] = mcp->mb[1]; | 1279 | states[0] = mcp->mb[1]; |
1277 | states[1] = mcp->mb[2]; | 1280 | if (IS_FWI2_CAPABLE(vha->hw)) { |
1278 | states[2] = mcp->mb[3]; | 1281 | states[1] = mcp->mb[2]; |
1279 | states[3] = mcp->mb[4]; | 1282 | states[2] = mcp->mb[3]; |
1280 | states[4] = mcp->mb[5]; | 1283 | states[3] = mcp->mb[4]; |
1284 | states[4] = mcp->mb[5]; | ||
1285 | } | ||
1281 | 1286 | ||
1282 | if (rval != QLA_SUCCESS) { | 1287 | if (rval != QLA_SUCCESS) { |
1283 | /*EMPTY*/ | 1288 | /*EMPTY*/ |
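
The GET FIRMWARE STATE hunk above narrows which mailbox registers are requested and copied back: FWI2-capable HBAs (ISP24xx and later) return mb[1] through mb[5], while earlier ISPs report only the state word in mb[1], so states[1] through states[4] are now filled in only in the FWI2 case and callers should treat the extra state words as valid only on those adapters. A small illustrative fragment of that caller-side rule follows; report_fw_state() and the fwi2_capable flag are invented for the example and are not part of the qla2xxx API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: which entries of states[] are meaningful after
 * qla2x00_get_firmware_state(), depending on the HBA generation. */
static void report_fw_state(const uint16_t *states, bool fwi2_capable)
{
        printf("firmware state: %#x\n", states[0]);      /* mb[1], always */
        if (fwi2_capable)                                /* mb[2]..mb[5] */
                printf("extra words: %#x %#x %#x %#x\n",
                       states[1], states[2], states[3], states[4]);
}

int main(void)
{
        uint16_t states[6] = { 0x3, 0x0, 0x1, 0x0, 0x4 };

        report_fw_state(states, true);   /* ISP24xx and later */
        report_fw_state(states, false);  /* older ISPs: only states[0] */
        return 0;
}
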
@@ -2697,10 +2702,13 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, | |||
2697 | mcp->mb[0] = MBC_PORT_PARAMS; | 2702 | mcp->mb[0] = MBC_PORT_PARAMS; |
2698 | mcp->mb[1] = loop_id; | 2703 | mcp->mb[1] = loop_id; |
2699 | mcp->mb[2] = BIT_0; | 2704 | mcp->mb[2] = BIT_0; |
2700 | mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); | 2705 | if (IS_QLA81XX(vha->hw)) |
2701 | mcp->mb[4] = mcp->mb[5] = 0; | 2706 | mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); |
2702 | mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | 2707 | else |
2703 | mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; | 2708 | mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); |
2709 | mcp->mb[9] = vha->vp_idx; | ||
2710 | mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; | ||
2711 | mcp->in_mb = MBX_3|MBX_1|MBX_0; | ||
2704 | mcp->tov = MBX_TOV_SECONDS; | 2712 | mcp->tov = MBX_TOV_SECONDS; |
2705 | mcp->flags = 0; | 2713 | mcp->flags = 0; |
2706 | rval = qla2x00_mailbox_command(vha, mcp); | 2714 | rval = qla2x00_mailbox_command(vha, mcp); |
@@ -2710,8 +2718,6 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, | |||
2710 | mb[0] = mcp->mb[0]; | 2718 | mb[0] = mcp->mb[0]; |
2711 | mb[1] = mcp->mb[1]; | 2719 | mb[1] = mcp->mb[1]; |
2712 | mb[3] = mcp->mb[3]; | 2720 | mb[3] = mcp->mb[3]; |
2713 | mb[4] = mcp->mb[4]; | ||
2714 | mb[5] = mcp->mb[5]; | ||
2715 | } | 2721 | } |
2716 | 2722 | ||
2717 | if (rval != QLA_SUCCESS) { | 2723 | if (rval != QLA_SUCCESS) { |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index dcf011679c8b..f0396e79b6fa 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -1663,7 +1663,7 @@ skip_pio: | |||
1663 | /* queue 0 uses two msix vectors */ | 1663 | /* queue 0 uses two msix vectors */ |
1664 | if (ql2xmultique_tag) { | 1664 | if (ql2xmultique_tag) { |
1665 | cpus = num_online_cpus(); | 1665 | cpus = num_online_cpus(); |
1666 | ha->max_rsp_queues = (ha->msix_count - 1 - cpus) ? | 1666 | ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ? |
1667 | (cpus + 1) : (ha->msix_count - 1); | 1667 | (cpus + 1) : (ha->msix_count - 1); |
1668 | ha->max_req_queues = 2; | 1668 | ha->max_req_queues = 2; |
1669 | } else if (ql2xmaxqueues > 1) { | 1669 | } else if (ql2xmaxqueues > 1) { |
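
The qla_os.c hunk fixes the multiqueue sizing test. The old expression used subtraction, (ha->msix_count - 1 - cpus), which is non-zero, hence true, whenever msix_count - 1 differs from the CPU count, so the driver could claim cpus + 1 response queues even when too few MSI-X vectors were available; the corrected comparison does that only when msix_count - 1 actually exceeds the number of online CPUs. A quick worked example of the corrected rule (standalone C, numbers made up):

#include <stdio.h>

/* Corrected sizing rule: one response queue per CPU plus queue 0 when
 * enough MSI-X vectors exist, otherwise limited to msix_count - 1. */
static int max_rsp_queues(int msix_count, int cpus)
{
        return (msix_count - 1 > cpus) ? (cpus + 1) : (msix_count - 1);
}

int main(void)
{
        printf("%d\n", max_rsp_queues(16, 8)); /* enough vectors -> 9 */
        printf("%d\n", max_rsp_queues(6, 8));  /* vector limited -> 5 */
        return 0;
}
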
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index b63feaf43126..84369705a9ad 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.03.01-k3" | 10 | #define QLA2XXX_VERSION "8.03.01-k4" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 3 | 13 | #define QLA_DRIVER_MINOR_VER 3 |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 41a21772df12..fb9af207d61d 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -101,6 +101,8 @@ static const char * scsi_debug_version_date = "20070104"; | |||
101 | #define DEF_DIF 0 | 101 | #define DEF_DIF 0 |
102 | #define DEF_GUARD 0 | 102 | #define DEF_GUARD 0 |
103 | #define DEF_ATO 1 | 103 | #define DEF_ATO 1 |
104 | #define DEF_PHYSBLK_EXP 0 | ||
105 | #define DEF_LOWEST_ALIGNED 0 | ||
104 | 106 | ||
105 | /* bit mask values for scsi_debug_opts */ | 107 | /* bit mask values for scsi_debug_opts */ |
106 | #define SCSI_DEBUG_OPT_NOISE 1 | 108 | #define SCSI_DEBUG_OPT_NOISE 1 |
@@ -156,6 +158,8 @@ static int scsi_debug_dix = DEF_DIX; | |||
156 | static int scsi_debug_dif = DEF_DIF; | 158 | static int scsi_debug_dif = DEF_DIF; |
157 | static int scsi_debug_guard = DEF_GUARD; | 159 | static int scsi_debug_guard = DEF_GUARD; |
158 | static int scsi_debug_ato = DEF_ATO; | 160 | static int scsi_debug_ato = DEF_ATO; |
161 | static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; | ||
162 | static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; | ||
159 | 163 | ||
160 | static int scsi_debug_cmnd_count = 0; | 164 | static int scsi_debug_cmnd_count = 0; |
161 | 165 | ||
@@ -657,7 +661,12 @@ static unsigned char vpdb0_data[] = { | |||
657 | 661 | ||
658 | static int inquiry_evpd_b0(unsigned char * arr) | 662 | static int inquiry_evpd_b0(unsigned char * arr) |
659 | { | 663 | { |
664 | unsigned int gran; | ||
665 | |||
660 | memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); | 666 | memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); |
667 | gran = 1 << scsi_debug_physblk_exp; | ||
668 | arr[2] = (gran >> 8) & 0xff; | ||
669 | arr[3] = gran & 0xff; | ||
661 | if (sdebug_store_sectors > 0x400) { | 670 | if (sdebug_store_sectors > 0x400) { |
662 | arr[4] = (sdebug_store_sectors >> 24) & 0xff; | 671 | arr[4] = (sdebug_store_sectors >> 24) & 0xff; |
663 | arr[5] = (sdebug_store_sectors >> 16) & 0xff; | 672 | arr[5] = (sdebug_store_sectors >> 16) & 0xff; |
@@ -945,6 +954,9 @@ static int resp_readcap16(struct scsi_cmnd * scp, | |||
945 | arr[9] = (scsi_debug_sector_size >> 16) & 0xff; | 954 | arr[9] = (scsi_debug_sector_size >> 16) & 0xff; |
946 | arr[10] = (scsi_debug_sector_size >> 8) & 0xff; | 955 | arr[10] = (scsi_debug_sector_size >> 8) & 0xff; |
947 | arr[11] = scsi_debug_sector_size & 0xff; | 956 | arr[11] = scsi_debug_sector_size & 0xff; |
957 | arr[13] = scsi_debug_physblk_exp & 0xf; | ||
958 | arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; | ||
959 | arr[15] = scsi_debug_lowest_aligned & 0xff; | ||
948 | 960 | ||
949 | if (scsi_debug_dif) { | 961 | if (scsi_debug_dif) { |
950 | arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */ | 962 | arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */ |
@@ -2380,6 +2392,8 @@ module_param_named(dix, scsi_debug_dix, int, S_IRUGO); | |||
2380 | module_param_named(dif, scsi_debug_dif, int, S_IRUGO); | 2392 | module_param_named(dif, scsi_debug_dif, int, S_IRUGO); |
2381 | module_param_named(guard, scsi_debug_guard, int, S_IRUGO); | 2393 | module_param_named(guard, scsi_debug_guard, int, S_IRUGO); |
2382 | module_param_named(ato, scsi_debug_ato, int, S_IRUGO); | 2394 | module_param_named(ato, scsi_debug_ato, int, S_IRUGO); |
2395 | module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); | ||
2396 | module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); | ||
2383 | 2397 | ||
2384 | MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); | 2398 | MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); |
2385 | MODULE_DESCRIPTION("SCSI debug adapter driver"); | 2399 | MODULE_DESCRIPTION("SCSI debug adapter driver"); |
@@ -2401,7 +2415,9 @@ MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); | |||
2401 | MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); | 2415 | MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); |
2402 | MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); | 2416 | MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); |
2403 | MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); | 2417 | MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); |
2404 | MODULE_PARM_DESC(sector_size, "hardware sector size in bytes (def=512)"); | 2418 | MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); |
2419 | MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); | ||
2420 | MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); | ||
2405 | MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); | 2421 | MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); |
2406 | MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); | 2422 | MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); |
2407 | MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); | 2423 | MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); |
@@ -2874,6 +2890,18 @@ static int __init scsi_debug_init(void) | |||
2874 | return -EINVAL; | 2890 | return -EINVAL; |
2875 | } | 2891 | } |
2876 | 2892 | ||
2893 | if (scsi_debug_physblk_exp > 15) { | ||
2894 | printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n", | ||
2895 | scsi_debug_physblk_exp); | ||
2896 | return -EINVAL; | ||
2897 | } | ||
2898 | |||
2899 | if (scsi_debug_lowest_aligned > 0x3fff) { | ||
2900 | printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n", | ||
2901 | scsi_debug_lowest_aligned); | ||
2902 | return -EINVAL; | ||
2903 | } | ||
2904 | |||
2877 | if (scsi_debug_dev_size_mb < 1) | 2905 | if (scsi_debug_dev_size_mb < 1) |
2878 | scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ | 2906 | scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ |
2879 | sz = (unsigned long)scsi_debug_dev_size_mb * 1048576; | 2907 | sz = (unsigned long)scsi_debug_dev_size_mb * 1048576; |
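For illustration only, not part of the patch: the scsi_debug hunks above emit, and the sd.c hunk later in this series decodes, the physical-block fields READ CAPACITY(16) carries in bytes 13-15 (logical-blocks-per-physical-block exponent and lowest aligned LBA). A minimal stand-alone sketch of that decoding, with a hypothetical helper name:

```c
#include <stdint.h>

/* Hypothetical helper: recover the physical-block geometry from a
 * READ CAPACITY(16) parameter block, mirroring what read_capacity_16()
 * in the sd.c hunk below does with buffer[13..15]. */
static void decode_rc16_physblk(const uint8_t *buf, uint32_t logical_block_size,
				uint32_t *phys_block_size, uint32_t *alignment_bytes)
{
	unsigned int exp = buf[13] & 0xf;	/* 2^exp logical blocks per physical block */
	unsigned int lowest_aligned = ((buf[14] & 0x3f) << 8) | buf[15];

	*phys_block_size = (1u << exp) * logical_block_size;
	*alignment_bytes = lowest_aligned * logical_block_size;
}
```

Loading scsi_debug with, say, physblk_exp=3 and lowest_aligned=7 on the default 512-byte logical block therefore reports a 4096-byte physical block at an alignment offset of 3584 bytes.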
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 8821df9a277b..93c2622cb969 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
@@ -24,6 +24,13 @@ struct scsi_dev_info_list { | |||
24 | unsigned compatible; /* for use with scsi_static_device_list entries */ | 24 | unsigned compatible; /* for use with scsi_static_device_list entries */ |
25 | }; | 25 | }; |
26 | 26 | ||
27 | struct scsi_dev_info_list_table { | ||
28 | struct list_head node; /* our node for being on the master list */ | ||
29 | struct list_head scsi_dev_info_list; /* head of dev info list */ | ||
30 | const char *name; /* name of list for /proc (NULL for global) */ | ||
31 | int key; /* unique numeric identifier */ | ||
32 | }; | ||
33 | |||
27 | 34 | ||
28 | static const char spaces[] = " "; /* 16 of them */ | 35 | static const char spaces[] = " "; /* 16 of them */ |
29 | static unsigned scsi_default_dev_flags; | 36 | static unsigned scsi_default_dev_flags; |
@@ -247,6 +254,22 @@ static struct { | |||
247 | { NULL, NULL, NULL, 0 }, | 254 | { NULL, NULL, NULL, 0 }, |
248 | }; | 255 | }; |
249 | 256 | ||
257 | static struct scsi_dev_info_list_table *scsi_devinfo_lookup_by_key(int key) | ||
258 | { | ||
259 | struct scsi_dev_info_list_table *devinfo_table; | ||
260 | int found = 0; | ||
261 | |||
262 | list_for_each_entry(devinfo_table, &scsi_dev_info_list, node) | ||
263 | if (devinfo_table->key == key) { | ||
264 | found = 1; | ||
265 | break; | ||
266 | } | ||
267 | if (!found) | ||
268 | return ERR_PTR(-EINVAL); | ||
269 | |||
270 | return devinfo_table; | ||
271 | } | ||
272 | |||
250 | /* | 273 | /* |
251 | * scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into | 274 | * scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into |
252 | * devinfo vendor and model strings. | 275 | * devinfo vendor and model strings. |
@@ -296,7 +319,38 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length, | |||
296 | static int scsi_dev_info_list_add(int compatible, char *vendor, char *model, | 319 | static int scsi_dev_info_list_add(int compatible, char *vendor, char *model, |
297 | char *strflags, int flags) | 320 | char *strflags, int flags) |
298 | { | 321 | { |
322 | return scsi_dev_info_list_add_keyed(compatible, vendor, model, | ||
323 | strflags, flags, | ||
324 | SCSI_DEVINFO_GLOBAL); | ||
325 | } | ||
326 | |||
327 | /** | ||
328 | * scsi_dev_info_list_add_keyed - add one dev_info list entry. | ||
329 | * @compatible: if true, null terminate short strings. Otherwise space pad. | ||
330 | * @vendor: vendor string | ||
331 | * @model: model (product) string | ||
332 | * @strflags: integer string | ||
333 | * @flags: if strflags NULL, use this flag value | ||
334 | * @key: specify list to use | ||
335 | * | ||
336 | * Description: | ||
337 | * Create and add one dev_info entry for @vendor, @model, | ||
338 | * @strflags or @flags in the list specified by @key. If @compatible, | ||
339 | * add to the tail of the list, do not space pad, and set | ||
340 | * devinfo->compatible. The scsi_static_device_list entries are | ||
341 | * added with @compatible 1 and @strflags NULL. | ||
342 | * | ||
343 | * Returns: 0 OK, -error on failure. | ||
344 | **/ | ||
345 | int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model, | ||
346 | char *strflags, int flags, int key) | ||
347 | { | ||
299 | struct scsi_dev_info_list *devinfo; | 348 | struct scsi_dev_info_list *devinfo; |
349 | struct scsi_dev_info_list_table *devinfo_table = | ||
350 | scsi_devinfo_lookup_by_key(key); | ||
351 | |||
352 | if (IS_ERR(devinfo_table)) | ||
353 | return PTR_ERR(devinfo_table); | ||
300 | 354 | ||
301 | devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL); | 355 | devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL); |
302 | if (!devinfo) { | 356 | if (!devinfo) { |
@@ -317,12 +371,15 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model, | |||
317 | devinfo->compatible = compatible; | 371 | devinfo->compatible = compatible; |
318 | 372 | ||
319 | if (compatible) | 373 | if (compatible) |
320 | list_add_tail(&devinfo->dev_info_list, &scsi_dev_info_list); | 374 | list_add_tail(&devinfo->dev_info_list, |
375 | &devinfo_table->scsi_dev_info_list); | ||
321 | else | 376 | else |
322 | list_add(&devinfo->dev_info_list, &scsi_dev_info_list); | 377 | list_add(&devinfo->dev_info_list, |
378 | &devinfo_table->scsi_dev_info_list); | ||
323 | 379 | ||
324 | return 0; | 380 | return 0; |
325 | } | 381 | } |
382 | EXPORT_SYMBOL(scsi_dev_info_list_add_keyed); | ||
326 | 383 | ||
327 | /** | 384 | /** |
328 | * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list. | 385 | * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list. |
@@ -382,22 +439,48 @@ static int scsi_dev_info_list_add_str(char *dev_list) | |||
382 | * @model: model name | 439 | * @model: model name |
383 | * | 440 | * |
384 | * Description: | 441 | * Description: |
385 | * Search the scsi_dev_info_list for an entry matching @vendor and | 442 | * Search the global scsi_dev_info_list (specified by list zero) |
386 | * @model, if found, return the matching flags value, else return | 443 | * for an entry matching @vendor and @model, if found, return the |
387 | * the host or global default settings. Called during scan time. | 444 | * matching flags value, else return the host or global default |
445 | * settings. Called during scan time. | ||
388 | **/ | 446 | **/ |
389 | int scsi_get_device_flags(struct scsi_device *sdev, | 447 | int scsi_get_device_flags(struct scsi_device *sdev, |
390 | const unsigned char *vendor, | 448 | const unsigned char *vendor, |
391 | const unsigned char *model) | 449 | const unsigned char *model) |
392 | { | 450 | { |
451 | return scsi_get_device_flags_keyed(sdev, vendor, model, | ||
452 | SCSI_DEVINFO_GLOBAL); | ||
453 | } | ||
454 | |||
455 | |||
456 | /** | ||
457 | * scsi_get_device_flags_keyed - get device-specific flags from the dynamic device list. | ||
458 | * @sdev: &scsi_device to get flags for | ||
459 | * @vendor: vendor name | ||
460 | * @model: model name | ||
461 | * @key: list to look up | ||
462 | * | ||
463 | * Description: | ||
464 | * Search the scsi_dev_info_list specified by @key for an entry | ||
465 | * matching @vendor and @model, if found, return the matching | ||
466 | * flags value, else return the host or global default settings. | ||
467 | * Called during scan time. | ||
468 | **/ | ||
469 | int scsi_get_device_flags_keyed(struct scsi_device *sdev, | ||
470 | const unsigned char *vendor, | ||
471 | const unsigned char *model, | ||
472 | int key) | ||
473 | { | ||
393 | struct scsi_dev_info_list *devinfo; | 474 | struct scsi_dev_info_list *devinfo; |
394 | unsigned int bflags; | 475 | struct scsi_dev_info_list_table *devinfo_table; |
476 | |||
477 | devinfo_table = scsi_devinfo_lookup_by_key(key); | ||
395 | 478 | ||
396 | bflags = sdev->sdev_bflags; | 479 | if (IS_ERR(devinfo_table)) |
397 | if (!bflags) | 480 | return PTR_ERR(devinfo_table); |
398 | bflags = scsi_default_dev_flags; | ||
399 | 481 | ||
400 | list_for_each_entry(devinfo, &scsi_dev_info_list, dev_info_list) { | 482 | list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list, |
483 | dev_info_list) { | ||
401 | if (devinfo->compatible) { | 484 | if (devinfo->compatible) { |
402 | /* | 485 | /* |
403 | * Behave like the older version of get_device_flags. | 486 | * Behave like the older version of get_device_flags. |
@@ -447,32 +530,89 @@ int scsi_get_device_flags(struct scsi_device *sdev, | |||
447 | return devinfo->flags; | 530 | return devinfo->flags; |
448 | } | 531 | } |
449 | } | 532 | } |
450 | return bflags; | 533 | /* nothing found, return nothing */ |
534 | if (key != SCSI_DEVINFO_GLOBAL) | ||
535 | return 0; | ||
536 | |||
537 | /* except for the global list, where we have an exception */ | ||
538 | if (sdev->sdev_bflags) | ||
539 | return sdev->sdev_bflags; | ||
540 | |||
541 | return scsi_default_dev_flags; | ||
451 | } | 542 | } |
543 | EXPORT_SYMBOL(scsi_get_device_flags_keyed); | ||
452 | 544 | ||
453 | #ifdef CONFIG_SCSI_PROC_FS | 545 | #ifdef CONFIG_SCSI_PROC_FS |
546 | struct double_list { | ||
547 | struct list_head *top; | ||
548 | struct list_head *bottom; | ||
549 | }; | ||
550 | |||
454 | static int devinfo_seq_show(struct seq_file *m, void *v) | 551 | static int devinfo_seq_show(struct seq_file *m, void *v) |
455 | { | 552 | { |
553 | struct double_list *dl = v; | ||
554 | struct scsi_dev_info_list_table *devinfo_table = | ||
555 | list_entry(dl->top, struct scsi_dev_info_list_table, node); | ||
456 | struct scsi_dev_info_list *devinfo = | 556 | struct scsi_dev_info_list *devinfo = |
457 | list_entry(v, struct scsi_dev_info_list, dev_info_list); | 557 | list_entry(dl->bottom, struct scsi_dev_info_list, |
558 | dev_info_list); | ||
559 | |||
560 | if (devinfo_table->scsi_dev_info_list.next == dl->bottom && | ||
561 | devinfo_table->name) | ||
562 | seq_printf(m, "[%s]:\n", devinfo_table->name); | ||
458 | 563 | ||
459 | seq_printf(m, "'%.8s' '%.16s' 0x%x\n", | 564 | seq_printf(m, "'%.8s' '%.16s' 0x%x\n", |
460 | devinfo->vendor, devinfo->model, devinfo->flags); | 565 | devinfo->vendor, devinfo->model, devinfo->flags); |
461 | return 0; | 566 | return 0; |
462 | } | 567 | } |
463 | 568 | ||
464 | static void * devinfo_seq_start(struct seq_file *m, loff_t *pos) | 569 | static void *devinfo_seq_start(struct seq_file *m, loff_t *ppos) |
465 | { | 570 | { |
466 | return seq_list_start(&scsi_dev_info_list, *pos); | 571 | struct double_list *dl = kmalloc(sizeof(*dl), GFP_KERNEL); |
572 | loff_t pos = *ppos; | ||
573 | |||
574 | if (!dl) | ||
575 | return NULL; | ||
576 | |||
577 | list_for_each(dl->top, &scsi_dev_info_list) { | ||
578 | struct scsi_dev_info_list_table *devinfo_table = | ||
579 | list_entry(dl->top, struct scsi_dev_info_list_table, | ||
580 | node); | ||
581 | list_for_each(dl->bottom, &devinfo_table->scsi_dev_info_list) | ||
582 | if (pos-- == 0) | ||
583 | return dl; | ||
584 | } | ||
585 | |||
586 | kfree(dl); | ||
587 | return NULL; | ||
467 | } | 588 | } |
468 | 589 | ||
469 | static void * devinfo_seq_next(struct seq_file *m, void *v, loff_t *pos) | 590 | static void *devinfo_seq_next(struct seq_file *m, void *v, loff_t *ppos) |
470 | { | 591 | { |
471 | return seq_list_next(v, &scsi_dev_info_list, pos); | 592 | struct double_list *dl = v; |
593 | struct scsi_dev_info_list_table *devinfo_table = | ||
594 | list_entry(dl->top, struct scsi_dev_info_list_table, node); | ||
595 | |||
596 | ++*ppos; | ||
597 | dl->bottom = dl->bottom->next; | ||
598 | while (&devinfo_table->scsi_dev_info_list == dl->bottom) { | ||
599 | dl->top = dl->top->next; | ||
600 | if (dl->top == &scsi_dev_info_list) { | ||
601 | kfree(dl); | ||
602 | return NULL; | ||
603 | } | ||
604 | devinfo_table = list_entry(dl->top, | ||
605 | struct scsi_dev_info_list_table, | ||
606 | node); | ||
607 | dl->bottom = devinfo_table->scsi_dev_info_list.next; | ||
608 | } | ||
609 | |||
610 | return dl; | ||
472 | } | 611 | } |
473 | 612 | ||
474 | static void devinfo_seq_stop(struct seq_file *m, void *v) | 613 | static void devinfo_seq_stop(struct seq_file *m, void *v) |
475 | { | 614 | { |
615 | kfree(v); | ||
476 | } | 616 | } |
477 | 617 | ||
478 | static const struct seq_operations scsi_devinfo_seq_ops = { | 618 | static const struct seq_operations scsi_devinfo_seq_ops = { |
@@ -549,19 +689,78 @@ MODULE_PARM_DESC(default_dev_flags, | |||
549 | **/ | 689 | **/ |
550 | void scsi_exit_devinfo(void) | 690 | void scsi_exit_devinfo(void) |
551 | { | 691 | { |
552 | struct list_head *lh, *lh_next; | ||
553 | struct scsi_dev_info_list *devinfo; | ||
554 | |||
555 | #ifdef CONFIG_SCSI_PROC_FS | 692 | #ifdef CONFIG_SCSI_PROC_FS |
556 | remove_proc_entry("scsi/device_info", NULL); | 693 | remove_proc_entry("scsi/device_info", NULL); |
557 | #endif | 694 | #endif |
558 | 695 | ||
559 | list_for_each_safe(lh, lh_next, &scsi_dev_info_list) { | 696 | scsi_dev_info_remove_list(SCSI_DEVINFO_GLOBAL); |
697 | } | ||
698 | |||
699 | /** | ||
700 | * scsi_dev_info_add_list - add a new devinfo list | ||
701 | * @key: key of the list to add | ||
702 | * @name: Name of the list to add (for /proc/scsi/device_info) | ||
703 | * | ||
704 | * Adds the requested list, returns zero on success, -EEXIST if the | ||
705 | * key is already registered to a list, or other error on failure. | ||
706 | */ | ||
707 | int scsi_dev_info_add_list(int key, const char *name) | ||
708 | { | ||
709 | struct scsi_dev_info_list_table *devinfo_table = | ||
710 | scsi_devinfo_lookup_by_key(key); | ||
711 | |||
712 | if (!IS_ERR(devinfo_table)) | ||
713 | /* list already exists */ | ||
714 | return -EEXIST; | ||
715 | |||
716 | devinfo_table = kmalloc(sizeof(*devinfo_table), GFP_KERNEL); | ||
717 | |||
718 | if (!devinfo_table) | ||
719 | return -ENOMEM; | ||
720 | |||
721 | INIT_LIST_HEAD(&devinfo_table->node); | ||
722 | INIT_LIST_HEAD(&devinfo_table->scsi_dev_info_list); | ||
723 | devinfo_table->name = name; | ||
724 | devinfo_table->key = key; | ||
725 | list_add_tail(&devinfo_table->node, &scsi_dev_info_list); | ||
726 | |||
727 | return 0; | ||
728 | } | ||
729 | EXPORT_SYMBOL(scsi_dev_info_add_list); | ||
730 | |||
731 | /** | ||
732 | * scsi_dev_info_remove_list - destroy an added devinfo list | ||
733 | * @key: key of the list to destroy | ||
734 | * | ||
735 | * Iterates over the entire list first, freeing all the values, then | ||
736 | * frees the list itself. Returns 0 on success or -EINVAL if the key | ||
737 | * can't be found. | ||
738 | */ | ||
739 | int scsi_dev_info_remove_list(int key) | ||
740 | { | ||
741 | struct list_head *lh, *lh_next; | ||
742 | struct scsi_dev_info_list_table *devinfo_table = | ||
743 | scsi_devinfo_lookup_by_key(key); | ||
744 | |||
745 | if (IS_ERR(devinfo_table)) | ||
746 | /* no such list */ | ||
747 | return -EINVAL; | ||
748 | |||
749 | /* remove from the master list */ | ||
750 | list_del(&devinfo_table->node); | ||
751 | |||
752 | list_for_each_safe(lh, lh_next, &devinfo_table->scsi_dev_info_list) { | ||
753 | struct scsi_dev_info_list *devinfo; | ||
754 | |||
560 | devinfo = list_entry(lh, struct scsi_dev_info_list, | 755 | devinfo = list_entry(lh, struct scsi_dev_info_list, |
561 | dev_info_list); | 756 | dev_info_list); |
562 | kfree(devinfo); | 757 | kfree(devinfo); |
563 | } | 758 | } |
759 | kfree(devinfo_table); | ||
760 | |||
761 | return 0; | ||
564 | } | 762 | } |
763 | EXPORT_SYMBOL(scsi_dev_info_remove_list); | ||
565 | 764 | ||
566 | /** | 765 | /** |
567 | * scsi_init_devinfo - set up the dynamic device list. | 766 | * scsi_init_devinfo - set up the dynamic device list. |
@@ -577,10 +776,14 @@ int __init scsi_init_devinfo(void) | |||
577 | #endif | 776 | #endif |
578 | int error, i; | 777 | int error, i; |
579 | 778 | ||
580 | error = scsi_dev_info_list_add_str(scsi_dev_flags); | 779 | error = scsi_dev_info_add_list(SCSI_DEVINFO_GLOBAL, NULL); |
581 | if (error) | 780 | if (error) |
582 | return error; | 781 | return error; |
583 | 782 | ||
783 | error = scsi_dev_info_list_add_str(scsi_dev_flags); | ||
784 | if (error) | ||
785 | goto out; | ||
786 | |||
584 | for (i = 0; scsi_static_device_list[i].vendor; i++) { | 787 | for (i = 0; scsi_static_device_list[i].vendor; i++) { |
585 | error = scsi_dev_info_list_add(1 /* compatibile */, | 788 | error = scsi_dev_info_list_add(1 /* compatibile */, |
586 | scsi_static_device_list[i].vendor, | 789 | scsi_static_device_list[i].vendor, |
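For illustration, a condensed sketch of how a transport class would consume the keyed-list API added above. The function names (my_transport_init, my_lookup, my_transport_exit) are hypothetical; the key, list name, blacklist entry and flag are taken from the scsi_transport_spi.c hunk further down in this diff:

```c
#include <linux/init.h>
#include <scsi/scsi_device.h>
#include "scsi_priv.h"		/* SCSI_DEVINFO_* keys and keyed-list prototypes */

/* Register a per-transport list under its key and seed one entry. */
static int __init my_transport_init(void)
{
	int error = scsi_dev_info_add_list(SCSI_DEVINFO_SPI,
					   "SCSI Parallel Transport Class");
	if (error)
		return error;	/* -EEXIST if the key is already registered */

	return scsi_dev_info_list_add_keyed(1 /* compatible */,
					    "HP", "Ultrium 3-SCSI",
					    NULL /* no strflags */,
					    SPI_BLIST_NOIUS,	/* flag from the spi hunk */
					    SCSI_DEVINFO_SPI);
}

/* At configure time, look the flags up by the same key; for a
 * non-global key a miss returns 0 rather than the default flags. */
static unsigned int my_lookup(struct scsi_device *sdev)
{
	return scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
					   &sdev->inquiry[16],
					   SCSI_DEVINFO_SPI);
}

/* Tear the list (and all of its entries) down again on module exit. */
static void __exit my_transport_exit(void)
{
	scsi_dev_info_remove_list(SCSI_DEVINFO_SPI);
}
```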
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 30f3275e119e..f3c40898fc7d 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1207,6 +1207,7 @@ int scsi_prep_fn(struct request_queue *q, struct request *req) | |||
1207 | ret = scsi_setup_blk_pc_cmnd(sdev, req); | 1207 | ret = scsi_setup_blk_pc_cmnd(sdev, req); |
1208 | return scsi_prep_return(q, req, ret); | 1208 | return scsi_prep_return(q, req, ret); |
1209 | } | 1209 | } |
1210 | EXPORT_SYMBOL(scsi_prep_fn); | ||
1210 | 1211 | ||
1211 | /* | 1212 | /* |
1212 | * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else | 1213 | * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else |
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index fbc83bebdd8e..021e503c8c44 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h | |||
@@ -39,9 +39,25 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) | |||
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | /* scsi_devinfo.c */ | 41 | /* scsi_devinfo.c */ |
42 | |||
43 | /* list of keys for the lists */ | ||
44 | enum { | ||
45 | SCSI_DEVINFO_GLOBAL = 0, | ||
46 | SCSI_DEVINFO_SPI, | ||
47 | }; | ||
48 | |||
42 | extern int scsi_get_device_flags(struct scsi_device *sdev, | 49 | extern int scsi_get_device_flags(struct scsi_device *sdev, |
43 | const unsigned char *vendor, | 50 | const unsigned char *vendor, |
44 | const unsigned char *model); | 51 | const unsigned char *model); |
52 | extern int scsi_get_device_flags_keyed(struct scsi_device *sdev, | ||
53 | const unsigned char *vendor, | ||
54 | const unsigned char *model, int key); | ||
55 | extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor, | ||
56 | char *model, char *strflags, | ||
57 | int flags, int key); | ||
58 | extern int scsi_dev_info_add_list(int key, const char *name); | ||
59 | extern int scsi_dev_info_remove_list(int key); | ||
60 | |||
45 | extern int __init scsi_init_devinfo(void); | 61 | extern int __init scsi_init_devinfo(void); |
46 | extern void scsi_exit_devinfo(void); | 62 | extern void scsi_exit_devinfo(void); |
47 | 63 | ||
@@ -71,7 +87,6 @@ extern int scsi_init_queue(void); | |||
71 | extern void scsi_exit_queue(void); | 87 | extern void scsi_exit_queue(void); |
72 | struct request_queue; | 88 | struct request_queue; |
73 | struct request; | 89 | struct request; |
74 | extern int scsi_prep_fn(struct request_queue *, struct request *); | ||
75 | extern struct kmem_cache *scsi_sdb_cache; | 90 | extern struct kmem_cache *scsi_sdb_cache; |
76 | 91 | ||
77 | /* scsi_proc.c */ | 92 | /* scsi_proc.c */ |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index fa4711d12744..91482f2dcc50 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
@@ -420,29 +420,12 @@ static int scsi_bus_resume(struct device * dev) | |||
420 | return err; | 420 | return err; |
421 | } | 421 | } |
422 | 422 | ||
423 | static int scsi_bus_remove(struct device *dev) | ||
424 | { | ||
425 | struct device_driver *drv = dev->driver; | ||
426 | struct scsi_device *sdev = to_scsi_device(dev); | ||
427 | int err = 0; | ||
428 | |||
429 | /* reset the prep_fn back to the default since the | ||
430 | * driver may have altered it and it's being removed */ | ||
431 | blk_queue_prep_rq(sdev->request_queue, scsi_prep_fn); | ||
432 | |||
433 | if (drv && drv->remove) | ||
434 | err = drv->remove(dev); | ||
435 | |||
436 | return 0; | ||
437 | } | ||
438 | |||
439 | struct bus_type scsi_bus_type = { | 423 | struct bus_type scsi_bus_type = { |
440 | .name = "scsi", | 424 | .name = "scsi", |
441 | .match = scsi_bus_match, | 425 | .match = scsi_bus_match, |
442 | .uevent = scsi_bus_uevent, | 426 | .uevent = scsi_bus_uevent, |
443 | .suspend = scsi_bus_suspend, | 427 | .suspend = scsi_bus_suspend, |
444 | .resume = scsi_bus_resume, | 428 | .resume = scsi_bus_resume, |
445 | .remove = scsi_bus_remove, | ||
446 | }; | 429 | }; |
447 | EXPORT_SYMBOL_GPL(scsi_bus_type); | 430 | EXPORT_SYMBOL_GPL(scsi_bus_type); |
448 | 431 | ||
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 3f64d93b6c8b..2eee9e6e4fe8 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -3397,7 +3397,6 @@ fc_destroy_bsgjob(struct fc_bsg_job *job) | |||
3397 | kfree(job); | 3397 | kfree(job); |
3398 | } | 3398 | } |
3399 | 3399 | ||
3400 | |||
3401 | /** | 3400 | /** |
3402 | * fc_bsg_jobdone - completion routine for bsg requests that the LLD has | 3401 | * fc_bsg_jobdone - completion routine for bsg requests that the LLD has |
3403 | * completed | 3402 | * completed |
@@ -3408,15 +3407,10 @@ fc_bsg_jobdone(struct fc_bsg_job *job) | |||
3408 | { | 3407 | { |
3409 | struct request *req = job->req; | 3408 | struct request *req = job->req; |
3410 | struct request *rsp = req->next_rq; | 3409 | struct request *rsp = req->next_rq; |
3411 | unsigned long flags; | ||
3412 | int err; | 3410 | int err; |
3413 | 3411 | ||
3414 | spin_lock_irqsave(&job->job_lock, flags); | ||
3415 | job->state_flags |= FC_RQST_STATE_DONE; | ||
3416 | job->ref_cnt--; | ||
3417 | spin_unlock_irqrestore(&job->job_lock, flags); | ||
3418 | |||
3419 | err = job->req->errors = job->reply->result; | 3412 | err = job->req->errors = job->reply->result; |
3413 | |||
3420 | if (err < 0) | 3414 | if (err < 0) |
3421 | /* we're only returning the result field in the reply */ | 3415 | /* we're only returning the result field in the reply */ |
3422 | job->req->sense_len = sizeof(uint32_t); | 3416 | job->req->sense_len = sizeof(uint32_t); |
@@ -3433,13 +3427,27 @@ fc_bsg_jobdone(struct fc_bsg_job *job) | |||
3433 | rsp->resid_len -= min(job->reply->reply_payload_rcv_len, | 3427 | rsp->resid_len -= min(job->reply->reply_payload_rcv_len, |
3434 | rsp->resid_len); | 3428 | rsp->resid_len); |
3435 | } | 3429 | } |
3430 | blk_complete_request(req); | ||
3431 | } | ||
3436 | 3432 | ||
3437 | blk_end_request_all(req, err); | 3433 | /** |
3434 | * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests | ||
3435 | * @rq: BSG request that holds the job to be destroyed | ||
3436 | */ | ||
3437 | static void fc_bsg_softirq_done(struct request *rq) | ||
3438 | { | ||
3439 | struct fc_bsg_job *job = rq->special; | ||
3440 | unsigned long flags; | ||
3438 | 3441 | ||
3442 | spin_lock_irqsave(&job->job_lock, flags); | ||
3443 | job->state_flags |= FC_RQST_STATE_DONE; | ||
3444 | job->ref_cnt--; | ||
3445 | spin_unlock_irqrestore(&job->job_lock, flags); | ||
3446 | |||
3447 | blk_end_request_all(rq, rq->errors); | ||
3439 | fc_destroy_bsgjob(job); | 3448 | fc_destroy_bsgjob(job); |
3440 | } | 3449 | } |
3441 | 3450 | ||
3442 | |||
3443 | /** | 3451 | /** |
3444 | * fc_bsg_job_timeout - handler for when a bsg request timesout | 3452 | * fc_bsg_job_timeout - handler for when a bsg request timesout |
3445 | * @req: request that timed out | 3453 | * @req: request that timed out |
@@ -3471,19 +3479,13 @@ fc_bsg_job_timeout(struct request *req) | |||
3471 | "abort failed with status %d\n", err); | 3479 | "abort failed with status %d\n", err); |
3472 | } | 3480 | } |
3473 | 3481 | ||
3474 | if (!done) { | ||
3475 | spin_lock_irqsave(&job->job_lock, flags); | ||
3476 | job->ref_cnt--; | ||
3477 | spin_unlock_irqrestore(&job->job_lock, flags); | ||
3478 | fc_destroy_bsgjob(job); | ||
3479 | } | ||
3480 | |||
3481 | /* the blk_end_sync_io() doesn't check the error */ | 3482 | /* the blk_end_sync_io() doesn't check the error */ |
3482 | return BLK_EH_HANDLED; | 3483 | if (done) |
3484 | return BLK_EH_NOT_HANDLED; | ||
3485 | else | ||
3486 | return BLK_EH_HANDLED; | ||
3483 | } | 3487 | } |
3484 | 3488 | ||
3485 | |||
3486 | |||
3487 | static int | 3489 | static int |
3488 | fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req) | 3490 | fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req) |
3489 | { | 3491 | { |
@@ -3859,7 +3861,7 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host) | |||
3859 | struct fc_internal *i = to_fc_internal(shost->transportt); | 3861 | struct fc_internal *i = to_fc_internal(shost->transportt); |
3860 | struct request_queue *q; | 3862 | struct request_queue *q; |
3861 | int err; | 3863 | int err; |
3862 | char bsg_name[BUS_ID_SIZE]; /*20*/ | 3864 | char bsg_name[20]; |
3863 | 3865 | ||
3864 | fc_host->rqst_q = NULL; | 3866 | fc_host->rqst_q = NULL; |
3865 | 3867 | ||
@@ -3879,6 +3881,7 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host) | |||
3879 | 3881 | ||
3880 | q->queuedata = shost; | 3882 | q->queuedata = shost; |
3881 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); | 3883 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); |
3884 | blk_queue_softirq_done(q, fc_bsg_softirq_done); | ||
3882 | blk_queue_rq_timed_out(q, fc_bsg_job_timeout); | 3885 | blk_queue_rq_timed_out(q, fc_bsg_job_timeout); |
3883 | blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT); | 3886 | blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT); |
3884 | 3887 | ||
@@ -3924,6 +3927,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) | |||
3924 | 3927 | ||
3925 | q->queuedata = rport; | 3928 | q->queuedata = rport; |
3926 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); | 3929 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); |
3930 | blk_queue_softirq_done(q, fc_bsg_softirq_done); | ||
3927 | blk_queue_rq_timed_out(q, fc_bsg_job_timeout); | 3931 | blk_queue_rq_timed_out(q, fc_bsg_job_timeout); |
3928 | blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); | 3932 | blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); |
3929 | 3933 | ||
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index f3e664628d7a..783e33c65eb7 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -692,6 +692,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) | |||
692 | "Too many iscsi targets. Max " | 692 | "Too many iscsi targets. Max " |
693 | "number of targets is %d.\n", | 693 | "number of targets is %d.\n", |
694 | ISCSI_MAX_TARGET - 1); | 694 | ISCSI_MAX_TARGET - 1); |
695 | err = -EOVERFLOW; | ||
695 | goto release_host; | 696 | goto release_host; |
696 | } | 697 | } |
697 | } | 698 | } |
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index d606452297cf..0895d3c71b03 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c | |||
@@ -173,9 +173,9 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost, | |||
173 | ret = handler(shost, rphy, req); | 173 | ret = handler(shost, rphy, req); |
174 | req->errors = ret; | 174 | req->errors = ret; |
175 | 175 | ||
176 | spin_lock_irq(q->queue_lock); | 176 | blk_end_request_all(req, ret); |
177 | 177 | ||
178 | req->end_io(req, ret); | 178 | spin_lock_irq(q->queue_lock); |
179 | } | 179 | } |
180 | } | 180 | } |
181 | 181 | ||
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index 654a34fb04cb..c25bd9a34e02 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c | |||
@@ -46,6 +46,22 @@ | |||
46 | #define DV_RETRIES 3 /* should only need at most | 46 | #define DV_RETRIES 3 /* should only need at most |
47 | * two cc/ua clears */ | 47 | * two cc/ua clears */ |
48 | 48 | ||
49 | /* Our blacklist flags */ | ||
50 | enum { | ||
51 | SPI_BLIST_NOIUS = 0x1, | ||
52 | }; | ||
53 | |||
54 | /* blacklist table, modelled on scsi_devinfo.c */ | ||
55 | static struct { | ||
56 | char *vendor; | ||
57 | char *model; | ||
58 | unsigned flags; | ||
59 | } spi_static_device_list[] __initdata = { | ||
60 | {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS }, | ||
61 | {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS }, | ||
62 | {NULL, NULL, 0} | ||
63 | }; | ||
64 | |||
49 | /* Private data accessors (keep these out of the header file) */ | 65 | /* Private data accessors (keep these out of the header file) */ |
50 | #define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress) | 66 | #define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress) |
51 | #define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex) | 67 | #define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex) |
@@ -207,6 +223,9 @@ static int spi_device_configure(struct transport_container *tc, | |||
207 | { | 223 | { |
208 | struct scsi_device *sdev = to_scsi_device(dev); | 224 | struct scsi_device *sdev = to_scsi_device(dev); |
209 | struct scsi_target *starget = sdev->sdev_target; | 225 | struct scsi_target *starget = sdev->sdev_target; |
226 | unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8], | ||
227 | &sdev->inquiry[16], | ||
228 | SCSI_DEVINFO_SPI); | ||
210 | 229 | ||
211 | /* Populate the target capability fields with the values | 230 | /* Populate the target capability fields with the values |
212 | * gleaned from the device inquiry */ | 231 | * gleaned from the device inquiry */ |
@@ -216,6 +235,10 @@ static int spi_device_configure(struct transport_container *tc, | |||
216 | spi_support_dt(starget) = scsi_device_dt(sdev); | 235 | spi_support_dt(starget) = scsi_device_dt(sdev); |
217 | spi_support_dt_only(starget) = scsi_device_dt_only(sdev); | 236 | spi_support_dt_only(starget) = scsi_device_dt_only(sdev); |
218 | spi_support_ius(starget) = scsi_device_ius(sdev); | 237 | spi_support_ius(starget) = scsi_device_ius(sdev); |
238 | if (bflags & SPI_BLIST_NOIUS) { | ||
239 | dev_info(dev, "Information Units disabled by blacklist\n"); | ||
240 | spi_support_ius(starget) = 0; | ||
241 | } | ||
219 | spi_support_qas(starget) = scsi_device_qas(sdev); | 242 | spi_support_qas(starget) = scsi_device_qas(sdev); |
220 | 243 | ||
221 | return 0; | 244 | return 0; |
@@ -833,7 +856,7 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) | |||
833 | return; | 856 | return; |
834 | } | 857 | } |
835 | 858 | ||
836 | if (!scsi_device_wide(sdev)) { | 859 | if (!spi_support_wide(starget)) { |
837 | spi_max_width(starget) = 0; | 860 | spi_max_width(starget) = 0; |
838 | max_width = 0; | 861 | max_width = 0; |
839 | } | 862 | } |
@@ -860,7 +883,7 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) | |||
860 | return; | 883 | return; |
861 | 884 | ||
862 | /* device can't handle synchronous */ | 885 | /* device can't handle synchronous */ |
863 | if (!scsi_device_sync(sdev) && !scsi_device_dt(sdev)) | 886 | if (!spi_support_sync(starget) && !spi_support_dt(starget)) |
864 | return; | 887 | return; |
865 | 888 | ||
866 | /* len == -1 is the signal that we need to ascertain the | 889 | /* len == -1 is the signal that we need to ascertain the |
@@ -876,13 +899,14 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) | |||
876 | 899 | ||
877 | /* try QAS requests; this should be harmless to set if the | 900 | /* try QAS requests; this should be harmless to set if the |
878 | * target supports it */ | 901 | * target supports it */ |
879 | if (scsi_device_qas(sdev) && spi_max_qas(starget)) { | 902 | if (spi_support_qas(starget) && spi_max_qas(starget)) { |
880 | DV_SET(qas, 1); | 903 | DV_SET(qas, 1); |
881 | } else { | 904 | } else { |
882 | DV_SET(qas, 0); | 905 | DV_SET(qas, 0); |
883 | } | 906 | } |
884 | 907 | ||
885 | if (scsi_device_ius(sdev) && spi_max_iu(starget) && min_period < 9) { | 908 | if (spi_support_ius(starget) && spi_max_iu(starget) && |
909 | min_period < 9) { | ||
886 | /* This u320 (or u640). Set IU transfers */ | 910 | /* This u320 (or u640). Set IU transfers */ |
887 | DV_SET(iu, 1); | 911 | DV_SET(iu, 1); |
888 | /* Then set the optional parameters */ | 912 | /* Then set the optional parameters */ |
@@ -902,7 +926,7 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) | |||
902 | i->f->get_signalling(shost); | 926 | i->f->get_signalling(shost); |
903 | if (spi_signalling(shost) == SPI_SIGNAL_SE || | 927 | if (spi_signalling(shost) == SPI_SIGNAL_SE || |
904 | spi_signalling(shost) == SPI_SIGNAL_HVD || | 928 | spi_signalling(shost) == SPI_SIGNAL_HVD || |
905 | !scsi_device_dt(sdev)) { | 929 | !spi_support_dt(starget)) { |
906 | DV_SET(dt, 0); | 930 | DV_SET(dt, 0); |
907 | } else { | 931 | } else { |
908 | DV_SET(dt, 1); | 932 | DV_SET(dt, 1); |
@@ -1523,7 +1547,21 @@ EXPORT_SYMBOL(spi_release_transport); | |||
1523 | 1547 | ||
1524 | static __init int spi_transport_init(void) | 1548 | static __init int spi_transport_init(void) |
1525 | { | 1549 | { |
1526 | int error = transport_class_register(&spi_transport_class); | 1550 | int error = scsi_dev_info_add_list(SCSI_DEVINFO_SPI, |
1551 | "SCSI Parallel Transport Class"); | ||
1552 | if (!error) { | ||
1553 | int i; | ||
1554 | |||
1555 | for (i = 0; spi_static_device_list[i].vendor; i++) | ||
1556 | scsi_dev_info_list_add_keyed(1, /* compatible */ | ||
1557 | spi_static_device_list[i].vendor, | ||
1558 | spi_static_device_list[i].model, | ||
1559 | NULL, | ||
1560 | spi_static_device_list[i].flags, | ||
1561 | SCSI_DEVINFO_SPI); | ||
1562 | } | ||
1563 | |||
1564 | error = transport_class_register(&spi_transport_class); | ||
1527 | if (error) | 1565 | if (error) |
1528 | return error; | 1566 | return error; |
1529 | error = anon_transport_class_register(&spi_device_class); | 1567 | error = anon_transport_class_register(&spi_device_class); |
@@ -1535,6 +1573,7 @@ static void __exit spi_transport_exit(void) | |||
1535 | transport_class_unregister(&spi_transport_class); | 1573 | transport_class_unregister(&spi_transport_class); |
1536 | anon_transport_class_unregister(&spi_device_class); | 1574 | anon_transport_class_unregister(&spi_device_class); |
1537 | transport_class_unregister(&spi_host_class); | 1575 | transport_class_unregister(&spi_host_class); |
1576 | scsi_dev_info_remove_list(SCSI_DEVINFO_SPI); | ||
1538 | } | 1577 | } |
1539 | 1578 | ||
1540 | MODULE_AUTHOR("Martin Hicks"); | 1579 | MODULE_AUTHOR("Martin Hicks"); |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 878b17a9af30..5616cd780ff3 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -1307,6 +1307,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, | |||
1307 | int sense_valid = 0; | 1307 | int sense_valid = 0; |
1308 | int the_result; | 1308 | int the_result; |
1309 | int retries = 3; | 1309 | int retries = 3; |
1310 | unsigned int alignment; | ||
1310 | unsigned long long lba; | 1311 | unsigned long long lba; |
1311 | unsigned sector_size; | 1312 | unsigned sector_size; |
1312 | 1313 | ||
@@ -1358,6 +1359,16 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, | |||
1358 | return -EOVERFLOW; | 1359 | return -EOVERFLOW; |
1359 | } | 1360 | } |
1360 | 1361 | ||
1362 | /* Logical blocks per physical block exponent */ | ||
1363 | sdkp->hw_sector_size = (1 << (buffer[13] & 0xf)) * sector_size; | ||
1364 | |||
1365 | /* Lowest aligned logical block */ | ||
1366 | alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; | ||
1367 | blk_queue_alignment_offset(sdp->request_queue, alignment); | ||
1368 | if (alignment && sdkp->first_scan) | ||
1369 | sd_printk(KERN_NOTICE, sdkp, | ||
1370 | "physical block alignment offset: %u\n", alignment); | ||
1371 | |||
1361 | sdkp->capacity = lba + 1; | 1372 | sdkp->capacity = lba + 1; |
1362 | return sector_size; | 1373 | return sector_size; |
1363 | } | 1374 | } |
@@ -1409,6 +1420,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, | |||
1409 | } | 1420 | } |
1410 | 1421 | ||
1411 | sdkp->capacity = lba + 1; | 1422 | sdkp->capacity = lba + 1; |
1423 | sdkp->hw_sector_size = sector_size; | ||
1412 | return sector_size; | 1424 | return sector_size; |
1413 | } | 1425 | } |
1414 | 1426 | ||
@@ -1521,11 +1533,17 @@ got_data: | |||
1521 | string_get_size(sz, STRING_UNITS_10, cap_str_10, | 1533 | string_get_size(sz, STRING_UNITS_10, cap_str_10, |
1522 | sizeof(cap_str_10)); | 1534 | sizeof(cap_str_10)); |
1523 | 1535 | ||
1524 | if (sdkp->first_scan || old_capacity != sdkp->capacity) | 1536 | if (sdkp->first_scan || old_capacity != sdkp->capacity) { |
1525 | sd_printk(KERN_NOTICE, sdkp, | 1537 | sd_printk(KERN_NOTICE, sdkp, |
1526 | "%llu %d-byte hardware sectors: (%s/%s)\n", | 1538 | "%llu %d-byte logical blocks: (%s/%s)\n", |
1527 | (unsigned long long)sdkp->capacity, | 1539 | (unsigned long long)sdkp->capacity, |
1528 | sector_size, cap_str_10, cap_str_2); | 1540 | sector_size, cap_str_10, cap_str_2); |
1541 | |||
1542 | if (sdkp->hw_sector_size != sector_size) | ||
1543 | sd_printk(KERN_NOTICE, sdkp, | ||
1544 | "%u-byte physical blocks\n", | ||
1545 | sdkp->hw_sector_size); | ||
1546 | } | ||
1529 | } | 1547 | } |
1530 | 1548 | ||
1531 | /* Rescale capacity to 512-byte units */ | 1549 | /* Rescale capacity to 512-byte units */ |
@@ -1538,6 +1556,7 @@ got_data: | |||
1538 | else if (sector_size == 256) | 1556 | else if (sector_size == 256) |
1539 | sdkp->capacity >>= 1; | 1557 | sdkp->capacity >>= 1; |
1540 | 1558 | ||
1559 | blk_queue_physical_block_size(sdp->request_queue, sdkp->hw_sector_size); | ||
1541 | sdkp->device->sector_size = sector_size; | 1560 | sdkp->device->sector_size = sector_size; |
1542 | } | 1561 | } |
1543 | 1562 | ||
@@ -1776,6 +1795,52 @@ void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) | |||
1776 | } | 1795 | } |
1777 | 1796 | ||
1778 | /** | 1797 | /** |
1798 | * sd_read_block_limits - Query disk device for preferred I/O sizes. | ||
1799 | * @sdkp: disk to query | ||
1800 | */ | ||
1801 | static void sd_read_block_limits(struct scsi_disk *sdkp) | ||
1802 | { | ||
1803 | unsigned int sector_sz = sdkp->device->sector_size; | ||
1804 | char *buffer; | ||
1805 | |||
1806 | /* Block Limits VPD */ | ||
1807 | buffer = scsi_get_vpd_page(sdkp->device, 0xb0); | ||
1808 | |||
1809 | if (buffer == NULL) | ||
1810 | return; | ||
1811 | |||
1812 | blk_queue_io_min(sdkp->disk->queue, | ||
1813 | get_unaligned_be16(&buffer[6]) * sector_sz); | ||
1814 | blk_queue_io_opt(sdkp->disk->queue, | ||
1815 | get_unaligned_be32(&buffer[12]) * sector_sz); | ||
1816 | |||
1817 | kfree(buffer); | ||
1818 | } | ||
1819 | |||
1820 | /** | ||
1821 | * sd_read_block_characteristics - Query block dev. characteristics | ||
1822 | * @sdkp: disk to query | ||
1823 | */ | ||
1824 | static void sd_read_block_characteristics(struct scsi_disk *sdkp) | ||
1825 | { | ||
1826 | char *buffer; | ||
1827 | u16 rot; | ||
1828 | |||
1829 | /* Block Device Characteristics VPD */ | ||
1830 | buffer = scsi_get_vpd_page(sdkp->device, 0xb1); | ||
1831 | |||
1832 | if (buffer == NULL) | ||
1833 | return; | ||
1834 | |||
1835 | rot = get_unaligned_be16(&buffer[4]); | ||
1836 | |||
1837 | if (rot == 1) | ||
1838 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue); | ||
1839 | |||
1840 | kfree(buffer); | ||
1841 | } | ||
1842 | |||
1843 | /** | ||
1779 | * sd_revalidate_disk - called the first time a new disk is seen, | 1844 | * sd_revalidate_disk - called the first time a new disk is seen, |
1780 | * performs disk spin up, read_capacity, etc. | 1845 | * performs disk spin up, read_capacity, etc. |
1781 | * @disk: struct gendisk we care about | 1846 | * @disk: struct gendisk we care about |
@@ -1812,6 +1877,8 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
1812 | */ | 1877 | */ |
1813 | if (sdkp->media_present) { | 1878 | if (sdkp->media_present) { |
1814 | sd_read_capacity(sdkp, buffer); | 1879 | sd_read_capacity(sdkp, buffer); |
1880 | sd_read_block_limits(sdkp); | ||
1881 | sd_read_block_characteristics(sdkp); | ||
1815 | sd_read_write_protect_flag(sdkp, buffer); | 1882 | sd_read_write_protect_flag(sdkp, buffer); |
1816 | sd_read_cache_type(sdkp, buffer); | 1883 | sd_read_cache_type(sdkp, buffer); |
1817 | sd_read_app_tag_own(sdkp, buffer); | 1884 | sd_read_app_tag_own(sdkp, buffer); |
@@ -1934,6 +2001,8 @@ static void sd_probe_async(void *data, async_cookie_t cookie) | |||
1934 | add_disk(gd); | 2001 | add_disk(gd); |
1935 | sd_dif_config_host(sdkp); | 2002 | sd_dif_config_host(sdkp); |
1936 | 2003 | ||
2004 | sd_revalidate_disk(gd); | ||
2005 | |||
1937 | sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", | 2006 | sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", |
1938 | sdp->removable ? "removable " : ""); | 2007 | sdp->removable ? "removable " : ""); |
1939 | } | 2008 | } |
@@ -2054,6 +2123,7 @@ static int sd_remove(struct device *dev) | |||
2054 | 2123 | ||
2055 | async_synchronize_full(); | 2124 | async_synchronize_full(); |
2056 | sdkp = dev_get_drvdata(dev); | 2125 | sdkp = dev_get_drvdata(dev); |
2126 | blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn); | ||
2057 | device_del(&sdkp->dev); | 2127 | device_del(&sdkp->dev); |
2058 | del_gendisk(sdkp->disk); | 2128 | del_gendisk(sdkp->disk); |
2059 | sd_shutdown(dev); | 2129 | sd_shutdown(dev); |
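For illustration, the field layout sd_read_block_limits() above depends on: in the Block Limits VPD page (0xb0), bytes 6-7 hold the optimal transfer length granularity and bytes 12-15 the optimal transfer length, both in units of logical blocks. A small stand-alone decoder (hypothetical name) of those two fields:

```c
#include <stdint.h>

/* Hypothetical helper: extract the two values sd_read_block_limits()
 * feeds into blk_queue_io_min()/blk_queue_io_opt(), scaled from
 * logical blocks to bytes. */
static void decode_vpd_b0(const uint8_t *vpd, uint32_t sector_sz,
			  uint32_t *io_min_bytes, uint32_t *io_opt_bytes)
{
	*io_min_bytes = (uint32_t)((vpd[6] << 8) | vpd[7]) * sector_sz;
	*io_opt_bytes = (((uint32_t)vpd[12] << 24) | ((uint32_t)vpd[13] << 16) |
			 ((uint32_t)vpd[14] << 8) | vpd[15]) * sector_sz;
}
```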
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 708778cf5f06..8474b5bad3fe 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h | |||
@@ -45,6 +45,7 @@ struct scsi_disk { | |||
45 | unsigned int openers; /* protected by BKL for now, yuck */ | 45 | unsigned int openers; /* protected by BKL for now, yuck */ |
46 | sector_t capacity; /* size in 512-byte sectors */ | 46 | sector_t capacity; /* size in 512-byte sectors */ |
47 | u32 index; | 47 | u32 index; |
48 | unsigned short hw_sector_size; | ||
48 | u8 media_present; | 49 | u8 media_present; |
49 | u8 write_prot; | 50 | u8 write_prot; |
50 | u8 protection_type;/* Data Integrity Field */ | 51 | u8 protection_type;/* Data Integrity Field */ |
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index cd350dfc1216..cce0fe4c8a3b 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c | |||
@@ -881,6 +881,7 @@ static int sr_remove(struct device *dev) | |||
881 | { | 881 | { |
882 | struct scsi_cd *cd = dev_get_drvdata(dev); | 882 | struct scsi_cd *cd = dev_get_drvdata(dev); |
883 | 883 | ||
884 | blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn); | ||
884 | del_gendisk(cd->disk); | 885 | del_gendisk(cd->disk); |
885 | 886 | ||
886 | mutex_lock(&sr_ref_mutex); | 887 | mutex_lock(&sr_ref_mutex); |
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index 69ad4945c936..297deb817a5d 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c | |||
@@ -2321,8 +2321,9 @@ static void sym_int_par (struct sym_hcb *np, u_short sist) | |||
2321 | int phase = cmd & 7; | 2321 | int phase = cmd & 7; |
2322 | struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); | 2322 | struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); |
2323 | 2323 | ||
2324 | printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", | 2324 | if (printk_ratelimit()) |
2325 | sym_name(np), hsts, dbc, sbcl); | 2325 | printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", |
2326 | sym_name(np), hsts, dbc, sbcl); | ||
2326 | 2327 | ||
2327 | /* | 2328 | /* |
2328 | * Check that the chip is connected to the SCSI BUS. | 2329 | * Check that the chip is connected to the SCSI BUS. |