Diffstat (limited to 'drivers')
 drivers/block/ll_rw_blk.c             |     9
 drivers/s390/scsi/zfcp_aux.c          |    34
 drivers/scsi/53c700.c                 |     3
 drivers/scsi/53c700.h                 |   192
 drivers/scsi/Kconfig                  |    18
 drivers/scsi/Makefile                 |     1
 drivers/scsi/NCR_D700.c               |     5
 drivers/scsi/aic7xxx/Kconfig.aic7xxx  |     1
 drivers/scsi/aic7xxx/aic79xx_osm.c    |    18
 drivers/scsi/aic7xxx/aic7xxx_osm.c    |  1950
 drivers/scsi/aic7xxx/aic7xxx_osm.h    |    40
 drivers/scsi/aic7xxx/cam.h            |     6
 drivers/scsi/aic7xxx_old.c            |     8
 drivers/scsi/arm/fas216.c             |     2
 drivers/scsi/cpqfcTSinit.c            |     6
 drivers/scsi/cpqfcTSworker.c          |    16
 drivers/scsi/gdth.c                   |    24
 drivers/scsi/gdth.h                   |    41
 drivers/scsi/ips.c                    |    10
 drivers/scsi/lasi700.c                |     1
 drivers/scsi/libata-scsi.c            |    12
 drivers/scsi/lpfc/Makefile            |    32
 drivers/scsi/lpfc/lpfc.h              |   384
 drivers/scsi/lpfc/lpfc_attr.c         |  1291
 drivers/scsi/lpfc/lpfc_compat.h       |    97
 drivers/scsi/lpfc/lpfc_crtn.h         |   216
 drivers/scsi/lpfc/lpfc_ct.c           |  1237
 drivers/scsi/lpfc/lpfc_disc.h         |   206
 drivers/scsi/lpfc/lpfc_els.c          |  3258
 drivers/scsi/lpfc/lpfc_hbadisc.c      |  2537
 drivers/scsi/lpfc/lpfc_hw.h           |  2687
 drivers/scsi/lpfc/lpfc_init.c         |  1739
 drivers/scsi/lpfc/lpfc_logmsg.h       |    41
 drivers/scsi/lpfc/lpfc_mbox.c         |   646
 drivers/scsi/lpfc/lpfc_mem.c          |   179
 drivers/scsi/lpfc/lpfc_nportdisc.c    |  1842
 drivers/scsi/lpfc/lpfc_scsi.c         |  1246
 drivers/scsi/lpfc/lpfc_scsi.h         |   157
 drivers/scsi/lpfc/lpfc_sli.c          |  2885
 drivers/scsi/lpfc/lpfc_sli.h          |   216
 drivers/scsi/lpfc/lpfc_version.h      |    32
 drivers/scsi/pci2000.c                |    16
 drivers/scsi/qla2xxx/Makefile         |     2
 drivers/scsi/qla2xxx/qla_attr.c       |   338
 drivers/scsi/qla2xxx/qla_dbg.c        |     5
 drivers/scsi/qla2xxx/qla_def.h        |   128
 drivers/scsi/qla2xxx/qla_gbl.h        |    28
 drivers/scsi/qla2xxx/qla_init.c       |   981
 drivers/scsi/qla2xxx/qla_inline.h     |    17
 drivers/scsi/qla2xxx/qla_iocb.c       |    98
 drivers/scsi/qla2xxx/qla_isr.c        |   184
 drivers/scsi/qla2xxx/qla_listops.h    |   351
 drivers/scsi/qla2xxx/qla_mbx.c        |    21
 drivers/scsi/qla2xxx/qla_os.c         |  2674
 drivers/scsi/qla2xxx/qla_version.h    |     4
 drivers/scsi/qlogicfc.c               |    18
 drivers/scsi/qlogicisp.c              |     8
 drivers/scsi/scsi.h                   |    15
 drivers/scsi/scsi_error.c             |    25
 drivers/scsi/scsi_lib.c               |     6
 drivers/scsi/scsi_scan.c              |     1
 drivers/scsi/scsi_sysfs.c             |     3
 drivers/scsi/sg.c                     |   211
 drivers/scsi/sim710.c                 |     5
 64 files changed, 22238 insertions(+), 6226 deletions(-)
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 46e54b441663..11ef9d9ea139 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -1715,6 +1715,15 @@ request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 	if (blk_init_free_list(q))
 		goto out_init;
 
+	/*
+	 * if caller didn't supply a lock, they get per-queue locking with
+	 * our embedded lock
+	 */
+	if (!lock) {
+		spin_lock_init(&q->__queue_lock);
+		lock = &q->__queue_lock;
+	}
+
 	q->request_fn = rfn;
 	q->back_merge_fn = ll_back_merge_fn;
 	q->front_merge_fn = ll_front_merge_fn;
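
For context on the hunk above: blk_init_queue() callers may now pass a NULL spinlock and get per-queue locking through the queue's embedded lock. A minimal sketch of a caller relying on that behaviour; the function names are hypothetical, not part of this patch:

    /* Hypothetical request function: runs with q->queue_lock held, which
     * is the embedded __queue_lock when NULL is passed below. */
    static void my_request_fn(request_queue_t *q)
    {
            /* dequeue and service requests here */
    }

    static request_queue_t *my_queue_setup(void)
    {
            /* NULL lock: blk_init_queue() falls back to &q->__queue_lock */
            return blk_init_queue(my_request_fn, NULL);
    }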
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index a393cf4d0313..1f9aeb4accc6 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -52,19 +52,18 @@ static inline int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *,
 static inline int zfcp_sg_list_copy_to_user(void __user *,
 					    struct zfcp_sg_list *, size_t);
 
-static int zfcp_cfdc_dev_ioctl(struct inode *, struct file *,
-			       unsigned int, unsigned long);
+static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long);
 
 #define ZFCP_CFDC_IOC_MAGIC 0xDD
 #define ZFCP_CFDC_IOC \
 	_IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_sense_data)
 
-#ifdef CONFIG_COMPAT
-static struct ioctl_trans zfcp_ioctl_trans = {ZFCP_CFDC_IOC, (void*) sys_ioctl};
-#endif
 
 static struct file_operations zfcp_cfdc_fops = {
-	.ioctl = zfcp_cfdc_dev_ioctl
+	.unlocked_ioctl = zfcp_cfdc_dev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = zfcp_cfdc_dev_ioctl
+#endif
 };
 
 static struct miscdevice zfcp_cfdc_misc = {
@@ -308,23 +307,16 @@ zfcp_module_init(void)
 	if (!zfcp_transport_template)
 		return -ENODEV;
 
-	retval = register_ioctl32_conversion(zfcp_ioctl_trans.cmd,
-					     zfcp_ioctl_trans.handler);
-	if (retval != 0) {
-		ZFCP_LOG_INFO("registration of ioctl32 conversion failed\n");
-		goto out;
-	}
-
 	retval = misc_register(&zfcp_cfdc_misc);
 	if (retval != 0) {
 		ZFCP_LOG_INFO("registration of misc device "
 			      "zfcp_cfdc failed\n");
-		goto out_misc_register;
-	} else {
-		ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n",
-			       ZFCP_CFDC_DEV_MAJOR, zfcp_cfdc_misc.minor);
+		goto out;
 	}
 
+	ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n",
+		       ZFCP_CFDC_DEV_MAJOR, zfcp_cfdc_misc.minor);
+
 	/* Initialise proc semaphores */
 	sema_init(&zfcp_data.config_sema, 1);
 
@@ -348,8 +340,6 @@ zfcp_module_init(void)
 
  out_ccw_register:
 	misc_deregister(&zfcp_cfdc_misc);
- out_misc_register:
-	unregister_ioctl32_conversion(zfcp_ioctl_trans.cmd);
  out:
 	return retval;
 }
@@ -370,9 +360,9 @@ zfcp_module_init(void)
  * -EPERM	- Cannot create or queue FSF request or create SBALs
  * -ERESTARTSYS- Received signal (is mapped to EAGAIN by VFS)
  */
-static int
-zfcp_cfdc_dev_ioctl(struct inode *inode, struct file *file,
-		    unsigned int command, unsigned long buffer)
+static long
+zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
+		    unsigned long buffer)
 {
 	struct zfcp_cfdc_sense_data *sense_data, __user *sense_data_user;
 	struct zfcp_adapter *adapter = NULL;
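
The same conversion pattern applies to any character device that previously paired a BKL-held .ioctl with register_ioctl32_conversion(): one handler, returning long and taking no inode argument, can serve both entry points. A hedged sketch with hypothetical names; reusing the handler for .compat_ioctl is only safe when, as here, the ioctl ABI has no 32/64-bit layout differences:

    static long foo_dev_ioctl(struct file *file, unsigned int command,
                              unsigned long buffer)
    {
            void __user *argp = (void __user *) buffer;
            /* copy_from_user(..., argp, ...), act, copy_to_user(argp, ...) */
            return 0;
    }

    static struct file_operations foo_fops = {
            .unlocked_ioctl = foo_dev_ioctl,
    #ifdef CONFIG_COMPAT
            .compat_ioctl   = foo_dev_ioctl,
    #endif
    };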
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index a591fcb8aab1..4b1bb529f676 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -389,8 +389,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
 	host->max_lun = NCR_700_MAX_LUNS;
 	BUG_ON(NCR_700_transport_template == NULL);
 	host->transportt = NCR_700_transport_template;
-	host->unique_id = hostdata->base;
-	host->base = hostdata->base;
+	host->unique_id = (unsigned long)hostdata->base;
 	hostdata->eh_complete = NULL;
 	host->hostdata[0] = (unsigned long)hostdata;
 	/* kick the chip */
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index df4aa30ae0aa..e86012cf6ab7 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -14,10 +14,6 @@
 #include <scsi/scsi_device.h>
 
 
-#if defined(CONFIG_53C700_MEM_MAPPED) && defined(CONFIG_53C700_IO_MAPPED)
-#define CONFIG_53C700_BOTH_MAPPED
-#endif
-
 /* Turn on for general debugging---too verbose for normal use */
 #undef NCR_700_DEBUG
 /* Debug the tag queues, checking hash queue allocation and deallocation
@@ -49,13 +45,6 @@
 /* magic byte identifying an internally generated REQUEST_SENSE command */
 #define NCR_700_INTERNAL_SENSE_MAGIC 0x42
 
-/* WARNING: Leave this in for now: the dependency preprocessor doesn't
- * pick up file specific flags, so must define here if they are not
- * set */
-#if !defined(CONFIG_53C700_IO_MAPPED) && !defined(CONFIG_53C700_MEM_MAPPED)
-#error "Config.in must define either CONFIG_53C700_IO_MAPPED or CONFIG_53C700_MEM_MAPPED to use this scsi core."
-#endif
-
 struct NCR_700_Host_Parameters;
 
 /* These are the externally used routines */
@@ -184,7 +173,7 @@ struct NCR_700_command_slot {
 struct NCR_700_Host_Parameters {
 	/* These must be filled in by the calling driver */
 	int	clock;			/* board clock speed in MHz */
-	unsigned long	base;		/* the base for the port (copied to host) */
+	void __iomem	*base;		/* the base for the port (copied to host) */
 	struct device	*dev;
 	__u32	dmode_extra;	/* adjustable bus settings */
 	__u32	differential:1;	/* if we are differential */
@@ -199,9 +188,6 @@ struct NCR_700_Host_Parameters {
 	/* NOTHING BELOW HERE NEEDS ALTERING */
 	__u32	fast:1;		/* if we can alter the SCSI bus clock
 				   speed (so can negiotiate sync) */
-#ifdef CONFIG_53C700_BOTH_MAPPED
-	__u32	mem_mapped;	/* set if memory mapped */
-#endif
 	int	sync_clock;	/* The speed of the SYNC core */
 
 	__u32	*script;	/* pointer to script location */
@@ -246,12 +232,18 @@ struct NCR_700_Host_Parameters {
 #ifdef CONFIG_53C700_LE_ON_BE
 #define bE	(hostdata->force_le_on_be ? 0 : 3)
 #define bSWAP	(hostdata->force_le_on_be)
+/* This is terrible, but there's no raw version of ioread32.  That means
+ * that on a be board we swap twice (once in ioread32 and once again to
+ * get the value correct) */
+#define bS_to_io(x)	((hostdata->force_le_on_be) ? (x) : cpu_to_le32(x))
 #elif defined(__BIG_ENDIAN)
 #define bE	3
 #define bSWAP	0
+#define bS_to_io(x)	(x)
 #elif defined(__LITTLE_ENDIAN)
 #define bE	0
 #define bSWAP	0
+#define bS_to_io(x)	(x)
 #else
 #error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined, did you include byteorder.h?"
 #endif
@@ -455,91 +447,42 @@ struct NCR_700_Host_Parameters {
 
 
 static inline __u8
-NCR_700_mem_readb(struct Scsi_Host *host, __u32 reg)
-{
-	const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
-	return readb(host->base + (reg^bE));
-}
-
-static inline __u32
-NCR_700_mem_readl(struct Scsi_Host *host, __u32 reg)
-{
-	__u32 value = __raw_readl(host->base + reg);
-	const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
-#if 1
-	/* sanity check the register */
-	if((reg & 0x3) != 0)
-		BUG();
-#endif
-
-	return bS_to_cpu(value);
-}
-
-static inline void
-NCR_700_mem_writeb(__u8 value, struct Scsi_Host *host, __u32 reg)
-{
-	const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
-	writeb(value, host->base + (reg^bE));
-}
-
-static inline void
-NCR_700_mem_writel(__u32 value, struct Scsi_Host *host, __u32 reg)
-{
-	const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
-#if 1
-	/* sanity check the register */
-	if((reg & 0x3) != 0)
-		BUG();
-#endif
-
-	__raw_writel(bS_to_host(value), host->base + reg);
-}
-
-static inline __u8
-NCR_700_io_readb(struct Scsi_Host *host, __u32 reg)
+NCR_700_readb(struct Scsi_Host *host, __u32 reg)
 {
-	const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
+	const struct NCR_700_Host_Parameters *hostdata
 		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
 
-	return inb(host->base + (reg^bE));
+	return ioread8(hostdata->base + (reg^bE));
 }
 
 static inline __u32
-NCR_700_io_readl(struct Scsi_Host *host, __u32 reg)
+NCR_700_readl(struct Scsi_Host *host, __u32 reg)
 {
-	__u32 value = inl(host->base + reg);
-	const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
+	const struct NCR_700_Host_Parameters *hostdata
 		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
+	__u32 value = ioread32(hostdata->base + reg);
 #if 1
 	/* sanity check the register */
 	if((reg & 0x3) != 0)
 		BUG();
 #endif
 
-	return bS_to_cpu(value);
+	return bS_to_io(value);
 }
 
 static inline void
-NCR_700_io_writeb(__u8 value, struct Scsi_Host *host, __u32 reg)
+NCR_700_writeb(__u8 value, struct Scsi_Host *host, __u32 reg)
 {
-	const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
+	const struct NCR_700_Host_Parameters *hostdata
 		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
 
-	outb(value, host->base + (reg^bE));
+	iowrite8(value, hostdata->base + (reg^bE));
 }
 
 static inline void
-NCR_700_io_writel(__u32 value, struct Scsi_Host *host, __u32 reg)
+NCR_700_writel(__u32 value, struct Scsi_Host *host, __u32 reg)
 {
-	const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
+	const struct NCR_700_Host_Parameters *hostdata
 		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
 
 #if 1
@@ -548,102 +491,7 @@ NCR_700_io_writel(__u32 value, struct Scsi_Host *host, __u32 reg)
 		BUG();
 #endif
 
-	outl(bS_to_host(value), host->base + reg);
-}
-
-#ifdef CONFIG_53C700_BOTH_MAPPED
-
-static inline __u8
-NCR_700_readb(struct Scsi_Host *host, __u32 reg)
-{
-	__u8 val;
-
-	const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
-	if(hostdata->mem_mapped)
-		val = NCR_700_mem_readb(host, reg);
-	else
-		val = NCR_700_io_readb(host, reg);
-
-	return val;
-}
-
-static inline __u32
-NCR_700_readl(struct Scsi_Host *host, __u32 reg)
-{
-	__u32 val;
-
-	const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
-	if(hostdata->mem_mapped)
-		val = NCR_700_mem_readl(host, reg);
-	else
-		val = NCR_700_io_readl(host, reg);
-
-	return val;
-}
-
-static inline void
-NCR_700_writeb(__u8 value, struct Scsi_Host *host, __u32 reg)
-{
-	const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
-	if(hostdata->mem_mapped)
-		NCR_700_mem_writeb(value, host, reg);
-	else
-		NCR_700_io_writeb(value, host, reg);
-}
-
-static inline void
-NCR_700_writel(__u32 value, struct Scsi_Host *host, __u32 reg)
-{
-	const struct NCR_700_Host_Parameters *hostdata __attribute__((unused))
-		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
-
-	if(hostdata->mem_mapped)
-		NCR_700_mem_writel(value, host, reg);
-	else
-		NCR_700_io_writel(value, host, reg);
-}
-
-static inline void
-NCR_700_set_mem_mapped(struct NCR_700_Host_Parameters *hostdata)
-{
-	hostdata->mem_mapped = 1;
-}
-
-static inline void
-NCR_700_set_io_mapped(struct NCR_700_Host_Parameters *hostdata)
-{
-	hostdata->mem_mapped = 0;
+	iowrite32(bS_to_io(value), hostdata->base + reg);
 }
 
-
-#elif defined(CONFIG_53C700_IO_MAPPED)
-
-#define NCR_700_readb NCR_700_io_readb
-#define NCR_700_readl NCR_700_io_readl
-#define NCR_700_writeb NCR_700_io_writeb
-#define NCR_700_writel NCR_700_io_writel
-
-#define NCR_700_set_io_mapped(x)
-#define NCR_700_set_mem_mapped(x) error I/O mapped only
-
-#elif defined(CONFIG_53C700_MEM_MAPPED)
-
-#define NCR_700_readb NCR_700_mem_readb
-#define NCR_700_readl NCR_700_mem_readl
-#define NCR_700_writeb NCR_700_mem_writeb
-#define NCR_700_writel NCR_700_mem_writel
-
-#define NCR_700_set_io_mapped(x) error MEM mapped only
-#define NCR_700_set_mem_mapped(x)
-
-#else
-#error neither CONFIG_53C700_MEM_MAPPED nor CONFIG_53C700_IO_MAPPED is set
-#endif
-
 #endif
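
What makes the rewrite above possible is the iomap API: ioport_map() and ioremap() both return a void __iomem * cookie, and ioread*/iowrite* dispatch on the cookie at run time, so one accessor serves port- and memory-mapped boards alike. A minimal sketch under that assumption (register offset and function names are illustrative, not from the driver):

    #include <asm/io.h>     /* ioread8, iowrite8, ioport_map, ioremap */

    static u8 board_status(void __iomem *base)
    {
            /* the same call works whether base came from ioport_map()
             * or from ioremap() */
            return ioread8(base + 0x01);
    }

    static void __iomem *board_map(unsigned long io_port)
    {
            /* port-I/O board; a memory-mapped board would instead use
             * ioremap(phys_base, 64) to obtain the same kind of cookie */
            return ioport_map(io_port, 64);
    }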
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index d22b32f4662d..750b11cefd93 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -942,11 +942,6 @@ config SCSI_NCR_D700
 	  Unless you have an NCR manufactured machine, the chances are that
 	  you do not have this SCSI card, so say N.
 
-config 53C700_IO_MAPPED
-	bool
-	depends on SCSI_NCR_D700
-	default y
-
 config SCSI_LASI700
 	tristate "HP Lasi SCSI support for 53c700/710"
 	depends on GSC && SCSI
@@ -956,11 +951,6 @@ config SCSI_LASI700
 	  many PA-RISC workstations & servers.  If you do not know whether you
 	  have a Lasi chip, it is safe to say "Y" here.
 
-config 53C700_MEM_MAPPED
-	bool
-	depends on SCSI_LASI700
-	default y
-
 config 53C700_LE_ON_BE
 	bool
 	depends on SCSI_LASI700
@@ -1324,6 +1314,14 @@ config SCSI_QLOGICPTI
 
 source "drivers/scsi/qla2xxx/Kconfig"
 
+config SCSI_LPFC
+	tristate "Emulex LightPulse Fibre Channel Support"
+	depends on PCI && SCSI
+	select SCSI_FC_ATTRS
+	help
+	  This lpfc driver supports the Emulex LightPulse
+	  Family of Fibre Channel PCI host adapters.
+
 config SCSI_SEAGATE
 	tristate "Seagate ST-02 and Future Domain TMC-8xx SCSI support"
 	depends on X86 && ISA && SCSI && BROKEN
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 29fcee35ec01..9cb9fe7d623a 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -80,6 +80,7 @@ obj-$(CONFIG_SCSI_QLOGIC_ISP) += qlogicisp.o
 obj-$(CONFIG_SCSI_QLOGIC_FC)	+= qlogicfc.o
 obj-$(CONFIG_SCSI_QLOGIC_1280)	+= qla1280.o
 obj-$(CONFIG_SCSI_QLA2XXX)	+= qla2xxx/
+obj-$(CONFIG_SCSI_LPFC)		+= lpfc/
 obj-$(CONFIG_SCSI_PAS16)	+= pas16.o
 obj-$(CONFIG_SCSI_SEAGATE)	+= seagate.o
 obj-$(CONFIG_SCSI_FD_8xx)	+= seagate.o
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index 507751941f1e..e993a7ba276f 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -197,12 +197,10 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
 	}
 
 	/* Fill in the three required pieces of hostdata */
-	hostdata->base = region;
+	hostdata->base = ioport_map(region, 64);
 	hostdata->differential = (((1<<siop) & differential) != 0);
 	hostdata->clock = NCR_D700_CLOCK_MHZ;
 
-	NCR_700_set_io_mapped(hostdata);
-
 	/* and register the siop */
 	host = NCR_700_detect(&NCR_D700_driver_template, hostdata, p->dev);
 	if (!host) {
@@ -214,6 +212,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
 	/* FIXME: read this from SUS */
 	host->this_id = id_array[slot * 2 + siop];
 	host->irq = irq;
+	host->base = region;
 	scsi_scan_host(host);
 
 	return 0;
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
index 8398e0dd4810..ac8de03c9fa2 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
@@ -5,6 +5,7 @@
 config SCSI_AIC7XXX
 	tristate "Adaptec AIC7xxx Fast -> U160 support (New Driver)"
 	depends on (PCI || EISA) && SCSI
+	select SCSI_SPI_ATTRS
 	---help---
 	This driver supports all of Adaptec's Fast through Ultra 160 PCI
 	based SCSI controllers as well as the aic7770 based EISA and VLB
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index fb2877c303f0..550c9921691a 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -687,7 +687,7 @@ ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
 	int direction;
 
 	cmd = scb->io_ctx;
-	direction = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+	direction = cmd->sc_data_direction;
 	ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
 	if (cmd->use_sg != 0) {
 		struct scatterlist *sg;
@@ -3338,7 +3338,7 @@ ahd_linux_dv_inq(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
 	}
 
 	ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
-	cmd->sc_data_direction = SCSI_DATA_READ;
+	cmd->sc_data_direction = DMA_FROM_DEVICE;
 	cmd->cmd_len = 6;
 	cmd->cmnd[0] = INQUIRY;
 	cmd->cmnd[4] = request_length;
@@ -3363,7 +3363,7 @@ ahd_linux_dv_tur(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
 #endif
 	/* Do a TUR to clear out any non-fatal transitional state */
 	ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
-	cmd->sc_data_direction = SCSI_DATA_NONE;
+	cmd->sc_data_direction = DMA_NONE;
 	cmd->cmd_len = 6;
 	cmd->cmnd[0] = TEST_UNIT_READY;
 }
@@ -3385,7 +3385,7 @@ ahd_linux_dv_rebd(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
 		free(targ->dv_buffer, M_DEVBUF);
 	targ->dv_buffer = malloc(AHD_REBD_LEN, M_DEVBUF, M_WAITOK);
 	ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
-	cmd->sc_data_direction = SCSI_DATA_READ;
+	cmd->sc_data_direction = DMA_FROM_DEVICE;
 	cmd->cmd_len = 10;
 	cmd->cmnd[0] = READ_BUFFER;
 	cmd->cmnd[1] = 0x0b;
@@ -3407,7 +3407,7 @@ ahd_linux_dv_web(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
 	}
 #endif
 	ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
-	cmd->sc_data_direction = SCSI_DATA_WRITE;
+	cmd->sc_data_direction = DMA_TO_DEVICE;
 	cmd->cmd_len = 10;
 	cmd->cmnd[0] = WRITE_BUFFER;
 	cmd->cmnd[1] = 0x0a;
@@ -3429,7 +3429,7 @@ ahd_linux_dv_reb(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
 	}
 #endif
 	ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
-	cmd->sc_data_direction = SCSI_DATA_READ;
+	cmd->sc_data_direction = DMA_FROM_DEVICE;
 	cmd->cmd_len = 10;
 	cmd->cmnd[0] = READ_BUFFER;
 	cmd->cmnd[1] = 0x0a;
@@ -3455,7 +3455,7 @@ ahd_linux_dv_su(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
 	}
 #endif
 	ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
-	cmd->sc_data_direction = SCSI_DATA_NONE;
+	cmd->sc_data_direction = DMA_NONE;
 	cmd->cmd_len = 6;
 	cmd->cmnd[0] = START_STOP_UNIT;
 	cmd->cmnd[4] = le | SSS_START;
@@ -4018,7 +4018,7 @@ ahd_linux_run_device_queue(struct ahd_softc *ahd, struct ahd_linux_device *dev)
 		int dir;
 
 		cur_seg = (struct scatterlist *)cmd->request_buffer;
-		dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+		dir = cmd->sc_data_direction;
 		nseg = pci_map_sg(ahd->dev_softc, cur_seg,
 				  cmd->use_sg, dir);
 		scb->platform_data->xfer_len = 0;
@@ -4038,7 +4038,7 @@ ahd_linux_run_device_queue(struct ahd_softc *ahd, struct ahd_linux_device *dev)
 		int dir;
 
 		sg = scb->sg_list;
-		dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+		dir = cmd->sc_data_direction;
 		addr = pci_map_single(ahd->dev_softc,
 				      cmd->request_buffer,
 				      cmd->request_bufflen, dir);
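
The SCSI_DATA_* to DMA_* substitutions above are drop-in replacements: the old SCSI direction constants carried the same numeric values as enum dma_data_direction, which is also why the scsi_to_pci_dma_dir() translation could be deleted outright. A summary table, from this editor's reading of the headers of that era, so treat it as a hedged note rather than part of the patch:

    /* old constant          value   enum dma_data_direction */
    /* SCSI_DATA_UNKNOWN       0     DMA_BIDIRECTIONAL       */
    /* SCSI_DATA_WRITE         1     DMA_TO_DEVICE           */
    /* SCSI_DATA_READ          2     DMA_FROM_DEVICE         */
    /* SCSI_DATA_NONE          3     DMA_NONE                */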
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 031c6aaa5ca5..d74b99dab7ec 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -122,6 +122,10 @@
 #include "aic7xxx_osm.h"
 #include "aic7xxx_inline.h"
 #include <scsi/scsicam.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+static struct scsi_transport_template *ahc_linux_transport_template = NULL;
 
 /*
  * Include aiclib.c as part of our
@@ -271,39 +275,6 @@ static adapter_tag_info_t aic7xxx_tag_info[] =
 };
 
 /*
- * DV option:
- *
- * positive value = DV Enabled
- * zero = DV Disabled
- * negative value = DV Default for adapter type/seeprom
- */
-#ifdef CONFIG_AIC7XXX_DV_SETTING
-#define AIC7XXX_CONFIGED_DV CONFIG_AIC7XXX_DV_SETTING
-#else
-#define AIC7XXX_CONFIGED_DV -1
-#endif
-
-static int8_t aic7xxx_dv_settings[] =
-{
-	AIC7XXX_CONFIGED_DV,
-	AIC7XXX_CONFIGED_DV,
-	AIC7XXX_CONFIGED_DV,
-	AIC7XXX_CONFIGED_DV,
-	AIC7XXX_CONFIGED_DV,
-	AIC7XXX_CONFIGED_DV,
-	AIC7XXX_CONFIGED_DV,
-	AIC7XXX_CONFIGED_DV,
-	AIC7XXX_CONFIGED_DV,
-	AIC7XXX_CONFIGED_DV,
-	AIC7XXX_CONFIGED_DV,
-	AIC7XXX_CONFIGED_DV,
-	AIC7XXX_CONFIGED_DV,
-	AIC7XXX_CONFIGED_DV,
-	AIC7XXX_CONFIGED_DV,
-	AIC7XXX_CONFIGED_DV
-};
-
-/*
  * There should be a specific return value for this in scsi.h, but
  * it seems that most drivers ignore it.
  */
@@ -450,7 +421,6 @@ MODULE_PARM_DESC(aic7xxx,
450" tag_info:<tag_str> Set per-target tag depth\n" 421" tag_info:<tag_str> Set per-target tag depth\n"
451" global_tag_depth:<int> Global tag depth for every target\n" 422" global_tag_depth:<int> Global tag depth for every target\n"
452" on every bus\n" 423" on every bus\n"
453" dv:<dv_settings> Set per-controller Domain Validation Setting.\n"
454" seltime:<int> Selection Timeout\n" 424" seltime:<int> Selection Timeout\n"
455" (0/256ms,1/128ms,2/64ms,3/32ms)\n" 425" (0/256ms,1/128ms,2/64ms,3/32ms)\n"
456"\n" 426"\n"
@@ -467,7 +437,6 @@ static void ahc_linux_handle_scsi_status(struct ahc_softc *,
 					 struct scb *);
 static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc,
 					 Scsi_Cmnd *cmd);
-static void ahc_linux_filter_inquiry(struct ahc_softc*, struct ahc_devinfo*);
 static void ahc_linux_sem_timeout(u_long arg);
 static void ahc_linux_freeze_simq(struct ahc_softc *ahc);
 static void ahc_linux_release_simq(u_long arg);
@@ -476,49 +445,8 @@ static int ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag);
 static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc);
 static void ahc_linux_size_nseg(void);
 static void ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc);
-static void ahc_linux_start_dv(struct ahc_softc *ahc);
-static void ahc_linux_dv_timeout(struct scsi_cmnd *cmd);
-static int  ahc_linux_dv_thread(void *data);
-static void ahc_linux_kill_dv_thread(struct ahc_softc *ahc);
-static void ahc_linux_dv_target(struct ahc_softc *ahc, u_int target);
-static void ahc_linux_dv_transition(struct ahc_softc *ahc,
-				    struct scsi_cmnd *cmd,
-				    struct ahc_devinfo *devinfo,
-				    struct ahc_linux_target *targ);
-static void ahc_linux_dv_fill_cmd(struct ahc_softc *ahc,
-				  struct scsi_cmnd *cmd,
-				  struct ahc_devinfo *devinfo);
-static void ahc_linux_dv_inq(struct ahc_softc *ahc,
-			     struct scsi_cmnd *cmd,
-			     struct ahc_devinfo *devinfo,
-			     struct ahc_linux_target *targ,
-			     u_int request_length);
-static void ahc_linux_dv_tur(struct ahc_softc *ahc,
-			     struct scsi_cmnd *cmd,
-			     struct ahc_devinfo *devinfo);
-static void ahc_linux_dv_rebd(struct ahc_softc *ahc,
-			      struct scsi_cmnd *cmd,
-			      struct ahc_devinfo *devinfo,
-			      struct ahc_linux_target *targ);
-static void ahc_linux_dv_web(struct ahc_softc *ahc,
-			     struct scsi_cmnd *cmd,
-			     struct ahc_devinfo *devinfo,
-			     struct ahc_linux_target *targ);
-static void ahc_linux_dv_reb(struct ahc_softc *ahc,
-			     struct scsi_cmnd *cmd,
-			     struct ahc_devinfo *devinfo,
-			     struct ahc_linux_target *targ);
-static void ahc_linux_dv_su(struct ahc_softc *ahc,
-			    struct scsi_cmnd *cmd,
-			    struct ahc_devinfo *devinfo,
-			    struct ahc_linux_target *targ);
-static int ahc_linux_fallback(struct ahc_softc *ahc,
-			      struct ahc_devinfo *devinfo);
-static void ahc_linux_dv_complete(Scsi_Cmnd *cmd);
-static void ahc_linux_generate_dv_pattern(struct ahc_linux_target *targ);
 static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc,
 				     struct ahc_devinfo *devinfo);
-static u_int ahc_linux_user_dv_setting(struct ahc_softc *ahc);
 static void ahc_linux_device_queue_depth(struct ahc_softc *ahc,
 					 struct ahc_linux_device *dev);
 static struct ahc_linux_target* ahc_linux_alloc_target(struct ahc_softc*,
@@ -534,7 +462,6 @@ static void ahc_linux_run_device_queue(struct ahc_softc*,
 					   struct ahc_linux_device*);
 static void ahc_linux_setup_tag_info_global(char *p);
 static aic_option_callback_t ahc_linux_setup_tag_info;
-static aic_option_callback_t ahc_linux_setup_dv;
 static int  aic7xxx_setup(char *s);
 static int  ahc_linux_next_unit(void);
 static void ahc_runq_tasklet(unsigned long data);
@@ -663,8 +590,7 @@ ahc_linux_next_device_to_run(struct ahc_softc *ahc)
 {
 
 	if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0
-	 || (ahc->platform_data->qfrozen != 0
-	  && AHC_DV_SIMQ_FROZEN(ahc) == 0))
+	 || (ahc->platform_data->qfrozen != 0))
 		return (NULL);
 	return (TAILQ_FIRST(&ahc->platform_data->device_runq));
 }
@@ -693,12 +619,12 @@ ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
 
 		sg = (struct scatterlist *)cmd->request_buffer;
 		pci_unmap_sg(ahc->dev_softc, sg, cmd->use_sg,
-			     scsi_to_pci_dma_dir(cmd->sc_data_direction));
+			     cmd->sc_data_direction);
 	} else if (cmd->request_bufflen != 0) {
 		pci_unmap_single(ahc->dev_softc,
 				 scb->platform_data->buf_busaddr,
 				 cmd->request_bufflen,
-				 scsi_to_pci_dma_dir(cmd->sc_data_direction));
+				 cmd->sc_data_direction);
 	}
 }
 
@@ -962,8 +888,7 @@ ahc_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *))
 	 * DV commands through so long as we are only frozen to
 	 * perform DV.
 	 */
-	if (ahc->platform_data->qfrozen != 0
-	 && AHC_DV_CMD(cmd) == 0) {
+	if (ahc->platform_data->qfrozen != 0) {
 
 		ahc_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ);
 		ahc_linux_queue_cmd_complete(ahc, cmd);
@@ -1030,6 +955,11 @@ ahc_linux_slave_configure(Scsi_Device *device)
 		ahc_linux_device_queue_depth(ahc, dev);
 	}
 	ahc_midlayer_entrypoint_unlock(ahc, &flags);
+
+	/* Initial Domain Validation */
+	if (!spi_initial_dv(device->sdev_target))
+		spi_dv_device(device);
+
 	return (0);
 }
 
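
The five lines added here are the heart of this aic7xxx change: domain validation moves out of the driver-private DV thread (removed wholesale in the last hunk of this file) and into the generic SPI transport class. The usage pattern, sketched with a hypothetical scsi_device pointer:

    /* In a driver's slave_configure(): let scsi_transport_spi validate
     * transfer parameters once per target. */
    if (!spi_initial_dv(sdev->sdev_target))    /* DV not yet performed? */
            spi_dv_device(sdev);               /* negotiate and verify */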
@@ -1545,18 +1475,6 @@ ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
 	}
 }
 
-static void
-ahc_linux_setup_dv(u_long arg, int instance, int targ, int32_t value)
-{
-
-	if ((instance >= 0)
-	 && (instance < NUM_ELEMENTS(aic7xxx_dv_settings))) {
-		aic7xxx_dv_settings[instance] = value;
-		if (bootverbose)
-			printf("dv[%d] = %d\n", instance, value);
-	}
-}
-
 /*
  * Handle Linux boot parameters. This routine allows for assigning a value
  * to a parameter with a ':' between the parameter and the value.
@@ -1616,9 +1534,6 @@ aic7xxx_setup(char *s)
 		} else if (strncmp(p, "tag_info", n) == 0) {
 			s = aic_parse_brace_option("tag_info", p + n, end,
 						   2, ahc_linux_setup_tag_info, 0);
-		} else if (strncmp(p, "dv", n) == 0) {
-			s = aic_parse_brace_option("dv", p + n, end, 1,
-						   ahc_linux_setup_dv, 0);
 		} else if (p[n] == ':') {
 			*(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
 		} else if (strncmp(p, "verbose", n) == 0) {
@@ -1641,7 +1556,6 @@ ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template)
 	struct	Scsi_Host *host;
 	char	*new_name;
 	u_long	s;
-	u_int	targ_offset;
 
 	template->name = ahc->description;
 	host = scsi_host_alloc(template, sizeof(struct ahc_softc *));
@@ -1677,57 +1591,11 @@ ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template)
 	scsi_set_pci_device(host, ahc->dev_softc);
 #endif
 	ahc_linux_initialize_scsi_bus(ahc);
-	ahc_unlock(ahc, &s);
-	ahc->platform_data->dv_pid = kernel_thread(ahc_linux_dv_thread, ahc, 0);
-	ahc_lock(ahc, &s);
-	if (ahc->platform_data->dv_pid < 0) {
-		printf("%s: Failed to create DV thread, error= %d\n",
-		       ahc_name(ahc), ahc->platform_data->dv_pid);
-		return (-ahc->platform_data->dv_pid);
-	}
-	/*
-	 * Initially allocate *all* of our linux target objects
-	 * so that the DV thread will scan them all in parallel
-	 * just after driver initialization.  Any device that
-	 * does not exist will have its target object destroyed
-	 * by the selection timeout handler.  In the case of a
-	 * device that appears after the initial DV scan, async
-	 * negotiation will occur for the first command, and DV
-	 * will comence should that first command be successful.
-	 */
-	for (targ_offset = 0;
-	     targ_offset < host->max_id * (host->max_channel + 1);
-	     targ_offset++) {
-		u_int channel;
-		u_int target;
-
-		channel = 0;
-		target = targ_offset;
-		if (target > 7
-		 && (ahc->features & AHC_TWIN) != 0) {
-			channel = 1;
-			target &= 0x7;
-		}
-		/*
-		 * Skip our own ID.  Some Compaq/HP storage devices
-		 * have enclosure management devices that respond to
-		 * single bit selection (i.e. selecting ourselves).
-		 * It is expected that either an external application
-		 * or a modified kernel will be used to probe this
-		 * ID if it is appropriate.  To accommodate these
-		 * installations, ahc_linux_alloc_target() will allocate
-		 * for our ID if asked to do so.
-		 */
-		if ((channel == 0 && target == ahc->our_id)
-		 || (channel == 1 && target == ahc->our_id_b))
-			continue;
-
-		ahc_linux_alloc_target(ahc, channel, target);
-	}
 	ahc_intr_enable(ahc, TRUE);
-	ahc_linux_start_dv(ahc);
 	ahc_unlock(ahc, &s);
 
+	host->transportt = ahc_linux_transport_template;
+
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
 	scsi_add_host(host, (ahc->dev_softc ? &ahc->dev_softc->dev : NULL)); /* XXX handle failure */
 	scsi_scan_host(host);
@@ -1860,8 +1728,6 @@ ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
 	ahc->platform_data->completeq_timer.function =
 	    (ahc_linux_callback_t *)ahc_linux_thread_run_complete_queue;
 	init_MUTEX_LOCKED(&ahc->platform_data->eh_sem);
-	init_MUTEX_LOCKED(&ahc->platform_data->dv_sem);
-	init_MUTEX_LOCKED(&ahc->platform_data->dv_cmd_sem);
 	tasklet_init(&ahc->platform_data->runq_tasklet, ahc_runq_tasklet,
 		     (unsigned long)ahc);
 	ahc->seltime = (aic7xxx_seltime & 0x3) << 4;
@@ -1881,7 +1747,6 @@ ahc_platform_free(struct ahc_softc *ahc)
 
 	if (ahc->platform_data != NULL) {
 		del_timer_sync(&ahc->platform_data->completeq_timer);
-		ahc_linux_kill_dv_thread(ahc);
 		tasklet_kill(&ahc->platform_data->runq_tasklet);
 		if (ahc->platform_data->host != NULL) {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
@@ -2120,1331 +1985,6 @@ ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc)
2120 ahc_unlock(ahc, &flags); 1985 ahc_unlock(ahc, &flags);
2121} 1986}
2122 1987
2123static void
2124ahc_linux_start_dv(struct ahc_softc *ahc)
2125{
2126
2127 /*
2128 * Freeze the simq and signal ahc_linux_queue to not let any
2129 * more commands through.
2130 */
2131 if ((ahc->platform_data->flags & AHC_DV_ACTIVE) == 0) {
2132#ifdef AHC_DEBUG
2133 if (ahc_debug & AHC_SHOW_DV)
2134 printf("%s: Waking DV thread\n", ahc_name(ahc));
2135#endif
2136
2137 ahc->platform_data->flags |= AHC_DV_ACTIVE;
2138 ahc_linux_freeze_simq(ahc);
2139
2140 /* Wake up the DV kthread */
2141 up(&ahc->platform_data->dv_sem);
2142 }
2143}
2144
2145static void
2146ahc_linux_kill_dv_thread(struct ahc_softc *ahc)
2147{
2148 u_long s;
2149
2150 ahc_lock(ahc, &s);
2151 if (ahc->platform_data->dv_pid != 0) {
2152 ahc->platform_data->flags |= AHC_DV_SHUTDOWN;
2153 ahc_unlock(ahc, &s);
2154 up(&ahc->platform_data->dv_sem);
2155
2156 /*
2157 * Use the eh_sem as an indicator that the
2158 * dv thread is exiting. Note that the dv
2159 * thread must still return after performing
2160 * the up on our semaphore before it has
2161 * completely exited this module. Unfortunately,
2162 * there seems to be no easy way to wait for the
2163 * exit of a thread for which you are not the
2164 * parent (dv threads are parented by init).
2165 * Cross your fingers...
2166 */
2167 down(&ahc->platform_data->eh_sem);
2168
2169 /*
2170 * Mark the dv thread as already dead. This
2171 * avoids attempting to kill it a second time.
2172 * This is necessary because we must kill the
2173 * DV thread before calling ahc_free() in the
2174 * module shutdown case to avoid bogus locking
2175 * in the SCSI mid-layer, but we ahc_free() is
2176 * called without killing the DV thread in the
2177 * instance detach case, so ahc_platform_free()
2178 * calls us again to verify that the DV thread
2179 * is dead.
2180 */
2181 ahc->platform_data->dv_pid = 0;
2182 } else {
2183 ahc_unlock(ahc, &s);
2184 }
2185}
2186
2187static int
2188ahc_linux_dv_thread(void *data)
2189{
2190 struct ahc_softc *ahc;
2191 int target;
2192 u_long s;
2193
2194 ahc = (struct ahc_softc *)data;
2195
2196#ifdef AHC_DEBUG
2197 if (ahc_debug & AHC_SHOW_DV)
2198 printf("Launching DV Thread\n");
2199#endif
2200
2201 /*
2202 * Complete thread creation.
2203 */
2204 lock_kernel();
2205#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
2206 /*
2207 * Don't care about any signals.
2208 */
2209 siginitsetinv(&current->blocked, 0);
2210
2211 daemonize();
2212 sprintf(current->comm, "ahc_dv_%d", ahc->unit);
2213#else
2214 daemonize("ahc_dv_%d", ahc->unit);
2215 current->flags |= PF_FREEZE;
2216#endif
2217 unlock_kernel();
2218
2219 while (1) {
2220 /*
2221 * Use down_interruptible() rather than down() to
2222 * avoid inclusion in the load average.
2223 */
2224 down_interruptible(&ahc->platform_data->dv_sem);
2225
2226 /* Check to see if we've been signaled to exit */
2227 ahc_lock(ahc, &s);
2228 if ((ahc->platform_data->flags & AHC_DV_SHUTDOWN) != 0) {
2229 ahc_unlock(ahc, &s);
2230 break;
2231 }
2232 ahc_unlock(ahc, &s);
2233
2234#ifdef AHC_DEBUG
2235 if (ahc_debug & AHC_SHOW_DV)
2236 printf("%s: Beginning Domain Validation\n",
2237 ahc_name(ahc));
2238#endif
2239
2240 /*
2241 * Wait for any pending commands to drain before proceeding.
2242 */
2243 ahc_lock(ahc, &s);
2244 while (LIST_FIRST(&ahc->pending_scbs) != NULL) {
2245 ahc->platform_data->flags |= AHC_DV_WAIT_SIMQ_EMPTY;
2246 ahc_unlock(ahc, &s);
2247 down_interruptible(&ahc->platform_data->dv_sem);
2248 ahc_lock(ahc, &s);
2249 }
2250
2251 /*
2252 * Wait for the SIMQ to be released so that DV is the
2253 * only reason the queue is frozen.
2254 */
2255 while (AHC_DV_SIMQ_FROZEN(ahc) == 0) {
2256 ahc->platform_data->flags |= AHC_DV_WAIT_SIMQ_RELEASE;
2257 ahc_unlock(ahc, &s);
2258 down_interruptible(&ahc->platform_data->dv_sem);
2259 ahc_lock(ahc, &s);
2260 }
2261 ahc_unlock(ahc, &s);
2262
2263 for (target = 0; target < AHC_NUM_TARGETS; target++)
2264 ahc_linux_dv_target(ahc, target);
2265
2266 ahc_lock(ahc, &s);
2267 ahc->platform_data->flags &= ~AHC_DV_ACTIVE;
2268 ahc_unlock(ahc, &s);
2269
2270 /*
2271 * Release the SIMQ so that normal commands are
2272 * allowed to continue on the bus.
2273 */
2274 ahc_linux_release_simq((u_long)ahc);
2275 }
2276 up(&ahc->platform_data->eh_sem);
2277 return (0);
2278}
2279
2280#define AHC_LINUX_DV_INQ_SHORT_LEN 36
2281#define AHC_LINUX_DV_INQ_LEN 256
2282#define AHC_LINUX_DV_TIMEOUT (HZ / 4)
2283
2284#define AHC_SET_DV_STATE(ahc, targ, newstate) \
2285 ahc_set_dv_state(ahc, targ, newstate, __LINE__)
2286
2287static __inline void
2288ahc_set_dv_state(struct ahc_softc *ahc, struct ahc_linux_target *targ,
2289 ahc_dv_state newstate, u_int line)
2290{
2291 ahc_dv_state oldstate;
2292
2293 oldstate = targ->dv_state;
2294#ifdef AHC_DEBUG
2295 if (ahc_debug & AHC_SHOW_DV)
2296 printf("%s:%d: Going from state %d to state %d\n",
2297 ahc_name(ahc), line, oldstate, newstate);
2298#endif
2299
2300 if (oldstate == newstate)
2301 targ->dv_state_retry++;
2302 else
2303 targ->dv_state_retry = 0;
2304 targ->dv_state = newstate;
2305}
2306
2307static void
2308ahc_linux_dv_target(struct ahc_softc *ahc, u_int target_offset)
2309{
2310 struct ahc_devinfo devinfo;
2311 struct ahc_linux_target *targ;
2312 struct scsi_cmnd *cmd;
2313 struct scsi_device *scsi_dev;
2314 struct scsi_sense_data *sense;
2315 uint8_t *buffer;
2316 u_long s;
2317 u_int timeout;
2318 int echo_size;
2319
2320 sense = NULL;
2321 buffer = NULL;
2322 echo_size = 0;
2323 ahc_lock(ahc, &s);
2324 targ = ahc->platform_data->targets[target_offset];
2325 if (targ == NULL || (targ->flags & AHC_DV_REQUIRED) == 0) {
2326 ahc_unlock(ahc, &s);
2327 return;
2328 }
2329 ahc_compile_devinfo(&devinfo,
2330 targ->channel == 0 ? ahc->our_id : ahc->our_id_b,
2331 targ->target, /*lun*/0, targ->channel + 'A',
2332 ROLE_INITIATOR);
2333#ifdef AHC_DEBUG
2334 if (ahc_debug & AHC_SHOW_DV) {
2335 ahc_print_devinfo(ahc, &devinfo);
2336 printf("Performing DV\n");
2337 }
2338#endif
2339
2340 ahc_unlock(ahc, &s);
2341
2342 cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
2343 scsi_dev = malloc(sizeof(struct scsi_device), M_DEVBUF, M_WAITOK);
2344 scsi_dev->host = ahc->platform_data->host;
2345 scsi_dev->id = devinfo.target;
2346 scsi_dev->lun = devinfo.lun;
2347 scsi_dev->channel = devinfo.channel - 'A';
2348 ahc->platform_data->dv_scsi_dev = scsi_dev;
2349
2350 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_INQ_SHORT_ASYNC);
2351
2352 while (targ->dv_state != AHC_DV_STATE_EXIT) {
2353 timeout = AHC_LINUX_DV_TIMEOUT;
2354 switch (targ->dv_state) {
2355 case AHC_DV_STATE_INQ_SHORT_ASYNC:
2356 case AHC_DV_STATE_INQ_ASYNC:
2357 case AHC_DV_STATE_INQ_ASYNC_VERIFY:
2358 /*
2359 * Set things to async narrow to reduce the
2360 * chance that the INQ will fail.
2361 */
2362 ahc_lock(ahc, &s);
2363 ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0,
2364 AHC_TRANS_GOAL, /*paused*/FALSE);
2365 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
2366 AHC_TRANS_GOAL, /*paused*/FALSE);
2367 ahc_unlock(ahc, &s);
2368 timeout = 10 * HZ;
2369 targ->flags &= ~AHC_INQ_VALID;
2370 /* FALLTHROUGH */
2371 case AHC_DV_STATE_INQ_VERIFY:
2372 {
2373 u_int inq_len;
2374
2375 if (targ->dv_state == AHC_DV_STATE_INQ_SHORT_ASYNC)
2376 inq_len = AHC_LINUX_DV_INQ_SHORT_LEN;
2377 else
2378 inq_len = targ->inq_data->additional_length + 5;
2379 ahc_linux_dv_inq(ahc, cmd, &devinfo, targ, inq_len);
2380 break;
2381 }
2382 case AHC_DV_STATE_TUR:
2383 case AHC_DV_STATE_BUSY:
2384 timeout = 5 * HZ;
2385 ahc_linux_dv_tur(ahc, cmd, &devinfo);
2386 break;
2387 case AHC_DV_STATE_REBD:
2388 ahc_linux_dv_rebd(ahc, cmd, &devinfo, targ);
2389 break;
2390 case AHC_DV_STATE_WEB:
2391 ahc_linux_dv_web(ahc, cmd, &devinfo, targ);
2392 break;
2393
2394 case AHC_DV_STATE_REB:
2395 ahc_linux_dv_reb(ahc, cmd, &devinfo, targ);
2396 break;
2397
2398 case AHC_DV_STATE_SU:
2399 ahc_linux_dv_su(ahc, cmd, &devinfo, targ);
2400 timeout = 50 * HZ;
2401 break;
2402
2403 default:
2404 ahc_print_devinfo(ahc, &devinfo);
2405 printf("Unknown DV state %d\n", targ->dv_state);
2406 goto out;
2407 }
2408
2409 /* Queue the command and wait for it to complete */
2410 /* Abuse eh_timeout in the scsi_cmnd struct for our purposes */
2411 init_timer(&cmd->eh_timeout);
2412#ifdef AHC_DEBUG
2413 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2414 /*
2415 * All of the printfs during negotiation
2416 * really slow down the negotiation.
2417 * Add a bit of time just to be safe.
2418 */
2419 timeout += HZ;
2420#endif
2421 scsi_add_timer(cmd, timeout, ahc_linux_dv_timeout);
2422 /*
2423 * In 2.5.X, it is assumed that all calls from the
2424 * "midlayer" (which we are emulating) will have the
2425 * ahc host lock held. For other kernels, the
2426 * io_request_lock must be held.
2427 */
2428#if AHC_SCSI_HAS_HOST_LOCK != 0
2429 ahc_lock(ahc, &s);
2430#else
2431 spin_lock_irqsave(&io_request_lock, s);
2432#endif
2433 ahc_linux_queue(cmd, ahc_linux_dv_complete);
2434#if AHC_SCSI_HAS_HOST_LOCK != 0
2435 ahc_unlock(ahc, &s);
2436#else
2437 spin_unlock_irqrestore(&io_request_lock, s);
2438#endif
2439 down_interruptible(&ahc->platform_data->dv_cmd_sem);
2440 /*
2441 * Wait for the SIMQ to be released so that DV is the
2442 * only reason the queue is frozen.
2443 */
2444 ahc_lock(ahc, &s);
2445 while (AHC_DV_SIMQ_FROZEN(ahc) == 0) {
2446 ahc->platform_data->flags |= AHC_DV_WAIT_SIMQ_RELEASE;
2447 ahc_unlock(ahc, &s);
2448 down_interruptible(&ahc->platform_data->dv_sem);
2449 ahc_lock(ahc, &s);
2450 }
2451 ahc_unlock(ahc, &s);
2452
2453 ahc_linux_dv_transition(ahc, cmd, &devinfo, targ);
2454 }
2455
2456out:
2457 if ((targ->flags & AHC_INQ_VALID) != 0
2458 && ahc_linux_get_device(ahc, devinfo.channel - 'A',
2459 devinfo.target, devinfo.lun,
2460 /*alloc*/FALSE) == NULL) {
2461 /*
2462 * The DV state machine failed to configure this device.
2463 * This is normal if DV is disabled. Since we have inquiry
2464 * data, filter it and use the "optimistic" negotiation
2465 * parameters found in the inquiry string.
2466 */
2467 ahc_linux_filter_inquiry(ahc, &devinfo);
2468 if ((targ->flags & (AHC_BASIC_DV|AHC_ENHANCED_DV)) != 0) {
2469 ahc_print_devinfo(ahc, &devinfo);
2470 printf("DV failed to configure device. "
2471 "Please file a bug report against "
2472 "this driver.\n");
2473 }
2474 }
2475
2476 if (cmd != NULL)
2477 free(cmd, M_DEVBUF);
2478
2479 if (ahc->platform_data->dv_scsi_dev != NULL) {
2480 free(ahc->platform_data->dv_scsi_dev, M_DEVBUF);
2481 ahc->platform_data->dv_scsi_dev = NULL;
2482 }
2483
2484 ahc_lock(ahc, &s);
2485 if (targ->dv_buffer != NULL) {
2486 free(targ->dv_buffer, M_DEVBUF);
2487 targ->dv_buffer = NULL;
2488 }
2489 if (targ->dv_buffer1 != NULL) {
2490 free(targ->dv_buffer1, M_DEVBUF);
2491 targ->dv_buffer1 = NULL;
2492 }
2493 targ->flags &= ~AHC_DV_REQUIRED;
2494 if (targ->refcount == 0)
2495 ahc_linux_free_target(ahc, targ);
2496 ahc_unlock(ahc, &s);
2497}
2498
2499static void
2500ahc_linux_dv_transition(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
2501 struct ahc_devinfo *devinfo,
2502 struct ahc_linux_target *targ)
2503{
2504 u_int32_t status;
2505
2506 status = aic_error_action(cmd, targ->inq_data,
2507 ahc_cmd_get_transaction_status(cmd),
2508 ahc_cmd_get_scsi_status(cmd));
2509
2510#ifdef AHC_DEBUG
2511 if (ahc_debug & AHC_SHOW_DV) {
2512 ahc_print_devinfo(ahc, devinfo);
2513 printf("Entering ahc_linux_dv_transition, state= %d, "
2514 "status= 0x%x, cmd->result= 0x%x\n", targ->dv_state,
2515 status, cmd->result);
2516 }
2517#endif
2518
2519 switch (targ->dv_state) {
2520 case AHC_DV_STATE_INQ_SHORT_ASYNC:
2521 case AHC_DV_STATE_INQ_ASYNC:
2522 switch (status & SS_MASK) {
2523 case SS_NOP:
2524 {
2525 AHC_SET_DV_STATE(ahc, targ, targ->dv_state+1);
2526 break;
2527 }
2528 case SS_INQ_REFRESH:
2529 AHC_SET_DV_STATE(ahc, targ,
2530 AHC_DV_STATE_INQ_SHORT_ASYNC);
2531 break;
2532 case SS_TUR:
2533 case SS_RETRY:
2534 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2535 if (ahc_cmd_get_transaction_status(cmd)
2536 == CAM_REQUEUE_REQ)
2537 targ->dv_state_retry--;
2538 if ((status & SS_ERRMASK) == EBUSY)
2539 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
2540 if (targ->dv_state_retry < 10)
2541 break;
2542 /* FALLTHROUGH */
2543 default:
2544 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2545#ifdef AHC_DEBUG
2546 if (ahc_debug & AHC_SHOW_DV) {
2547 ahc_print_devinfo(ahc, devinfo);
2548 printf("Failed DV inquiry, skipping\n");
2549 }
2550#endif
2551 break;
2552 }
2553 break;
2554 case AHC_DV_STATE_INQ_ASYNC_VERIFY:
2555 switch (status & SS_MASK) {
2556 case SS_NOP:
2557 {
2558 u_int xportflags;
2559 u_int spi3data;
2560
2561 if (memcmp(targ->inq_data, targ->dv_buffer,
2562 AHC_LINUX_DV_INQ_LEN) != 0) {
2563 /*
2564 * Inquiry data must have changed.
2565 * Try from the top again.
2566 */
2567 AHC_SET_DV_STATE(ahc, targ,
2568 AHC_DV_STATE_INQ_SHORT_ASYNC);
2569 break;
2570 }
2571
2572 AHC_SET_DV_STATE(ahc, targ, targ->dv_state+1);
2573 targ->flags |= AHC_INQ_VALID;
2574 if (ahc_linux_user_dv_setting(ahc) == 0)
2575 break;
2576
2577 xportflags = targ->inq_data->flags;
2578 if ((xportflags & (SID_Sync|SID_WBus16)) == 0)
2579 break;
2580
2581 spi3data = targ->inq_data->spi3data;
2582 switch (spi3data & SID_SPI_CLOCK_DT_ST) {
2583 default:
2584 case SID_SPI_CLOCK_ST:
2585 /* Assume only basic DV is supported. */
2586 targ->flags |= AHC_BASIC_DV;
2587 break;
2588 case SID_SPI_CLOCK_DT:
2589 case SID_SPI_CLOCK_DT_ST:
2590 targ->flags |= AHC_ENHANCED_DV;
2591 break;
2592 }
2593 break;
2594 }
2595 case SS_INQ_REFRESH:
2596 AHC_SET_DV_STATE(ahc, targ,
2597 AHC_DV_STATE_INQ_SHORT_ASYNC);
2598 break;
2599 case SS_TUR:
2600 case SS_RETRY:
2601 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2602 if (ahc_cmd_get_transaction_status(cmd)
2603 == CAM_REQUEUE_REQ)
2604 targ->dv_state_retry--;
2605
2606 if ((status & SS_ERRMASK) == EBUSY)
2607 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
2608 if (targ->dv_state_retry < 10)
2609 break;
2610 /* FALLTHROUGH */
2611 default:
2612 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2613#ifdef AHC_DEBUG
2614 if (ahc_debug & AHC_SHOW_DV) {
2615 ahc_print_devinfo(ahc, devinfo);
2616 printf("Failed DV inquiry, skipping\n");
2617 }
2618#endif
2619 break;
2620 }
2621 break;
2622 case AHC_DV_STATE_INQ_VERIFY:
2623 switch (status & SS_MASK) {
2624 case SS_NOP:
2625 {
2626
2627 if (memcmp(targ->inq_data, targ->dv_buffer,
2628 AHC_LINUX_DV_INQ_LEN) == 0) {
2629 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2630 break;
2631 }
2632#ifdef AHC_DEBUG
2633 if (ahc_debug & AHC_SHOW_DV) {
2634 int i;
2635
2636 ahc_print_devinfo(ahc, devinfo);
2637 printf("Inquiry buffer mismatch:");
2638 for (i = 0; i < AHC_LINUX_DV_INQ_LEN; i++) {
2639 if ((i & 0xF) == 0)
2640 printf("\n ");
2641 				printf("0x%x:0x%x ",
2642 ((uint8_t *)targ->inq_data)[i],
2643 targ->dv_buffer[i]);
2644 }
2645 printf("\n");
2646 }
2647#endif
2648
2649 if (ahc_linux_fallback(ahc, devinfo) != 0) {
2650 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2651 break;
2652 }
2653 /*
2654 * Do not count "falling back"
2655 * against our retries.
2656 */
2657 targ->dv_state_retry = 0;
2658 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2659 break;
2660 }
2661 case SS_INQ_REFRESH:
2662 AHC_SET_DV_STATE(ahc, targ,
2663 AHC_DV_STATE_INQ_SHORT_ASYNC);
2664 break;
2665 case SS_TUR:
2666 case SS_RETRY:
2667 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2668 if (ahc_cmd_get_transaction_status(cmd)
2669 == CAM_REQUEUE_REQ) {
2670 targ->dv_state_retry--;
2671 } else if ((status & SSQ_FALLBACK) != 0) {
2672 if (ahc_linux_fallback(ahc, devinfo) != 0) {
2673 AHC_SET_DV_STATE(ahc, targ,
2674 AHC_DV_STATE_EXIT);
2675 break;
2676 }
2677 /*
2678 * Do not count "falling back"
2679 * against our retries.
2680 */
2681 targ->dv_state_retry = 0;
2682 } else if ((status & SS_ERRMASK) == EBUSY)
2683 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
2684 if (targ->dv_state_retry < 10)
2685 break;
2686 /* FALLTHROUGH */
2687 default:
2688 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2689#ifdef AHC_DEBUG
2690 if (ahc_debug & AHC_SHOW_DV) {
2691 ahc_print_devinfo(ahc, devinfo);
2692 printf("Failed DV inquiry, skipping\n");
2693 }
2694#endif
2695 break;
2696 }
2697 break;
2698
2699 case AHC_DV_STATE_TUR:
2700 switch (status & SS_MASK) {
2701 case SS_NOP:
2702 if ((targ->flags & AHC_BASIC_DV) != 0) {
2703 ahc_linux_filter_inquiry(ahc, devinfo);
2704 AHC_SET_DV_STATE(ahc, targ,
2705 AHC_DV_STATE_INQ_VERIFY);
2706 } else if ((targ->flags & AHC_ENHANCED_DV) != 0) {
2707 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_REBD);
2708 } else {
2709 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2710 }
2711 break;
2712 case SS_RETRY:
2713 case SS_TUR:
2714 if ((status & SS_ERRMASK) == EBUSY) {
2715 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
2716 break;
2717 }
2718 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2719 if (ahc_cmd_get_transaction_status(cmd)
2720 == CAM_REQUEUE_REQ) {
2721 targ->dv_state_retry--;
2722 } else if ((status & SSQ_FALLBACK) != 0) {
2723 if (ahc_linux_fallback(ahc, devinfo) != 0) {
2724 AHC_SET_DV_STATE(ahc, targ,
2725 AHC_DV_STATE_EXIT);
2726 break;
2727 }
2728 /*
2729 * Do not count "falling back"
2730 * against our retries.
2731 */
2732 targ->dv_state_retry = 0;
2733 }
2734 if (targ->dv_state_retry >= 10) {
2735#ifdef AHC_DEBUG
2736 if (ahc_debug & AHC_SHOW_DV) {
2737 ahc_print_devinfo(ahc, devinfo);
2738 				printf("DV TUR retries exhausted\n");
2739 }
2740#endif
2741 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2742 break;
2743 }
2744 if (status & SSQ_DELAY)
2745 ssleep(1);
2746
2747 break;
2748 case SS_START:
2749 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_SU);
2750 break;
2751 case SS_INQ_REFRESH:
2752 AHC_SET_DV_STATE(ahc, targ,
2753 AHC_DV_STATE_INQ_SHORT_ASYNC);
2754 break;
2755 default:
2756 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2757 break;
2758 }
2759 break;
2760
2761 case AHC_DV_STATE_REBD:
2762 switch (status & SS_MASK) {
2763 case SS_NOP:
2764 {
2765 uint32_t echo_size;
2766
2767 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_WEB);
2768 echo_size = scsi_3btoul(&targ->dv_buffer[1]);
2769 echo_size &= 0x1FFF;
2770#ifdef AHC_DEBUG
2771 if (ahc_debug & AHC_SHOW_DV) {
2772 ahc_print_devinfo(ahc, devinfo);
2773 printf("Echo buffer size= %d\n", echo_size);
2774 }
2775#endif
2776 if (echo_size == 0) {
2777 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2778 break;
2779 }
2780
2781 /* Generate the buffer pattern */
2782 targ->dv_echo_size = echo_size;
2783 ahc_linux_generate_dv_pattern(targ);
2784 /*
2785 * Setup initial negotiation values.
2786 */
2787 ahc_linux_filter_inquiry(ahc, devinfo);
2788 break;
2789 }
2790 case SS_INQ_REFRESH:
2791 AHC_SET_DV_STATE(ahc, targ,
2792 AHC_DV_STATE_INQ_SHORT_ASYNC);
2793 break;
2794 case SS_RETRY:
2795 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2796 if (ahc_cmd_get_transaction_status(cmd)
2797 == CAM_REQUEUE_REQ)
2798 targ->dv_state_retry--;
2799 if (targ->dv_state_retry <= 10)
2800 break;
2801#ifdef AHC_DEBUG
2802 if (ahc_debug & AHC_SHOW_DV) {
2803 ahc_print_devinfo(ahc, devinfo);
2804 				printf("DV REBD retries exhausted\n");
2805 }
2806#endif
2807 /* FALLTHROUGH */
2808 case SS_FATAL:
2809 default:
2810 /*
2811 * Setup initial negotiation values
2812 * and try level 1 DV.
2813 */
2814 ahc_linux_filter_inquiry(ahc, devinfo);
2815 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_INQ_VERIFY);
2816 targ->dv_echo_size = 0;
2817 break;
2818 }
2819 break;
2820
2821 case AHC_DV_STATE_WEB:
2822 switch (status & SS_MASK) {
2823 case SS_NOP:
2824 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_REB);
2825 break;
2826 case SS_INQ_REFRESH:
2827 AHC_SET_DV_STATE(ahc, targ,
2828 AHC_DV_STATE_INQ_SHORT_ASYNC);
2829 break;
2830 case SS_RETRY:
2831 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2832 if (ahc_cmd_get_transaction_status(cmd)
2833 == CAM_REQUEUE_REQ) {
2834 targ->dv_state_retry--;
2835 } else if ((status & SSQ_FALLBACK) != 0) {
2836 if (ahc_linux_fallback(ahc, devinfo) != 0) {
2837 AHC_SET_DV_STATE(ahc, targ,
2838 AHC_DV_STATE_EXIT);
2839 break;
2840 }
2841 /*
2842 * Do not count "falling back"
2843 * against our retries.
2844 */
2845 targ->dv_state_retry = 0;
2846 }
2847 			if (targ->dv_state_retry <= 10)
2848 				break;
2849#ifdef AHC_DEBUG
2850 			if (ahc_debug & AHC_SHOW_DV) {
2851 				ahc_print_devinfo(ahc, devinfo);
2852 				printf("DV WEB retries exhausted\n");
2853 			}
2854#endif
2855 			/* FALLTHROUGH */
2856 		default:
2857 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2858 break;
2859 }
2860 break;
2861
2862 case AHC_DV_STATE_REB:
2863 switch (status & SS_MASK) {
2864 case SS_NOP:
2865 if (memcmp(targ->dv_buffer, targ->dv_buffer1,
2866 targ->dv_echo_size) != 0) {
2867 if (ahc_linux_fallback(ahc, devinfo) != 0)
2868 AHC_SET_DV_STATE(ahc, targ,
2869 AHC_DV_STATE_EXIT);
2870 else
2871 AHC_SET_DV_STATE(ahc, targ,
2872 AHC_DV_STATE_WEB);
2873 break;
2874 }
2875
2876 if (targ->dv_buffer != NULL) {
2877 free(targ->dv_buffer, M_DEVBUF);
2878 targ->dv_buffer = NULL;
2879 }
2880 if (targ->dv_buffer1 != NULL) {
2881 free(targ->dv_buffer1, M_DEVBUF);
2882 targ->dv_buffer1 = NULL;
2883 }
2884 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2885 break;
2886 case SS_INQ_REFRESH:
2887 AHC_SET_DV_STATE(ahc, targ,
2888 AHC_DV_STATE_INQ_SHORT_ASYNC);
2889 break;
2890 case SS_RETRY:
2891 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2892 if (ahc_cmd_get_transaction_status(cmd)
2893 == CAM_REQUEUE_REQ) {
2894 targ->dv_state_retry--;
2895 } else if ((status & SSQ_FALLBACK) != 0) {
2896 if (ahc_linux_fallback(ahc, devinfo) != 0) {
2897 AHC_SET_DV_STATE(ahc, targ,
2898 AHC_DV_STATE_EXIT);
2899 break;
2900 }
2901 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_WEB);
2902 }
2903 if (targ->dv_state_retry <= 10) {
2904 if ((status & (SSQ_DELAY_RANDOM|SSQ_DELAY))!= 0)
2905 msleep(ahc->our_id*1000/10);
2906 break;
2907 }
2908#ifdef AHC_DEBUG
2909 if (ahc_debug & AHC_SHOW_DV) {
2910 ahc_print_devinfo(ahc, devinfo);
2911 				printf("DV REB retries exhausted\n");
2912 }
2913#endif
2914 /* FALLTHROUGH */
2915 default:
2916 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2917 break;
2918 }
2919 break;
2920
2921 case AHC_DV_STATE_SU:
2922 switch (status & SS_MASK) {
2923 case SS_NOP:
2924 case SS_INQ_REFRESH:
2925 AHC_SET_DV_STATE(ahc, targ,
2926 AHC_DV_STATE_INQ_SHORT_ASYNC);
2927 break;
2928 default:
2929 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2930 break;
2931 }
2932 break;
2933
2934 case AHC_DV_STATE_BUSY:
2935 switch (status & SS_MASK) {
2936 case SS_NOP:
2937 case SS_INQ_REFRESH:
2938 AHC_SET_DV_STATE(ahc, targ,
2939 AHC_DV_STATE_INQ_SHORT_ASYNC);
2940 break;
2941 case SS_TUR:
2942 case SS_RETRY:
2943 AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
2944 if (ahc_cmd_get_transaction_status(cmd)
2945 == CAM_REQUEUE_REQ) {
2946 targ->dv_state_retry--;
2947 } else if (targ->dv_state_retry < 60) {
2948 if ((status & SSQ_DELAY) != 0)
2949 ssleep(1);
2950 } else {
2951#ifdef AHC_DEBUG
2952 if (ahc_debug & AHC_SHOW_DV) {
2953 ahc_print_devinfo(ahc, devinfo);
2954 					printf("DV BUSY retries exhausted\n");
2955 }
2956#endif
2957 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2958 }
2959 break;
2960 default:
2961 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2962 break;
2963 }
2964 break;
2965
2966 default:
2967 printf("%s: Invalid DV completion state %d\n", ahc_name(ahc),
2968 targ->dv_state);
2969 AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
2970 break;
2971 }
2972}
2973
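For orientation, the SS_NOP ("success") arcs in the state machine above advance a target through a fixed sequence. A minimal sketch of the enhanced-DV happy path, using the ahc_dv_state enum removed from aic7xxx_osm.h later in this diff (basic DV replaces the echo-buffer states with AHC_DV_STATE_INQ_VERIFY):

static const ahc_dv_state ahc_dv_enhanced_path[] = {
	AHC_DV_STATE_INQ_SHORT_ASYNC,	/* short async INQUIRY */
	AHC_DV_STATE_INQ_ASYNC,		/* full-length async INQUIRY */
	AHC_DV_STATE_INQ_ASYNC_VERIFY,	/* re-read and memcmp() the data */
	AHC_DV_STATE_TUR,		/* TEST UNIT READY to settle the unit */
	AHC_DV_STATE_REBD,		/* READ BUFFER descriptor: echo size */
	AHC_DV_STATE_WEB,		/* WRITE BUFFER: send test pattern */
	AHC_DV_STATE_REB,		/* READ BUFFER: verify the pattern */
	AHC_DV_STATE_EXIT,
};

Any SS_INQ_REFRESH status rewinds the machine to the first entry, and EBUSY parks it in AHC_DV_STATE_BUSY until the device settles.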
2974static void
2975ahc_linux_dv_fill_cmd(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
2976 struct ahc_devinfo *devinfo)
2977{
2978 memset(cmd, 0, sizeof(struct scsi_cmnd));
2979 cmd->device = ahc->platform_data->dv_scsi_dev;
2980 cmd->scsi_done = ahc_linux_dv_complete;
2981}
2982
2983/*
2984 * Synthesize an inquiry command. On the return trip, it'll be
2985 * sniffed and the device transfer settings set for us.
2986 */
2987static void
2988ahc_linux_dv_inq(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
2989 struct ahc_devinfo *devinfo, struct ahc_linux_target *targ,
2990 u_int request_length)
2991{
2992
2993#ifdef AHC_DEBUG
2994 if (ahc_debug & AHC_SHOW_DV) {
2995 ahc_print_devinfo(ahc, devinfo);
2996 printf("Sending INQ\n");
2997 }
2998#endif
2999 if (targ->inq_data == NULL)
3000 targ->inq_data = malloc(AHC_LINUX_DV_INQ_LEN,
3001 M_DEVBUF, M_WAITOK);
3002 if (targ->dv_state > AHC_DV_STATE_INQ_ASYNC) {
3003 if (targ->dv_buffer != NULL)
3004 free(targ->dv_buffer, M_DEVBUF);
3005 targ->dv_buffer = malloc(AHC_LINUX_DV_INQ_LEN,
3006 M_DEVBUF, M_WAITOK);
3007 }
3008
3009 ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
3010 cmd->sc_data_direction = SCSI_DATA_READ;
3011 cmd->cmd_len = 6;
3012 cmd->cmnd[0] = INQUIRY;
3013 cmd->cmnd[4] = request_length;
3014 cmd->request_bufflen = request_length;
3015 if (targ->dv_state > AHC_DV_STATE_INQ_ASYNC)
3016 cmd->request_buffer = targ->dv_buffer;
3017 else
3018 cmd->request_buffer = targ->inq_data;
3019 memset(cmd->request_buffer, 0, AHC_LINUX_DV_INQ_LEN);
3020}
3021
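The CDB assembled above is a standard 6-byte INQUIRY: only the opcode and the allocation length in byte 4 are non-zero. A minimal sketch of the same layout; the helper name is the editor's, not the driver's:

#include <stdint.h>
#include <string.h>

static void build_inq_cdb(uint8_t cdb[6], uint8_t alloc_len)
{
	memset(cdb, 0, 6);
	cdb[0] = 0x12;		/* INQUIRY opcode */
	cdb[4] = alloc_len;	/* allocation length, as cmnd[4] above */
}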
3022static void
3023ahc_linux_dv_tur(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
3024 struct ahc_devinfo *devinfo)
3025{
3026
3027#ifdef AHC_DEBUG
3028 if (ahc_debug & AHC_SHOW_DV) {
3029 ahc_print_devinfo(ahc, devinfo);
3030 printf("Sending TUR\n");
3031 }
3032#endif
3033 /* Do a TUR to clear out any non-fatal transitional state */
3034 ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
3035 cmd->sc_data_direction = SCSI_DATA_NONE;
3036 cmd->cmd_len = 6;
3037 cmd->cmnd[0] = TEST_UNIT_READY;
3038}
3039
3040#define AHC_REBD_LEN 4
3041
3042static void
3043ahc_linux_dv_rebd(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
3044 struct ahc_devinfo *devinfo, struct ahc_linux_target *targ)
3045{
3046
3047#ifdef AHC_DEBUG
3048 if (ahc_debug & AHC_SHOW_DV) {
3049 ahc_print_devinfo(ahc, devinfo);
3050 printf("Sending REBD\n");
3051 }
3052#endif
3053 if (targ->dv_buffer != NULL)
3054 free(targ->dv_buffer, M_DEVBUF);
3055 targ->dv_buffer = malloc(AHC_REBD_LEN, M_DEVBUF, M_WAITOK);
3056 ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
3057 cmd->sc_data_direction = SCSI_DATA_READ;
3058 cmd->cmd_len = 10;
3059 cmd->cmnd[0] = READ_BUFFER;
3060 cmd->cmnd[1] = 0x0b;
3061 scsi_ulto3b(AHC_REBD_LEN, &cmd->cmnd[6]);
3062 cmd->request_bufflen = AHC_REBD_LEN;
3063 cmd->underflow = cmd->request_bufflen;
3064 cmd->request_buffer = targ->dv_buffer;
3065}
3066
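The completion arc for this command (AHC_DV_STATE_REBD above) extracts the echo buffer size with scsi_3btoul(&targ->dv_buffer[1]) & 0x1FFF. A standalone sketch of that decode, assuming only what the driver itself assumes about the 4 returned bytes (big-endian capacity in bytes 1-3, 13 valid bits):

#include <stdint.h>

#define AHC_REBD_LEN 4

/* Decode a READ BUFFER (mode 0x0b) echo buffer descriptor. */
static uint32_t ahc_decode_rebd(const uint8_t buf[AHC_REBD_LEN])
{
	uint32_t capacity;

	/* scsi_3btoul() equivalent: bytes 1-3, big-endian. */
	capacity = ((uint32_t)buf[1] << 16) | ((uint32_t)buf[2] << 8) | buf[3];
	/* Only the low 13 bits are a valid echo buffer size. */
	return (capacity & 0x1FFF);
}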
3067static void
3068ahc_linux_dv_web(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
3069 struct ahc_devinfo *devinfo, struct ahc_linux_target *targ)
3070{
3071
3072#ifdef AHC_DEBUG
3073 if (ahc_debug & AHC_SHOW_DV) {
3074 ahc_print_devinfo(ahc, devinfo);
3075 printf("Sending WEB\n");
3076 }
3077#endif
3078 ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
3079 cmd->sc_data_direction = SCSI_DATA_WRITE;
3080 cmd->cmd_len = 10;
3081 cmd->cmnd[0] = WRITE_BUFFER;
3082 cmd->cmnd[1] = 0x0a;
3083 scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
3084 cmd->request_bufflen = targ->dv_echo_size;
3085 cmd->underflow = cmd->request_bufflen;
3086 cmd->request_buffer = targ->dv_buffer;
3087}
3088
3089static void
3090ahc_linux_dv_reb(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
3091 struct ahc_devinfo *devinfo, struct ahc_linux_target *targ)
3092{
3093
3094#ifdef AHC_DEBUG
3095 if (ahc_debug & AHC_SHOW_DV) {
3096 ahc_print_devinfo(ahc, devinfo);
3097 printf("Sending REB\n");
3098 }
3099#endif
3100 ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
3101 cmd->sc_data_direction = SCSI_DATA_READ;
3102 cmd->cmd_len = 10;
3103 cmd->cmnd[0] = READ_BUFFER;
3104 cmd->cmnd[1] = 0x0a;
3105 scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
3106 cmd->request_bufflen = targ->dv_echo_size;
3107 cmd->underflow = cmd->request_bufflen;
3108 cmd->request_buffer = targ->dv_buffer1;
3109}
3110
3111static void
3112ahc_linux_dv_su(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
3113 struct ahc_devinfo *devinfo,
3114 struct ahc_linux_target *targ)
3115{
3116 u_int le;
3117
3118 le = SID_IS_REMOVABLE(targ->inq_data) ? SSS_LOEJ : 0;
3119
3120#ifdef AHC_DEBUG
3121 if (ahc_debug & AHC_SHOW_DV) {
3122 ahc_print_devinfo(ahc, devinfo);
3123 printf("Sending SU\n");
3124 }
3125#endif
3126 ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
3127 cmd->sc_data_direction = SCSI_DATA_NONE;
3128 cmd->cmd_len = 6;
3129 cmd->cmnd[0] = START_STOP_UNIT;
3130 cmd->cmnd[4] = le | SSS_START;
3131}
3132
3133static int
3134ahc_linux_fallback(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3135{
3136 struct ahc_linux_target *targ;
3137 struct ahc_initiator_tinfo *tinfo;
3138 struct ahc_transinfo *goal;
3139 struct ahc_tmode_tstate *tstate;
3140 struct ahc_syncrate *syncrate;
3141 u_long s;
3142 u_int width;
3143 u_int period;
3144 u_int offset;
3145 u_int ppr_options;
3146 u_int cur_speed;
3147 u_int wide_speed;
3148 u_int narrow_speed;
3149 u_int fallback_speed;
3150
3151#ifdef AHC_DEBUG
3152 if (ahc_debug & AHC_SHOW_DV) {
3153 ahc_print_devinfo(ahc, devinfo);
3154 printf("Trying to fallback\n");
3155 }
3156#endif
3157 ahc_lock(ahc, &s);
3158 targ = ahc->platform_data->targets[devinfo->target_offset];
3159 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
3160 devinfo->our_scsiid,
3161 devinfo->target, &tstate);
3162 goal = &tinfo->goal;
3163 width = goal->width;
3164 period = goal->period;
3165 offset = goal->offset;
3166 ppr_options = goal->ppr_options;
3167 if (offset == 0)
3168 period = AHC_ASYNC_XFER_PERIOD;
3169 if (targ->dv_next_narrow_period == 0)
3170 targ->dv_next_narrow_period = MAX(period, AHC_SYNCRATE_ULTRA2);
3171 if (targ->dv_next_wide_period == 0)
3172 targ->dv_next_wide_period = period;
3173 if (targ->dv_max_width == 0)
3174 targ->dv_max_width = width;
3175 if (targ->dv_max_ppr_options == 0)
3176 targ->dv_max_ppr_options = ppr_options;
3177 if (targ->dv_last_ppr_options == 0)
3178 targ->dv_last_ppr_options = ppr_options;
3179
3180 cur_speed = aic_calc_speed(width, period, offset, AHC_SYNCRATE_MIN);
3181 wide_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_16_BIT,
3182 targ->dv_next_wide_period,
3183 MAX_OFFSET,
3184 AHC_SYNCRATE_MIN);
3185 narrow_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_8_BIT,
3186 targ->dv_next_narrow_period,
3187 MAX_OFFSET,
3188 AHC_SYNCRATE_MIN);
3189 fallback_speed = aic_calc_speed(width, period+1, offset,
3190 AHC_SYNCRATE_MIN);
3191#ifdef AHC_DEBUG
3192 if (ahc_debug & AHC_SHOW_DV) {
3193 printf("cur_speed= %d, wide_speed= %d, narrow_speed= %d, "
3194 "fallback_speed= %d\n", cur_speed, wide_speed,
3195 narrow_speed, fallback_speed);
3196 }
3197#endif
3198
3199 if (cur_speed > 160000) {
3200 /*
3201 * Paced/DT/IU_REQ only transfer speeds. All we
3202 * can do is fallback in terms of syncrate.
3203 */
3204 period++;
3205 } else if (cur_speed > 80000) {
3206 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3207 /*
3208 * Try without IU_REQ as it may be confusing
3209 * an expander.
3210 */
3211 ppr_options &= ~MSG_EXT_PPR_IU_REQ;
3212 } else {
3213 /*
3214 * Paced/DT only transfer speeds. All we
3215 * can do is fallback in terms of syncrate.
3216 */
3217 period++;
3218 ppr_options = targ->dv_max_ppr_options;
3219 }
3220 } else if (cur_speed > 3300) {
3221
3222 /*
3223 		 * In this range we have the following
3224 		 * options, ordered from highest to
3225 		 * lowest desirability:
3226 *
3227 * o Wide/DT
3228 * o Wide/non-DT
3229 		 * o Narrow at a potentially higher sync rate.
3230 *
3231 * All modes are tested with and without IU_REQ
3232 * set since using IUs may confuse an expander.
3233 */
3234 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3235
3236 ppr_options &= ~MSG_EXT_PPR_IU_REQ;
3237 } else if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
3238 /*
3239 * Try going non-DT.
3240 */
3241 ppr_options = targ->dv_max_ppr_options;
3242 ppr_options &= ~MSG_EXT_PPR_DT_REQ;
3243 } else if (targ->dv_last_ppr_options != 0) {
3244 /*
3245 * Try without QAS or any other PPR options.
3246 * We may need a non-PPR message to work with
3247 * an expander. We look at the "last PPR options"
3248 * so we will perform this fallback even if the
3249 * target responded to our PPR negotiation with
3250 * no option bits set.
3251 */
3252 ppr_options = 0;
3253 } else if (width == MSG_EXT_WDTR_BUS_16_BIT) {
3254 /*
3255 * If the next narrow speed is greater than
3256 * the next wide speed, fallback to narrow.
3257 * Otherwise fallback to the next DT/Wide setting.
3258 * The narrow async speed will always be smaller
3259 * than the wide async speed, so handle this case
3260 * specifically.
3261 */
3262 ppr_options = targ->dv_max_ppr_options;
3263 if (narrow_speed > fallback_speed
3264 || period >= AHC_ASYNC_XFER_PERIOD) {
3265 targ->dv_next_wide_period = period+1;
3266 width = MSG_EXT_WDTR_BUS_8_BIT;
3267 period = targ->dv_next_narrow_period;
3268 } else {
3269 period++;
3270 }
3271 } else if ((ahc->features & AHC_WIDE) != 0
3272 && targ->dv_max_width != 0
3273 && wide_speed >= fallback_speed
3274 && (targ->dv_next_wide_period <= AHC_ASYNC_XFER_PERIOD
3275 || period >= AHC_ASYNC_XFER_PERIOD)) {
3276
3277 /*
3278 * We are narrow. Try falling back
3279 * to the next wide speed with
3280 * all supported ppr options set.
3281 */
3282 targ->dv_next_narrow_period = period+1;
3283 width = MSG_EXT_WDTR_BUS_16_BIT;
3284 period = targ->dv_next_wide_period;
3285 ppr_options = targ->dv_max_ppr_options;
3286 } else {
3287 /* Only narrow fallback is allowed. */
3288 period++;
3289 ppr_options = targ->dv_max_ppr_options;
3290 }
3291 } else {
3292 ahc_unlock(ahc, &s);
3293 return (-1);
3294 }
3295 offset = MAX_OFFSET;
3296 syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
3297 AHC_SYNCRATE_DT);
3298 ahc_set_width(ahc, devinfo, width, AHC_TRANS_GOAL, FALSE);
3299 if (period == 0) {
3300 period = 0;
3301 offset = 0;
3302 ppr_options = 0;
3303 if (width == MSG_EXT_WDTR_BUS_8_BIT)
3304 targ->dv_next_narrow_period = AHC_ASYNC_XFER_PERIOD;
3305 else
3306 targ->dv_next_wide_period = AHC_ASYNC_XFER_PERIOD;
3307 }
3308 ahc_set_syncrate(ahc, devinfo, syncrate, period, offset,
3309 ppr_options, AHC_TRANS_GOAL, FALSE);
3310 targ->dv_last_ppr_options = ppr_options;
3311 ahc_unlock(ahc, &s);
3312 return (0);
3313}
3314
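The >3300KB/s branch above relaxes exactly one negotiation knob per failed DV pass. A sketch of that ladder, paraphrased by the editor from the code rather than taken from any driver table:

static const char *ahc_dv_fallback_ladder[] = {
	"drop MSG_EXT_PPR_IU_REQ (IUs may confuse an expander)",
	"drop MSG_EXT_PPR_DT_REQ (retry non-DT, other PPR bits restored)",
	"drop all PPR options (fall back to plain WDTR/SDTR negotiation)",
	"wide target: go narrow if the narrow rate is faster, else step period",
	"narrow target: retry the next wide period, else step period",
};

Above 80MB/s only the first rung and period stepping apply; above 160MB/s the period is the only knob left.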
3315static void
3316ahc_linux_dv_timeout(struct scsi_cmnd *cmd)
3317{
3318 struct ahc_softc *ahc;
3319 struct scb *scb;
3320 u_long flags;
3321
3322 ahc = *((struct ahc_softc **)cmd->device->host->hostdata);
3323 ahc_lock(ahc, &flags);
3324
3325#ifdef AHC_DEBUG
3326 if (ahc_debug & AHC_SHOW_DV) {
3327 printf("%s: Timeout while doing DV command %x.\n",
3328 ahc_name(ahc), cmd->cmnd[0]);
3329 ahc_dump_card_state(ahc);
3330 }
3331#endif
3332
3333 /*
3334 * Guard against "done race". No action is
3335 * required if we just completed.
3336 */
3337 if ((scb = (struct scb *)cmd->host_scribble) == NULL) {
3338 ahc_unlock(ahc, &flags);
3339 return;
3340 }
3341
3342 /*
3343 * Command has not completed. Mark this
3344 * SCB as having failing status prior to
3345 * resetting the bus, so we get the correct
3346 * error code.
3347 */
3348 if ((scb->flags & SCB_SENSE) != 0)
3349 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
3350 else
3351 ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
3352 ahc_reset_channel(ahc, cmd->device->channel + 'A', /*initiate*/TRUE);
3353
3354 /*
3355 * Add a minimal bus settle delay for devices that are slow to
3356 * respond after bus resets.
3357 */
3358 ahc_linux_freeze_simq(ahc);
3359 init_timer(&ahc->platform_data->reset_timer);
3360 ahc->platform_data->reset_timer.data = (u_long)ahc;
3361 ahc->platform_data->reset_timer.expires = jiffies + HZ / 2;
3362 ahc->platform_data->reset_timer.function =
3363 (ahc_linux_callback_t *)ahc_linux_release_simq;
3364 add_timer(&ahc->platform_data->reset_timer);
3365 if (ahc_linux_next_device_to_run(ahc) != NULL)
3366 ahc_schedule_runq(ahc);
3367 ahc_linux_run_complete_queue(ahc);
3368 ahc_unlock(ahc, &flags);
3369}
3370
3371static void
3372ahc_linux_dv_complete(struct scsi_cmnd *cmd)
3373{
3374 struct ahc_softc *ahc;
3375
3376 ahc = *((struct ahc_softc **)cmd->device->host->hostdata);
3377
3378 /* Delete the DV timer before it goes off! */
3379 scsi_delete_timer(cmd);
3380
3381#ifdef AHC_DEBUG
3382 if (ahc_debug & AHC_SHOW_DV)
3383 printf("%s:%d:%d: Command completed, status= 0x%x\n",
3384 ahc_name(ahc), cmd->device->channel,
3385 cmd->device->id, cmd->result);
3386#endif
3387
3388 /* Wake up the state machine */
3389 up(&ahc->platform_data->dv_cmd_sem);
3390}
3391
3392static void
3393ahc_linux_generate_dv_pattern(struct ahc_linux_target *targ)
3394{
3395 uint16_t b;
3396 u_int i;
3397 u_int j;
3398
3399 if (targ->dv_buffer != NULL)
3400 free(targ->dv_buffer, M_DEVBUF);
3401 targ->dv_buffer = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
3402 if (targ->dv_buffer1 != NULL)
3403 free(targ->dv_buffer1, M_DEVBUF);
3404 targ->dv_buffer1 = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
3405
3406 i = 0;
3407 b = 0x0001;
3408 for (j = 0 ; i < targ->dv_echo_size; j++) {
3409 if (j < 32) {
3410 /*
3411 			 * 32 bytes of sequential numbers.
3412 */
3413 targ->dv_buffer[i++] = j & 0xff;
3414 } else if (j < 48) {
3415 /*
3416 			 * 16 bytes of repeating 0x0000, 0xffff.
3417 */
3418 targ->dv_buffer[i++] = (j & 0x02) ? 0xff : 0x00;
3419 } else if (j < 64) {
3420 /*
3421 			 * 16 bytes of repeating 0x5555, 0xaaaa.
3422 */
3423 targ->dv_buffer[i++] = (j & 0x02) ? 0xaa : 0x55;
3424 } else {
3425 /*
3426 * Remaining buffer is filled with a repeating
3427 			 * pattern of:
3428 *
3429 			 * 0xffff
3430 			 * ~(one-bit mask), the bit shifting left once per word.
3431 */
3432 if (j & 0x02) {
3433 if (j & 0x01) {
3434 targ->dv_buffer[i++] = ~(b >> 8) & 0xff;
3435 b <<= 1;
3436 if (b == 0x0000)
3437 b = 0x0001;
3438 } else {
3439 targ->dv_buffer[i++] = (~b & 0xff);
3440 }
3441 } else {
3442 targ->dv_buffer[i++] = 0xff;
3443 }
3444 }
3445 }
3446}
3447
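The generator above fills the echo buffer with four regions. A self-contained user-space sketch of the same loop, kept byte-for-byte compatible so the output can be eyeballed; the names and the main() harness are the editor's, not the driver's:

#include <stdio.h>
#include <stdint.h>

static void gen_dv_pattern(uint8_t *buf, unsigned int size)
{
	uint16_t b = 0x0001;
	unsigned int i = 0;
	unsigned int j;

	for (j = 0; i < size; j++) {
		if (j < 32) {
			/* 32 bytes of sequential numbers. */
			buf[i++] = j & 0xff;
		} else if (j < 48) {
			/* 16 bytes of repeating 0x0000, 0xffff. */
			buf[i++] = (j & 0x02) ? 0xff : 0x00;
		} else if (j < 64) {
			/* 16 bytes of repeating 0x5555, 0xaaaa. */
			buf[i++] = (j & 0x02) ? 0xaa : 0x55;
		} else if (j & 0x02) {
			/* Complement of a walking one-bit mask... */
			if (j & 0x01) {
				buf[i++] = ~(b >> 8) & 0xff;
				b <<= 1;
				if (b == 0x0000)
					b = 0x0001;
			} else {
				buf[i++] = ~b & 0xff;
			}
		} else {
			/* ...interleaved with 0xffff words. */
			buf[i++] = 0xff;
		}
	}
}

int main(void)
{
	uint8_t buf[128];
	unsigned int i;

	gen_dv_pattern(buf, sizeof(buf));
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x%c", buf[i], ((i & 0xf) == 0xf) ? '\n' : ' ');
	return 0;
}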
3448static u_int 1988static u_int
3449ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 1989ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3450{ 1990{
@@ -3476,48 +2016,6 @@ ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3476 return (tags); 2016 return (tags);
3477} 2017}
3478 2018
3479static u_int
3480ahc_linux_user_dv_setting(struct ahc_softc *ahc)
3481{
3482 static int warned_user;
3483 int dv;
3484
3485 if (ahc->unit >= NUM_ELEMENTS(aic7xxx_dv_settings)) {
3486 if (warned_user == 0) {
3487
3488 printf(KERN_WARNING
3489"aic7xxx: WARNING: Insufficient dv settings instances\n"
3490"aic7xxx: for installed controllers. Using defaults\n"
3491"aic7xxx: Please update the aic7xxx_dv_settings array\n"
3492"aic7xxx: in the aic7xxx_osm.c source file.\n");
3493 warned_user++;
3494 }
3495 dv = -1;
3496 } else {
3497
3498 dv = aic7xxx_dv_settings[ahc->unit];
3499 }
3500
3501 if (dv < 0) {
3502 u_long s;
3503
3504 /*
3505 * Apply the default.
3506 */
3507 /*
3508 * XXX - Enable DV on non-U160 controllers once it
3509 * has been tested there.
3510 */
3511 ahc_lock(ahc, &s);
3512 dv = (ahc->features & AHC_DT);
3513 if (ahc->seep_config != 0
3514 && ahc->seep_config->signature >= CFSIGNATURE2)
3515 dv = (ahc->seep_config->adapter_control & CFENABLEDV);
3516 ahc_unlock(ahc, &s);
3517 }
3518 return (dv);
3519}
3520
3521/* 2019/*
3522 * Determines the queue depth for a given device. 2020 * Determines the queue depth for a given device.
3523 */ 2021 */
@@ -3568,8 +2066,7 @@ ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev)
3568 * Schedule us to run later. The only reason we are not 2066 * Schedule us to run later. The only reason we are not
3569 * running is because the whole controller Q is frozen. 2067 * running is because the whole controller Q is frozen.
3570 */ 2068 */
3571 if (ahc->platform_data->qfrozen != 0 2069 if (ahc->platform_data->qfrozen != 0) {
3572 && AHC_DV_SIMQ_FROZEN(ahc) == 0) {
3573 TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, 2070 TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
3574 dev, links); 2071 dev, links);
3575 dev->flags |= AHC_DEV_ON_RUN_LIST; 2072 dev->flags |= AHC_DEV_ON_RUN_LIST;
@@ -3610,9 +2107,6 @@ ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev)
3610 if ((ahc->user_discenable & mask) != 0) 2107 if ((ahc->user_discenable & mask) != 0)
3611 hscb->control |= DISCENB; 2108 hscb->control |= DISCENB;
3612 2109
3613 if (AHC_DV_CMD(cmd) != 0)
3614 scb->flags |= SCB_SILENT;
3615
3616 if ((tstate->auto_negotiate & mask) != 0) { 2110 if ((tstate->auto_negotiate & mask) != 0) {
3617 scb->flags |= SCB_AUTO_NEGOTIATE; 2111 scb->flags |= SCB_AUTO_NEGOTIATE;
3618 scb->hscb->control |= MK_MESSAGE; 2112 scb->hscb->control |= MK_MESSAGE;
@@ -3659,7 +2153,7 @@ ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev)
3659 2153
3660 cur_seg = (struct scatterlist *)cmd->request_buffer; 2154 cur_seg = (struct scatterlist *)cmd->request_buffer;
3661 nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg, 2155 nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
3662 scsi_to_pci_dma_dir(cmd->sc_data_direction)); 2156 cmd->sc_data_direction);
3663 end_seg = cur_seg + nseg; 2157 end_seg = cur_seg + nseg;
3664 /* Copy the segments into the SG list. */ 2158 /* Copy the segments into the SG list. */
3665 sg = scb->sg_list; 2159 sg = scb->sg_list;
@@ -3703,7 +2197,7 @@ ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev)
3703 addr = pci_map_single(ahc->dev_softc, 2197 addr = pci_map_single(ahc->dev_softc,
3704 cmd->request_buffer, 2198 cmd->request_buffer,
3705 cmd->request_bufflen, 2199 cmd->request_bufflen,
3706 scsi_to_pci_dma_dir(cmd->sc_data_direction)); 2200 cmd->sc_data_direction);
3707 scb->platform_data->buf_busaddr = addr; 2201 scb->platform_data->buf_busaddr = addr;
3708 scb->sg_count = ahc_linux_map_seg(ahc, scb, 2202 scb->sg_count = ahc_linux_map_seg(ahc, scb,
3709 sg, addr, 2203 sg, addr,
@@ -3805,7 +2299,6 @@ ahc_linux_alloc_target(struct ahc_softc *ahc, u_int channel, u_int target)
3805 targ->channel = channel; 2299 targ->channel = channel;
3806 targ->target = target; 2300 targ->target = target;
3807 targ->ahc = ahc; 2301 targ->ahc = ahc;
3808 targ->flags = AHC_DV_REQUIRED;
3809 ahc->platform_data->targets[target_offset] = targ; 2302 ahc->platform_data->targets[target_offset] = targ;
3810 return (targ); 2303 return (targ);
3811} 2304}
@@ -3844,10 +2337,6 @@ ahc_linux_free_target(struct ahc_softc *ahc, struct ahc_linux_target *targ)
3844 ahc->platform_data->targets[target_offset] = NULL; 2337 ahc->platform_data->targets[target_offset] = NULL;
3845 if (targ->inq_data != NULL) 2338 if (targ->inq_data != NULL)
3846 free(targ->inq_data, M_DEVBUF); 2339 free(targ->inq_data, M_DEVBUF);
3847 if (targ->dv_buffer != NULL)
3848 free(targ->dv_buffer, M_DEVBUF);
3849 if (targ->dv_buffer1 != NULL)
3850 free(targ->dv_buffer1, M_DEVBUF);
3851 free(targ, M_DEVBUF); 2340 free(targ, M_DEVBUF);
3852} 2341}
3853 2342
@@ -3894,8 +2383,7 @@ __ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev)
3894 targ->devices[dev->lun] = NULL; 2383 targ->devices[dev->lun] = NULL;
3895 free(dev, M_DEVBUF); 2384 free(dev, M_DEVBUF);
3896 targ->refcount--; 2385 targ->refcount--;
3897 if (targ->refcount == 0 2386 if (targ->refcount == 0)
3898 && (targ->flags & AHC_DV_REQUIRED) == 0)
3899 ahc_linux_free_target(ahc, targ); 2387 ahc_linux_free_target(ahc, targ);
3900} 2388}
3901 2389
@@ -4099,16 +2587,7 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
4099 ahc_linux_handle_scsi_status(ahc, dev, scb); 2587 ahc_linux_handle_scsi_status(ahc, dev, scb);
4100 } else if (ahc_get_transaction_status(scb) == CAM_SEL_TIMEOUT) { 2588 } else if (ahc_get_transaction_status(scb) == CAM_SEL_TIMEOUT) {
4101 dev->flags |= AHC_DEV_UNCONFIGURED; 2589 dev->flags |= AHC_DEV_UNCONFIGURED;
4102 if (AHC_DV_CMD(cmd) == FALSE)
4103 dev->target->flags &= ~AHC_DV_REQUIRED;
4104 } 2590 }
4105 /*
4106 * Start DV for devices that require it assuming the first command
4107 * sent does not result in a selection timeout.
4108 */
4109 if (ahc_get_transaction_status(scb) != CAM_SEL_TIMEOUT
4110 && (dev->target->flags & AHC_DV_REQUIRED) != 0)
4111 ahc_linux_start_dv(ahc);
4112 2591
4113 if (dev->openings == 1 2592 if (dev->openings == 1
4114 && ahc_get_transaction_status(scb) == CAM_REQ_CMP 2593 && ahc_get_transaction_status(scb) == CAM_REQ_CMP
@@ -4152,13 +2631,6 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
4152 2631
4153 ahc_free_scb(ahc, scb); 2632 ahc_free_scb(ahc, scb);
4154 ahc_linux_queue_cmd_complete(ahc, cmd); 2633 ahc_linux_queue_cmd_complete(ahc, cmd);
4155
4156 if ((ahc->platform_data->flags & AHC_DV_WAIT_SIMQ_EMPTY) != 0
4157 && LIST_FIRST(&ahc->pending_scbs) == NULL) {
4158 ahc->platform_data->flags &= ~AHC_DV_WAIT_SIMQ_EMPTY;
4159 up(&ahc->platform_data->dv_sem);
4160 }
4161
4162} 2634}
4163 2635
4164static void 2636static void
@@ -4335,7 +2807,7 @@ ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd)
4335 * full error information available when making 2807 * full error information available when making
4336 * state change decisions. 2808 * state change decisions.
4337 */ 2809 */
4338 if (AHC_DV_CMD(cmd) == FALSE) { 2810 {
4339 u_int new_status; 2811 u_int new_status;
4340 2812
4341 switch (ahc_cmd_get_transaction_status(cmd)) { 2813 switch (ahc_cmd_get_transaction_status(cmd)) {
@@ -4426,115 +2898,6 @@ ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd)
4426} 2898}
4427 2899
4428static void 2900static void
4429ahc_linux_filter_inquiry(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
4430{
4431 struct scsi_inquiry_data *sid;
4432 struct ahc_initiator_tinfo *tinfo;
4433 struct ahc_transinfo *user;
4434 struct ahc_transinfo *goal;
4435 struct ahc_transinfo *curr;
4436 struct ahc_tmode_tstate *tstate;
4437 struct ahc_syncrate *syncrate;
4438 struct ahc_linux_device *dev;
4439 u_int maxsync;
4440 u_int width;
4441 u_int period;
4442 u_int offset;
4443 u_int ppr_options;
4444 u_int trans_version;
4445 u_int prot_version;
4446
4447 /*
4448 * Determine if this lun actually exists. If so,
4449 * hold on to its corresponding device structure.
4450 * If not, make sure we release the device and
4451 * don't bother processing the rest of this inquiry
4452 * command.
4453 */
4454 dev = ahc_linux_get_device(ahc, devinfo->channel - 'A',
4455 devinfo->target, devinfo->lun,
4456 /*alloc*/TRUE);
4457
4458 sid = (struct scsi_inquiry_data *)dev->target->inq_data;
4459 if (SID_QUAL(sid) == SID_QUAL_LU_CONNECTED) {
4460
4461 dev->flags &= ~AHC_DEV_UNCONFIGURED;
4462 } else {
4463 dev->flags |= AHC_DEV_UNCONFIGURED;
4464 return;
4465 }
4466
4467 /*
4468 * Update our notion of this device's transfer
4469 * negotiation capabilities.
4470 */
4471 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
4472 devinfo->our_scsiid,
4473 devinfo->target, &tstate);
4474 user = &tinfo->user;
4475 goal = &tinfo->goal;
4476 curr = &tinfo->curr;
4477 width = user->width;
4478 period = user->period;
4479 offset = user->offset;
4480 ppr_options = user->ppr_options;
4481 trans_version = user->transport_version;
4482 prot_version = MIN(user->protocol_version, SID_ANSI_REV(sid));
4483
4484 /*
4485 * Only attempt SPI3/4 once we've verified that
4486 * the device claims to support SPI3/4 features.
4487 */
4488 if (prot_version < SCSI_REV_2)
4489 trans_version = SID_ANSI_REV(sid);
4490 else
4491 trans_version = SCSI_REV_2;
4492
4493 if ((sid->flags & SID_WBus16) == 0)
4494 width = MSG_EXT_WDTR_BUS_8_BIT;
4495 if ((sid->flags & SID_Sync) == 0) {
4496 period = 0;
4497 offset = 0;
4498 ppr_options = 0;
4499 }
4500 if ((sid->spi3data & SID_SPI_QAS) == 0)
4501 ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
4502 if ((sid->spi3data & SID_SPI_CLOCK_DT) == 0)
4503 ppr_options &= MSG_EXT_PPR_QAS_REQ;
4504 if ((sid->spi3data & SID_SPI_IUS) == 0)
4505 ppr_options &= (MSG_EXT_PPR_DT_REQ
4506 | MSG_EXT_PPR_QAS_REQ);
4507
4508 if (prot_version > SCSI_REV_2
4509 && ppr_options != 0)
4510 trans_version = user->transport_version;
4511
4512 ahc_validate_width(ahc, /*tinfo limit*/NULL, &width, ROLE_UNKNOWN);
4513 if ((ahc->features & AHC_ULTRA2) != 0)
4514 maxsync = AHC_SYNCRATE_DT;
4515 else if ((ahc->features & AHC_ULTRA) != 0)
4516 maxsync = AHC_SYNCRATE_ULTRA;
4517 else
4518 maxsync = AHC_SYNCRATE_FAST;
4519
4520 syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, maxsync);
4521 ahc_validate_offset(ahc, /*tinfo limit*/NULL, syncrate,
4522 &offset, width, ROLE_UNKNOWN);
4523 if (offset == 0 || period == 0) {
4524 period = 0;
4525 offset = 0;
4526 ppr_options = 0;
4527 }
4528 /* Apply our filtered user settings. */
4529 curr->transport_version = trans_version;
4530 curr->protocol_version = prot_version;
4531 ahc_set_width(ahc, devinfo, width, AHC_TRANS_GOAL, /*paused*/FALSE);
4532 ahc_set_syncrate(ahc, devinfo, syncrate, period,
4533 offset, ppr_options, AHC_TRANS_GOAL,
4534 /*paused*/FALSE);
4535}
4536
4537static void
4538ahc_linux_sem_timeout(u_long arg) 2901ahc_linux_sem_timeout(u_long arg)
4539{ 2902{
4540 struct ahc_softc *ahc; 2903 struct ahc_softc *ahc;
@@ -4579,11 +2942,6 @@ ahc_linux_release_simq(u_long arg)
4579 ahc->platform_data->qfrozen--; 2942 ahc->platform_data->qfrozen--;
4580 if (ahc->platform_data->qfrozen == 0) 2943 if (ahc->platform_data->qfrozen == 0)
4581 unblock_reqs = 1; 2944 unblock_reqs = 1;
4582 if (AHC_DV_SIMQ_FROZEN(ahc)
4583 && ((ahc->platform_data->flags & AHC_DV_WAIT_SIMQ_RELEASE) != 0)) {
4584 ahc->platform_data->flags &= ~AHC_DV_WAIT_SIMQ_RELEASE;
4585 up(&ahc->platform_data->dv_sem);
4586 }
4587 ahc_schedule_runq(ahc); 2945 ahc_schedule_runq(ahc);
4588 ahc_unlock(ahc, &s); 2946 ahc_unlock(ahc, &s);
4589 /* 2947 /*
@@ -4990,13 +3348,267 @@ ahc_platform_dump_card_state(struct ahc_softc *ahc)
4990 3348
4991static void ahc_linux_exit(void); 3349static void ahc_linux_exit(void);
4992 3350
3351static void ahc_linux_get_period(struct scsi_target *starget)
3352{
3353 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3354 struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
3355 struct ahc_tmode_tstate *tstate;
3356 struct ahc_initiator_tinfo *tinfo
3357 = ahc_fetch_transinfo(ahc,
3358 starget->channel + 'A',
3359 shost->this_id, starget->id, &tstate);
3360 spi_period(starget) = tinfo->curr.period;
3361}
3362
3363static void ahc_linux_set_period(struct scsi_target *starget, int period)
3364{
3365 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3366 struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
3367 struct ahc_tmode_tstate *tstate;
3368 struct ahc_initiator_tinfo *tinfo
3369 = ahc_fetch_transinfo(ahc,
3370 starget->channel + 'A',
3371 shost->this_id, starget->id, &tstate);
3372 struct ahc_devinfo devinfo;
3373 unsigned int ppr_options = tinfo->curr.ppr_options;
3374 unsigned long flags;
3375 unsigned long offset = tinfo->curr.offset;
3376 struct ahc_syncrate *syncrate;
3377
3378 if (offset == 0)
3379 offset = MAX_OFFSET;
3380
3381 ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
3382 starget->channel + 'A', ROLE_INITIATOR);
3383 syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
3384 ahc_lock(ahc, &flags);
3385 ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
3386 ppr_options, AHC_TRANS_GOAL, FALSE);
3387 ahc_unlock(ahc, &flags);
3388}
3389
3390static void ahc_linux_get_offset(struct scsi_target *starget)
3391{
3392 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3393 struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
3394 struct ahc_tmode_tstate *tstate;
3395 struct ahc_initiator_tinfo *tinfo
3396 = ahc_fetch_transinfo(ahc,
3397 starget->channel + 'A',
3398 shost->this_id, starget->id, &tstate);
3399 spi_offset(starget) = tinfo->curr.offset;
3400}
3401
3402static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
3403{
3404 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3405 struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
3406 struct ahc_tmode_tstate *tstate;
3407 struct ahc_initiator_tinfo *tinfo
3408 = ahc_fetch_transinfo(ahc,
3409 starget->channel + 'A',
3410 shost->this_id, starget->id, &tstate);
3411 struct ahc_devinfo devinfo;
3412 unsigned int ppr_options = 0;
3413 unsigned int period = 0;
3414 unsigned long flags;
3415 struct ahc_syncrate *syncrate = NULL;
3416
3417 ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
3418 starget->channel + 'A', ROLE_INITIATOR);
3419 	if (offset != 0) {
3420 		period = tinfo->curr.period;
3421 		ppr_options = tinfo->curr.ppr_options;
3422 		syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
3423 	}
3424 ahc_lock(ahc, &flags);
3425 ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
3426 ppr_options, AHC_TRANS_GOAL, FALSE);
3427 ahc_unlock(ahc, &flags);
3428}
3429
3430static void ahc_linux_get_width(struct scsi_target *starget)
3431{
3432 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3433 struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
3434 struct ahc_tmode_tstate *tstate;
3435 struct ahc_initiator_tinfo *tinfo
3436 = ahc_fetch_transinfo(ahc,
3437 starget->channel + 'A',
3438 shost->this_id, starget->id, &tstate);
3439 spi_width(starget) = tinfo->curr.width;
3440}
3441
3442static void ahc_linux_set_width(struct scsi_target *starget, int width)
3443{
3444 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3445 struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
3446 struct ahc_devinfo devinfo;
3447 unsigned long flags;
3448
3449 ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
3450 starget->channel + 'A', ROLE_INITIATOR);
3451 ahc_lock(ahc, &flags);
3452 ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);
3453 ahc_unlock(ahc, &flags);
3454}
3455
3456static void ahc_linux_get_dt(struct scsi_target *starget)
3457{
3458 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3459 struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
3460 struct ahc_tmode_tstate *tstate;
3461 struct ahc_initiator_tinfo *tinfo
3462 = ahc_fetch_transinfo(ahc,
3463 starget->channel + 'A',
3464 shost->this_id, starget->id, &tstate);
3465 spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ;
3466}
3467
3468static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
3469{
3470 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3471 struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
3472 struct ahc_tmode_tstate *tstate;
3473 struct ahc_initiator_tinfo *tinfo
3474 = ahc_fetch_transinfo(ahc,
3475 starget->channel + 'A',
3476 shost->this_id, starget->id, &tstate);
3477 struct ahc_devinfo devinfo;
3478 unsigned int ppr_options = tinfo->curr.ppr_options
3479 & ~MSG_EXT_PPR_DT_REQ;
3480 unsigned int period = tinfo->curr.period;
3481 unsigned long flags;
3482 struct ahc_syncrate *syncrate;
3483
3484 ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
3485 starget->channel + 'A', ROLE_INITIATOR);
3486 syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
3487 dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2);
3488 ahc_lock(ahc, &flags);
3489 ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset,
3490 ppr_options, AHC_TRANS_GOAL, FALSE);
3491 ahc_unlock(ahc, &flags);
3492}
3493
3494static void ahc_linux_get_qas(struct scsi_target *starget)
3495{
3496 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3497 struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
3498 struct ahc_tmode_tstate *tstate;
3499 struct ahc_initiator_tinfo *tinfo
3500 = ahc_fetch_transinfo(ahc,
3501 starget->channel + 'A',
3502 shost->this_id, starget->id, &tstate);
3503 	spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ;
3504}
3505
3506static void ahc_linux_set_qas(struct scsi_target *starget, int qas)
3507{
3508 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3509 struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
3510 struct ahc_tmode_tstate *tstate;
3511 struct ahc_initiator_tinfo *tinfo
3512 = ahc_fetch_transinfo(ahc,
3513 starget->channel + 'A',
3514 shost->this_id, starget->id, &tstate);
3515 struct ahc_devinfo devinfo;
3516 unsigned int ppr_options = tinfo->curr.ppr_options
3517 & ~MSG_EXT_PPR_QAS_REQ;
3518 unsigned int period = tinfo->curr.period;
3519 unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
3520 unsigned long flags;
3521 struct ahc_syncrate *syncrate;
3522
3523 if (qas)
3524 ppr_options |= MSG_EXT_PPR_QAS_REQ;
3525
3526 ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
3527 starget->channel + 'A', ROLE_INITIATOR);
3528 syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
3529 dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2);
3530 ahc_lock(ahc, &flags);
3531 ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset,
3532 ppr_options, AHC_TRANS_GOAL, FALSE);
3533 ahc_unlock(ahc, &flags);
3534}
3535
3536static void ahc_linux_get_iu(struct scsi_target *starget)
3537{
3538 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3539 struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
3540 struct ahc_tmode_tstate *tstate;
3541 struct ahc_initiator_tinfo *tinfo
3542 = ahc_fetch_transinfo(ahc,
3543 starget->channel + 'A',
3544 shost->this_id, starget->id, &tstate);
3545 	spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ;
3546}
3547
3548static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
3549{
3550 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3551 struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
3552 struct ahc_tmode_tstate *tstate;
3553 struct ahc_initiator_tinfo *tinfo
3554 = ahc_fetch_transinfo(ahc,
3555 starget->channel + 'A',
3556 shost->this_id, starget->id, &tstate);
3557 struct ahc_devinfo devinfo;
3558 unsigned int ppr_options = tinfo->curr.ppr_options
3559 & ~MSG_EXT_PPR_IU_REQ;
3560 unsigned int period = tinfo->curr.period;
3561 unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
3562 unsigned long flags;
3563 struct ahc_syncrate *syncrate;
3564
3565 if (iu)
3566 ppr_options |= MSG_EXT_PPR_IU_REQ;
3567
3568 ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
3569 starget->channel + 'A', ROLE_INITIATOR);
3570 syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
3571 dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2);
3572 ahc_lock(ahc, &flags);
3573 ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset,
3574 ppr_options, AHC_TRANS_GOAL, FALSE);
3575 ahc_unlock(ahc, &flags);
3576}
3577
3578static struct spi_function_template ahc_linux_transport_functions = {
3579 .get_offset = ahc_linux_get_offset,
3580 .set_offset = ahc_linux_set_offset,
3581 .show_offset = 1,
3582 .get_period = ahc_linux_get_period,
3583 .set_period = ahc_linux_set_period,
3584 .show_period = 1,
3585 .get_width = ahc_linux_get_width,
3586 .set_width = ahc_linux_set_width,
3587 .show_width = 1,
3588 .get_dt = ahc_linux_get_dt,
3589 .set_dt = ahc_linux_set_dt,
3590 .show_dt = 1,
3591 .get_iu = ahc_linux_get_iu,
3592 .set_iu = ahc_linux_set_iu,
3593 .show_iu = 1,
3594 .get_qas = ahc_linux_get_qas,
3595 .set_qas = ahc_linux_set_qas,
3596 .show_qas = 1,
3597};
3598
3599
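Once attached (see ahc_linux_init below), the template above lets the SPI transport class publish these attributes through sysfs. A user-space sketch of reading one back; the exact path is the editor's assumption based on the 2.6 spi_transport class layout, with target0:0:3 standing in for a real host:channel:id triple:

#include <stdio.h>

int main(void)
{
	/* Hypothetical target; substitute the host:channel:id in use. */
	FILE *f = fopen("/sys/class/spi_transport/target0:0:3/period", "r");
	char line[32];

	if (f == NULL)
		return 1;
	if (fgets(line, sizeof(line), f) != NULL)
		printf("negotiated period: %s", line);
	fclose(f);
	return 0;
}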
3600
4993static int __init 3601static int __init
4994ahc_linux_init(void) 3602ahc_linux_init(void)
4995{ 3603{
4996#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 3604#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
3605 ahc_linux_transport_template = spi_attach_transport(&ahc_linux_transport_functions);
3606 if (!ahc_linux_transport_template)
3607 return -ENODEV;
4997 int rc = ahc_linux_detect(&aic7xxx_driver_template); 3608 int rc = ahc_linux_detect(&aic7xxx_driver_template);
4998 if (rc) 3609 if (rc)
4999 return rc; 3610 return rc;
3611 spi_release_transport(ahc_linux_transport_template);
5000 ahc_linux_exit(); 3612 ahc_linux_exit();
5001 return -ENODEV; 3613 return -ENODEV;
5002#else 3614#else
@@ -5014,19 +3626,6 @@ ahc_linux_init(void)
5014static void 3626static void
5015ahc_linux_exit(void) 3627ahc_linux_exit(void)
5016{ 3628{
5017 struct ahc_softc *ahc;
5018
5019 /*
5020 * Shutdown DV threads before going into the SCSI mid-layer.
5021 * This avoids situations where the mid-layer locks the entire
5022 * kernel so that waiting for our DV threads to exit leads
5023 * to deadlock.
5024 */
5025 TAILQ_FOREACH(ahc, &ahc_tailq, links) {
5026
5027 ahc_linux_kill_dv_thread(ahc);
5028 }
5029
5030#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) 3629#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
5031 /* 3630 /*
5032 * In 2.4 we have to unregister from the PCI core _after_ 3631 * In 2.4 we have to unregister from the PCI core _after_
@@ -5037,6 +3636,7 @@ ahc_linux_exit(void)
5037#endif 3636#endif
5038 ahc_linux_pci_exit(); 3637 ahc_linux_pci_exit();
5039 ahc_linux_eisa_exit(); 3638 ahc_linux_eisa_exit();
3639 spi_release_transport(ahc_linux_transport_template);
5040} 3640}
5041 3641
5042module_init(ahc_linux_init); 3642module_init(ahc_linux_init);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index db3bd6321dd4..c401537067b6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -424,27 +424,9 @@ struct ahc_linux_device {
424}; 424};
425 425
426typedef enum { 426typedef enum {
427 AHC_DV_REQUIRED = 0x01,
428 AHC_INQ_VALID = 0x02, 427 AHC_INQ_VALID = 0x02,
429 AHC_BASIC_DV = 0x04,
430 AHC_ENHANCED_DV = 0x08
431} ahc_linux_targ_flags; 428} ahc_linux_targ_flags;
432 429
433/* DV States */
434typedef enum {
435 AHC_DV_STATE_EXIT = 0,
436 AHC_DV_STATE_INQ_SHORT_ASYNC,
437 AHC_DV_STATE_INQ_ASYNC,
438 AHC_DV_STATE_INQ_ASYNC_VERIFY,
439 AHC_DV_STATE_TUR,
440 AHC_DV_STATE_REBD,
441 AHC_DV_STATE_INQ_VERIFY,
442 AHC_DV_STATE_WEB,
443 AHC_DV_STATE_REB,
444 AHC_DV_STATE_SU,
445 AHC_DV_STATE_BUSY
446} ahc_dv_state;
447
448struct ahc_linux_target { 430struct ahc_linux_target {
449 struct ahc_linux_device *devices[AHC_NUM_LUNS]; 431 struct ahc_linux_device *devices[AHC_NUM_LUNS];
450 int channel; 432 int channel;
@@ -454,19 +436,6 @@ struct ahc_linux_target {
454 struct ahc_softc *ahc; 436 struct ahc_softc *ahc;
455 ahc_linux_targ_flags flags; 437 ahc_linux_targ_flags flags;
456 struct scsi_inquiry_data *inq_data; 438 struct scsi_inquiry_data *inq_data;
457 /*
458 * The next "fallback" period to use for narrow/wide transfers.
459 */
460 uint8_t dv_next_narrow_period;
461 uint8_t dv_next_wide_period;
462 uint8_t dv_max_width;
463 uint8_t dv_max_ppr_options;
464 uint8_t dv_last_ppr_options;
465 u_int dv_echo_size;
466 ahc_dv_state dv_state;
467 u_int dv_state_retry;
468 char *dv_buffer;
469 char *dv_buffer1;
470}; 439};
471 440
472/********************* Definitions Required by the Core ***********************/ 441/********************* Definitions Required by the Core ***********************/
@@ -511,10 +480,6 @@ struct scb_platform_data {
511 * this driver. 480 * this driver.
512 */ 481 */
513typedef enum { 482typedef enum {
514 AHC_DV_WAIT_SIMQ_EMPTY = 0x01,
515 AHC_DV_WAIT_SIMQ_RELEASE = 0x02,
516 AHC_DV_ACTIVE = 0x04,
517 AHC_DV_SHUTDOWN = 0x08,
518 AHC_RUN_CMPLT_Q_TIMER = 0x10 483 AHC_RUN_CMPLT_Q_TIMER = 0x10
519} ahc_linux_softc_flags; 484} ahc_linux_softc_flags;
520 485
@@ -937,11 +902,6 @@ int ahc_linux_proc_info(struct Scsi_Host *, char *, char **,
937#endif 902#endif
938 903
939/*************************** Domain Validation ********************************/ 904/*************************** Domain Validation ********************************/
940#define AHC_DV_CMD(cmd) ((cmd)->scsi_done == ahc_linux_dv_complete)
941#define AHC_DV_SIMQ_FROZEN(ahc) \
942 ((((ahc)->platform_data->flags & AHC_DV_ACTIVE) != 0) \
943 && (ahc)->platform_data->qfrozen == 1)
944
945/*********************** Transaction Access Wrappers *************************/ 905/*********************** Transaction Access Wrappers *************************/
946static __inline void ahc_cmd_set_transaction_status(Scsi_Cmnd *, uint32_t); 906static __inline void ahc_cmd_set_transaction_status(Scsi_Cmnd *, uint32_t);
947static __inline void ahc_set_transaction_status(struct scb *, uint32_t); 907static __inline void ahc_set_transaction_status(struct scb *, uint32_t);
diff --git a/drivers/scsi/aic7xxx/cam.h b/drivers/scsi/aic7xxx/cam.h
index d40ba0760c76..26f17e3fc45c 100644
--- a/drivers/scsi/aic7xxx/cam.h
+++ b/drivers/scsi/aic7xxx/cam.h
@@ -103,9 +103,9 @@ typedef enum {
103} ac_code; 103} ac_code;
104 104
105typedef enum { 105typedef enum {
106 CAM_DIR_IN = SCSI_DATA_READ, 106 CAM_DIR_IN = DMA_FROM_DEVICE,
107 CAM_DIR_OUT = SCSI_DATA_WRITE, 107 CAM_DIR_OUT = DMA_TO_DEVICE,
108 CAM_DIR_NONE = SCSI_DATA_NONE 108 CAM_DIR_NONE = DMA_NONE,
109} ccb_flags; 109} ccb_flags;
110 110
111#endif /* _AIC7XXX_CAM_H */ 111#endif /* _AIC7XXX_CAM_H */
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index a6e7bb0d53f4..9e9d0c40187e 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -2700,12 +2700,12 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
2700 struct scatterlist *sg; 2700 struct scatterlist *sg;
2701 2701
2702 sg = (struct scatterlist *)cmd->request_buffer; 2702 sg = (struct scatterlist *)cmd->request_buffer;
2703 pci_unmap_sg(p->pdev, sg, cmd->use_sg, scsi_to_pci_dma_dir(cmd->sc_data_direction)); 2703 pci_unmap_sg(p->pdev, sg, cmd->use_sg, cmd->sc_data_direction);
2704 } 2704 }
2705 else if (cmd->request_bufflen) 2705 else if (cmd->request_bufflen)
2706 pci_unmap_single(p->pdev, aic7xxx_mapping(cmd), 2706 pci_unmap_single(p->pdev, aic7xxx_mapping(cmd),
2707 cmd->request_bufflen, 2707 cmd->request_bufflen,
2708 scsi_to_pci_dma_dir(cmd->sc_data_direction)); 2708 cmd->sc_data_direction);
2709 if (scb->flags & SCB_SENSE) 2709 if (scb->flags & SCB_SENSE)
2710 { 2710 {
2711 pci_unmap_single(p->pdev, 2711 pci_unmap_single(p->pdev,
@@ -10228,7 +10228,7 @@ aic7xxx_buildscb(struct aic7xxx_host *p, Scsi_Cmnd *cmd,
10228 10228
10229 sg = (struct scatterlist *)cmd->request_buffer; 10229 sg = (struct scatterlist *)cmd->request_buffer;
10230 scb->sg_length = 0; 10230 scb->sg_length = 0;
10231 use_sg = pci_map_sg(p->pdev, sg, cmd->use_sg, scsi_to_pci_dma_dir(cmd->sc_data_direction)); 10231 use_sg = pci_map_sg(p->pdev, sg, cmd->use_sg, cmd->sc_data_direction);
10232 /* 10232 /*
10233 * Copy the segments into the SG array. NOTE!!! - We used to 10233 * Copy the segments into the SG array. NOTE!!! - We used to
10234 * have the first entry both in the data_pointer area and the first 10234 * have the first entry both in the data_pointer area and the first
@@ -10256,7 +10256,7 @@ aic7xxx_buildscb(struct aic7xxx_host *p, Scsi_Cmnd *cmd,
10256 { 10256 {
10257 unsigned int address = pci_map_single(p->pdev, cmd->request_buffer, 10257 unsigned int address = pci_map_single(p->pdev, cmd->request_buffer,
10258 cmd->request_bufflen, 10258 cmd->request_bufflen,
10259 scsi_to_pci_dma_dir(cmd->sc_data_direction)); 10259 cmd->sc_data_direction);
10260 aic7xxx_mapping(cmd) = address; 10260 aic7xxx_mapping(cmd) = address;
10261 scb->sg_list[0].address = cpu_to_le32(address); 10261 scb->sg_list[0].address = cpu_to_le32(address);
10262 scb->sg_list[0].length = cpu_to_le32(cmd->request_bufflen); 10262 scb->sg_list[0].length = cpu_to_le32(cmd->request_bufflen);
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 0a172c1e9f7e..3838f88e1fe0 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2117,7 +2117,7 @@ request_sense:
2117 SCpnt->SCp.Message = 0; 2117 SCpnt->SCp.Message = 0;
2118 SCpnt->SCp.Status = 0; 2118 SCpnt->SCp.Status = 0;
2119 SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer); 2119 SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer);
2120 SCpnt->sc_data_direction = SCSI_DATA_READ; 2120 SCpnt->sc_data_direction = DMA_FROM_DEVICE;
2121 SCpnt->use_sg = 0; 2121 SCpnt->use_sg = 0;
2122 SCpnt->tag = 0; 2122 SCpnt->tag = 0;
2123 SCpnt->host_scribble = (void *)fas216_rq_sns_done; 2123 SCpnt->host_scribble = (void *)fas216_rq_sns_done;
diff --git a/drivers/scsi/cpqfcTSinit.c b/drivers/scsi/cpqfcTSinit.c
index 2eeb493f5a2b..5674ada6d5c2 100644
--- a/drivers/scsi/cpqfcTSinit.c
+++ b/drivers/scsi/cpqfcTSinit.c
@@ -642,12 +642,12 @@ int cpqfcTS_ioctl( struct scsi_device *ScsiDev, int Cmnd, void *arg)
642 return( -EFAULT); 642 return( -EFAULT);
643 } 643 }
644 } 644 }
645 ScsiPassThruReq->sr_data_direction = SCSI_DATA_WRITE; 645 ScsiPassThruReq->sr_data_direction = DMA_TO_DEVICE;
646 } else if (vendor_cmd->rw_flag == VENDOR_READ_OPCODE) { 646 } else if (vendor_cmd->rw_flag == VENDOR_READ_OPCODE) {
647 ScsiPassThruReq->sr_data_direction = SCSI_DATA_READ; 647 ScsiPassThruReq->sr_data_direction = DMA_FROM_DEVICE;
648 } else 648 } else
649 // maybe this means a bug in the user app 649 // maybe this means a bug in the user app
650 ScsiPassThruReq->sr_data_direction = SCSI_DATA_NONE; 650 ScsiPassThruReq->sr_data_direction = DMA_BIDIRECTIONAL;
651 651
652 ScsiPassThruReq->sr_cmd_len = 0; // set correctly by scsi_do_req() 652 ScsiPassThruReq->sr_cmd_len = 0; // set correctly by scsi_do_req()
653 ScsiPassThruReq->sr_sense_buffer[0] = 0; 653 ScsiPassThruReq->sr_sense_buffer[0] = 0;
diff --git a/drivers/scsi/cpqfcTSworker.c b/drivers/scsi/cpqfcTSworker.c
index a5fd7427e9da..d822ddcc52b2 100644
--- a/drivers/scsi/cpqfcTSworker.c
+++ b/drivers/scsi/cpqfcTSworker.c
@@ -5129,7 +5129,7 @@ cpqfc_undo_SEST_mappings(struct pci_dev *pcidev,
5129 for (i=*sgPages_head; i != NULL ;i = next) 5129 for (i=*sgPages_head; i != NULL ;i = next)
5130 { 5130 {
5131 pci_unmap_single(pcidev, i->busaddr, i->maplen, 5131 pci_unmap_single(pcidev, i->busaddr, i->maplen,
5132 scsi_to_pci_dma_dir(PCI_DMA_TODEVICE)); 5132 PCI_DMA_TODEVICE);
5133 i->busaddr = (dma_addr_t) NULL; 5133 i->busaddr = (dma_addr_t) NULL;
5134 i->maplen = 0L; 5134 i->maplen = 0L;
5135 next = i->next; 5135 next = i->next;
@@ -5195,7 +5195,7 @@ static ULONG build_SEST_sgList(
5195 contigaddr = ulBuff = pci_map_single(pcidev, 5195 contigaddr = ulBuff = pci_map_single(pcidev,
5196 Cmnd->request_buffer, 5196 Cmnd->request_buffer,
5197 Cmnd->request_bufflen, 5197 Cmnd->request_bufflen,
5198 scsi_to_pci_dma_dir(Cmnd->sc_data_direction)); 5198 Cmnd->sc_data_direction);
5199 // printk("ms %p ", ulBuff); 5199 // printk("ms %p ", ulBuff);
5200 } 5200 }
5201 else { 5201 else {
@@ -5224,7 +5224,7 @@ static ULONG build_SEST_sgList(
5224 unsigned long btg; 5224 unsigned long btg;
5225 contigaddr = pci_map_single(pcidev, Cmnd->request_buffer, 5225 contigaddr = pci_map_single(pcidev, Cmnd->request_buffer,
5226 Cmnd->request_bufflen, 5226 Cmnd->request_bufflen,
5227 scsi_to_pci_dma_dir(Cmnd->sc_data_direction)); 5227 Cmnd->sc_data_direction);
5228 5228
5229 // printk("contigaddr = %p, len = %d\n", 5229 // printk("contigaddr = %p, len = %d\n",
5230 // (void *) contigaddr, bytes_to_go); 5230 // (void *) contigaddr, bytes_to_go);
@@ -5247,7 +5247,7 @@ static ULONG build_SEST_sgList(
5247 5247
5248 sgl = (struct scatterlist*)Cmnd->request_buffer; 5248 sgl = (struct scatterlist*)Cmnd->request_buffer;
5249 sg_count = pci_map_sg(pcidev, sgl, Cmnd->use_sg, 5249 sg_count = pci_map_sg(pcidev, sgl, Cmnd->use_sg,
5250 scsi_to_pci_dma_dir(Cmnd->sc_data_direction)); 5250 Cmnd->sc_data_direction);
5251 if( sg_count <= 3 ) { 5251 if( sg_count <= 3 ) {
5252 5252
5253 // we need to be careful here that no individual mapping 5253 // we need to be careful here that no individual mapping
@@ -5400,7 +5400,7 @@ static ULONG build_SEST_sgList(
5400 5400
5401 cpqfc_undo_SEST_mappings(pcidev, contigaddr, 5401 cpqfc_undo_SEST_mappings(pcidev, contigaddr,
5402 Cmnd->request_bufflen, 5402 Cmnd->request_bufflen,
5403 scsi_to_pci_dma_dir(Cmnd->sc_data_direction), 5403 Cmnd->sc_data_direction,
5404 sgl, Cmnd->use_sg, sgPages_head, AllocatedPages+1); 5404 sgl, Cmnd->use_sg, sgPages_head, AllocatedPages+1);
5405 5405
5406 // FIXME: testing shows that if we get here, 5406 // FIXME: testing shows that if we get here,
@@ -5946,7 +5946,7 @@ cpqfc_pci_unmap_extended_sg(struct pci_dev *pcidev,
5946 // for each extended scatter gather region needing unmapping... 5946 // for each extended scatter gather region needing unmapping...
5947 for (i=fcChip->SEST->sgPages[x_ID] ; i != NULL ; i = i->next) 5947 for (i=fcChip->SEST->sgPages[x_ID] ; i != NULL ; i = i->next)
5948 pci_unmap_single(pcidev, i->busaddr, i->maplen, 5948 pci_unmap_single(pcidev, i->busaddr, i->maplen,
5949 scsi_to_pci_dma_dir(PCI_DMA_TODEVICE)); 5949 PCI_DMA_TODEVICE);
5950} 5950}
5951 5951
5952// Called also from cpqfcTScontrol.o, so can't be static 5952// Called also from cpqfcTScontrol.o, so can't be static
@@ -5960,14 +5960,14 @@ cpqfc_pci_unmap(struct pci_dev *pcidev,
5960 if (cmd->use_sg) { // Used scatter gather list for data buffer? 5960 if (cmd->use_sg) { // Used scatter gather list for data buffer?
5961 cpqfc_pci_unmap_extended_sg(pcidev, fcChip, x_ID); 5961 cpqfc_pci_unmap_extended_sg(pcidev, fcChip, x_ID);
5962 pci_unmap_sg(pcidev, cmd->buffer, cmd->use_sg, 5962 pci_unmap_sg(pcidev, cmd->buffer, cmd->use_sg,
5963 scsi_to_pci_dma_dir(cmd->sc_data_direction)); 5963 cmd->sc_data_direction);
5964 // printk("umsg %d\n", cmd->use_sg); 5964 // printk("umsg %d\n", cmd->use_sg);
5965 } 5965 }
5966 else if (cmd->request_bufflen) { 5966 else if (cmd->request_bufflen) {
5967 // printk("ums %p ", fcChip->SEST->u[ x_ID ].IWE.GAddr1); 5967 // printk("ums %p ", fcChip->SEST->u[ x_ID ].IWE.GAddr1);
5968 pci_unmap_single(pcidev, fcChip->SEST->u[ x_ID ].IWE.GAddr1, 5968 pci_unmap_single(pcidev, fcChip->SEST->u[ x_ID ].IWE.GAddr1,
5969 cmd->request_bufflen, 5969 cmd->request_bufflen,
5970 scsi_to_pci_dma_dir(cmd->sc_data_direction)); 5970 cmd->sc_data_direction);
5971 } 5971 }
5972} 5972}
5973 5973
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index cc0cb246b1e4..a9eaab9fbd5e 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4034,7 +4034,7 @@ static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
4034} 4034}
4035 4035
4036#ifdef GDTH_STATISTICS 4036#ifdef GDTH_STATISTICS
4037void gdth_timeout(ulong data) 4037static void gdth_timeout(ulong data)
4038{ 4038{
4039 ulong32 i; 4039 ulong32 i;
4040 Scsi_Cmnd *nscp; 4040 Scsi_Cmnd *nscp;
@@ -4062,7 +4062,7 @@ void gdth_timeout(ulong data)
4062} 4062}
4063#endif 4063#endif
4064 4064
4065void __init internal_setup(char *str,int *ints) 4065static void __init internal_setup(char *str,int *ints)
4066{ 4066{
4067 int i, argc; 4067 int i, argc;
4068 char *cur_str, *argv; 4068 char *cur_str, *argv;
@@ -4153,7 +4153,7 @@ int __init option_setup(char *str)
4153 return 1; 4153 return 1;
4154} 4154}
4155 4155
4156int __init gdth_detect(Scsi_Host_Template *shtp) 4156static int __init gdth_detect(Scsi_Host_Template *shtp)
4157{ 4157{
4158 struct Scsi_Host *shp; 4158 struct Scsi_Host *shp;
4159 gdth_pci_str pcistr[MAXHA]; 4159 gdth_pci_str pcistr[MAXHA];
@@ -4604,7 +4604,7 @@ int __init gdth_detect(Scsi_Host_Template *shtp)
4604} 4604}
4605 4605
4606 4606
4607int gdth_release(struct Scsi_Host *shp) 4607static int gdth_release(struct Scsi_Host *shp)
4608{ 4608{
4609 int hanum; 4609 int hanum;
4610 gdth_ha_str *ha; 4610 gdth_ha_str *ha;
@@ -4691,7 +4691,7 @@ static const char *gdth_ctr_name(int hanum)
4691 return(""); 4691 return("");
4692} 4692}
4693 4693
4694const char *gdth_info(struct Scsi_Host *shp) 4694static const char *gdth_info(struct Scsi_Host *shp)
4695{ 4695{
4696 int hanum; 4696 int hanum;
4697 gdth_ha_str *ha; 4697 gdth_ha_str *ha;
@@ -4704,19 +4704,19 @@ const char *gdth_info(struct Scsi_Host *shp)
4704} 4704}
4705 4705
4706/* new error handling */ 4706/* new error handling */
4707int gdth_eh_abort(Scsi_Cmnd *scp) 4707static int gdth_eh_abort(Scsi_Cmnd *scp)
4708{ 4708{
4709 TRACE2(("gdth_eh_abort()\n")); 4709 TRACE2(("gdth_eh_abort()\n"));
4710 return FAILED; 4710 return FAILED;
4711} 4711}
4712 4712
4713int gdth_eh_device_reset(Scsi_Cmnd *scp) 4713static int gdth_eh_device_reset(Scsi_Cmnd *scp)
4714{ 4714{
4715 TRACE2(("gdth_eh_device_reset()\n")); 4715 TRACE2(("gdth_eh_device_reset()\n"));
4716 return FAILED; 4716 return FAILED;
4717} 4717}
4718 4718
4719int gdth_eh_bus_reset(Scsi_Cmnd *scp) 4719static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
4720{ 4720{
4721 int i, hanum; 4721 int i, hanum;
4722 gdth_ha_str *ha; 4722 gdth_ha_str *ha;
@@ -4770,7 +4770,7 @@ int gdth_eh_bus_reset(Scsi_Cmnd *scp)
4770 return SUCCESS; 4770 return SUCCESS;
4771} 4771}
4772 4772
4773int gdth_eh_host_reset(Scsi_Cmnd *scp) 4773static int gdth_eh_host_reset(Scsi_Cmnd *scp)
4774{ 4774{
4775 TRACE2(("gdth_eh_host_reset()\n")); 4775 TRACE2(("gdth_eh_host_reset()\n"));
4776 return FAILED; 4776 return FAILED;
@@ -4778,9 +4778,9 @@ int gdth_eh_host_reset(Scsi_Cmnd *scp)
4778 4778
4779 4779
4780#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 4780#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
4781int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip) 4781static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip)
4782#else 4782#else
4783int gdth_bios_param(Disk *disk,kdev_t dev,int *ip) 4783static int gdth_bios_param(Disk *disk,kdev_t dev,int *ip)
4784#endif 4784#endif
4785{ 4785{
4786 unchar b, t; 4786 unchar b, t;
@@ -4818,7 +4818,7 @@ int gdth_bios_param(Disk *disk,kdev_t dev,int *ip)
4818} 4818}
4819 4819
4820 4820
4821int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)) 4821static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *))
4822{ 4822{
4823 int hanum; 4823 int hanum;
4824 int priority; 4824 int priority;
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index bf269f05ea8e..c0f1e3411524 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -1029,51 +1029,10 @@ typedef struct {
1029 1029
1030/* function prototyping */ 1030/* function prototyping */
1031 1031
1032int gdth_detect(Scsi_Host_Template *);
1033int gdth_release(struct Scsi_Host *);
1034int gdth_queuecommand(Scsi_Cmnd *,void (*done)(Scsi_Cmnd *));
1035const char *gdth_info(struct Scsi_Host *);
1036
1037#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 1032#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
1038int gdth_bios_param(struct scsi_device *,struct block_device *,sector_t,int *);
1039int gdth_proc_info(struct Scsi_Host *, char *,char **,off_t,int,int); 1033int gdth_proc_info(struct Scsi_Host *, char *,char **,off_t,int,int);
1040#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
1041int gdth_bios_param(Disk *,kdev_t,int *);
1042int gdth_proc_info(char *,char **,off_t,int,int,int);
1043#else 1034#else
1044int gdth_bios_param(Disk *,kdev_t,int *);
1045extern struct proc_dir_entry proc_scsi_gdth;
1046int gdth_proc_info(char *,char **,off_t,int,int,int); 1035int gdth_proc_info(char *,char **,off_t,int,int,int);
1047int gdth_abort(Scsi_Cmnd *);
1048int gdth_reset(Scsi_Cmnd *,unsigned int);
1049#define GDTH { proc_dir: &proc_scsi_gdth, \
1050 proc_info: gdth_proc_info, \
1051 name: "GDT SCSI Disk Array Controller",\
1052 detect: gdth_detect, \
1053 release: gdth_release, \
1054 info: gdth_info, \
1055 command: NULL, \
1056 queuecommand: gdth_queuecommand, \
1057 eh_abort_handler: gdth_eh_abort, \
1058 eh_device_reset_handler: gdth_eh_device_reset, \
1059 eh_bus_reset_handler: gdth_eh_bus_reset, \
1060 eh_host_reset_handler: gdth_eh_host_reset, \
1061 abort: gdth_abort, \
1062 reset: gdth_reset, \
1063 bios_param: gdth_bios_param, \
1064 can_queue: GDTH_MAXCMDS, \
1065 this_id: -1, \
1066 sg_tablesize: GDTH_MAXSG, \
1067 cmd_per_lun: GDTH_MAXC_P_L, \
1068 present: 0, \
1069 unchecked_isa_dma: 1, \
1070 use_clustering: ENABLE_CLUSTERING, \
1071 use_new_eh_code: 1 /* use new error code */ }
1072#endif 1036#endif
1073 1037
1074int gdth_eh_abort(Scsi_Cmnd *scp);
1075int gdth_eh_device_reset(Scsi_Cmnd *scp);
1076int gdth_eh_bus_reset(Scsi_Cmnd *scp);
1077int gdth_eh_host_reset(Scsi_Cmnd *scp);
1078
1079#endif 1038#endif
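With every gdth entry point now static in gdth.c, the header loses both the prototypes and the old GDTH template macro, whose GNU-style "field: value" initializers predate C99. For reference, a sketch of the same template written with designated initializers in the usual 2.6 style (field names are from the 2.6 scsi_host_template; the exact set gdth.c keeps is an assumption here):

	static Scsi_Host_Template gdth_template = {
		.name			 = "GDT SCSI Disk Array Controller",
		.detect			 = gdth_detect,
		.release		 = gdth_release,
		.info			 = gdth_info,
		.queuecommand		 = gdth_queuecommand,
		.eh_abort_handler	 = gdth_eh_abort,
		.eh_device_reset_handler = gdth_eh_device_reset,
		.eh_bus_reset_handler	 = gdth_eh_bus_reset,
		.eh_host_reset_handler	 = gdth_eh_host_reset,
		.bios_param		 = gdth_bios_param,
		.can_queue		 = GDTH_MAXCMDS,
		.this_id		 = -1,
		.sg_tablesize		 = GDTH_MAXSG,
		.cmd_per_lun		 = GDTH_MAXC_P_L,
		.unchecked_isa_dma	 = 1,
		.use_clustering		 = ENABLE_CLUSTERING,
	};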
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 47c263e5cd39..fbc2cb6667a1 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -231,9 +231,9 @@ module_param(ips, charp, 0);
231#endif 231#endif
232 232
233#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \ 233#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
234 SCSI_DATA_NONE == scb->scsi_cmd->sc_data_direction) ? \ 234 DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
235 PCI_DMA_BIDIRECTIONAL : \ 235 PCI_DMA_BIDIRECTIONAL : \
236 scsi_to_pci_dma_dir(scb->scsi_cmd->sc_data_direction)) 236 scb->scsi_cmd->sc_data_direction)
237 237
238#ifdef IPS_DEBUG 238#ifdef IPS_DEBUG
239#define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n"); 239#define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n");
@@ -2849,8 +2849,7 @@ ips_next(ips_ha_t * ha, int intr)
2849 2849
2850 sg = SC->request_buffer; 2850 sg = SC->request_buffer;
2851 scb->sg_count = pci_map_sg(ha->pcidev, sg, SC->use_sg, 2851 scb->sg_count = pci_map_sg(ha->pcidev, sg, SC->use_sg,
2852 scsi_to_pci_dma_dir(SC-> 2852 SC->sc_data_direction);
2853 sc_data_direction));
2854 scb->flags |= IPS_SCB_MAP_SG; 2853 scb->flags |= IPS_SCB_MAP_SG;
2855 for (i = 0; i < scb->sg_count; i++) { 2854 for (i = 0; i < scb->sg_count; i++) {
2856 if (ips_fill_scb_sg_single 2855 if (ips_fill_scb_sg_single
@@ -2865,8 +2864,7 @@ ips_next(ips_ha_t * ha, int intr)
2865 pci_map_single(ha->pcidev, 2864 pci_map_single(ha->pcidev,
2866 SC->request_buffer, 2865 SC->request_buffer,
2867 SC->request_bufflen, 2866 SC->request_bufflen,
2868 scsi_to_pci_dma_dir(SC-> 2867 SC->sc_data_direction);
2869 sc_data_direction));
2870 scb->flags |= IPS_SCB_MAP_SINGLE; 2868 scb->flags |= IPS_SCB_MAP_SINGLE;
2871 ips_fill_scb_sg_single(ha, scb->data_busaddr, 2869 ips_fill_scb_sg_single(ha, scb->data_busaddr,
2872 scb, 0, 2870 scb, 0,
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index 29f250c80b98..4cbb6187cc44 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -131,6 +131,7 @@ lasi700_probe(struct parisc_device *dev)
131 if (!host) 131 if (!host)
132 goto out_kfree; 132 goto out_kfree;
133 host->this_id = 7; 133 host->this_id = 7;
134 host->base = base;
134 host->irq = dev->irq; 135 host->irq = dev->irq;
135 if(request_irq(dev->irq, NCR_700_intr, SA_SHIRQ, "lasi700", host)) { 136 if(request_irq(dev->irq, NCR_700_intr, SA_SHIRQ, "lasi700", host)) {
136 printk(KERN_ERR "lasi700: request_irq failed!\n"); 137 printk(KERN_ERR "lasi700: request_irq failed!\n");
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 4e5e54a1564b..4c96df060c3b 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -305,7 +305,7 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
305 sb[0] = 0x70; 305 sb[0] = 0x70;
306 sb[2] = MEDIUM_ERROR; 306 sb[2] = MEDIUM_ERROR;
307 sb[7] = 0x0A; 307 sb[7] = 0x0A;
308 if (cmd->sc_data_direction == SCSI_DATA_READ) { 308 if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
309 sb[12] = 0x11; /* "unrecovered read error" */ 309 sb[12] = 0x11; /* "unrecovered read error" */
310 sb[13] = 0x04; 310 sb[13] = 0x04;
311 } else { 311 } else {
@@ -671,8 +671,8 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
671 return; 671 return;
672 672
673 /* data is present; dma-map it */ 673 /* data is present; dma-map it */
674 if (cmd->sc_data_direction == SCSI_DATA_READ || 674 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
675 cmd->sc_data_direction == SCSI_DATA_WRITE) { 675 cmd->sc_data_direction == DMA_TO_DEVICE) {
676 if (unlikely(cmd->request_bufflen < 1)) { 676 if (unlikely(cmd->request_bufflen < 1)) {
677 printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n", 677 printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n",
678 ap->id, dev->devno); 678 ap->id, dev->devno);
@@ -1304,7 +1304,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
1304 struct scsi_cmnd *cmd = qc->scsicmd; 1304 struct scsi_cmnd *cmd = qc->scsicmd;
1305 struct ata_device *dev = qc->dev; 1305 struct ata_device *dev = qc->dev;
1306 int using_pio = (dev->flags & ATA_DFLAG_PIO); 1306 int using_pio = (dev->flags & ATA_DFLAG_PIO);
1307 int nodata = (cmd->sc_data_direction == SCSI_DATA_NONE); 1307 int nodata = (cmd->sc_data_direction == DMA_NONE);
1308 1308
1309 if (!using_pio) 1309 if (!using_pio)
1310 /* Check whether ATAPI DMA is safe */ 1310 /* Check whether ATAPI DMA is safe */
@@ -1316,7 +1316,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
1316 qc->complete_fn = atapi_qc_complete; 1316 qc->complete_fn = atapi_qc_complete;
1317 1317
1318 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1318 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1319 if (cmd->sc_data_direction == SCSI_DATA_WRITE) { 1319 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1320 qc->tf.flags |= ATA_TFLAG_WRITE; 1320 qc->tf.flags |= ATA_TFLAG_WRITE;
1321 DPRINTK("direction: write\n"); 1321 DPRINTK("direction: write\n");
1322 } 1322 }
@@ -1340,7 +1340,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
1340 1340
1341#ifdef ATAPI_ENABLE_DMADIR 1341#ifdef ATAPI_ENABLE_DMADIR
1342 /* some SATA bridges need us to indicate data xfer direction */ 1342 /* some SATA bridges need us to indicate data xfer direction */
1343 if (cmd->sc_data_direction != SCSI_DATA_WRITE) 1343 if (cmd->sc_data_direction != DMA_TO_DEVICE)
1344 qc->tf.feature |= ATAPI_DMADIR; 1344 qc->tf.feature |= ATAPI_DMADIR;
1345#endif 1345#endif
1346 } 1346 }
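The ata_to_sense_error() hunk above builds fixed-format SCSI sense data by hand; the byte offsets are the standard SPC layout rather than anything libata-specific. Annotated, the read-side bytes being set are:

	sb[0]  = 0x70;		/* response code: current error, fixed format   */
	sb[2]  = MEDIUM_ERROR;	/* sense key                                    */
	sb[7]  = 0x0A;		/* additional sense length: bytes 8..17 follow  */
	sb[12] = 0x11;		/* ASC  0x11: unrecovered read error            */
	sb[13] = 0x04;		/* ASCQ 0x04: auto-reallocation failed          */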
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
new file mode 100644
index 000000000000..2b3098591c41
--- /dev/null
+++ b/drivers/scsi/lpfc/Makefile
@@ -0,0 +1,32 @@
1#/*******************************************************************
2# * This file is part of the Emulex Linux Device Driver for *
3# * Enterprise Fibre Channel Host Bus Adapters. *
4# * Refer to the README file included with this package for *
5# * driver version and adapter support. *
6# * Copyright (C) 2004 Emulex Corporation. *
7# * www.emulex.com *
8# * *
9# * This program is free software; you can redistribute it and/or *
10# * modify it under the terms of the GNU General Public License *
11# * as published by the Free Software Foundation; either version 2 *
12# * of the License, or (at your option) any later version. *
13# * *
14# * This program is distributed in the hope that it will be useful, *
15# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17# * GNU General Public License for more details, a copy of which *
18# * can be found in the file COPYING included with this package. *
19# *******************************************************************/
20######################################################################
21
22#$Id: Makefile 1.58 2005/01/23 19:00:32EST sf_support Exp $
23
24ifneq ($(GCOV),)
25 EXTRA_CFLAGS += -fprofile-arcs -ftest-coverage
26 EXTRA_CFLAGS += -O0
27endif
28
29obj-$(CONFIG_SCSI_LPFC) := lpfc.o
30
31lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
32 lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o
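The GCOV block adds gcc's arc-profiling flags and forces -O0 so the coverage counters map cleanly back to source lines. The Makefile only tests that the variable is non-empty, so presumably (the exact invocation is an assumption, nothing in the patch states it) a coverage build is kicked off as "make GCOV=1 drivers/scsi/lpfc/".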
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
new file mode 100644
index 000000000000..d78247c63d04
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -0,0 +1,384 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc.h 1.167 2005/04/07 08:47:05EDT sf_support Exp $
23 */
24
25struct lpfc_sli2_slim;
26
27#define LPFC_MAX_TARGET 256 /* max targets supported */
28#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els req */
29#define LPFC_MAX_NS_RETRY 3 /* max NameServer retries */
30
31#define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */
32#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */
33#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */
34
35#define LPFC_CMD_PER_LUN 30 /* max outstanding cmds per lun */
36#define LPFC_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
37#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
38
39/* Define macros for 64 bit support */
40#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
41#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
42#define getPaddr(high, low) ((dma_addr_t)( \
43 (( (u64)(high)<<16 ) << 16)|( (u64)(low))))
44/* Provide maximum configuration definitions. */
45#define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */
46#define MAX_FCP_TARGET 256 /* max num of FCP targets supported */
47#define FC_MAX_ADPTMSG 64
48
49#define MAX_HBAEVT 32
50
51/* Provide DMA memory definitions the driver uses per port instance. */
52struct lpfc_dmabuf {
53 struct list_head list;
54 void *virt; /* virtual address ptr */
55 dma_addr_t phys; /* mapped address */
56};
57
58struct lpfc_dma_pool {
59 struct lpfc_dmabuf *elements;
60 uint32_t max_count;
61 uint32_t current_count;
62};
63
64/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
65#define MEM_PRI 0x100
66
67
68/****************************************************************************/
69/* Device VPD save area */
70/****************************************************************************/
71typedef struct lpfc_vpd {
72 uint32_t status; /* vpd status value */
73 uint32_t length; /* number of bytes actually returned */
74 struct {
75 uint32_t rsvd1; /* Revision numbers */
76 uint32_t biuRev;
77 uint32_t smRev;
78 uint32_t smFwRev;
79 uint32_t endecRev;
80 uint16_t rBit;
81 uint8_t fcphHigh;
82 uint8_t fcphLow;
83 uint8_t feaLevelHigh;
84 uint8_t feaLevelLow;
85 uint32_t postKernRev;
86 uint32_t opFwRev;
87 uint8_t opFwName[16];
88 uint32_t sli1FwRev;
89 uint8_t sli1FwName[16];
90 uint32_t sli2FwRev;
91 uint8_t sli2FwName[16];
92 } rev;
93} lpfc_vpd_t;
94
95struct lpfc_scsi_buf;
96
97
98/*
99 * lpfc stat counters
100 */
101struct lpfc_stats {
102 /* Statistics for ELS commands */
103 uint32_t elsLogiCol;
104 uint32_t elsRetryExceeded;
105 uint32_t elsXmitRetry;
106 uint32_t elsDelayRetry;
107 uint32_t elsRcvDrop;
108 uint32_t elsRcvFrame;
109 uint32_t elsRcvRSCN;
110 uint32_t elsRcvRNID;
111 uint32_t elsRcvFARP;
112 uint32_t elsRcvFARPR;
113 uint32_t elsRcvFLOGI;
114 uint32_t elsRcvPLOGI;
115 uint32_t elsRcvADISC;
116 uint32_t elsRcvPDISC;
117 uint32_t elsRcvFAN;
118 uint32_t elsRcvLOGO;
119 uint32_t elsRcvPRLO;
120 uint32_t elsRcvPRLI;
121 uint32_t elsRcvRRQ;
122 uint32_t elsXmitFLOGI;
123 uint32_t elsXmitPLOGI;
124 uint32_t elsXmitPRLI;
125 uint32_t elsXmitADISC;
126 uint32_t elsXmitLOGO;
127 uint32_t elsXmitSCR;
128 uint32_t elsXmitRNID;
129 uint32_t elsXmitFARP;
130 uint32_t elsXmitFARPR;
131 uint32_t elsXmitACC;
132 uint32_t elsXmitLSRJT;
133
134 uint32_t frameRcvBcast;
135 uint32_t frameRcvMulti;
136 uint32_t strayXmitCmpl;
137 uint32_t frameXmitDelay;
138 uint32_t xriCmdCmpl;
139 uint32_t xriStatErr;
140 uint32_t LinkUp;
141 uint32_t LinkDown;
142 uint32_t LinkMultiEvent;
143 uint32_t NoRcvBuf;
144 uint32_t fcpCmd;
145 uint32_t fcpCmpl;
146 uint32_t fcpRspErr;
147 uint32_t fcpRemoteStop;
148 uint32_t fcpPortRjt;
149 uint32_t fcpPortBusy;
150 uint32_t fcpError;
151 uint32_t fcpLocalErr;
152};
153
154enum sysfs_mbox_state {
155 SMBOX_IDLE,
156 SMBOX_WRITING,
157 SMBOX_READING
158};
159
160struct lpfc_sysfs_mbox {
161 enum sysfs_mbox_state state;
162 size_t offset;
163 struct lpfcMboxq * mbox;
164};
165
166struct lpfc_hba {
167 struct list_head hba_list; /* List of hbas/ports */
168 struct lpfc_sli sli;
169 struct lpfc_sli2_slim *slim2p;
170 dma_addr_t slim2p_mapping;
171 uint16_t pci_cfg_value;
172
173 uint32_t hba_state;
174
175#define LPFC_INIT_START 1 /* Initial state after board reset */
176#define LPFC_INIT_MBX_CMDS 2 /* Initialize HBA with mbox commands */
177#define LPFC_LINK_DOWN 3 /* HBA initialized, link is down */
178#define LPFC_LINK_UP 4 /* Link is up - issue READ_LA */
179#define LPFC_LOCAL_CFG_LINK 5 /* local NPORT Id configured */
180#define LPFC_FLOGI 6 /* FLOGI sent to Fabric */
181#define LPFC_FABRIC_CFG_LINK 7 /* Fabric assigned NPORT Id
182 configured */
183#define LPFC_NS_REG 8 /* Register with NameServer */
184#define LPFC_NS_QRY 9 /* Query NameServer for NPort ID list */
185#define LPFC_BUILD_DISC_LIST 10 /* Build ADISC and PLOGI lists for
186 * device authentication / discovery */
187#define LPFC_DISC_AUTH 11 /* Processing ADISC list */
188#define LPFC_CLEAR_LA 12 /* authentication cmplt - issue
189 CLEAR_LA */
190#define LPFC_HBA_READY 32
191#define LPFC_HBA_ERROR 0xff
192
193 uint8_t fc_linkspeed; /* Link speed after last READ_LA */
194
195 uint32_t fc_eventTag; /* event tag for link attention */
196 uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
197
 198	uint32_t num_disc_nodes;	/* in addition to hba_state */
199
200 struct timer_list fc_estabtmo; /* link establishment timer */
201 struct timer_list fc_disctmo; /* Discovery rescue timer */
202 struct timer_list fc_fdmitmo; /* fdmi timer */
203 /* These fields used to be binfo */
204 struct lpfc_name fc_nodename; /* fc nodename */
205 struct lpfc_name fc_portname; /* fc portname */
206 uint32_t fc_pref_DID; /* preferred D_ID */
207 uint8_t fc_pref_ALPA; /* preferred AL_PA */
208 uint32_t fc_edtov; /* E_D_TOV timer value */
209 uint32_t fc_arbtov; /* ARB_TOV timer value */
210 uint32_t fc_ratov; /* R_A_TOV timer value */
211 uint32_t fc_rttov; /* R_T_TOV timer value */
212 uint32_t fc_altov; /* AL_TOV timer value */
213 uint32_t fc_crtov; /* C_R_TOV timer value */
214 uint32_t fc_citov; /* C_I_TOV timer value */
215 uint32_t fc_myDID; /* fibre channel S_ID */
216 uint32_t fc_prevDID; /* previous fibre channel S_ID */
217
218 struct serv_parm fc_sparam; /* buffer for our service parameters */
219 struct serv_parm fc_fabparam; /* fabric service parameters buffer */
220 uint8_t alpa_map[128]; /* AL_PA map from READ_LA */
221
222 uint8_t fc_ns_retry; /* retries for fabric nameserver */
223 uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
224 uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
225 struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
226 uint32_t lmt;
227 uint32_t fc_flag; /* FC flags */
228#define FC_PT2PT 0x1 /* pt2pt with no fabric */
229#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
230#define FC_DISC_TMO 0x4 /* Discovery timer running */
231#define FC_PUBLIC_LOOP 0x8 /* Public loop */
232#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
233#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
234#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
235#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
236#define FC_FABRIC 0x100 /* We are fabric attached */
237#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
238#define FC_RSCN_DISCOVERY 0x400 /* Authenticate all devices after RSCN*/
239#define FC_LOADING 0x1000 /* HBA in process of loading drvr */
240#define FC_UNLOADING 0x2000 /* HBA in process of unloading drvr */
241#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
242#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
243#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
244
245 uint32_t fc_topology; /* link topology, from LINK INIT */
246
247 struct lpfc_stats fc_stat;
248
249 /* These are the head/tail pointers for the bind, plogi, adisc, unmap,
250 * and map lists. Their counters are immediately following.
251 */
252 struct list_head fc_plogi_list;
253 struct list_head fc_adisc_list;
254 struct list_head fc_reglogin_list;
255 struct list_head fc_prli_list;
256 struct list_head fc_nlpunmap_list;
257 struct list_head fc_nlpmap_list;
258 struct list_head fc_npr_list;
259 struct list_head fc_unused_list;
260
261 /* Keep counters for the number of entries in each list. */
262 uint16_t fc_plogi_cnt;
263 uint16_t fc_adisc_cnt;
264 uint16_t fc_reglogin_cnt;
265 uint16_t fc_prli_cnt;
266 uint16_t fc_unmap_cnt;
267 uint16_t fc_map_cnt;
268 uint16_t fc_npr_cnt;
269 uint16_t fc_unused_cnt;
270 struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
271 uint32_t nport_event_cnt; /* timestamp for nlplist entry */
272
273#define LPFC_RPI_HASH_SIZE 64
274#define LPFC_RPI_HASH_FUNC(x) ((x) & (0x3f))
275 /* ptr to active D_ID / RPIs */
276 struct lpfc_nodelist *fc_nlplookup[LPFC_RPI_HASH_SIZE];
277 uint32_t wwnn[2];
278 uint32_t RandomData[7];
279
280 uint32_t cfg_log_verbose;
281 uint32_t cfg_lun_queue_depth;
282 uint32_t cfg_nodev_tmo;
283 uint32_t cfg_hba_queue_depth;
284 uint32_t cfg_fcp_class;
285 uint32_t cfg_use_adisc;
286 uint32_t cfg_ack0;
287 uint32_t cfg_topology;
288 uint32_t cfg_scan_down;
289 uint32_t cfg_link_speed;
290 uint32_t cfg_cr_delay;
291 uint32_t cfg_cr_count;
292 uint32_t cfg_fdmi_on;
293 uint32_t cfg_fcp_bind_method;
294 uint32_t cfg_discovery_threads;
295 uint32_t cfg_max_luns;
296 uint32_t cfg_sg_seg_cnt;
297 uint32_t cfg_sg_dma_buf_size;
298
299 lpfc_vpd_t vpd; /* vital product data */
300
301 struct Scsi_Host *host;
302 struct pci_dev *pcidev;
303 struct list_head work_list;
304 uint32_t work_ha; /* Host Attention Bits for WT */
305 uint32_t work_ha_mask; /* HA Bits owned by WT */
306 uint32_t work_hs; /* HS stored in case of ERRAT */
307 uint32_t work_status[2]; /* Extra status from SLIM */
308 uint32_t work_hba_events; /* Timeout to be handled */
309#define WORKER_DISC_TMO 0x1 /* Discovery timeout */
310#define WORKER_ELS_TMO 0x2 /* ELS timeout */
311#define WORKER_MBOX_TMO 0x4 /* MBOX timeout */
312#define WORKER_FDMI_TMO 0x8 /* FDMI timeout */
313
314 wait_queue_head_t *work_wait;
315 struct task_struct *worker_thread;
316
317 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
318 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
319 void __iomem *slim_memmap_p; /* Kernel memory mapped address for
320 PCI BAR0 */
321 void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
322 PCI BAR2 */
323
324 void __iomem *MBslimaddr; /* virtual address for mbox cmds */
325 void __iomem *HAregaddr; /* virtual address for host attn reg */
326 void __iomem *CAregaddr; /* virtual address for chip attn reg */
327 void __iomem *HSregaddr; /* virtual address for host status
328 reg */
329 void __iomem *HCregaddr; /* virtual address for host ctl reg */
330
331 int brd_no; /* FC board number */
332
333 char SerialNumber[32]; /* adapter Serial Number */
334 char OptionROMVersion[32]; /* adapter BIOS / Fcode version */
335 char ModelDesc[256]; /* Model Description */
336 char ModelName[80]; /* Model Name */
337 char ProgramType[256]; /* Program Type */
338 char Port[20]; /* Port No */
339 uint8_t vpd_flag; /* VPD data flag */
340
341#define VPD_MODEL_DESC 0x1 /* valid vpd model description */
342#define VPD_MODEL_NAME 0x2 /* valid vpd model name */
343#define VPD_PROGRAM_TYPE 0x4 /* valid vpd program type */
344#define VPD_PORT 0x8 /* valid vpd port data */
345#define VPD_MASK 0xf /* mask for any vpd data */
346
347 struct timer_list els_tmofunc;
348
349 void *link_stats;
350
351 /*
352 * stat counters
353 */
354 uint64_t fc4InputRequests;
355 uint64_t fc4OutputRequests;
356 uint64_t fc4ControlRequests;
357
358 struct lpfc_sysfs_mbox sysfs_mbox;
359
360 /* fastpath list. */
361 struct list_head lpfc_scsi_buf_list;
362 uint32_t total_scsi_bufs;
363 struct list_head lpfc_iocb_list;
364 uint32_t total_iocbq_bufs;
365
366 /* pci_mem_pools */
367 struct pci_pool *lpfc_scsi_dma_buf_pool;
368 struct pci_pool *lpfc_mbuf_pool;
369 struct lpfc_dma_pool lpfc_mbuf_safety_pool;
370
371 mempool_t *mbox_mem_pool;
372 mempool_t *nlp_mem_pool;
373 struct list_head freebufList;
374 struct list_head ctrspbuflist;
375 struct list_head rnidrspbuflist;
376};
377
378
379struct rnidrsp {
380 void *buf;
381 uint32_t uniqueid;
382 struct list_head list;
383 uint32_t data;
384};
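The putPaddrLow/putPaddrHigh/getPaddr macros near the top of this header split a DMA address into the two 32-bit words the hardware wants and reassemble it on the way back; getPaddr shifts by 16 twice, a common idiom for sidestepping shift-count trouble when the expression could be evaluated in a 32-bit type. A small round-trip sketch, assuming a mapped lpfc_dmabuf:

	dma_addr_t phys = mp->phys;		/* mapped address of an lpfc_dmabuf */
	uint32_t addr_lo = putPaddrLow(phys);	/* bits 31..0                       */
	uint32_t addr_hi = putPaddrHigh(phys);	/* bits 63..32; 0 with 32-bit DMA   */
	/* ... addr_hi/addr_lo get programmed into a BDE or IOCB ... */
	dma_addr_t check = getPaddr(addr_hi, addr_lo);	/* == phys again */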
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
new file mode 100644
index 000000000000..1276bd77b995
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -0,0 +1,1291 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_attr.c 1.24 2005/04/13 11:58:55EDT sf_support Exp $
23 */
24
25#include <linux/ctype.h>
26#include <linux/pci.h>
27#include <linux/interrupt.h>
28
29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_tcq.h>
32#include <scsi/scsi_transport_fc.h>
33
34#include "lpfc_hw.h"
35#include "lpfc_sli.h"
36#include "lpfc_disc.h"
37#include "lpfc_scsi.h"
38#include "lpfc.h"
39#include "lpfc_logmsg.h"
40#include "lpfc_version.h"
41#include "lpfc_compat.h"
42#include "lpfc_crtn.h"
43
44
45static void
46lpfc_jedec_to_ascii(int incr, char hdw[])
47{
48 int i, j;
49 for (i = 0; i < 8; i++) {
50 j = (incr & 0xf);
51 if (j <= 9)
52 hdw[7 - i] = 0x30 + j;
53 else
54 hdw[7 - i] = 0x61 + j - 10;
55 incr = (incr >> 4);
56 }
57 hdw[8] = 0;
58 return;
59}
60
61static ssize_t
62lpfc_drvr_version_show(struct class_device *cdev, char *buf)
63{
64 return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
65}
66
67static ssize_t
68management_version_show(struct class_device *cdev, char *buf)
69{
70 return snprintf(buf, PAGE_SIZE, DFC_API_VERSION "\n");
71}
72
73static ssize_t
74lpfc_info_show(struct class_device *cdev, char *buf)
75{
76 struct Scsi_Host *host = class_to_shost(cdev);
77 return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
78}
79
80static ssize_t
81lpfc_serialnum_show(struct class_device *cdev, char *buf)
82{
83 struct Scsi_Host *host = class_to_shost(cdev);
84 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
85 return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
86}
87
88static ssize_t
89lpfc_modeldesc_show(struct class_device *cdev, char *buf)
90{
91 struct Scsi_Host *host = class_to_shost(cdev);
92 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
93 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
94}
95
96static ssize_t
97lpfc_modelname_show(struct class_device *cdev, char *buf)
98{
99 struct Scsi_Host *host = class_to_shost(cdev);
100 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
101 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
102}
103
104static ssize_t
105lpfc_programtype_show(struct class_device *cdev, char *buf)
106{
107 struct Scsi_Host *host = class_to_shost(cdev);
108 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
109 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
110}
111
112static ssize_t
113lpfc_portnum_show(struct class_device *cdev, char *buf)
114{
115 struct Scsi_Host *host = class_to_shost(cdev);
116 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
117 return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
118}
119
120static ssize_t
121lpfc_fwrev_show(struct class_device *cdev, char *buf)
122{
123 struct Scsi_Host *host = class_to_shost(cdev);
124 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
125 char fwrev[32];
126 lpfc_decode_firmware_rev(phba, fwrev, 1);
127 return snprintf(buf, PAGE_SIZE, "%s\n",fwrev);
128}
129
130static ssize_t
131lpfc_hdw_show(struct class_device *cdev, char *buf)
132{
133 char hdw[9];
134 struct Scsi_Host *host = class_to_shost(cdev);
135 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
136 lpfc_vpd_t *vp = &phba->vpd;
137 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
138 return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
139}
140static ssize_t
141lpfc_option_rom_version_show(struct class_device *cdev, char *buf)
142{
143 struct Scsi_Host *host = class_to_shost(cdev);
144 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
145 return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
146}
147static ssize_t
148lpfc_state_show(struct class_device *cdev, char *buf)
149{
150 struct Scsi_Host *host = class_to_shost(cdev);
151 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
152 int len = 0;
153 switch (phba->hba_state) {
154 case LPFC_INIT_START:
155 case LPFC_INIT_MBX_CMDS:
156 case LPFC_LINK_DOWN:
157 len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n");
158 break;
159 case LPFC_LINK_UP:
160 case LPFC_LOCAL_CFG_LINK:
161 len += snprintf(buf + len, PAGE_SIZE-len, "Link Up\n");
162 break;
163 case LPFC_FLOGI:
164 case LPFC_FABRIC_CFG_LINK:
165 case LPFC_NS_REG:
166 case LPFC_NS_QRY:
167 case LPFC_BUILD_DISC_LIST:
168 case LPFC_DISC_AUTH:
169 case LPFC_CLEAR_LA:
170 len += snprintf(buf + len, PAGE_SIZE-len,
171 "Link Up - Discovery\n");
172 break;
173 case LPFC_HBA_READY:
174 len += snprintf(buf + len, PAGE_SIZE-len,
175 "Link Up - Ready:\n");
176 if (phba->fc_topology == TOPOLOGY_LOOP) {
177 if (phba->fc_flag & FC_PUBLIC_LOOP)
178 len += snprintf(buf + len, PAGE_SIZE-len,
179 " Public Loop\n");
180 else
181 len += snprintf(buf + len, PAGE_SIZE-len,
182 " Private Loop\n");
183 } else {
184 if (phba->fc_flag & FC_FABRIC)
185 len += snprintf(buf + len, PAGE_SIZE-len,
186 " Fabric\n");
187 else
188 len += snprintf(buf + len, PAGE_SIZE-len,
189 " Point-2-Point\n");
190 }
191 }
192 return len;
193}
194
195static ssize_t
196lpfc_num_discovered_ports_show(struct class_device *cdev, char *buf)
197{
198 struct Scsi_Host *host = class_to_shost(cdev);
199 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
200 return snprintf(buf, PAGE_SIZE, "%d\n", phba->fc_map_cnt +
201 phba->fc_unmap_cnt);
202}
203
204
205static ssize_t
206lpfc_issue_lip (struct class_device *cdev, const char *buf, size_t count)
207{
208 struct Scsi_Host *host = class_to_shost(cdev);
209 struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
210 int val = 0;
211 LPFC_MBOXQ_t *pmboxq;
212 int mbxstatus = MBXERR_ERROR;
213
214 if ((sscanf(buf, "%d", &val) != 1) ||
215 (val != 1))
216 return -EINVAL;
217
218 if ((phba->fc_flag & FC_OFFLINE_MODE) ||
219 (phba->hba_state != LPFC_HBA_READY))
220 return -EPERM;
221
222 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
223
224 if (!pmboxq)
225 return -ENOMEM;
226
227 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
228 lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed);
229 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
230
231 if (mbxstatus == MBX_TIMEOUT)
232 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
233 else
234 mempool_free( pmboxq, phba->mbox_mem_pool);
235
236 if (mbxstatus == MBXERR_ERROR)
237 return -EIO;
238
239 return strlen(buf);
240}
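/*
 * Illustration, not part of the original patch: issue_lip is registered
 * below as a write-only class device attribute, so with the usual 2.6
 * scsi_host class layout (path assumed) a LIP is forced with:
 *
 *	echo 1 > /sys/class/scsi_host/host0/issue_lip
 *
 * Any value other than "1" gets -EINVAL, and the request is refused with
 * -EPERM while the board is offline or not yet in LPFC_HBA_READY state.
 */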
241
242static ssize_t
243lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
244{
245 struct Scsi_Host *host = class_to_shost(cdev);
246 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
247 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
248}
249
250static ssize_t
251lpfc_board_online_show(struct class_device *cdev, char *buf)
252{
253 struct Scsi_Host *host = class_to_shost(cdev);
254 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
255
256 if (!phba) return 0;
257
258 if (phba->fc_flag & FC_OFFLINE_MODE)
259 return snprintf(buf, PAGE_SIZE, "0\n");
260 else
261 return snprintf(buf, PAGE_SIZE, "1\n");
262}
263
264static ssize_t
265lpfc_board_online_store(struct class_device *cdev, const char *buf,
266 size_t count)
267{
268 struct Scsi_Host *host = class_to_shost(cdev);
269 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
270 struct completion online_compl;
271 int val=0, status=0;
272
273 if (sscanf(buf, "%d", &val) != 1)
274 return 0;
275
276 init_completion(&online_compl);
277
278 if (val)
279 lpfc_workq_post_event(phba, &status, &online_compl,
280 LPFC_EVT_ONLINE);
281 else
282 lpfc_workq_post_event(phba, &status, &online_compl,
283 LPFC_EVT_OFFLINE);
284 wait_for_completion(&online_compl);
285 if (!status)
286 return strlen(buf);
287 else
288 return 0;
289}
290
291
292#define lpfc_param_show(attr) \
293static ssize_t \
294lpfc_##attr##_show(struct class_device *cdev, char *buf) \
295{ \
296 struct Scsi_Host *host = class_to_shost(cdev);\
297 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];\
298 int val = 0;\
299 if (phba){\
300 val = phba->cfg_##attr;\
301 return snprintf(buf, PAGE_SIZE, "%d\n",\
302 phba->cfg_##attr);\
303 }\
304 return 0;\
305}
306
307#define lpfc_param_store(attr, minval, maxval) \
308static ssize_t \
309lpfc_##attr##_store(struct class_device *cdev, const char *buf, size_t count) \
310{ \
311 struct Scsi_Host *host = class_to_shost(cdev);\
312 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];\
313 int val = 0;\
314 if (!isdigit(buf[0]))\
315 return -EINVAL;\
316 if (sscanf(buf, "0x%x", &val) != 1)\
317 if (sscanf(buf, "%d", &val) != 1)\
318 return -EINVAL;\
319 if (phba){\
320 if (val >= minval && val <= maxval) {\
321 phba->cfg_##attr = val;\
322 return strlen(buf);\
323 }\
324 }\
325 return 0;\
326}
327
328#define LPFC_ATTR_R_NOINIT(name, desc) \
329extern int lpfc_##name;\
330module_param(lpfc_##name, int, 0);\
331MODULE_PARM_DESC(lpfc_##name, desc);\
332lpfc_param_show(name)\
333static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
334
335#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
336static int lpfc_##name = defval;\
337module_param(lpfc_##name, int, 0);\
338MODULE_PARM_DESC(lpfc_##name, desc);\
339lpfc_param_show(name)\
340static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
341
342#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
343static int lpfc_##name = defval;\
344module_param(lpfc_##name, int, 0);\
345MODULE_PARM_DESC(lpfc_##name, desc);\
346lpfc_param_show(name)\
347lpfc_param_store(name, minval, maxval)\
348static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
349 lpfc_##name##_show, lpfc_##name##_store)
350
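/*
 * Illustration, not part of the original patch: for a read-write parameter,
 * LPFC_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support") expands roughly to
 *
 *	static int lpfc_fdmi_on = 0;
 *	module_param(lpfc_fdmi_on, int, 0);
 *	MODULE_PARM_DESC(lpfc_fdmi_on, "Enable FDMI support");
 *	static ssize_t lpfc_fdmi_on_show(...)  { show phba->cfg_fdmi_on }
 *	static ssize_t lpfc_fdmi_on_store(...) { range-check 0..2, then set }
 *	static CLASS_DEVICE_ATTR(lpfc_fdmi_on, S_IRUGO | S_IWUSR,
 *				 lpfc_fdmi_on_show, lpfc_fdmi_on_store);
 *
 * Note that the LPFC_ATTR_R variant accepts minval/maxval too but never
 * uses them, since it generates no _store function.
 */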
351static CLASS_DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
352static CLASS_DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
353static CLASS_DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
354static CLASS_DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
355static CLASS_DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
356static CLASS_DEVICE_ATTR(portnum, S_IRUGO, lpfc_portnum_show, NULL);
357static CLASS_DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
358static CLASS_DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
359static CLASS_DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL);
360static CLASS_DEVICE_ATTR(option_rom_version, S_IRUGO,
361 lpfc_option_rom_version_show, NULL);
362static CLASS_DEVICE_ATTR(num_discovered_ports, S_IRUGO,
363 lpfc_num_discovered_ports_show, NULL);
364static CLASS_DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
365static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show,
366 NULL);
367static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
368 NULL);
369static CLASS_DEVICE_ATTR(issue_lip, S_IWUSR, NULL, lpfc_issue_lip);
370static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR,
371 lpfc_board_online_show, lpfc_board_online_store);
372
373
374/*
375# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
376# deluged with LOTS of information.
377# You can set a bit mask to record specific types of verbose messages:
378#
379# LOG_ELS 0x1 ELS events
380# LOG_DISCOVERY 0x2 Link discovery events
381# LOG_MBOX 0x4 Mailbox events
382# LOG_INIT 0x8 Initialization events
383# LOG_LINK_EVENT 0x10 Link events
384# LOG_IP 0x20 IP traffic history
385# LOG_FCP 0x40 FCP traffic history
386# LOG_NODE 0x80 Node table events
387# LOG_MISC 0x400 Miscellaneous events
388# LOG_SLI 0x800 SLI events
389# LOG_CHK_COND 0x1000 FCP Check condition flag
390# LOG_LIBDFC 0x2000 LIBDFC events
391# LOG_ALL_MSG 0xffff LOG all messages
392*/
393LPFC_ATTR_RW(log_verbose, 0x0, 0x0, 0xffff, "Verbose logging bit-mask");
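/*
 * Illustration, not part of the original patch: the mask can be given at
 * load time through the module parameter or changed at run time via the
 * generated sysfs attribute (sysfs path assumed):
 *
 *	modprobe lpfc lpfc_log_verbose=0x3	# ELS + discovery events
 *	echo 0x840 > /sys/class/scsi_host/host0/lpfc_log_verbose  # FCP + SLI
 *
 * The generated _store accepts "0x"-prefixed hex as well as plain decimal.
 */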
394
395/*
396# lun_queue_depth: This parameter is used to limit the number of outstanding
397# commands per FCP LUN. Value range is [1,128]. Default value is 30.
398*/
399LPFC_ATTR_R(lun_queue_depth, 30, 1, 128,
400 "Max number of FCP commands we can queue to a specific LUN");
401
402/*
403# Some disk devices have a "select ID" or "select Target" capability.
404# From a protocol standpoint "select ID" usually means select the
405# Fibre channel "ALPA". In the FC-AL Profile there is an "informative
406# annex" which contains a table that maps a "select ID" (a number
407# between 0 and 7F) to an ALPA. By default, for compatibility with
408# older drivers, the lpfc driver scans this table from low ALPA to high
409# ALPA.
410#
411# Turning on the scan-down variable (on = 1, off = 0) will
412# cause the lpfc driver to use an inverted table, effectively
413# scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
414#
415# (Note: This "select ID" functionality is a LOOP ONLY characteristic
416# and will not work across a fabric. Also this parameter will take
417# effect only in the case when ALPA map is not available.)
418*/
419LPFC_ATTR_R(scan_down, 1, 0, 1,
420 "Start scanning for devices from highest ALPA to lowest");
421
422/*
423# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
 424# until the timer expires. Value range is [0,255]. Default value is 30.
 425# NOTE: this MUST be less than the SCSI layer command timeout - 1.
426*/
427LPFC_ATTR_RW(nodev_tmo, 30, 0, 255,
428 "Seconds driver will hold I/O waiting for a device to come back");
429
430/*
431# lpfc_topology: link topology for init link
432# 0x0 = attempt loop mode then point-to-point
433# 0x02 = attempt point-to-point mode only
434# 0x04 = attempt loop mode only
435# 0x06 = attempt point-to-point mode then loop
436# Set point-to-point mode if you want to run as an N_Port.
437# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
438# Default value is 0.
439*/
440LPFC_ATTR_R(topology, 0, 0, 6, "Select Fibre Channel topology");
441
442/*
443# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
444# connection.
445# 0 = auto select (default)
446# 1 = 1 Gigabaud
447# 2 = 2 Gigabaud
448# 4 = 4 Gigabaud
449# Value range is [0,4]. Default value is 0.
450*/
451LPFC_ATTR_R(link_speed, 0, 0, 4, "Select link speed");
452
453/*
454# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
455# Value range is [2,3]. Default value is 3.
456*/
457LPFC_ATTR_R(fcp_class, 3, 2, 3,
458 "Select Fibre Channel class of service for FCP sequences");
459
460/*
461# lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
462# is [0,1]. Default value is 0.
463*/
464LPFC_ATTR_RW(use_adisc, 0, 0, 1,
465 "Use ADISC on rediscovery to authenticate FCP devices");
466
467/*
468# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
469# range is [0,1]. Default value is 0.
470*/
471LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
472
473/*
 474# lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
475# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
476# value [0,63]. cr_count can take value [0,255]. Default value of cr_delay
477# is 0. Default value of cr_count is 1. The cr_count feature is disabled if
478# cr_delay is set to 0.
479*/
480static int lpfc_cr_delay = 0;
481module_param(lpfc_cr_delay, int , 0);
482MODULE_PARM_DESC(lpfc_cr_delay, "A count of milliseconds after which an "
483 "interrupt response is generated");
484
485static int lpfc_cr_count = 1;
486module_param(lpfc_cr_count, int, 0);
487MODULE_PARM_DESC(lpfc_cr_count, "A count of I/O completions after which an "
488 "interrupt response is generated");
489
490/*
491# lpfc_fdmi_on: controls FDMI support.
492# 0 = no FDMI support
493# 1 = support FDMI without attribute of hostname
494# 2 = support FDMI with attribute of hostname
495# Value range [0,2]. Default value is 0.
496*/
497LPFC_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support");
498
499/*
500# Specifies the maximum number of ELS cmds we can have outstanding (for
501# discovery). Value range is [1,64]. Default value = 32.
502*/
503static int lpfc_discovery_threads = 32;
504module_param(lpfc_discovery_threads, int, 0);
505MODULE_PARM_DESC(lpfc_discovery_threads, "Maximum number of ELS commands "
506 "during discovery");
507
508/*
509# lpfc_max_luns: maximum number of LUNs per target driver will support
510# Value range is [1,32768]. Default value is 256.
511# NOTE: The SCSI layer will scan each target for this many luns
512*/
513LPFC_ATTR_R(max_luns, 256, 1, 32768,
514 "Maximum number of LUNs per target driver will support");
515
516struct class_device_attribute *lpfc_host_attrs[] = {
517 &class_device_attr_info,
518 &class_device_attr_serialnum,
519 &class_device_attr_modeldesc,
520 &class_device_attr_modelname,
521 &class_device_attr_programtype,
522 &class_device_attr_portnum,
523 &class_device_attr_fwrev,
524 &class_device_attr_hdw,
525 &class_device_attr_option_rom_version,
526 &class_device_attr_state,
527 &class_device_attr_num_discovered_ports,
528 &class_device_attr_lpfc_drvr_version,
529 &class_device_attr_lpfc_log_verbose,
530 &class_device_attr_lpfc_lun_queue_depth,
531 &class_device_attr_lpfc_nodev_tmo,
532 &class_device_attr_lpfc_fcp_class,
533 &class_device_attr_lpfc_use_adisc,
534 &class_device_attr_lpfc_ack0,
535 &class_device_attr_lpfc_topology,
536 &class_device_attr_lpfc_scan_down,
537 &class_device_attr_lpfc_link_speed,
538 &class_device_attr_lpfc_fdmi_on,
539 &class_device_attr_lpfc_max_luns,
540 &class_device_attr_nport_evt_cnt,
541 &class_device_attr_management_version,
542 &class_device_attr_issue_lip,
543 &class_device_attr_board_online,
544 NULL,
545};
546
547static ssize_t
548sysfs_ctlreg_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
549{
550 size_t buf_off;
551 struct Scsi_Host *host = class_to_shost(container_of(kobj,
552 struct class_device, kobj));
553 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
554
555 if ((off + count) > FF_REG_AREA_SIZE)
556 return -ERANGE;
557
558 if (count == 0) return 0;
559
560 if (off % 4 || count % 4 || (unsigned long)buf % 4)
561 return -EINVAL;
562
563 spin_lock_irq(phba->host->host_lock);
564
565 if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
566 spin_unlock_irq(phba->host->host_lock);
567 return -EPERM;
568 }
569
570 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t))
571 writel(*((uint32_t *)(buf + buf_off)),
572 phba->ctrl_regs_memmap_p + off + buf_off);
573
574 spin_unlock_irq(phba->host->host_lock);
575
576 return count;
577}
578
579static ssize_t
580sysfs_ctlreg_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
581{
582 size_t buf_off;
583 uint32_t * tmp_ptr;
584 struct Scsi_Host *host = class_to_shost(container_of(kobj,
585 struct class_device, kobj));
586 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
587
588 if (off > FF_REG_AREA_SIZE)
589 return -ERANGE;
590
591 if ((off + count) > FF_REG_AREA_SIZE)
592 count = FF_REG_AREA_SIZE - off;
593
594 if (count == 0) return 0;
595
596 if (off % 4 || count % 4 || (unsigned long)buf % 4)
597 return -EINVAL;
598
599 spin_lock_irq(phba->host->host_lock);
600
601 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
602 tmp_ptr = (uint32_t *)(buf + buf_off);
603 *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
604 }
605
606 spin_unlock_irq(phba->host->host_lock);
607
608 return count;
609}
610
611static struct bin_attribute sysfs_ctlreg_attr = {
612 .attr = {
613 .name = "ctlreg",
614 .mode = S_IRUSR | S_IWUSR,
615 .owner = THIS_MODULE,
616 },
617 .size = 256,
618 .read = sysfs_ctlreg_read,
619 .write = sysfs_ctlreg_write,
620};
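/*
 * Illustration, not part of the original patch: "ctlreg" exposes the first
 * 256 bytes of the BAR2 control-register window as a binary sysfs file.
 * Offset, count and the caller's buffer must all be 4-byte aligned, since
 * the handlers above move data with readl()/writel() one word at a time,
 * and writes are honoured only while the board is held offline
 * (FC_OFFLINE_MODE); otherwise they fail with -EPERM.
 */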
621
622
623static void
624sysfs_mbox_idle (struct lpfc_hba * phba)
625{
626 phba->sysfs_mbox.state = SMBOX_IDLE;
627 phba->sysfs_mbox.offset = 0;
628
629 if (phba->sysfs_mbox.mbox) {
630 mempool_free(phba->sysfs_mbox.mbox,
631 phba->mbox_mem_pool);
632 phba->sysfs_mbox.mbox = NULL;
633 }
634}
635
636static ssize_t
637sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
638{
639 struct Scsi_Host * host =
640 class_to_shost(container_of(kobj, struct class_device, kobj));
641 struct lpfc_hba * phba = (struct lpfc_hba*)host->hostdata[0];
642 struct lpfcMboxq * mbox = NULL;
643
644 if ((count + off) > MAILBOX_CMD_SIZE)
645 return -ERANGE;
646
647 if (off % 4 || count % 4 || (unsigned long)buf % 4)
648 return -EINVAL;
649
650 if (count == 0)
651 return 0;
652
653 if (off == 0) {
654 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
655 if (!mbox)
656 return -ENOMEM;
657
658 }
659
660 spin_lock_irq(host->host_lock);
661
662 if (off == 0) {
663 if (phba->sysfs_mbox.mbox)
664 mempool_free(mbox, phba->mbox_mem_pool);
665 else
666 phba->sysfs_mbox.mbox = mbox;
667 phba->sysfs_mbox.state = SMBOX_WRITING;
668 } else {
669 if (phba->sysfs_mbox.state != SMBOX_WRITING ||
670 phba->sysfs_mbox.offset != off ||
671 phba->sysfs_mbox.mbox == NULL ) {
672 sysfs_mbox_idle(phba);
673 spin_unlock_irq(host->host_lock);
674 return -EINVAL;
675 }
676 }
677
678 memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off,
679 buf, count);
680
681 phba->sysfs_mbox.offset = off + count;
682
683 spin_unlock_irq(host->host_lock);
684
685 return count;
686}
687
688static ssize_t
689sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
690{
691 struct Scsi_Host *host =
692 class_to_shost(container_of(kobj, struct class_device,
693 kobj));
694 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
695 int rc;
696
697 if (off > sizeof(MAILBOX_t))
698 return -ERANGE;
699
700 if ((count + off) > sizeof(MAILBOX_t))
701 count = sizeof(MAILBOX_t) - off;
702
703 if (off % 4 || count % 4 || (unsigned long)buf % 4)
704 return -EINVAL;
705
706 if (off && count == 0)
707 return 0;
708
709 spin_lock_irq(phba->host->host_lock);
710
711 if (off == 0 &&
712 phba->sysfs_mbox.state == SMBOX_WRITING &&
713 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
714
715 switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
716 /* Offline only */
717 case MBX_WRITE_NV:
718 case MBX_INIT_LINK:
719 case MBX_DOWN_LINK:
720 case MBX_CONFIG_LINK:
721 case MBX_CONFIG_RING:
722 case MBX_RESET_RING:
723 case MBX_UNREG_LOGIN:
724 case MBX_CLEAR_LA:
725 case MBX_DUMP_CONTEXT:
726 case MBX_RUN_DIAGS:
727 case MBX_RESTART:
728 case MBX_FLASH_WR_ULA:
729 case MBX_SET_MASK:
730 case MBX_SET_SLIM:
731 case MBX_SET_DEBUG:
732 if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
733 printk(KERN_WARNING "mbox_read:Command 0x%x "
734 "is illegal in on-line state\n",
735 phba->sysfs_mbox.mbox->mb.mbxCommand);
736 sysfs_mbox_idle(phba);
737 spin_unlock_irq(phba->host->host_lock);
738 return -EPERM;
739 }
740 case MBX_LOAD_SM:
741 case MBX_READ_NV:
742 case MBX_READ_CONFIG:
743 case MBX_READ_RCONFIG:
744 case MBX_READ_STATUS:
745 case MBX_READ_XRI:
746 case MBX_READ_REV:
747 case MBX_READ_LNK_STAT:
748 case MBX_DUMP_MEMORY:
749 case MBX_DOWN_LOAD:
750 case MBX_UPDATE_CFG:
751 case MBX_LOAD_AREA:
752 case MBX_LOAD_EXP_ROM:
753 break;
754 case MBX_READ_SPARM64:
755 case MBX_READ_LA:
756 case MBX_READ_LA64:
757 case MBX_REG_LOGIN:
758 case MBX_REG_LOGIN64:
759 case MBX_CONFIG_PORT:
760 case MBX_RUN_BIU_DIAG:
761 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
762 phba->sysfs_mbox.mbox->mb.mbxCommand);
763 sysfs_mbox_idle(phba);
764 spin_unlock_irq(phba->host->host_lock);
765 return -EPERM;
766 default:
767 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
768 phba->sysfs_mbox.mbox->mb.mbxCommand);
769 sysfs_mbox_idle(phba);
770 spin_unlock_irq(phba->host->host_lock);
771 return -EPERM;
772 }
773
774 if ((phba->fc_flag & FC_OFFLINE_MODE) ||
775 (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
776
777 spin_unlock_irq(phba->host->host_lock);
778 rc = lpfc_sli_issue_mbox (phba,
779 phba->sysfs_mbox.mbox,
780 MBX_POLL);
781 spin_lock_irq(phba->host->host_lock);
782
783 } else {
784 spin_unlock_irq(phba->host->host_lock);
785 rc = lpfc_sli_issue_mbox_wait (phba,
786 phba->sysfs_mbox.mbox,
787 phba->fc_ratov * 2);
788 spin_lock_irq(phba->host->host_lock);
789 }
790
791 if (rc != MBX_SUCCESS) {
792 sysfs_mbox_idle(phba);
793 spin_unlock_irq(host->host_lock);
794 return -ENODEV;
795 }
796 phba->sysfs_mbox.state = SMBOX_READING;
797 }
798 else if (phba->sysfs_mbox.offset != off ||
799 phba->sysfs_mbox.state != SMBOX_READING) {
800 printk(KERN_WARNING "mbox_read: Bad State\n");
801 sysfs_mbox_idle(phba);
802 spin_unlock_irq(host->host_lock);
803 return -EINVAL;
804 }
805
806 memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
807
808 phba->sysfs_mbox.offset = off + count;
809
810 if (phba->sysfs_mbox.offset == sizeof(MAILBOX_t))
811 sysfs_mbox_idle(phba);
812
813 spin_unlock_irq(phba->host->host_lock);
814
815 return count;
816}
817
818static struct bin_attribute sysfs_mbox_attr = {
819 .attr = {
820 .name = "mbox",
821 .mode = S_IRUSR | S_IWUSR,
822 .owner = THIS_MODULE,
823 },
824 .size = sizeof(MAILBOX_t),
825 .read = sysfs_mbox_read,
826 .write = sysfs_mbox_write,
827};
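/*
 * Illustration, not part of the original patch: "mbox" implements the small
 * state machine declared as enum sysfs_mbox_state in lpfc.h. Userspace
 * writes a raw MAILBOX_t image starting at offset 0 (SMBOX_WRITING); the
 * first read back at offset 0 validates the command, issues it (polled when
 * offline, sleeping on completion otherwise) and moves to SMBOX_READING;
 * sequential reads then return the completed mailbox, and consuming the
 * last byte (offset == sizeof(MAILBOX_t)) drops the state machine back to
 * SMBOX_IDLE and frees the mailbox buffer.
 */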
828
829int
830lpfc_alloc_sysfs_attr(struct lpfc_hba *phba)
831{
832 struct Scsi_Host *host = phba->host;
833 int error;
834
835 error = sysfs_create_bin_file(&host->shost_classdev.kobj,
836 &sysfs_ctlreg_attr);
837 if (error)
838 goto out;
839
840 error = sysfs_create_bin_file(&host->shost_classdev.kobj,
841 &sysfs_mbox_attr);
842 if (error)
843 goto out_remove_ctlreg_attr;
844
845 return 0;
846out_remove_ctlreg_attr:
847 sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr);
848out:
849 return error;
850}
851
852void
853lpfc_free_sysfs_attr(struct lpfc_hba *phba)
854{
855 struct Scsi_Host *host = phba->host;
856
857 sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_mbox_attr);
858 sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr);
859}
860
861
862/*
863 * Dynamic FC Host Attributes Support
864 */
865
866static void
867lpfc_get_host_port_id(struct Scsi_Host *shost)
868{
869 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
870 /* note: fc_myDID already in cpu endianness */
871 fc_host_port_id(shost) = phba->fc_myDID;
872}
873
874static void
875lpfc_get_host_port_type(struct Scsi_Host *shost)
876{
877 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
878
879 spin_lock_irq(shost->host_lock);
880
881 if (phba->hba_state == LPFC_HBA_READY) {
882 if (phba->fc_topology == TOPOLOGY_LOOP) {
883 if (phba->fc_flag & FC_PUBLIC_LOOP)
884 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
885 else
886 fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
887 } else {
888 if (phba->fc_flag & FC_FABRIC)
889 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
890 else
891 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
892 }
893 } else
894 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
895
896 spin_unlock_irq(shost->host_lock);
897}
898
899static void
900lpfc_get_host_port_state(struct Scsi_Host *shost)
901{
902 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
903
904 spin_lock_irq(shost->host_lock);
905
906 if (phba->fc_flag & FC_OFFLINE_MODE)
907 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
908 else {
909 switch (phba->hba_state) {
910 case LPFC_INIT_START:
911 case LPFC_INIT_MBX_CMDS:
912 case LPFC_LINK_DOWN:
913 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
914 break;
915 case LPFC_LINK_UP:
916 case LPFC_LOCAL_CFG_LINK:
917 case LPFC_FLOGI:
918 case LPFC_FABRIC_CFG_LINK:
919 case LPFC_NS_REG:
920 case LPFC_NS_QRY:
921 case LPFC_BUILD_DISC_LIST:
922 case LPFC_DISC_AUTH:
923 case LPFC_CLEAR_LA:
924 case LPFC_HBA_READY:
925			/* Link is up; beyond this point, port_type reports the state */
926 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
927 break;
928 case LPFC_HBA_ERROR:
929 fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
930 break;
931 default:
932 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
933 break;
934 }
935 }
936
937 spin_unlock_irq(shost->host_lock);
938}
939
940static void
941lpfc_get_host_speed(struct Scsi_Host *shost)
942{
943 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
944
945 spin_lock_irq(shost->host_lock);
946
947 if (phba->hba_state == LPFC_HBA_READY) {
948 switch(phba->fc_linkspeed) {
949 case LA_1GHZ_LINK:
950 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
951 break;
952 case LA_2GHZ_LINK:
953 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
954 break;
955 case LA_4GHZ_LINK:
956 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
957 break;
958 default:
959 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
960 break;
961 }
962 }
963
964 spin_unlock_irq(shost->host_lock);
965}
966
967static void
968lpfc_get_host_fabric_name (struct Scsi_Host *shost)
969{
970 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
971 u64 nodename;
972
973 spin_lock_irq(shost->host_lock);
974
975 if ((phba->fc_flag & FC_FABRIC) ||
976 ((phba->fc_topology == TOPOLOGY_LOOP) &&
977 (phba->fc_flag & FC_PUBLIC_LOOP)))
978 memcpy(&nodename, &phba->fc_fabparam.nodeName, sizeof(u64));
979 else
980		/* the fabric name is the local node name if there is no F/FL_Port */
981 memcpy(&nodename, &phba->fc_nodename, sizeof(u64));
982
983 spin_unlock_irq(shost->host_lock);
984
985 fc_host_fabric_name(shost) = be64_to_cpu(nodename);
986}
987
988
989static struct fc_host_statistics *
990lpfc_get_stats(struct Scsi_Host *shost)
991{
992 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
993 struct lpfc_sli *psli = &phba->sli;
994 struct fc_host_statistics *hs =
995 (struct fc_host_statistics *)phba->link_stats;
996 LPFC_MBOXQ_t *pmboxq;
997 MAILBOX_t *pmb;
998 int rc=0;
999
1000 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1001 if (!pmboxq)
1002 return NULL;
1003 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1004
1005 pmb = &pmboxq->mb;
1006 pmb->mbxCommand = MBX_READ_STATUS;
1007 pmb->mbxOwner = OWN_HOST;
1008 pmboxq->context1 = NULL;
1009
1010 if ((phba->fc_flag & FC_OFFLINE_MODE) ||
1011 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))){
1012 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
1013 } else
1014 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1015
1016 if (rc != MBX_SUCCESS) {
1017 if (pmboxq) {
1018 if (rc == MBX_TIMEOUT)
1019 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1020 else
1021 mempool_free( pmboxq, phba->mbox_mem_pool);
1022 }
1023 return NULL;
1024 }
1025
1026 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
1027 hs->tx_words = (pmb->un.varRdStatus.xmitByteCnt * 256);
1028 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
1029 hs->rx_words = (pmb->un.varRdStatus.rcvByteCnt * 256);
1030
1031 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1032 pmb->mbxCommand = MBX_READ_LNK_STAT;
1033 pmb->mbxOwner = OWN_HOST;
1034 pmboxq->context1 = NULL;
1035
1036 if ((phba->fc_flag & FC_OFFLINE_MODE) ||
1037 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) {
1038 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
1039 } else
1040 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1041
1042 if (rc != MBX_SUCCESS) {
1043 if (pmboxq) {
1044 if (rc == MBX_TIMEOUT)
1045 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1046 else
1047 mempool_free( pmboxq, phba->mbox_mem_pool);
1048 }
1049 return NULL;
1050 }
1051
1052 hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
1053 hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
1054 hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
1055 hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
1056 hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
1057 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
1058 hs->error_frames = pmb->un.varRdLnk.crcCnt;
1059
1060 if (phba->fc_topology == TOPOLOGY_LOOP) {
1061 hs->lip_count = (phba->fc_eventTag >> 1);
1062 hs->nos_count = -1;
1063 } else {
1064 hs->lip_count = -1;
1065 hs->nos_count = (phba->fc_eventTag >> 1);
1066 }
1067
1068 hs->dumped_frames = -1;
1069
1070/* FIX ME */
1071 /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/
1072
1073 return hs;
1074}
1075
1076
1077/*
1078 * The LPFC driver treats link-down events as target loss events, so there
1079 * are no sysfs handlers for link_down_tmo.
1080 */
1081static void
1082lpfc_get_starget_port_id(struct scsi_target *starget)
1083{
1084 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1085 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
1086 uint32_t did = -1;
1087 struct lpfc_nodelist *ndlp = NULL;
1088
1089 spin_lock_irq(shost->host_lock);
1090 /* Search the mapped list for this target ID */
1091 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
1092 if (starget->id == ndlp->nlp_sid) {
1093 did = ndlp->nlp_DID;
1094 break;
1095 }
1096 }
1097 spin_unlock_irq(shost->host_lock);
1098
1099 fc_starget_port_id(starget) = did;
1100}
1101
1102static void
1103lpfc_get_starget_node_name(struct scsi_target *starget)
1104{
1105 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1106 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
1107 uint64_t node_name = 0;
1108 struct lpfc_nodelist *ndlp = NULL;
1109
1110 spin_lock_irq(shost->host_lock);
1111 /* Search the mapped list for this target ID */
1112 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
1113 if (starget->id == ndlp->nlp_sid) {
1114 memcpy(&node_name, &ndlp->nlp_nodename,
1115 sizeof(struct lpfc_name));
1116 break;
1117 }
1118 }
1119 spin_unlock_irq(shost->host_lock);
1120
1121 fc_starget_node_name(starget) = be64_to_cpu(node_name);
1122}
1123
1124static void
1125lpfc_get_starget_port_name(struct scsi_target *starget)
1126{
1127 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1128 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
1129 uint64_t port_name = 0;
1130 struct lpfc_nodelist *ndlp = NULL;
1131
1132 spin_lock_irq(shost->host_lock);
1133 /* Search the mapped list for this target ID */
1134 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
1135 if (starget->id == ndlp->nlp_sid) {
1136 memcpy(&port_name, &ndlp->nlp_portname,
1137 sizeof(struct lpfc_name));
1138 break;
1139 }
1140 }
1141 spin_unlock_irq(shost->host_lock);
1142
1143 fc_starget_port_name(starget) = be64_to_cpu(port_name);
1144}
1145
1146static void
1147lpfc_get_rport_loss_tmo(struct fc_rport *rport)
1148{
1149 /*
1150 * Return the driver's global value for device loss timeout plus
1151 * five seconds to allow the driver's nodev timer to run.
1152 */
1153 rport->dev_loss_tmo = lpfc_nodev_tmo + 5;
1154}
1155
1156static void
1157lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1158{
1159 /*
1160 * The driver doesn't have a per-target timeout setting. Set
1161	 * this value globally. lpfc_nodev_tmo should be greater than 0.
1162 */
1163 if (timeout)
1164 lpfc_nodev_tmo = timeout;
1165 else
1166 lpfc_nodev_tmo = 1;
1167 rport->dev_loss_tmo = lpfc_nodev_tmo + 5;
1168}
1169
1170
1171#define lpfc_rport_show_function(field, format_string, sz, cast) \
1172static ssize_t \
1173lpfc_show_rport_##field (struct class_device *cdev, char *buf) \
1174{ \
1175 struct fc_rport *rport = transport_class_to_rport(cdev); \
1176 struct lpfc_rport_data *rdata = rport->hostdata; \
1177 return snprintf(buf, sz, format_string, \
1178 (rdata->target) ? cast rdata->target->field : 0); \
1179}
1180
1181#define lpfc_rport_rd_attr(field, format_string, sz) \
1182 lpfc_rport_show_function(field, format_string, sz, ) \
1183static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
1184
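/*
 * For reference (illustration only, with a hypothetical field name):
 * lpfc_rport_rd_attr(queue_depth, "%d\n", 8) would expand to roughly
 *
 *	static ssize_t
 *	lpfc_show_rport_queue_depth(struct class_device *cdev, char *buf)
 *	{
 *		struct fc_rport *rport = transport_class_to_rport(cdev);
 *		struct lpfc_rport_data *rdata = rport->hostdata;
 *		return snprintf(buf, 8, "%d\n",
 *			(rdata->target) ? rdata->target->queue_depth : 0);
 *	}
 *	static FC_RPORT_ATTR(queue_depth, S_IRUGO,
 *			     lpfc_show_rport_queue_depth, NULL);
 */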
1185
1186struct fc_function_template lpfc_transport_functions = {
1187 /* fixed attributes the driver supports */
1188 .show_host_node_name = 1,
1189 .show_host_port_name = 1,
1190 .show_host_supported_classes = 1,
1191 .show_host_supported_fc4s = 1,
1192 .show_host_symbolic_name = 1,
1193 .show_host_supported_speeds = 1,
1194 .show_host_maxframe_size = 1,
1195
1196 /* dynamic attributes the driver supports */
1197 .get_host_port_id = lpfc_get_host_port_id,
1198 .show_host_port_id = 1,
1199
1200 .get_host_port_type = lpfc_get_host_port_type,
1201 .show_host_port_type = 1,
1202
1203 .get_host_port_state = lpfc_get_host_port_state,
1204 .show_host_port_state = 1,
1205
1206 /* active_fc4s is shown but doesn't change (thus no get function) */
1207 .show_host_active_fc4s = 1,
1208
1209 .get_host_speed = lpfc_get_host_speed,
1210 .show_host_speed = 1,
1211
1212 .get_host_fabric_name = lpfc_get_host_fabric_name,
1213 .show_host_fabric_name = 1,
1214
1215 /*
1216	 * The LPFC driver treats link-down events as target loss events,
1217	 * so there are no sysfs handlers for link_down_tmo.
1218 */
1219
1220 .get_fc_host_stats = lpfc_get_stats,
1221
1222 /* the LPFC driver doesn't support resetting stats yet */
1223
1224 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
1225 .show_rport_maxframe_size = 1,
1226 .show_rport_supported_classes = 1,
1227
1228 .get_rport_dev_loss_tmo = lpfc_get_rport_loss_tmo,
1229 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
1230 .show_rport_dev_loss_tmo = 1,
1231
1232 .get_starget_port_id = lpfc_get_starget_port_id,
1233 .show_starget_port_id = 1,
1234
1235 .get_starget_node_name = lpfc_get_starget_node_name,
1236 .show_starget_node_name = 1,
1237
1238 .get_starget_port_name = lpfc_get_starget_port_name,
1239 .show_starget_port_name = 1,
1240};
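/*
 * A condensed registration sketch (the real hookup lives elsewhere in the
 * driver, and fc_attach_transport() is called only once at module load):
 * the template above is bound to the FC transport class and the resulting
 * scsi_transport_template is installed on the Scsi_Host before
 * scsi_add_host() runs.
 */
static struct scsi_transport_template *lpfc_transport_template;

static int lpfc_attach_transport_sketch(struct Scsi_Host *host)
{
	lpfc_transport_template = fc_attach_transport(&lpfc_transport_functions);
	if (!lpfc_transport_template)
		return -ENODEV;
	host->transportt = lpfc_transport_template;
	return 0;
}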
1241
1242void
1243lpfc_get_cfgparam(struct lpfc_hba *phba)
1244{
1245 phba->cfg_log_verbose = lpfc_log_verbose;
1246 phba->cfg_cr_delay = lpfc_cr_delay;
1247 phba->cfg_cr_count = lpfc_cr_count;
1248 phba->cfg_lun_queue_depth = lpfc_lun_queue_depth;
1249 phba->cfg_fcp_class = lpfc_fcp_class;
1250 phba->cfg_use_adisc = lpfc_use_adisc;
1251 phba->cfg_ack0 = lpfc_ack0;
1252 phba->cfg_topology = lpfc_topology;
1253 phba->cfg_scan_down = lpfc_scan_down;
1254 phba->cfg_nodev_tmo = lpfc_nodev_tmo;
1255 phba->cfg_link_speed = lpfc_link_speed;
1256 phba->cfg_fdmi_on = lpfc_fdmi_on;
1257 phba->cfg_discovery_threads = lpfc_discovery_threads;
1258 phba->cfg_max_luns = lpfc_max_luns;
1259
1260 /*
1261 * The total number of segments is the configuration value plus 2
1262	 * since the IOCB needs a command and a response BDE.
1263 */
1264 phba->cfg_sg_seg_cnt = LPFC_SG_SEG_CNT + 2;
1265
1266 /*
1267	 * Since sg_tablesize is a module parameter, the sg_dma_buf_size used
1268	 * to create the sg_dma_buf_pool must be calculated dynamically.
1269 */
1270 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
1271 sizeof(struct fcp_rsp) +
1272 (phba->cfg_sg_seg_cnt * sizeof(struct ulp_bde64));
1273
1274 switch (phba->pcidev->device) {
1275 case PCI_DEVICE_ID_LP101:
1276 case PCI_DEVICE_ID_BSMB:
1277 case PCI_DEVICE_ID_ZSMB:
1278 phba->cfg_hba_queue_depth = LPFC_LP101_HBA_Q_DEPTH;
1279 break;
1280 case PCI_DEVICE_ID_RFLY:
1281 case PCI_DEVICE_ID_PFLY:
1282 case PCI_DEVICE_ID_BMID:
1283 case PCI_DEVICE_ID_ZMID:
1284 case PCI_DEVICE_ID_TFLY:
1285 phba->cfg_hba_queue_depth = LPFC_LC_HBA_Q_DEPTH;
1286 break;
1287 default:
1288 phba->cfg_hba_queue_depth = LPFC_DFT_HBA_Q_DEPTH;
1289 }
1290 return;
1291}
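/*
 * Worked example for the sizing above (the constant value is
 * hypothetical): with LPFC_SG_SEG_CNT == 64, cfg_sg_seg_cnt becomes 66 --
 * one extra BDE each for the FCP command and the FCP response.  A struct
 * ulp_bde64 is 12 bytes (two address words plus the size/flags word), so
 * the BDE list alone contributes 66 * 12 = 792 bytes to
 * cfg_sg_dma_buf_size, on top of the fcp_cmnd and fcp_rsp payloads.
 */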
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h
new file mode 100644
index 000000000000..646649fe962a
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_compat.h
@@ -0,0 +1,97 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_compat.h 1.32 2005/01/25 17:51:45EST sf_support Exp $
23 *
24 * This file provides macros to aid compilation in the Linux 2.6 kernel
25 * over various platform architectures.
26 */
27
28/*******************************************************************
29Note: the HBA's SLI memory contains little-endian longwords (LW).
30Thus, to access it from a little-endian host,
31memcpy_toio() and memcpy_fromio() can be used.
32On a big-endian host, however, copy 4 bytes at a time
33using writel() and readl().
34 *******************************************************************/
35
36#if __BIG_ENDIAN
37
38static inline void
39lpfc_memcpy_to_slim(void __iomem *dest, void *src, unsigned int bytes)
40{
41 uint32_t __iomem *dest32;
42 uint32_t *src32;
43 unsigned int four_bytes;
44
45
46 dest32 = (uint32_t __iomem *) dest;
47 src32 = (uint32_t *) src;
48
49 /* write input bytes, 4 bytes at a time */
50 for (four_bytes = bytes /4; four_bytes > 0; four_bytes--) {
51 writel( *src32, dest32);
52 readl(dest32); /* flush */
53 dest32++;
54 src32++;
55 }
56
57 return;
58}
59
60static inline void
61lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
62{
63 uint32_t *dest32;
64 uint32_t __iomem *src32;
65 unsigned int four_bytes;
66
67
68 dest32 = (uint32_t *) dest;
69 src32 = (uint32_t __iomem *) src;
70
71 /* read input bytes, 4 bytes at a time */
72 for (four_bytes = bytes /4; four_bytes > 0; four_bytes--) {
73 *dest32 = readl( src32);
74 dest32++;
75 src32++;
76 }
77
78 return;
79}
80
81#else
82
83static inline void
84lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
85{
86 /* actually returns 1 byte past dest */
87 memcpy_toio( dest, src, bytes);
88}
89
90static inline void
91lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
92{
93 /* actually returns 1 byte past dest */
94 memcpy_fromio( dest, src, bytes);
95}
96
97#endif /* __BIG_ENDIAN */
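/*
 * Usage sketch (not in the original header), assuming phba->MBslimaddr is
 * the driver's mapped SLIM base and that lengths are multiples of 4: both
 * variants expose the same interface, so callers stay endian-agnostic.
 */
static inline void
lpfc_slim_copy_sketch(struct lpfc_hba *phba, MAILBOX_t *mb)
{
	/* stage a mailbox image into SLIM ... */
	lpfc_memcpy_to_slim(phba->MBslimaddr, mb, sizeof(MAILBOX_t));
	/* ... and later pull the completed image back out */
	lpfc_memcpy_from_slim(mb, phba->MBslimaddr, sizeof(MAILBOX_t));
}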
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
new file mode 100644
index 000000000000..c504477a6a5d
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -0,0 +1,216 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_crtn.h 1.166 2005/04/07 08:46:47EDT sf_support Exp $
23 */
24
25void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
26void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
27int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
28 struct lpfc_dmabuf *mp);
29void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
30void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
31int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
32void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
33void lpfc_set_slim(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
34int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *,
35 uint32_t);
36void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
37void lpfc_unreg_did(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
38void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
39
40
41int lpfc_linkdown(struct lpfc_hba *);
42void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
43
44void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
45void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
46void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
47void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
48void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
49int lpfc_nlp_plogi(struct lpfc_hba *, struct lpfc_nodelist *);
50int lpfc_nlp_adisc(struct lpfc_hba *, struct lpfc_nodelist *);
51int lpfc_nlp_unmapped(struct lpfc_hba *, struct lpfc_nodelist *);
52int lpfc_nlp_list(struct lpfc_hba *, struct lpfc_nodelist *, int);
53void lpfc_set_disctmo(struct lpfc_hba *);
54int lpfc_can_disctmo(struct lpfc_hba *);
55int lpfc_unreg_rpi(struct lpfc_hba *, struct lpfc_nodelist *);
56int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
57 struct lpfc_iocbq *, struct lpfc_nodelist *);
58int lpfc_nlp_remove(struct lpfc_hba *, struct lpfc_nodelist *);
59void lpfc_nlp_init(struct lpfc_hba *, struct lpfc_nodelist *, uint32_t);
60struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_hba *, uint32_t);
61struct lpfc_nodelist *lpfc_setup_rscn_node(struct lpfc_hba *, uint32_t);
62void lpfc_disc_list_loopmap(struct lpfc_hba *);
63void lpfc_disc_start(struct lpfc_hba *);
64void lpfc_disc_flush_list(struct lpfc_hba *);
65void lpfc_disc_timeout(unsigned long);
66void lpfc_scan_timeout(unsigned long);
67
68struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi);
69struct lpfc_nodelist *lpfc_findnode_remove_rpi(struct lpfc_hba * phba,
70 uint16_t rpi);
71void lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
72 uint16_t rpi);
73
74int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
75int lpfc_do_work(void *);
76int lpfc_disc_state_machine(struct lpfc_hba *, struct lpfc_nodelist *, void *,
77 uint32_t);
78
79uint32_t lpfc_cmpl_prli_reglogin_issue(struct lpfc_hba *,
80 struct lpfc_nodelist *, void *,
81 uint32_t);
82uint32_t lpfc_cmpl_plogi_prli_issue(struct lpfc_hba *, struct lpfc_nodelist *,
83 void *, uint32_t);
84
85int lpfc_check_sparm(struct lpfc_hba *, struct lpfc_nodelist *,
86 struct serv_parm *, uint32_t);
87int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist * ndlp,
88 int);
89int lpfc_els_abort_flogi(struct lpfc_hba *);
90int lpfc_initial_flogi(struct lpfc_hba *);
91int lpfc_issue_els_plogi(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
92int lpfc_issue_els_prli(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
93int lpfc_issue_els_adisc(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
94int lpfc_issue_els_logo(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
95int lpfc_issue_els_scr(struct lpfc_hba *, uint32_t, uint8_t);
96int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
97int lpfc_els_rsp_acc(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *,
98 struct lpfc_nodelist *, LPFC_MBOXQ_t *, uint8_t);
99int lpfc_els_rsp_reject(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *,
100 struct lpfc_nodelist *);
101int lpfc_els_rsp_adisc_acc(struct lpfc_hba *, struct lpfc_iocbq *,
102 struct lpfc_nodelist *);
103int lpfc_els_rsp_prli_acc(struct lpfc_hba *, struct lpfc_iocbq *,
104 struct lpfc_nodelist *);
105void lpfc_els_retry_delay(unsigned long);
106void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
107void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
108 struct lpfc_iocbq *);
109int lpfc_els_handle_rscn(struct lpfc_hba *);
110int lpfc_els_flush_rscn(struct lpfc_hba *);
111int lpfc_rscn_payload_check(struct lpfc_hba *, uint32_t);
112void lpfc_els_flush_cmd(struct lpfc_hba *);
113int lpfc_els_disc_adisc(struct lpfc_hba *);
114int lpfc_els_disc_plogi(struct lpfc_hba *);
115void lpfc_els_timeout(unsigned long);
116void lpfc_els_timeout_handler(struct lpfc_hba *);
117
118void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
119 struct lpfc_iocbq *);
120int lpfc_ns_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int);
121int lpfc_fdmi_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int);
122void lpfc_fdmi_tmo(unsigned long);
123void lpfc_fdmi_tmo_handler(struct lpfc_hba *);
124
125int lpfc_config_port_prep(struct lpfc_hba *);
126int lpfc_config_port_post(struct lpfc_hba *);
127int lpfc_hba_down_prep(struct lpfc_hba *);
128void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
129int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int);
130void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
131uint8_t *lpfc_get_lpfchba_info(struct lpfc_hba *, uint8_t *);
132int lpfc_fcp_abort(struct lpfc_hba *, int, int, int);
133int lpfc_online(struct lpfc_hba *);
134int lpfc_offline(struct lpfc_hba *);
135
136
137int lpfc_sli_setup(struct lpfc_hba *);
138int lpfc_sli_queue_setup(struct lpfc_hba *);
139void lpfc_slim_access(struct lpfc_hba *);
140
141void lpfc_handle_eratt(struct lpfc_hba *);
142void lpfc_handle_latt(struct lpfc_hba *);
143irqreturn_t lpfc_intr_handler(int, void *, struct pt_regs *);
144
145void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
146void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
147void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
148void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
149LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
150
151int lpfc_mem_alloc(struct lpfc_hba *);
152void lpfc_mem_free(struct lpfc_hba *);
153
154int lpfc_sli_hba_setup(struct lpfc_hba *);
155int lpfc_sli_hba_down(struct lpfc_hba *);
156int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
157int lpfc_sli_handle_mb_event(struct lpfc_hba *);
158int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
159 struct lpfc_sli_ring *, uint32_t);
160void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
161int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
162 struct lpfc_iocbq *, uint32_t);
163void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
164int lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
165int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
166 struct lpfc_dmabuf *);
167struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
168 struct lpfc_sli_ring *,
169 dma_addr_t);
170int lpfc_sli_issue_abort_iotag32(struct lpfc_hba *, struct lpfc_sli_ring *,
171 struct lpfc_iocbq *);
172int lpfc_sli_sum_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
173 uint64_t, lpfc_ctx_cmd);
174int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
175 uint64_t, uint32_t, lpfc_ctx_cmd);
176
177void lpfc_mbox_timeout(unsigned long);
178void lpfc_mbox_timeout_handler(struct lpfc_hba *);
179void lpfc_map_fcp_cmnd_to_bpl(struct lpfc_hba *, struct lpfc_scsi_buf *);
180void lpfc_free_scsi_cmd(struct lpfc_scsi_buf *);
181uint32_t lpfc_os_timeout_transform(struct lpfc_hba *, uint32_t);
182
183struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order,
184 uint32_t did);
185
186int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
187 uint32_t timeout);
188
189int lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
190 struct lpfc_sli_ring * pring,
191 struct lpfc_iocbq * piocb,
192 uint32_t flag,
193 struct lpfc_iocbq * prspiocbq,
194 uint32_t timeout);
195void lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
196 struct lpfc_iocbq * queue1,
197 struct lpfc_iocbq * queue2);
198
199void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
200void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
201
202/* Function prototypes. */
203const char* lpfc_info(struct Scsi_Host *);
204void lpfc_get_cfgparam(struct lpfc_hba *);
205int lpfc_alloc_sysfs_attr(struct lpfc_hba *);
206void lpfc_free_sysfs_attr(struct lpfc_hba *);
207extern struct class_device_attribute *lpfc_host_attrs[];
208extern struct scsi_host_template lpfc_template;
209extern struct fc_function_template lpfc_transport_functions;
210
211void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp);
212
213#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
214#define HBA_EVENT_RSCN 5
215#define HBA_EVENT_LINK_UP 2
216#define HBA_EVENT_LINK_DOWN 3
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
new file mode 100644
index 000000000000..c40cb239c16d
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -0,0 +1,1237 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_ct.c 1.161 2005/04/13 11:59:01EDT sf_support Exp $
23 *
24 * Fibre Channel SCSI LAN Device Driver CT support
25 */
26
27#include <linux/blkdev.h>
28#include <linux/pci.h>
29#include <linux/interrupt.h>
30#include <linux/utsname.h>
31
32#include <scsi/scsi_device.h>
33#include <scsi/scsi_host.h>
34
35#include "lpfc_hw.h"
36#include "lpfc_sli.h"
37#include "lpfc_disc.h"
38#include "lpfc_scsi.h"
39#include "lpfc.h"
40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h"
42#include "lpfc_version.h"
43
44#define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver
45 * incapable of reporting */
46#define HBA_PORTSPEED_1GBIT 1 /* 1 GBit/sec */
47#define HBA_PORTSPEED_2GBIT 2 /* 2 GBit/sec */
48#define HBA_PORTSPEED_4GBIT 8 /* 4 GBit/sec */
49#define HBA_PORTSPEED_8GBIT 16 /* 8 GBit/sec */
50#define HBA_PORTSPEED_10GBIT 4 /* 10 GBit/sec */
51#define HBA_PORTSPEED_NOT_NEGOTIATED 5 /* Speed not established */
52
53#define FOURBYTES 4
54
55
56static char *lpfc_release_version = LPFC_DRIVER_VERSION;
57
58/*
59 * lpfc_ct_unsol_event
60 */
61void
62lpfc_ct_unsol_event(struct lpfc_hba * phba,
63 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocbq)
64{
65
66 struct lpfc_iocbq *next_piocbq;
67 struct lpfc_dmabuf *pmbuf = NULL;
68 struct lpfc_dmabuf *matp, *next_matp;
69 uint32_t ctx = 0, size = 0, cnt = 0;
70 IOCB_t *icmd = &piocbq->iocb;
71 IOCB_t *save_icmd = icmd;
72 int i, go_exit = 0;
73 struct list_head head;
74
75 if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
76 ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
77		/* Not enough posted buffers; try posting more buffers */
78 phba->fc_stat.NoRcvBuf++;
79 lpfc_post_buffer(phba, pring, 0, 1);
80 return;
81 }
82
83 /* If there are no BDEs associated with this IOCB,
84 * there is nothing to do.
85 */
86 if (icmd->ulpBdeCount == 0)
87 return;
88
89 INIT_LIST_HEAD(&head);
90 list_add_tail(&head, &piocbq->list);
91
92 list_for_each_entry_safe(piocbq, next_piocbq, &head, list) {
93 icmd = &piocbq->iocb;
94 if (ctx == 0)
95 ctx = (uint32_t) (icmd->ulpContext);
96 if (icmd->ulpBdeCount == 0)
97 continue;
98
99 for (i = 0; i < icmd->ulpBdeCount; i++) {
100 matp = lpfc_sli_ringpostbuf_get(phba, pring,
101 getPaddr(icmd->un.
102 cont64[i].
103 addrHigh,
104 icmd->un.
105 cont64[i].
106 addrLow));
107 if (!matp) {
108 /* Insert lpfc log message here */
109 lpfc_post_buffer(phba, pring, cnt, 1);
110 go_exit = 1;
111 goto ct_unsol_event_exit_piocbq;
112 }
113
114 /* Typically for Unsolicited CT requests */
115 if (!pmbuf) {
116 pmbuf = matp;
117 INIT_LIST_HEAD(&pmbuf->list);
118 } else
119 list_add_tail(&matp->list, &pmbuf->list);
120
121 size += icmd->un.cont64[i].tus.f.bdeSize;
122 cnt++;
123 }
124
125 icmd->ulpBdeCount = 0;
126 }
127
128 lpfc_post_buffer(phba, pring, cnt, 1);
129 if (save_icmd->ulpStatus) {
130 go_exit = 1;
131 }
132
133ct_unsol_event_exit_piocbq:
134 if (pmbuf) {
135 list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
136 lpfc_mbuf_free(phba, matp->virt, matp->phys);
137 list_del(&matp->list);
138 kfree(matp);
139 }
140 lpfc_mbuf_free(phba, pmbuf->virt, pmbuf->phys);
141 kfree(pmbuf);
142 }
143 return;
144}
145
146static void
147lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist)
148{
149 struct lpfc_dmabuf *mlast, *next_mlast;
150
151 list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
152 lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
153 list_del(&mlast->list);
154 kfree(mlast);
155 }
156 lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
157 kfree(mlist);
158 return;
159}
160
161static struct lpfc_dmabuf *
162lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
163 uint32_t size, int *entries)
164{
165 struct lpfc_dmabuf *mlist = NULL;
166 struct lpfc_dmabuf *mp;
167 int cnt, i = 0;
168
169	/* We get chunks of FCELSSIZE */
170 cnt = size > FCELSSIZE ? FCELSSIZE: size;
171
172 while (size) {
173 /* Allocate buffer for rsp payload */
174 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
175 if (!mp) {
176 if (mlist)
177 lpfc_free_ct_rsp(phba, mlist);
178 return NULL;
179 }
180
181 INIT_LIST_HEAD(&mp->list);
182
183 if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT))
184 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
185 else
186 mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
187
188 if (!mp->virt) {
189 kfree(mp);
190 lpfc_free_ct_rsp(phba, mlist);
191 return NULL;
192 }
193
194 /* Queue it to a linked list */
195 if (!mlist)
196 mlist = mp;
197 else
198 list_add_tail(&mp->list, &mlist->list);
199
200 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
201 /* build buffer ptr list for IOCB */
202 bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
203 bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
204 bpl->tus.f.bdeSize = (uint16_t) cnt;
205 bpl->tus.w = le32_to_cpu(bpl->tus.w);
206 bpl++;
207
208 i++;
209 size -= cnt;
210 }
211
212 *entries = i;
213 return mlist;
214}
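/*
 * Sizing note (constant values are illustrative, not taken from this
 * commit): a GID_FT response area of FC_MAX_NS_RSP bytes is carved into
 * FCELSSIZE-byte mbufs, so if those constants were 65536 and 1024 the
 * loop above would emit a 64-entry BPL, each BDE flagged BUFF_USE_RCV
 * for receive.
 */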
215
216static int
217lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp,
218 struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
219 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
220 struct lpfc_iocbq *),
221 struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
222 uint32_t tmo)
223{
224
225 struct lpfc_sli *psli = &phba->sli;
226 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
227 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
228 IOCB_t *icmd;
229 struct lpfc_iocbq *geniocb = NULL;
230
231 /* Allocate buffer for command iocb */
232 spin_lock_irq(phba->host->host_lock);
233 list_remove_head(lpfc_iocb_list, geniocb, struct lpfc_iocbq, list);
234 spin_unlock_irq(phba->host->host_lock);
235
236 if (geniocb == NULL)
237 return 1;
238 memset(geniocb, 0, sizeof (struct lpfc_iocbq));
239
240 icmd = &geniocb->iocb;
241 icmd->un.genreq64.bdl.ulpIoTag32 = 0;
242 icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
243 icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
244 icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
245 icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64));
246
247 if (usr_flg)
248 geniocb->context3 = NULL;
249 else
250 geniocb->context3 = (uint8_t *) bmp;
251
252 /* Save for completion so we can release these resources */
253 geniocb->context1 = (uint8_t *) inp;
254 geniocb->context2 = (uint8_t *) outp;
255
256 /* Fill in payload, bp points to frame payload */
257 icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
258
259 /* Fill in rest of iocb */
260 icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
261 icmd->un.genreq64.w5.hcsw.Dfctl = 0;
262 icmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
263 icmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
264
265 if (!tmo)
266 tmo = (2 * phba->fc_ratov) + 1;
267 icmd->ulpTimeout = tmo;
268 icmd->ulpBdeCount = 1;
269 icmd->ulpLe = 1;
270 icmd->ulpClass = CLASS3;
271 icmd->ulpContext = ndlp->nlp_rpi;
272
273 /* Issue GEN REQ IOCB for NPORT <did> */
274 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
275 "%d:0119 Issue GEN REQ IOCB for NPORT x%x "
276 "Data: x%x x%x\n", phba->brd_no, icmd->un.ulpWord[5],
277 icmd->ulpIoTag, phba->hba_state);
278 geniocb->iocb_cmpl = cmpl;
279 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
280 spin_lock_irq(phba->host->host_lock);
281 if (lpfc_sli_issue_iocb(phba, pring, geniocb, 0) == IOCB_ERROR) {
282 list_add_tail(&geniocb->list, lpfc_iocb_list);
283 spin_unlock_irq(phba->host->host_lock);
284 return 1;
285 }
286 spin_unlock_irq(phba->host->host_lock);
287
288 return 0;
289}
290
291static int
292lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
293 struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
294 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
295 struct lpfc_iocbq *),
296 uint32_t rsp_size)
297{
298 struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
299 struct lpfc_dmabuf *outmp;
300 int cnt = 0, status;
301 int cmdcode = ((struct lpfc_sli_ct_request *) inmp->virt)->
302 CommandResponse.bits.CmdRsp;
303
304 bpl++; /* Skip past ct request */
305
306 /* Put buffer(s) for ct rsp in bpl */
307 outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
308 if (!outmp)
309 return -ENOMEM;
310
311 status = lpfc_gen_req(phba, bmp, inmp, outmp, cmpl, ndlp, 0,
312 cnt+1, 0);
313 if (status) {
314 lpfc_free_ct_rsp(phba, outmp);
315 return -ENOMEM;
316 }
317 return 0;
318}
319
320static int
321lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
322{
323 struct lpfc_sli_ct_request *Response =
324 (struct lpfc_sli_ct_request *) mp->virt;
325 struct lpfc_nodelist *ndlp = NULL;
326 struct lpfc_dmabuf *mlast, *next_mp;
327 uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
328 uint32_t Did;
329 uint32_t CTentry;
330 int Cnt;
331 struct list_head head;
332
333 lpfc_set_disctmo(phba);
334
335 Cnt = Size > FCELSSIZE ? FCELSSIZE : Size;
336
337 list_add_tail(&head, &mp->list);
338 list_for_each_entry_safe(mp, next_mp, &head, list) {
339 mlast = mp;
340
341 Size -= Cnt;
342
343 if (!ctptr)
344 ctptr = (uint32_t *) mlast->virt;
345 else
346 Cnt -= 16; /* subtract length of CT header */
347
348 /* Loop through entire NameServer list of DIDs */
349 while (Cnt) {
350
351 /* Get next DID from NameServer List */
352 CTentry = *ctptr++;
353 Did = ((be32_to_cpu(CTentry)) & Mask_DID);
354
355 ndlp = NULL;
356 if (Did != phba->fc_myDID) {
357 /* Check for rscn processing or not */
358 ndlp = lpfc_setup_disc_node(phba, Did);
359 }
360 /* Mark all node table entries that are in the
361 Nameserver */
362 if (ndlp) {
363 /* NameServer Rsp */
364 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
365 "%d:0238 Process x%x NameServer"
366 " Rsp Data: x%x x%x x%x\n",
367 phba->brd_no,
368 Did, ndlp->nlp_flag,
369 phba->fc_flag,
370 phba->fc_rscn_id_cnt);
371 } else {
372 /* NameServer Rsp */
373 lpfc_printf_log(phba,
374 KERN_INFO,
375 LOG_DISCOVERY,
376 "%d:0239 Skip x%x NameServer "
377 "Rsp Data: x%x x%x x%x\n",
378 phba->brd_no,
379 Did, Size, phba->fc_flag,
380 phba->fc_rscn_id_cnt);
381 }
382
383 if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY)))
384 goto nsout1;
385 Cnt -= sizeof (uint32_t);
386 }
387 ctptr = NULL;
388
389 }
390
391nsout1:
392 list_del(&head);
393
394	/* Here we are finished with the RSCN case */
395 if (phba->hba_state == LPFC_HBA_READY) {
396 lpfc_els_flush_rscn(phba);
397 spin_lock_irq(phba->host->host_lock);
398 phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */
399 spin_unlock_irq(phba->host->host_lock);
400 }
401 return 0;
402}
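/*
 * Entry format walked above, as a stand-alone decoder (illustration only,
 * reusing the driver's Mask_DID and SLI_CT_LAST_ENTRY definitions): each
 * GID_FT accept word carries a control byte, whose last-entry bit is
 * tested via SLI_CT_LAST_ENTRY, followed by a 24-bit port ID.
 */
static inline int
lpfc_ct_entry_decode(uint32_t CTentry, uint32_t *did)
{
	*did = be32_to_cpu(CTentry) & Mask_DID;		/* low 24 bits */
	return (CTentry & be32_to_cpu(SLI_CT_LAST_ENTRY)) != 0;
}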
403
404
405
406
407static void
408lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
409 struct lpfc_iocbq * rspiocb)
410{
411 IOCB_t *irsp;
412 struct lpfc_sli *psli;
413 struct lpfc_dmabuf *bmp;
414 struct lpfc_dmabuf *inp;
415 struct lpfc_dmabuf *outp;
416 struct lpfc_nodelist *ndlp;
417 struct lpfc_sli_ct_request *CTrsp;
418
419 psli = &phba->sli;
420 /* we pass cmdiocb to state machine which needs rspiocb as well */
421 cmdiocb->context_un.rsp_iocb = rspiocb;
422
423 inp = (struct lpfc_dmabuf *) cmdiocb->context1;
424 outp = (struct lpfc_dmabuf *) cmdiocb->context2;
425 bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
426
427 irsp = &rspiocb->iocb;
428 if (irsp->ulpStatus) {
429 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
430 ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
431 (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
432 goto out;
433 }
434
435 /* Check for retry */
436 if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
437 phba->fc_ns_retry++;
438 /* CT command is being retried */
439 ndlp =
440 lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
441 NameServer_DID);
442 if (ndlp) {
443 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) ==
444 0) {
445 goto out;
446 }
447 }
448 }
449 } else {
450 /* Good status, continue checking */
451 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
452 if (CTrsp->CommandResponse.bits.CmdRsp ==
453 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
454 lpfc_ns_rsp(phba, outp,
455 (uint32_t) (irsp->un.genreq64.bdl.bdeSize));
456 } else if (CTrsp->CommandResponse.bits.CmdRsp ==
457 be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
458 /* NameServer Rsp Error */
459 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
460 "%d:0240 NameServer Rsp Error "
461 "Data: x%x x%x x%x x%x\n",
462 phba->brd_no,
463 CTrsp->CommandResponse.bits.CmdRsp,
464 (uint32_t) CTrsp->ReasonCode,
465 (uint32_t) CTrsp->Explanation,
466 phba->fc_flag);
467 } else {
468 /* NameServer Rsp Error */
469 lpfc_printf_log(phba,
470 KERN_INFO,
471 LOG_DISCOVERY,
472 "%d:0241 NameServer Rsp Error "
473 "Data: x%x x%x x%x x%x\n",
474 phba->brd_no,
475 CTrsp->CommandResponse.bits.CmdRsp,
476 (uint32_t) CTrsp->ReasonCode,
477 (uint32_t) CTrsp->Explanation,
478 phba->fc_flag);
479 }
480 }
481 /* Link up / RSCN discovery */
482 lpfc_disc_start(phba);
483out:
484 lpfc_free_ct_rsp(phba, outp);
485 lpfc_mbuf_free(phba, inp->virt, inp->phys);
486 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
487 kfree(inp);
488 kfree(bmp);
489 spin_lock_irq(phba->host->host_lock);
490 list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
491 spin_unlock_irq(phba->host->host_lock);
492 return;
493}
494
495static void
496lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
497 struct lpfc_iocbq * rspiocb)
498{
499 struct lpfc_sli *psli;
500 struct lpfc_dmabuf *bmp;
501 struct lpfc_dmabuf *inp;
502 struct lpfc_dmabuf *outp;
503 IOCB_t *irsp;
504 struct lpfc_sli_ct_request *CTrsp;
505
506 psli = &phba->sli;
507 /* we pass cmdiocb to state machine which needs rspiocb as well */
508 cmdiocb->context_un.rsp_iocb = rspiocb;
509
510 inp = (struct lpfc_dmabuf *) cmdiocb->context1;
511 outp = (struct lpfc_dmabuf *) cmdiocb->context2;
512 bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
513 irsp = &rspiocb->iocb;
514
515 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
516
517 /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
518 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
519 "%d:0209 RFT request completes ulpStatus x%x "
520 "CmdRsp x%x\n", phba->brd_no, irsp->ulpStatus,
521 CTrsp->CommandResponse.bits.CmdRsp);
522
523 lpfc_free_ct_rsp(phba, outp);
524 lpfc_mbuf_free(phba, inp->virt, inp->phys);
525 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
526 kfree(inp);
527 kfree(bmp);
528 spin_lock_irq(phba->host->host_lock);
529 list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
530 spin_unlock_irq(phba->host->host_lock);
531 return;
532}
533
534static void
535lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
536 struct lpfc_iocbq * rspiocb)
537{
538 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
539 return;
540}
541
542static void
543lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
544 struct lpfc_iocbq * rspiocb)
545{
546 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
547 return;
548}
549
550void
551lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
552{
553 char fwrev[16];
554
555 lpfc_decode_firmware_rev(phba, fwrev, 0);
556
557 if (phba->Port[0]) {
558 sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName,
559 phba->Port, fwrev, lpfc_release_version);
560 } else {
561 sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
562 fwrev, lpfc_release_version);
563 }
564}
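/*
 * Example of the resulting symbolic name (model and revision strings are
 * hypothetical): "Emulex LP10000 Port 1 FV1.90A4 DV8.0.28" when a port
 * designator is present, otherwise "Emulex LP10000 FV1.90A4 DV8.0.28".
 */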
565
566/*
567 * lpfc_ns_cmd
568 * Description:
569 * Issue Cmd to NameServer
570 * SLI_CTNS_GID_FT
571 *       SLI_CTNS_RFT_ID
572 */
573int
574lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
575{
576 struct lpfc_dmabuf *mp, *bmp;
577 struct lpfc_sli_ct_request *CtReq;
578 struct ulp_bde64 *bpl;
579 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
580 struct lpfc_iocbq *) = NULL;
581 uint32_t rsp_size = 1024;
582
583 /* fill in BDEs for command */
584 /* Allocate buffer for command payload */
585 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
586 if (!mp)
587 goto ns_cmd_exit;
588
589 INIT_LIST_HEAD(&mp->list);
590 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
591 if (!mp->virt)
592 goto ns_cmd_free_mp;
593
594 /* Allocate buffer for Buffer ptr list */
595 bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
596 if (!bmp)
597 goto ns_cmd_free_mpvirt;
598
599 INIT_LIST_HEAD(&bmp->list);
600 bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys));
601 if (!bmp->virt)
602 goto ns_cmd_free_bmp;
603
604 /* NameServer Req */
605 lpfc_printf_log(phba,
606 KERN_INFO,
607 LOG_DISCOVERY,
608 "%d:0236 NameServer Req Data: x%x x%x x%x\n",
609 phba->brd_no, cmdcode, phba->fc_flag,
610 phba->fc_rscn_id_cnt);
611
612 bpl = (struct ulp_bde64 *) bmp->virt;
613 memset(bpl, 0, sizeof(struct ulp_bde64));
614 bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
615 bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
616 bpl->tus.f.bdeFlags = 0;
617 if (cmdcode == SLI_CTNS_GID_FT)
618 bpl->tus.f.bdeSize = GID_REQUEST_SZ;
619 else if (cmdcode == SLI_CTNS_RFT_ID)
620 bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
621 else if (cmdcode == SLI_CTNS_RNN_ID)
622 bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
623 else if (cmdcode == SLI_CTNS_RSNN_NN)
624 bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
625 else
626 bpl->tus.f.bdeSize = 0;
627 bpl->tus.w = le32_to_cpu(bpl->tus.w);
628
629 CtReq = (struct lpfc_sli_ct_request *) mp->virt;
630 memset(CtReq, 0, sizeof (struct lpfc_sli_ct_request));
631 CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
632 CtReq->RevisionId.bits.InId = 0;
633 CtReq->FsType = SLI_CT_DIRECTORY_SERVICE;
634 CtReq->FsSubType = SLI_CT_DIRECTORY_NAME_SERVER;
635 CtReq->CommandResponse.bits.Size = 0;
636 switch (cmdcode) {
637 case SLI_CTNS_GID_FT:
638 CtReq->CommandResponse.bits.CmdRsp =
639 be16_to_cpu(SLI_CTNS_GID_FT);
640 CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
641 if (phba->hba_state < LPFC_HBA_READY)
642 phba->hba_state = LPFC_NS_QRY;
643 lpfc_set_disctmo(phba);
644 cmpl = lpfc_cmpl_ct_cmd_gid_ft;
645 rsp_size = FC_MAX_NS_RSP;
646 break;
647
648 case SLI_CTNS_RFT_ID:
649 CtReq->CommandResponse.bits.CmdRsp =
650 be16_to_cpu(SLI_CTNS_RFT_ID);
651 CtReq->un.rft.PortId = be32_to_cpu(phba->fc_myDID);
652 CtReq->un.rft.fcpReg = 1;
653 cmpl = lpfc_cmpl_ct_cmd_rft_id;
654 break;
655
656 case SLI_CTNS_RNN_ID:
657 CtReq->CommandResponse.bits.CmdRsp =
658 be16_to_cpu(SLI_CTNS_RNN_ID);
659 CtReq->un.rnn.PortId = be32_to_cpu(phba->fc_myDID);
660 memcpy(CtReq->un.rnn.wwnn, &phba->fc_nodename,
661 sizeof (struct lpfc_name));
662 cmpl = lpfc_cmpl_ct_cmd_rnn_id;
663 break;
664
665 case SLI_CTNS_RSNN_NN:
666 CtReq->CommandResponse.bits.CmdRsp =
667 be16_to_cpu(SLI_CTNS_RSNN_NN);
668 memcpy(CtReq->un.rsnn.wwnn, &phba->fc_nodename,
669 sizeof (struct lpfc_name));
670 lpfc_get_hba_sym_node_name(phba, CtReq->un.rsnn.symbname);
671 CtReq->un.rsnn.len = strlen(CtReq->un.rsnn.symbname);
672 cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
673 break;
674 }
675
676 if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, rsp_size))
677		/* On success, the cmpl function will free the buffers */
678 return 0;
679
680 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
681ns_cmd_free_bmp:
682 kfree(bmp);
683ns_cmd_free_mpvirt:
684 lpfc_mbuf_free(phba, mp->virt, mp->phys);
685ns_cmd_free_mp:
686 kfree(mp);
687ns_cmd_exit:
688 return 1;
689}
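/*
 * Usage sketch (the real callers live in the discovery code): once the
 * directory server node is logged in, the port registers its FC-4 types
 * and then queries for all FCP-capable ports.
 */
static void lpfc_ns_cmd_sketch(struct lpfc_hba *phba,
			       struct lpfc_nodelist *ns_ndlp)
{
	if (lpfc_ns_cmd(phba, ns_ndlp, SLI_CTNS_RFT_ID))
		return;			/* nonzero means the issue failed */
	lpfc_ns_cmd(phba, ns_ndlp, SLI_CTNS_GID_FT);
}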
690
691static void
692lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba,
693 struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
694{
695 struct lpfc_dmabuf *bmp = cmdiocb->context3;
696 struct lpfc_dmabuf *inp = cmdiocb->context1;
697 struct lpfc_dmabuf *outp = cmdiocb->context2;
698 struct lpfc_sli_ct_request *CTrsp = outp->virt;
699 struct lpfc_sli_ct_request *CTcmd = inp->virt;
700 struct lpfc_nodelist *ndlp;
701 uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
702 uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
703
704 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, FDMI_DID);
705 if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
706 /* FDMI rsp failed */
707 lpfc_printf_log(phba,
708 KERN_INFO,
709 LOG_DISCOVERY,
710 "%d:0220 FDMI rsp failed Data: x%x\n",
711 phba->brd_no,
712 be16_to_cpu(fdmi_cmd));
713 }
714
715 switch (be16_to_cpu(fdmi_cmd)) {
716 case SLI_MGMT_RHBA:
717 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RPA);
718 break;
719
720 case SLI_MGMT_RPA:
721 break;
722
723 case SLI_MGMT_DHBA:
724 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DPRT);
725 break;
726
727 case SLI_MGMT_DPRT:
728 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RHBA);
729 break;
730 }
731
732 lpfc_free_ct_rsp(phba, outp);
733 lpfc_mbuf_free(phba, inp->virt, inp->phys);
734 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
735 kfree(inp);
736 kfree(bmp);
737 spin_lock_irq(phba->host->host_lock);
738 list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
739 spin_unlock_irq(phba->host->host_lock);
740 return;
741}
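/*
 * Note on the switch above: FDMI registration is chained through this
 * completion handler, so one initial SLI_MGMT_DHBA walks the full
 * sequence DHBA -> DPRT -> RHBA -> RPA: deregister the HBA, then the
 * port, then re-register the HBA and finally its port attributes.
 */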
742int
743lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
744{
745 struct lpfc_dmabuf *mp, *bmp;
746 struct lpfc_sli_ct_request *CtReq;
747 struct ulp_bde64 *bpl;
748 uint32_t size;
749 REG_HBA *rh;
750 PORT_ENTRY *pe;
751 REG_PORT_ATTRIBUTE *pab;
752 ATTRIBUTE_BLOCK *ab;
753 ATTRIBUTE_ENTRY *ae;
754 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
755 struct lpfc_iocbq *);
756
757
758 /* fill in BDEs for command */
759 /* Allocate buffer for command payload */
760 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
761 if (!mp)
762 goto fdmi_cmd_exit;
763
764 mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
765 if (!mp->virt)
766 goto fdmi_cmd_free_mp;
767
768 /* Allocate buffer for Buffer ptr list */
769 bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
770 if (!bmp)
771 goto fdmi_cmd_free_mpvirt;
772
773 bmp->virt = lpfc_mbuf_alloc(phba, 0, &(bmp->phys));
774 if (!bmp->virt)
775 goto fdmi_cmd_free_bmp;
776
777 INIT_LIST_HEAD(&mp->list);
778 INIT_LIST_HEAD(&bmp->list);
779
780 /* FDMI request */
781 lpfc_printf_log(phba,
782 KERN_INFO,
783 LOG_DISCOVERY,
784 "%d:0218 FDMI Request Data: x%x x%x x%x\n",
785 phba->brd_no,
786 phba->fc_flag, phba->hba_state, cmdcode);
787
788 CtReq = (struct lpfc_sli_ct_request *) mp->virt;
789
790 memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
791 CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
792 CtReq->RevisionId.bits.InId = 0;
793
794 CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE;
795 CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
796 size = 0;
797
798 switch (cmdcode) {
799 case SLI_MGMT_RHBA:
800 {
801 lpfc_vpd_t *vp = &phba->vpd;
802 uint32_t i, j, incr;
803 int len;
804
805 CtReq->CommandResponse.bits.CmdRsp =
806 be16_to_cpu(SLI_MGMT_RHBA);
807 CtReq->CommandResponse.bits.Size = 0;
808 rh = (REG_HBA *) & CtReq->un.PortID;
809 memcpy(&rh->hi.PortName, &phba->fc_sparam.portName,
810 sizeof (struct lpfc_name));
811 /* One entry (port) per adapter */
812 rh->rpl.EntryCnt = be32_to_cpu(1);
813 memcpy(&rh->rpl.pe, &phba->fc_sparam.portName,
814 sizeof (struct lpfc_name));
815
816 /* point to the HBA attribute block */
817 size = 2 * sizeof (struct lpfc_name) + FOURBYTES;
818 ab = (ATTRIBUTE_BLOCK *) ((uint8_t *) rh + size);
819 ab->EntryCnt = 0;
820
821 /* Point to the beginning of the first HBA attribute
822 entry */
823 /* #1 HBA attribute entry */
824 size += FOURBYTES;
825 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
826 ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME);
827 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES
828 + sizeof (struct lpfc_name));
829 memcpy(&ae->un.NodeName, &phba->fc_sparam.nodeName,
830 sizeof (struct lpfc_name));
831 ab->EntryCnt++;
832 size += FOURBYTES + sizeof (struct lpfc_name);
833
834 /* #2 HBA attribute entry */
835 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
836 ae->ad.bits.AttrType = be16_to_cpu(MANUFACTURER);
837 strcpy(ae->un.Manufacturer, "Emulex Corporation");
838 len = strlen(ae->un.Manufacturer);
839 len += (len & 3) ? (4 - (len & 3)) : 4;
840 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
841 ab->EntryCnt++;
842 size += FOURBYTES + len;
843
844 /* #3 HBA attribute entry */
845 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
846 ae->ad.bits.AttrType = be16_to_cpu(SERIAL_NUMBER);
847 strcpy(ae->un.SerialNumber, phba->SerialNumber);
848 len = strlen(ae->un.SerialNumber);
849 len += (len & 3) ? (4 - (len & 3)) : 4;
850 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
851 ab->EntryCnt++;
852 size += FOURBYTES + len;
853
854 /* #4 HBA attribute entry */
855 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
856 ae->ad.bits.AttrType = be16_to_cpu(MODEL);
857 strcpy(ae->un.Model, phba->ModelName);
858 len = strlen(ae->un.Model);
859 len += (len & 3) ? (4 - (len & 3)) : 4;
860 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
861 ab->EntryCnt++;
862 size += FOURBYTES + len;
863
864 /* #5 HBA attribute entry */
865 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
866 ae->ad.bits.AttrType = be16_to_cpu(MODEL_DESCRIPTION);
867 strcpy(ae->un.ModelDescription, phba->ModelDesc);
868 len = strlen(ae->un.ModelDescription);
869 len += (len & 3) ? (4 - (len & 3)) : 4;
870 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
871 ab->EntryCnt++;
872 size += FOURBYTES + len;
873
874 /* #6 HBA attribute entry */
875 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
876 ae->ad.bits.AttrType = be16_to_cpu(HARDWARE_VERSION);
877 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 8);
878			/* Convert JEDEC ID to ASCII for hardware version */
879 incr = vp->rev.biuRev;
880 for (i = 0; i < 8; i++) {
881 j = (incr & 0xf);
882 if (j <= 9)
883 ae->un.HardwareVersion[7 - i] =
884 (char)((uint8_t) 0x30 +
885 (uint8_t) j);
886 else
887 ae->un.HardwareVersion[7 - i] =
888 (char)((uint8_t) 0x61 +
889 (uint8_t) (j - 10));
890 incr = (incr >> 4);
891 }
892 ab->EntryCnt++;
893 size += FOURBYTES + 8;
894
895 /* #7 HBA attribute entry */
896 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
897 ae->ad.bits.AttrType = be16_to_cpu(DRIVER_VERSION);
898 strcpy(ae->un.DriverVersion, lpfc_release_version);
899 len = strlen(ae->un.DriverVersion);
900 len += (len & 3) ? (4 - (len & 3)) : 4;
901 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
902 ab->EntryCnt++;
903 size += FOURBYTES + len;
904
905 /* #8 HBA attribute entry */
906 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
907 ae->ad.bits.AttrType = be16_to_cpu(OPTION_ROM_VERSION);
908 strcpy(ae->un.OptionROMVersion, phba->OptionROMVersion);
909 len = strlen(ae->un.OptionROMVersion);
910 len += (len & 3) ? (4 - (len & 3)) : 4;
911 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
912 ab->EntryCnt++;
913 size += FOURBYTES + len;
914
915 /* #9 HBA attribute entry */
916 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
917 ae->ad.bits.AttrType = be16_to_cpu(FIRMWARE_VERSION);
918 lpfc_decode_firmware_rev(phba, ae->un.FirmwareVersion,
919 1);
920 len = strlen(ae->un.FirmwareVersion);
921 len += (len & 3) ? (4 - (len & 3)) : 4;
922 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
923 ab->EntryCnt++;
924 size += FOURBYTES + len;
925
926 /* #10 HBA attribute entry */
927 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
928 ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION);
929 sprintf(ae->un.OsNameVersion, "%s %s %s",
930 system_utsname.sysname, system_utsname.release,
931 system_utsname.version);
932 len = strlen(ae->un.OsNameVersion);
933 len += (len & 3) ? (4 - (len & 3)) : 4;
934 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
935 ab->EntryCnt++;
936 size += FOURBYTES + len;
937
938 /* #11 HBA attribute entry */
939 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
940 ae->ad.bits.AttrType = be16_to_cpu(MAX_CT_PAYLOAD_LEN);
941 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
942 ae->un.MaxCTPayloadLen = (65 * 4096);
943 ab->EntryCnt++;
944 size += FOURBYTES + 4;
945
946 ab->EntryCnt = be32_to_cpu(ab->EntryCnt);
947 /* Total size */
948 size = GID_REQUEST_SZ - 4 + size;
949 }
950 break;
951
952 case SLI_MGMT_RPA:
953 {
954 lpfc_vpd_t *vp;
955 struct serv_parm *hsp;
956 int len;
957
958 vp = &phba->vpd;
959
960 CtReq->CommandResponse.bits.CmdRsp =
961 be16_to_cpu(SLI_MGMT_RPA);
962 CtReq->CommandResponse.bits.Size = 0;
963 pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID;
964 size = sizeof (struct lpfc_name) + FOURBYTES;
965 memcpy((uint8_t *) & pab->PortName,
966 (uint8_t *) & phba->fc_sparam.portName,
967 sizeof (struct lpfc_name));
968 pab->ab.EntryCnt = 0;
969
970 /* #1 Port attribute entry */
971 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
972 ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_FC4_TYPES);
973 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 32);
974 ae->un.SupportFC4Types[2] = 1;
975 ae->un.SupportFC4Types[7] = 1;
976 pab->ab.EntryCnt++;
977 size += FOURBYTES + 32;
978
979 /* #2 Port attribute entry */
980 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
981 ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_SPEED);
982 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
983 if (FC_JEDEC_ID(vp->rev.biuRev) == VIPER_JEDEC_ID)
984 ae->un.SupportSpeed = HBA_PORTSPEED_10GBIT;
985 else if (FC_JEDEC_ID(vp->rev.biuRev) == HELIOS_JEDEC_ID)
986 ae->un.SupportSpeed = HBA_PORTSPEED_4GBIT;
987 else if ((FC_JEDEC_ID(vp->rev.biuRev) ==
988 CENTAUR_2G_JEDEC_ID)
989 || (FC_JEDEC_ID(vp->rev.biuRev) ==
990 PEGASUS_JEDEC_ID)
991 || (FC_JEDEC_ID(vp->rev.biuRev) ==
992 THOR_JEDEC_ID))
993 ae->un.SupportSpeed = HBA_PORTSPEED_2GBIT;
994 else
995 ae->un.SupportSpeed = HBA_PORTSPEED_1GBIT;
996 pab->ab.EntryCnt++;
997 size += FOURBYTES + 4;
998
999 /* #3 Port attribute entry */
1000 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
1001 ae->ad.bits.AttrType = be16_to_cpu(PORT_SPEED);
1002 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
1003 switch(phba->fc_linkspeed) {
1004 case LA_1GHZ_LINK:
1005 ae->un.PortSpeed = HBA_PORTSPEED_1GBIT;
1006 break;
1007 case LA_2GHZ_LINK:
1008 ae->un.PortSpeed = HBA_PORTSPEED_2GBIT;
1009 break;
1010 case LA_4GHZ_LINK:
1011 ae->un.PortSpeed = HBA_PORTSPEED_4GBIT;
1012 break;
1013 default:
1014 ae->un.PortSpeed =
1015 HBA_PORTSPEED_UNKNOWN;
1016 break;
1017 }
1018 pab->ab.EntryCnt++;
1019 size += FOURBYTES + 4;
1020
1021 /* #4 Port attribute entry */
1022 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
1023 ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE);
1024 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
1025 hsp = (struct serv_parm *) & phba->fc_sparam;
1026 ae->un.MaxFrameSize =
 1027 (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) |
 1028 (uint32_t) hsp->cmn.bbRcvSizeLsb;
 1029
1030 pab->ab.EntryCnt++;
1031 size += FOURBYTES + 4;
1032
1033 /* #5 Port attribute entry */
1034 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
1035 ae->ad.bits.AttrType = be16_to_cpu(OS_DEVICE_NAME);
1036 strcpy((char *)ae->un.OsDeviceName, LPFC_DRIVER_NAME);
1037 len = strlen((char *)ae->un.OsDeviceName);
1038 len += (len & 3) ? (4 - (len & 3)) : 4;
1039 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
1040 pab->ab.EntryCnt++;
1041 size += FOURBYTES + len;
1042
1043 if (phba->cfg_fdmi_on == 2) {
1044 /* #6 Port attribute entry */
1045 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab +
1046 size);
1047 ae->ad.bits.AttrType = be16_to_cpu(HOST_NAME);
1048 sprintf(ae->un.HostName, "%s",
1049 system_utsname.nodename);
1050 len = strlen(ae->un.HostName);
1051 len += (len & 3) ? (4 - (len & 3)) : 4;
1052 ae->ad.bits.AttrLen =
1053 be16_to_cpu(FOURBYTES + len);
1054 pab->ab.EntryCnt++;
1055 size += FOURBYTES + len;
1056 }
1057
1058 pab->ab.EntryCnt = be32_to_cpu(pab->ab.EntryCnt);
1059 /* Total size */
1060 size = GID_REQUEST_SZ - 4 + size;
1061 }
1062 break;
1063
1064 case SLI_MGMT_DHBA:
1065 CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DHBA);
1066 CtReq->CommandResponse.bits.Size = 0;
1067 pe = (PORT_ENTRY *) & CtReq->un.PortID;
1068 memcpy((uint8_t *) & pe->PortName,
1069 (uint8_t *) & phba->fc_sparam.portName,
1070 sizeof (struct lpfc_name));
1071 size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
1072 break;
1073
1074 case SLI_MGMT_DPRT:
1075 CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DPRT);
1076 CtReq->CommandResponse.bits.Size = 0;
1077 pe = (PORT_ENTRY *) & CtReq->un.PortID;
1078 memcpy((uint8_t *) & pe->PortName,
1079 (uint8_t *) & phba->fc_sparam.portName,
1080 sizeof (struct lpfc_name));
1081 size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
1082 break;
1083 }
1084
1085 bpl = (struct ulp_bde64 *) bmp->virt;
1086 bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
1087 bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
1088 bpl->tus.f.bdeFlags = 0;
1089 bpl->tus.f.bdeSize = size;
1090 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1091
1092 cmpl = lpfc_cmpl_ct_cmd_fdmi;
1093
1094 if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP))
1095 return 0;
1096
1097 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1098fdmi_cmd_free_bmp:
1099 kfree(bmp);
1100fdmi_cmd_free_mpvirt:
1101 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1102fdmi_cmd_free_mp:
1103 kfree(mp);
1104fdmi_cmd_exit:
1105 /* Issue FDMI request failed */
1106 lpfc_printf_log(phba,
1107 KERN_INFO,
1108 LOG_DISCOVERY,
1109 "%d:0244 Issue FDMI request failed Data: x%x\n",
1110 phba->brd_no,
1111 cmdcode);
1112 return 1;
1113}
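
The length arithmetic repeated for each string-valued attribute above, len += (len & 3) ? (4 - (len & 3)) : 4;, rounds the string length up to the next 4-byte boundary and reserves a full 4 pad bytes when the length is already aligned, so every attribute entry stays 4-byte aligned and NUL-terminated on the wire. A minimal standalone sketch of the same arithmetic (the helper name is hypothetical, not driver code):

#include <stdio.h>
#include <string.h>

/* Illustrative only: mirrors the FDMI attribute padding above.
 * Rounds len up to a multiple of 4 and adds a full 4 pad bytes
 * when len is already aligned, guaranteeing at least one NUL.
 */
static int fdmi_padded_len(const char *s)
{
	int len = (int) strlen(s);

	len += (len & 3) ? (4 - (len & 3)) : 4;
	return len;
}

int main(void)
{
	printf("%d\n", fdmi_padded_len("2.10X6")); /* 6 -> 8 */
	printf("%d\n", fdmi_padded_len("Linux"));  /* 5 -> 8 */
	printf("%d\n", fdmi_padded_len("lpfc"));   /* 4 -> 8 */
	return 0;
}
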
1114
1115void
1116lpfc_fdmi_tmo(unsigned long ptr)
1117{
1118 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
1119 unsigned long iflag;
1120
1121 spin_lock_irqsave(phba->host->host_lock, iflag);
1122 if (!(phba->work_hba_events & WORKER_FDMI_TMO)) {
1123 phba->work_hba_events |= WORKER_FDMI_TMO;
1124 if (phba->work_wait)
1125 wake_up(phba->work_wait);
1126 }
 1127 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1128}
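
lpfc_fdmi_tmo() above runs in timer context, so it only records WORKER_FDMI_TMO and wakes the worker; lpfc_fdmi_tmo_handler() then does the real work in process context. A userspace sketch of that flag handshake, as a single-threaded simulation that ignores host_lock (illustrative only; the driver manipulates the flag under the lock):

#include <stdio.h>

#define WORKER_FDMI_TMO 0x1

/* Illustrative only: models the flag-and-wake handshake between
 * the timer callback and the worker thread.
 */
static unsigned int work_hba_events;

static void fdmi_tmo_fires(void)	/* timer context: cheap */
{
	if (!(work_hba_events & WORKER_FDMI_TMO))
		work_hba_events |= WORKER_FDMI_TMO; /* then wake_up() */
}

static void worker_runs(void)		/* process context: real work */
{
	if (!(work_hba_events & WORKER_FDMI_TMO))
		return;
	work_hba_events &= ~WORKER_FDMI_TMO; /* clearing shown for illustration */
	printf("handling FDMI timeout in the worker\n");
}

int main(void)
{
	fdmi_tmo_fires();
	worker_runs();
	return 0;
}
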
1129
1130void
1131lpfc_fdmi_tmo_handler(struct lpfc_hba *phba)
1132{
1133 struct lpfc_nodelist *ndlp;
1134
1135 spin_lock_irq(phba->host->host_lock);
1136 if (!(phba->work_hba_events & WORKER_FDMI_TMO)) {
1137 spin_unlock_irq(phba->host->host_lock);
1138 return;
1139 }
1140 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, FDMI_DID);
1141 if (ndlp) {
1142 if (system_utsname.nodename[0] != '\0') {
1143 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
1144 } else {
1145 mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
1146 }
1147 }
1148 spin_unlock_irq(phba->host->host_lock);
1149 return;
1150}
1151
1152
1153void
1154lpfc_decode_firmware_rev(struct lpfc_hba * phba, char *fwrevision, int flag)
1155{
1156 struct lpfc_sli *psli = &phba->sli;
1157 lpfc_vpd_t *vp = &phba->vpd;
1158 uint32_t b1, b2, b3, b4, i, rev;
1159 char c;
1160 uint32_t *ptr, str[4];
1161 uint8_t *fwname;
1162
1163 if (vp->rev.rBit) {
1164 if (psli->sli_flag & LPFC_SLI2_ACTIVE)
1165 rev = vp->rev.sli2FwRev;
1166 else
1167 rev = vp->rev.sli1FwRev;
1168
1169 b1 = (rev & 0x0000f000) >> 12;
1170 b2 = (rev & 0x00000f00) >> 8;
1171 b3 = (rev & 0x000000c0) >> 6;
1172 b4 = (rev & 0x00000030) >> 4;
1173
1174 switch (b4) {
1175 case 0:
1176 c = 'N';
1177 break;
1178 case 1:
1179 c = 'A';
1180 break;
1181 case 2:
1182 c = 'B';
1183 break;
1184 default:
1185 c = 0;
1186 break;
1187 }
1188 b4 = (rev & 0x0000000f);
1189
1190 if (psli->sli_flag & LPFC_SLI2_ACTIVE)
1191 fwname = vp->rev.sli2FwName;
1192 else
1193 fwname = vp->rev.sli1FwName;
1194
1195 for (i = 0; i < 16; i++)
1196 if (fwname[i] == 0x20)
1197 fwname[i] = 0;
1198
1199 ptr = (uint32_t*)fwname;
1200
1201 for (i = 0; i < 3; i++)
1202 str[i] = be32_to_cpu(*ptr++);
1203
1204 if (c == 0) {
1205 if (flag)
1206 sprintf(fwrevision, "%d.%d%d (%s)",
1207 b1, b2, b3, (char *)str);
1208 else
1209 sprintf(fwrevision, "%d.%d%d", b1,
1210 b2, b3);
1211 } else {
1212 if (flag)
1213 sprintf(fwrevision, "%d.%d%d%c%d (%s)",
1214 b1, b2, b3, c,
1215 b4, (char *)str);
1216 else
1217 sprintf(fwrevision, "%d.%d%d%c%d",
1218 b1, b2, b3, c, b4);
1219 }
1220 } else {
1221 rev = vp->rev.smFwRev;
1222
1223 b1 = (rev & 0xff000000) >> 24;
1224 b2 = (rev & 0x00f00000) >> 20;
1225 b3 = (rev & 0x000f0000) >> 16;
1226 c = (rev & 0x0000ff00) >> 8;
1227 b4 = (rev & 0x000000ff);
1228
 1229 /* flag selects the long format above; for the SMFW
 1230 * revision both formats are identical.
 1231 */
 1232 sprintf(fwrevision, "%d.%d%d%c%d ", b1,
 1233 b2, b3, c, b4);
 1234
1235 }
1236 return;
1237}
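
To make the bit slicing above concrete, here is a small standalone sketch of the rBit decode path. The masks and shifts are taken directly from the function; the sample revision word is invented:

#include <stdio.h>

/* Illustrative decode of the rBit-style revision word, using the
 * same masks and shifts as lpfc_decode_firmware_rev() above.
 */
int main(void)
{
	unsigned int rev = 0x00001a13;			/* made-up sample */
	unsigned int b1 = (rev & 0x0000f000) >> 12;	/* 1 */
	unsigned int b2 = (rev & 0x00000f00) >> 8;	/* 10 */
	unsigned int b3 = (rev & 0x000000c0) >> 6;	/* 0 */
	unsigned int sel = (rev & 0x00000030) >> 4;	/* 1 -> 'A' */
	unsigned int b4 = rev & 0x0000000f;		/* 3 */
	char c = (sel == 0) ? 'N' : (sel == 1) ? 'A' :
		 (sel == 2) ? 'B' : 0;

	if (c)
		printf("%u.%u%u%c%u\n", b1, b2, b3, c, b4); /* 1.100A3 */
	else
		printf("%u.%u%u\n", b1, b2, b3);
	return 0;
}
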
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
new file mode 100644
index 000000000000..adccc99510d5
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -0,0 +1,206 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_disc.h 1.61 2005/04/07 08:46:52EDT sf_support Exp $
23 */
24
25#define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */
26#define FC_MAX_NS_RSP 65536 /* max size NameServer rsp */
27#define FC_MAXLOOP 126 /* max devices supported on a fc loop */
28#define LPFC_DISC_FLOGI_TMO 10 /* Discovery FLOGI ratov */
29
30
 31/* This is the protocol-dependent definition for a Node List Entry.
 32 * This is used by the Fibre Channel protocol to support FCP.
33 */
34
 35/* structure used to queue an event to the discovery tasklet */
36struct lpfc_work_evt {
37 struct list_head evt_listp;
38 void * evt_arg1;
39 void * evt_arg2;
40 uint32_t evt;
41};
42
43#define LPFC_EVT_NODEV_TMO 0x1
44#define LPFC_EVT_ONLINE 0x2
45#define LPFC_EVT_OFFLINE 0x3
46#define LPFC_EVT_ELS_RETRY 0x4
47
48struct lpfc_nodelist {
49 struct list_head nlp_listp;
50 struct lpfc_name nlp_portname; /* port name */
51 struct lpfc_name nlp_nodename; /* node name */
52 uint32_t nlp_flag; /* entry flags */
53 uint32_t nlp_DID; /* FC D_ID of entry */
54 uint32_t nlp_last_elscmd; /* Last ELS cmd sent */
55 uint16_t nlp_type;
56#define NLP_FC_NODE 0x1 /* entry is an FC node */
57#define NLP_FABRIC 0x4 /* entry rep a Fabric entity */
58#define NLP_FCP_TARGET 0x8 /* entry is an FCP target */
59#define NLP_FCP_INITIATOR 0x10 /* entry is an FCP Initiator */
60
61 uint16_t nlp_rpi;
62 uint16_t nlp_state; /* state transition indicator */
63 uint16_t nlp_xri; /* output exchange id for RPI */
64 uint16_t nlp_sid; /* scsi id */
65#define NLP_NO_SID 0xffff
66 uint16_t nlp_maxframe; /* Max RCV frame size */
67 uint8_t nlp_class_sup; /* Supported Classes */
68 uint8_t nlp_retry; /* used for ELS retries */
69 uint8_t nlp_disc_refcnt; /* used for DSM */
70 uint8_t nlp_fcp_info; /* class info, bits 0-3 */
71#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
72
73 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
74 struct timer_list nlp_tmofunc; /* Used for nodev tmo */
75 struct fc_rport *rport; /* Corresponding FC transport
76 port structure */
77 struct lpfc_nodelist *nlp_rpi_hash_next;
78 struct lpfc_hba *nlp_phba;
79 struct lpfc_work_evt nodev_timeout_evt;
80 struct lpfc_work_evt els_retry_evt;
81};
82
83/* Defines for nlp_flag (uint32) */
84#define NLP_NO_LIST 0x0 /* Indicates immediately free node */
85#define NLP_UNUSED_LIST 0x1 /* Flg to indicate node will be freed */
86#define NLP_PLOGI_LIST 0x2 /* Flg to indicate sent PLOGI */
87#define NLP_ADISC_LIST 0x3 /* Flg to indicate sent ADISC */
88#define NLP_REGLOGIN_LIST 0x4 /* Flg to indicate sent REG_LOGIN */
89#define NLP_PRLI_LIST 0x5 /* Flg to indicate sent PRLI */
90#define NLP_UNMAPPED_LIST 0x6 /* Node is now unmapped */
91#define NLP_MAPPED_LIST 0x7 /* Node is now mapped */
92#define NLP_NPR_LIST 0x8 /* Node is in NPort Recovery state */
 93#define NLP_JUST_DQ 0x9 /* just dequeue ndlp in lpfc_nlp_list */
94#define NLP_LIST_MASK 0xf /* mask to see what list node is on */
95#define NLP_PLOGI_SND 0x20 /* sent PLOGI request for this entry */
96#define NLP_PRLI_SND 0x40 /* sent PRLI request for this entry */
97#define NLP_ADISC_SND 0x80 /* sent ADISC request for this entry */
98#define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */
99#define NLP_RNID_SND 0x400 /* sent RNID request for this entry */
100#define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */
101#define NLP_NODEV_TMO 0x10000 /* nodev timeout is running for node */
102#define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */
103#define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */
104#define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */
105#define NLP_LOGO_ACC 0x100000 /* Process LOGO after ACC completes */
106#define NLP_TGT_NO_SCSIID 0x200000 /* good PRLI but no binding for scsid */
107#define NLP_ACC_REGLOGIN 0x1000000 /* Issue Reg Login after successful
108 ACC */
109#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
110 NPR list */
111#define NLP_DELAY_REMOVE 0x4000000 /* Defer removal till end of DSM */
112
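
Note that the list codes above (NLP_NO_LIST through NLP_JUST_DQ) are an enumeration packed into bits 0-3 of nlp_flag, while the remaining bits are independent flags; NLP_LIST_MASK extracts the current list. A small self-contained sketch of that encoding (the struct is a stand-in for lpfc_nodelist, not the driver's type):

#include <stdio.h>

#define NLP_PLOGI_LIST	0x2
#define NLP_NPR_LIST	0x8
#define NLP_LIST_MASK	0xf
#define NLP_NPR_2B_DISC	0x40000

/* Minimal stand-in for struct lpfc_nodelist; illustration only. */
struct node { unsigned int nlp_flag; };

int main(void)
{
	struct node n = { .nlp_flag = NLP_NPR_LIST | NLP_NPR_2B_DISC };

	/* The list code lives in bits 0-3, so mask the old code out
	 * before OR'ing a new one in; the high flag bits survive. */
	if ((n.nlp_flag & NLP_LIST_MASK) == NLP_NPR_LIST) {
		n.nlp_flag &= ~NLP_LIST_MASK;
		n.nlp_flag |= NLP_PLOGI_LIST;
	}
	printf("list=0x%x other-flags=0x%x\n",
	       n.nlp_flag & NLP_LIST_MASK, n.nlp_flag & ~NLP_LIST_MASK);
	return 0;
}
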
 113/* Defines for list searches */
114#define NLP_SEARCH_MAPPED 0x1 /* search mapped */
115#define NLP_SEARCH_UNMAPPED 0x2 /* search unmapped */
116#define NLP_SEARCH_PLOGI 0x4 /* search plogi */
117#define NLP_SEARCH_ADISC 0x8 /* search adisc */
118#define NLP_SEARCH_REGLOGIN 0x10 /* search reglogin */
119#define NLP_SEARCH_PRLI 0x20 /* search prli */
120#define NLP_SEARCH_NPR 0x40 /* search npr */
 121#define NLP_SEARCH_UNUSED 0x80 /* search unused */
122#define NLP_SEARCH_ALL 0xff /* search all lists */
123
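
By contrast, the NLP_SEARCH_* values are genuine bit flags, so a caller such as lpfc_findnode_did() widens a lookup by OR'ing them together. A self-contained illustration of the OR semantics (values copied from the defines above):

#include <stdio.h>

#define NLP_SEARCH_MAPPED	0x1
#define NLP_SEARCH_UNMAPPED	0x2
#define NLP_SEARCH_NPR		0x40
#define NLP_SEARCH_ALL		0xff

/* Illustrative only: the search order is a bitmask telling the
 * lookup which lists to walk.
 */
int main(void)
{
	unsigned int order = NLP_SEARCH_UNMAPPED | NLP_SEARCH_NPR;

	printf("walk unmapped? %d\n", !!(order & NLP_SEARCH_UNMAPPED)); /* 1 */
	printf("walk mapped?   %d\n", !!(order & NLP_SEARCH_MAPPED));   /* 0 */
	printf("subset of ALL? %d\n", (order & NLP_SEARCH_ALL) == order); /* 1 */
	return 0;
}
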
 124/* There are 4 different doubly linked lists that nodelist entries can reside
 125 * on. The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
 126 * when Link Up discovery or Registered State Change Notification (RSCN)
 127 * processing is needed. Each list holds the nodes that require a PLOGI or
 128 * ADISC Extended Link Service (ELS) request. These lists keep track of the
 129 * nodes affected by an RSCN, or a Link Up (typically, all nodes are affected
 130 * by Link Up) event. The unmapped_list contains all nodes that have
 131 * successfully logged in at the Fibre Channel level. The
132 * mapped_list will contain all nodes that are mapped FCP targets.
133 *
134 * The bind list is a list of undiscovered (potentially non-existent) nodes
135 * that we have saved binding information on. This information is used when
136 * nodes transition from the unmapped to the mapped list.
137 */
138
139/* Defines for nlp_state */
140#define NLP_STE_UNUSED_NODE 0x0 /* node is just allocated */
141#define NLP_STE_PLOGI_ISSUE 0x1 /* PLOGI was sent to NL_PORT */
142#define NLP_STE_ADISC_ISSUE 0x2 /* ADISC was sent to NL_PORT */
143#define NLP_STE_REG_LOGIN_ISSUE 0x3 /* REG_LOGIN was issued for NL_PORT */
144#define NLP_STE_PRLI_ISSUE 0x4 /* PRLI was sent to NL_PORT */
145#define NLP_STE_UNMAPPED_NODE 0x5 /* PRLI completed from NL_PORT */
146#define NLP_STE_MAPPED_NODE 0x6 /* Identified as a FCP Target */
147#define NLP_STE_NPR_NODE 0x7 /* NPort disappeared */
148#define NLP_STE_MAX_STATE 0x8
149#define NLP_STE_FREED_NODE 0xff /* node entry was freed to MEM_NLP */
150
 151/* For UNUSED_NODE state, the node has just been allocated.
 152 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
 153 * the PLOGI list. When REG_LOGIN completes, the node is taken off the PLOGI
 154 * list and put on the unmapped list. For ADISC processing, the node is taken
 155 * off the ADISC list and placed on either the mapped or unmapped list
 156 * (depending on its previous state). Once on the unmapped list, a PRLI is
 157 * issued and the state changed to PRLI_ISSUE. When the PRLI completion
 158 * occurs, the state is changed to UNMAPPED_NODE or MAPPED_NODE. If the
 159 * completion indicates a mapped node, the node is taken off the unmapped
 160 * list. The binding list is checked for a valid binding, or a binding is
 161 * automatically assigned. If binding assignment is unsuccessful, the node is
 162 * left on the unmapped list; if successful, the associated binding list
 163 * entry (if any) is removed, and the node is placed on the mapped list.
 164 */
165/*
166 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
167 * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
 168 * expire, all affected nodes will receive a DEVICE_RM event.
169 */
170/*
171 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
172 * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
173 * check, additional nodes may be added (DEVICE_ADD) or removed (DEVICE_RM) to /
174 * from the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
 175 * we will first process the ADISC list. 32 entries are processed initially and
 176 * ADISC is initiated for each one. Completions / Events for each node are
 177 * funnelled through the state machine. As each node finishes ADISC processing,
 178 * it starts ADISC for any nodes waiting for ADISC processing. If no nodes are
 179 * waiting, and the ADISC list count is exactly 0, then we are done. For
 180 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
 181 * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
 182 * list. 32 entries are processed initially and PLOGI is initiated for each one.
 183 * Completions / Events for each node are funnelled through the state machine.
 184 * As each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
 185 * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
 186 * exactly 0, then we are done. We have now completed discovery / RSCN
187 * handling. Upon completion, ALL nodes should be on either the mapped or
188 * unmapped lists.
189 */
190
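
The "32 entries at a time" behaviour described above is an issue window: fill it once, then top it up from each completion until nothing is outstanding. A minimal userspace sketch of the pattern, with made-up counters standing in for the driver's node lists and completion paths:

#include <stdio.h>

#define DISC_WINDOW 32	/* mirrors LPFC_MAX_DISC_THREADS */

/* Illustrative only: issue up to DISC_WINDOW requests, then refill
 * the window from each completion; done when nothing is outstanding.
 */
static int pending = 100;	/* nodes still needing PLOGI/ADISC */
static int outstanding;

static void issue_one(void)
{
	pending--;
	outstanding++;
}

static void on_completion(void)
{
	outstanding--;
	if (pending > 0)
		issue_one();	/* keep the window full */
}

int main(void)
{
	int i, completions = 0;

	for (i = 0; i < DISC_WINDOW && pending > 0; i++)
		issue_one();
	while (outstanding > 0) {	/* simulate completions arriving */
		on_completion();
		completions++;
	}
	printf("completions: %d\n", completions);	/* 100 */
	return 0;
}
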
191/* Defines for Node List Entry Events that could happen */
192#define NLP_EVT_RCV_PLOGI 0x0 /* Rcv'd an ELS PLOGI command */
193#define NLP_EVT_RCV_PRLI 0x1 /* Rcv'd an ELS PRLI command */
194#define NLP_EVT_RCV_LOGO 0x2 /* Rcv'd an ELS LOGO command */
195#define NLP_EVT_RCV_ADISC 0x3 /* Rcv'd an ELS ADISC command */
196#define NLP_EVT_RCV_PDISC 0x4 /* Rcv'd an ELS PDISC command */
197#define NLP_EVT_RCV_PRLO 0x5 /* Rcv'd an ELS PRLO command */
198#define NLP_EVT_CMPL_PLOGI 0x6 /* Sent an ELS PLOGI command */
199#define NLP_EVT_CMPL_PRLI 0x7 /* Sent an ELS PRLI command */
200#define NLP_EVT_CMPL_LOGO 0x8 /* Sent an ELS LOGO command */
201#define NLP_EVT_CMPL_ADISC 0x9 /* Sent an ELS ADISC command */
202#define NLP_EVT_CMPL_REG_LOGIN 0xa /* REG_LOGIN mbox cmd completed */
203#define NLP_EVT_DEVICE_RM 0xb /* Device not found in NS / ALPAmap */
204#define NLP_EVT_DEVICE_RECOVERY 0xc /* Device existence unknown */
205#define NLP_EVT_MAX_EVENT 0xd
206
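
One plausible way to read the state and event defines above is as indices into a (state, event) dispatch table consumed by lpfc_disc_state_machine(). The sketch below shows that shape with an invented stub action; the driver's real table and actions are not reproduced here:

#include <stdio.h>

#define NLP_STE_PLOGI_ISSUE	0x1
#define NLP_STE_REG_LOGIN_ISSUE	0x3
#define NLP_STE_MAX_STATE	0x8
#define NLP_EVT_CMPL_PLOGI	0x6
#define NLP_EVT_MAX_EVENT	0xd

/* Illustrative only: stub action and empty table. */
typedef int (*disc_action_t)(void);

static int plogi_cmpl_ok(void)
{
	return NLP_STE_REG_LOGIN_ISSUE;	/* stub: pretend PLOGI worked */
}

static disc_action_t disc_table[NLP_STE_MAX_STATE][NLP_EVT_MAX_EVENT];

int main(void)
{
	disc_action_t fn;

	disc_table[NLP_STE_PLOGI_ISSUE][NLP_EVT_CMPL_PLOGI] = plogi_cmpl_ok;
	fn = disc_table[NLP_STE_PLOGI_ISSUE][NLP_EVT_CMPL_PLOGI];
	if (fn)
		printf("next state: 0x%x\n", fn());	/* 0x3 */
	return 0;
}
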
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
new file mode 100644
index 000000000000..68d1b77e0256
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -0,0 +1,3258 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_els.c 1.186 2005/04/13 14:26:55EDT sf_support Exp $
23 */
24
25#include <linux/blkdev.h>
26#include <linux/pci.h>
27#include <linux/interrupt.h>
28
29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_fc.h>
32
33#include "lpfc_hw.h"
34#include "lpfc_sli.h"
35#include "lpfc_disc.h"
36#include "lpfc_scsi.h"
37#include "lpfc.h"
38#include "lpfc_logmsg.h"
39#include "lpfc_crtn.h"
40
41static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
42 struct lpfc_iocbq *);
43static int lpfc_max_els_tries = 3;
44
45static int
46lpfc_els_chk_latt(struct lpfc_hba * phba)
47{
48 struct lpfc_sli *psli;
49 LPFC_MBOXQ_t *mbox;
50 uint32_t ha_copy;
51 int rc;
52
53 psli = &phba->sli;
54
55 if ((phba->hba_state >= LPFC_HBA_READY) ||
56 (phba->hba_state == LPFC_LINK_DOWN))
57 return 0;
58
59 /* Read the HBA Host Attention Register */
60 spin_lock_irq(phba->host->host_lock);
61 ha_copy = readl(phba->HAregaddr);
62 spin_unlock_irq(phba->host->host_lock);
63
64 if (!(ha_copy & HA_LATT))
65 return 0;
66
67 /* Pending Link Event during Discovery */
68 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
69 "%d:0237 Pending Link Event during "
70 "Discovery: State x%x\n",
71 phba->brd_no, phba->hba_state);
72
73 /* CLEAR_LA should re-enable link attention events and
 74 * we should then immediately take a LATT event. The
 75 * LATT processing should call lpfc_linkdown() which
 76 * will clean up any leftover in-progress discovery
77 * events.
78 */
79 spin_lock_irq(phba->host->host_lock);
80 phba->fc_flag |= FC_ABORT_DISCOVERY;
81 spin_unlock_irq(phba->host->host_lock);
82
83 if (phba->hba_state != LPFC_CLEAR_LA) {
84 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
85 phba->hba_state = LPFC_CLEAR_LA;
86 lpfc_clear_la(phba, mbox);
87 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
88 rc = lpfc_sli_issue_mbox (phba, mbox,
89 (MBX_NOWAIT | MBX_STOP_IOCB));
90 if (rc == MBX_NOT_FINISHED) {
91 mempool_free(mbox, phba->mbox_mem_pool);
92 phba->hba_state = LPFC_HBA_ERROR;
93 }
94 }
95 }
96
97 return (1);
98
99}
100
101static struct lpfc_iocbq *
102lpfc_prep_els_iocb(struct lpfc_hba * phba,
103 uint8_t expectRsp,
104 uint16_t cmdSize,
105 uint8_t retry, struct lpfc_nodelist * ndlp, uint32_t elscmd)
106{
107 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
108 struct lpfc_sli_ring *pring;
109 struct lpfc_iocbq *elsiocb = NULL;
110 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
111 struct ulp_bde64 *bpl;
112 IOCB_t *icmd;
113
114 pring = &phba->sli.ring[LPFC_ELS_RING];
115
116 if (phba->hba_state < LPFC_LINK_UP)
117 return NULL;
118
119
120 /* Allocate buffer for command iocb */
121 spin_lock_irq(phba->host->host_lock);
122 list_remove_head(lpfc_iocb_list, elsiocb, struct lpfc_iocbq, list);
123 spin_unlock_irq(phba->host->host_lock);
124
125 if (elsiocb == NULL)
126 return NULL;
127 memset(elsiocb, 0, sizeof (struct lpfc_iocbq));
128 icmd = &elsiocb->iocb;
129
130 /* fill in BDEs for command */
131 /* Allocate buffer for command payload */
132 if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
133 ((pcmd->virt = lpfc_mbuf_alloc(phba,
134 MEM_PRI, &(pcmd->phys))) == 0)) {
135 if (pcmd)
136 kfree(pcmd);
137
138 list_add_tail(&elsiocb->list, lpfc_iocb_list);
139 return NULL;
140 }
141
142 INIT_LIST_HEAD(&pcmd->list);
143
144 /* Allocate buffer for response payload */
145 if (expectRsp) {
146 prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
147 if (prsp)
148 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
149 &prsp->phys);
150 if (prsp == 0 || prsp->virt == 0) {
151 if (prsp)
152 kfree(prsp);
153 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
154 kfree(pcmd);
155 list_add_tail(&elsiocb->list, lpfc_iocb_list);
156 return NULL;
157 }
158 INIT_LIST_HEAD(&prsp->list);
159 } else {
160 prsp = NULL;
161 }
162
163 /* Allocate buffer for Buffer ptr list */
164 pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
165 if (pbuflist)
166 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
167 &pbuflist->phys);
168 if (pbuflist == 0 || pbuflist->virt == 0) {
169 list_add_tail(&elsiocb->list, lpfc_iocb_list);
170 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
 171 if (prsp) lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
172 kfree(pcmd);
173 kfree(prsp);
174 if (pbuflist)
175 kfree(pbuflist);
176 return NULL;
177 }
178
179 INIT_LIST_HEAD(&pbuflist->list);
180
181 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
182 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
183 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
184 if (expectRsp) {
185 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
186 icmd->un.elsreq64.remoteID = ndlp->nlp_DID; /* DID */
187 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
188 } else {
189 icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64);
190 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
191 }
192
193 icmd->ulpBdeCount = 1;
194 icmd->ulpLe = 1;
195 icmd->ulpClass = CLASS3;
196
197 bpl = (struct ulp_bde64 *) pbuflist->virt;
198 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
199 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
200 bpl->tus.f.bdeSize = cmdSize;
201 bpl->tus.f.bdeFlags = 0;
202 bpl->tus.w = le32_to_cpu(bpl->tus.w);
203
204 if (expectRsp) {
205 bpl++;
206 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
207 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
208 bpl->tus.f.bdeSize = FCELSSIZE;
209 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
210 bpl->tus.w = le32_to_cpu(bpl->tus.w);
211 }
212
213 /* Save for completion so we can release these resources */
214 elsiocb->context1 = (uint8_t *) ndlp;
215 elsiocb->context2 = (uint8_t *) pcmd;
216 elsiocb->context3 = (uint8_t *) pbuflist;
217 elsiocb->retry = retry;
218 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
219
220 if (prsp) {
221 list_add(&prsp->list, &pcmd->list);
222 }
223
224 if (expectRsp) {
225 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
226 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
227 "%d:0116 Xmit ELS command x%x to remote "
228 "NPORT x%x Data: x%x x%x\n",
229 phba->brd_no, elscmd,
230 ndlp->nlp_DID, icmd->ulpIoTag, phba->hba_state);
231 } else {
232 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
233 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
234 "%d:0117 Xmit ELS response x%x to remote "
235 "NPORT x%x Data: x%x x%x\n",
236 phba->brd_no, elscmd,
237 ndlp->nlp_DID, icmd->ulpIoTag, cmdSize);
238 }
239
240 return (elsiocb);
241}
242
243
244static int
245lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
246 struct serv_parm *sp, IOCB_t *irsp)
247{
248 LPFC_MBOXQ_t *mbox;
249 int rc;
250
251 spin_lock_irq(phba->host->host_lock);
252 phba->fc_flag |= FC_FABRIC;
253 spin_unlock_irq(phba->host->host_lock);
254
255 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
256 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
257 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
258
259 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
260
261 if (phba->fc_topology == TOPOLOGY_LOOP) {
262 spin_lock_irq(phba->host->host_lock);
263 phba->fc_flag |= FC_PUBLIC_LOOP;
264 spin_unlock_irq(phba->host->host_lock);
265 } else {
266 /*
 267 * If we are an N_Port connected to a Fabric, fix up the
 268 * sparams so logins to devices on remote loops work.
269 */
270 phba->fc_sparam.cmn.altBbCredit = 1;
271 }
272
273 phba->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
274 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
275 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
276 ndlp->nlp_class_sup = 0;
277 if (sp->cls1.classValid)
278 ndlp->nlp_class_sup |= FC_COS_CLASS1;
279 if (sp->cls2.classValid)
280 ndlp->nlp_class_sup |= FC_COS_CLASS2;
281 if (sp->cls3.classValid)
282 ndlp->nlp_class_sup |= FC_COS_CLASS3;
283 if (sp->cls4.classValid)
284 ndlp->nlp_class_sup |= FC_COS_CLASS4;
285 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
286 sp->cmn.bbRcvSizeLsb;
287 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
288
289 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
290 if (!mbox)
291 goto fail;
292
293 phba->hba_state = LPFC_FABRIC_CFG_LINK;
294 lpfc_config_link(phba, mbox);
295 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
296
297 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
298 if (rc == MBX_NOT_FINISHED)
299 goto fail_free_mbox;
300
301 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
302 if (!mbox)
303 goto fail;
304
305 if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0))
306 goto fail_free_mbox;
307
308 /*
309 * set_slim mailbox command needs to execute first,
310 * queue this command to be processed later.
311 */
312 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
313 mbox->context2 = ndlp;
314
315 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
316 if (rc == MBX_NOT_FINISHED)
317 goto fail_free_mbox;
318
319 return 0;
320
321 fail_free_mbox:
322 mempool_free(mbox, phba->mbox_mem_pool);
323 fail:
324 return -ENXIO;
325}
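
The two timeout conversions above use add-then-divide ceiling rounding: when edtovResolution is set, E_D_TOV arrives in nanoseconds and is rounded up to milliseconds, and R_A_TOV is rounded up from milliseconds to seconds. A quick standalone check of the arithmetic (sample values invented):

#include <stdio.h>

/* Illustrative: (x + d - 1) / d rounds x up to whole units of d,
 * exactly as the FLOGI timeout conversions above do.
 */
int main(void)
{
	unsigned int edtov_ns = 2000000001u;	/* just over 2 s, in ns */
	unsigned int ratov_ms = 10001;		/* just over 10 s, in ms */

	printf("fc_edtov = %u ms\n", (edtov_ns + 999999) / 1000000); /* 2001 */
	printf("fc_ratov = %u s\n", (ratov_ms + 999) / 1000);        /* 11 */
	return 0;
}
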
326
327/*
328 * We FLOGIed into an NPort, initiate pt2pt protocol
329 */
330static int
331lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
332 struct serv_parm *sp)
333{
334 LPFC_MBOXQ_t *mbox;
335 int rc;
336
337 spin_lock_irq(phba->host->host_lock);
338 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
339 spin_unlock_irq(phba->host->host_lock);
340
341 phba->fc_edtov = FF_DEF_EDTOV;
342 phba->fc_ratov = FF_DEF_RATOV;
343 rc = memcmp(&phba->fc_portname, &sp->portName,
344 sizeof(struct lpfc_name));
345 if (rc >= 0) {
346 /* This side will initiate the PLOGI */
347 spin_lock_irq(phba->host->host_lock);
348 phba->fc_flag |= FC_PT2PT_PLOGI;
349 spin_unlock_irq(phba->host->host_lock);
350
351 /*
 352 * N_Port ID cannot be 0, so set ours to LocalID; the
 353 * other side will be RemoteID.
354 */
355
356 /* not equal */
357 if (rc)
358 phba->fc_myDID = PT2PT_LocalID;
359
360 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
361 if (!mbox)
362 goto fail;
363
364 lpfc_config_link(phba, mbox);
365
366 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
367 rc = lpfc_sli_issue_mbox(phba, mbox,
368 MBX_NOWAIT | MBX_STOP_IOCB);
369 if (rc == MBX_NOT_FINISHED) {
370 mempool_free(mbox, phba->mbox_mem_pool);
371 goto fail;
372 }
373 mempool_free(ndlp, phba->nlp_mem_pool);
374
375 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, PT2PT_RemoteID);
376 if (!ndlp) {
377 /*
378 * Cannot find existing Fabric ndlp, so allocate a
379 * new one
380 */
381 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
382 if (!ndlp)
383 goto fail;
384
385 lpfc_nlp_init(phba, ndlp, PT2PT_RemoteID);
386 }
387
388 memcpy(&ndlp->nlp_portname, &sp->portName,
389 sizeof(struct lpfc_name));
390 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
391 sizeof(struct lpfc_name));
392 ndlp->nlp_state = NLP_STE_NPR_NODE;
393 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
394 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
395 } else {
396 /* This side will wait for the PLOGI */
 397 mempool_free(ndlp, phba->nlp_mem_pool);
398 }
399
400 spin_lock_irq(phba->host->host_lock);
401 phba->fc_flag |= FC_PT2PT;
402 spin_unlock_irq(phba->host->host_lock);
403
404 /* Start discovery - this should just do CLEAR_LA */
405 lpfc_disc_start(phba);
406 return 0;
407 fail:
408 return -ENXIO;
409}
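
The role decision in lpfc_cmpl_els_flogi_nport() above comes down to a memcmp() over the two 8-byte port names: a result >= 0 means this side initiates the PLOGI, and only a non-zero result lets it claim PT2PT_LocalID. A standalone illustration with invented WWPNs standing in for struct lpfc_name:

#include <stdio.h>
#include <string.h>

/* Illustrative only: the same memcmp() tie-break used after a
 * point-to-point FLOGI.
 */
int main(void)
{
	unsigned char mine[8]   = { 0x10, 0x00, 0x00, 0x00,
				    0xc9, 0x2e, 0x00, 0x02 };
	unsigned char theirs[8] = { 0x10, 0x00, 0x00, 0x00,
				    0xc9, 0x2e, 0x00, 0x01 };
	int rc = memcmp(mine, theirs, sizeof(mine));

	if (rc >= 0)
		printf("this side sends the PLOGI%s\n",
		       rc ? " and takes PT2PT_LocalID" : "");
	else
		printf("wait for the remote PLOGI\n");
	return 0;
}
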
410
411static void
412lpfc_cmpl_els_flogi(struct lpfc_hba * phba,
413 struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
414{
415 IOCB_t *irsp = &rspiocb->iocb;
416 struct lpfc_nodelist *ndlp = cmdiocb->context1;
417 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
418 struct serv_parm *sp;
419 int rc;
420
421 /* Check to see if link went down during discovery */
422 if (lpfc_els_chk_latt(phba)) {
423 lpfc_nlp_remove(phba, ndlp);
424 goto out;
425 }
426
427 if (irsp->ulpStatus) {
428 /* Check for retry */
429 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
430 /* ELS command is being retried */
431 goto out;
432 }
433 /* FLOGI failed, so there is no fabric */
434 spin_lock_irq(phba->host->host_lock);
435 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
436 spin_unlock_irq(phba->host->host_lock);
437
 438 /* If private loop, then allow the max outstanding ELS count
 439 * to be LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
 440 * ALPA map would take too long otherwise.
441 */
442 if (phba->alpa_map[0] == 0) {
443 phba->cfg_discovery_threads =
444 LPFC_MAX_DISC_THREADS;
445 }
446
447 /* FLOGI failure */
448 lpfc_printf_log(phba,
449 KERN_INFO,
450 LOG_ELS,
451 "%d:0100 FLOGI failure Data: x%x x%x\n",
452 phba->brd_no,
453 irsp->ulpStatus, irsp->un.ulpWord[4]);
454 goto flogifail;
455 }
456
457 /*
458 * The FLogI succeeded. Sync the data for the CPU before
459 * accessing it.
460 */
461 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
462
463 sp = prsp->virt + sizeof(uint32_t);
464
465 /* FLOGI completes successfully */
466 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
467 "%d:0101 FLOGI completes sucessfully "
468 "Data: x%x x%x x%x x%x\n",
469 phba->brd_no,
470 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
471 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
472
473 if (phba->hba_state == LPFC_FLOGI) {
474 /*
 475 * If the Common Service Parameters indicate an Nport,
 476 * we are point to point; if an Fport, we are fabric attached.
477 */
478 if (sp->cmn.fPort)
479 rc = lpfc_cmpl_els_flogi_fabric(phba, ndlp, sp, irsp);
480 else
481 rc = lpfc_cmpl_els_flogi_nport(phba, ndlp, sp);
482
483 if (!rc)
484 goto out;
485 }
486
487flogifail:
488 lpfc_nlp_remove(phba, ndlp);
489
490 if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
491 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED &&
492 irsp->un.ulpWord[4] != IOERR_SLI_DOWN)) {
493 /* FLOGI failed, so just use loop map to make discovery list */
494 lpfc_disc_list_loopmap(phba);
495
496 /* Start discovery */
497 lpfc_disc_start(phba);
498 }
499
500out:
501 lpfc_els_free_iocb(phba, cmdiocb);
502}
503
504static int
505lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
506 uint8_t retry)
507{
508 struct serv_parm *sp;
509 IOCB_t *icmd;
510 struct lpfc_iocbq *elsiocb;
511 struct lpfc_sli_ring *pring;
512 uint8_t *pcmd;
513 uint16_t cmdsize;
514 uint32_t tmo;
515 int rc;
516
517 pring = &phba->sli.ring[LPFC_ELS_RING];
518
519 cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
520 if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
521 ndlp, ELS_CMD_FLOGI)) == 0) {
522 return (1);
523 }
524
525 icmd = &elsiocb->iocb;
526 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
527
528 /* For FLOGI request, remainder of payload is service parameters */
529 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
530 pcmd += sizeof (uint32_t);
531 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
532 sp = (struct serv_parm *) pcmd;
533
534 /* Setup CSPs accordingly for Fabric */
535 sp->cmn.e_d_tov = 0;
536 sp->cmn.w2.r_a_tov = 0;
537 sp->cls1.classValid = 0;
538 sp->cls2.seqDelivery = 1;
539 sp->cls3.seqDelivery = 1;
540 if (sp->cmn.fcphLow < FC_PH3)
541 sp->cmn.fcphLow = FC_PH3;
542 if (sp->cmn.fcphHigh < FC_PH3)
543 sp->cmn.fcphHigh = FC_PH3;
544
545 tmo = phba->fc_ratov;
546 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
547 lpfc_set_disctmo(phba);
548 phba->fc_ratov = tmo;
549
550 phba->fc_stat.elsXmitFLOGI++;
551 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
552 spin_lock_irq(phba->host->host_lock);
553 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
554 spin_unlock_irq(phba->host->host_lock);
555 if (rc == IOCB_ERROR) {
556 lpfc_els_free_iocb(phba, elsiocb);
557 return (1);
558 }
559 return (0);
560}
561
562int
563lpfc_els_abort_flogi(struct lpfc_hba * phba)
564{
565 struct lpfc_sli_ring *pring;
566 struct lpfc_iocbq *iocb, *next_iocb;
567 struct lpfc_nodelist *ndlp;
568 IOCB_t *icmd;
569
570 /* Abort outstanding I/O on NPort <nlp_DID> */
571 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
572 "%d:0201 Abort outstanding I/O on NPort x%x\n",
573 phba->brd_no, Fabric_DID);
574
575 pring = &phba->sli.ring[LPFC_ELS_RING];
576
577 /*
578 * Check the txcmplq for an iocb that matches the nport the driver is
579 * searching for.
580 */
581 spin_lock_irq(phba->host->host_lock);
582 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
583 icmd = &iocb->iocb;
584 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
585 ndlp = (struct lpfc_nodelist *)(iocb->context1);
586 if (ndlp && (ndlp->nlp_DID == Fabric_DID)) {
587 list_del(&iocb->list);
588 pring->txcmplq_cnt--;
589
590 if ((icmd->un.elsreq64.bdl.ulpIoTag32)) {
591 lpfc_sli_issue_abort_iotag32
592 (phba, pring, iocb);
593 }
594 if (iocb->iocb_cmpl) {
595 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
596 icmd->un.ulpWord[4] =
597 IOERR_SLI_ABORTED;
598 spin_unlock_irq(phba->host->host_lock);
599 (iocb->iocb_cmpl) (phba, iocb, iocb);
600 spin_lock_irq(phba->host->host_lock);
601 } else {
602 list_add_tail(&iocb->list,
603 &phba->lpfc_iocb_list);
604 }
605 }
606 }
607 }
608 spin_unlock_irq(phba->host->host_lock);
609
610 return 0;
611}
612
613int
614lpfc_initial_flogi(struct lpfc_hba * phba)
615{
616 struct lpfc_nodelist *ndlp;
617
618 /* First look for Fabric ndlp on the unmapped list */
619
620 if ((ndlp =
621 lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
622 Fabric_DID)) == 0) {
623 /* Cannot find existing Fabric ndlp, so allocate a new one */
624 if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL))
625 == 0) {
626 return (0);
627 }
628 lpfc_nlp_init(phba, ndlp, Fabric_DID);
629 }
630 else {
631 phba->fc_unmap_cnt--;
632 list_del(&ndlp->nlp_listp);
633 spin_lock_irq(phba->host->host_lock);
634 ndlp->nlp_flag &= ~NLP_LIST_MASK;
635 spin_unlock_irq(phba->host->host_lock);
636 }
637 if (lpfc_issue_els_flogi(phba, ndlp, 0)) {
 638 mempool_free(ndlp, phba->nlp_mem_pool);
639 }
640 return (1);
641}
642
643static void
644lpfc_more_plogi(struct lpfc_hba * phba)
645{
646 int sentplogi;
647
648 if (phba->num_disc_nodes)
649 phba->num_disc_nodes--;
650
651 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
652 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
653 "%d:0232 Continue discovery with %d PLOGIs to go "
654 "Data: x%x x%x x%x\n",
655 phba->brd_no, phba->num_disc_nodes, phba->fc_plogi_cnt,
656 phba->fc_flag, phba->hba_state);
657
658 /* Check to see if there are more PLOGIs to be sent */
659 if (phba->fc_flag & FC_NLP_MORE) {
660 /* go thru NPR list and issue any remaining ELS PLOGIs */
661 sentplogi = lpfc_els_disc_plogi(phba);
662 }
663 return;
664}
665
666static void
667lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
668 struct lpfc_iocbq * rspiocb)
669{
670 IOCB_t *irsp;
671 struct lpfc_sli *psli;
672 struct lpfc_nodelist *ndlp;
673 int disc, rc, did, type;
674
675 psli = &phba->sli;
676
677 /* we pass cmdiocb to state machine which needs rspiocb as well */
678 cmdiocb->context_un.rsp_iocb = rspiocb;
679
680 irsp = &rspiocb->iocb;
681 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
682 spin_lock_irq(phba->host->host_lock);
683 ndlp->nlp_flag &= ~NLP_PLOGI_SND;
684 spin_unlock_irq(phba->host->host_lock);
685
686 /* Since ndlp can be freed in the disc state machine, note if this node
687 * is being used during discovery.
688 */
689 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
690 rc = 0;
691
692 /* PLOGI completes to NPort <nlp_DID> */
693 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
694 "%d:0102 PLOGI completes to NPort x%x "
695 "Data: x%x x%x x%x x%x\n",
696 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
697 irsp->un.ulpWord[4], disc, phba->num_disc_nodes);
698
699 /* Check to see if link went down during discovery */
700 if (lpfc_els_chk_latt(phba)) {
701 spin_lock_irq(phba->host->host_lock);
702 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
703 spin_unlock_irq(phba->host->host_lock);
704 goto out;
705 }
706
707 /* ndlp could be freed in DSM, save these values now */
708 type = ndlp->nlp_type;
709 did = ndlp->nlp_DID;
710
711 if (irsp->ulpStatus) {
712 /* Check for retry */
713 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
714 /* ELS command is being retried */
715 if (disc) {
716 spin_lock_irq(phba->host->host_lock);
717 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
718 spin_unlock_irq(phba->host->host_lock);
719 }
720 goto out;
721 }
722
723 /* PLOGI failed */
724 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
725 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
726 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
727 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
728 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
729 }
730 else {
731 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
732 NLP_EVT_CMPL_PLOGI);
733 }
734 } else {
735 /* Good status, call state machine */
736 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
737 NLP_EVT_CMPL_PLOGI);
738 }
739
740 if (type & NLP_FABRIC) {
741 /* If we cannot login to Nameserver, kick off discovery now */
742 if ((did == NameServer_DID) && (rc == NLP_STE_FREED_NODE)) {
743 lpfc_disc_start(phba);
744 }
745 goto out;
746 }
747
748 if (disc && phba->num_disc_nodes) {
749 /* Check to see if there are more PLOGIs to be sent */
750 lpfc_more_plogi(phba);
751 }
752
753 if (rc != NLP_STE_FREED_NODE) {
754 spin_lock_irq(phba->host->host_lock);
755 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
756 spin_unlock_irq(phba->host->host_lock);
757 }
758
759 if (phba->num_disc_nodes == 0) {
 760 if (disc) {
761 spin_lock_irq(phba->host->host_lock);
762 phba->fc_flag &= ~FC_NDISC_ACTIVE;
763 spin_unlock_irq(phba->host->host_lock);
764 }
765 lpfc_can_disctmo(phba);
766 if (phba->fc_flag & FC_RSCN_MODE) {
767 /* Check to see if more RSCNs came in while we were
768 * processing this one.
769 */
770 if ((phba->fc_rscn_id_cnt == 0) &&
771 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
772 spin_lock_irq(phba->host->host_lock);
773 phba->fc_flag &= ~FC_RSCN_MODE;
774 spin_unlock_irq(phba->host->host_lock);
775 } else {
776 lpfc_els_handle_rscn(phba);
777 }
778 }
779 }
780
781out:
782 lpfc_els_free_iocb(phba, cmdiocb);
783 return;
784}
785
786int
787lpfc_issue_els_plogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
788 uint8_t retry)
789{
790 struct serv_parm *sp;
791 IOCB_t *icmd;
792 struct lpfc_iocbq *elsiocb;
793 struct lpfc_sli_ring *pring;
794 struct lpfc_sli *psli;
795 uint8_t *pcmd;
796 uint16_t cmdsize;
797
798 psli = &phba->sli;
799 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
800
801 cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
802 if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
803 ndlp, ELS_CMD_PLOGI)) == 0) {
804 return (1);
805 }
806
807 icmd = &elsiocb->iocb;
808 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
809
810 /* For PLOGI request, remainder of payload is service parameters */
811 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
812 pcmd += sizeof (uint32_t);
813 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
814 sp = (struct serv_parm *) pcmd;
815
816 if (sp->cmn.fcphLow < FC_PH_4_3)
817 sp->cmn.fcphLow = FC_PH_4_3;
818
819 if (sp->cmn.fcphHigh < FC_PH3)
820 sp->cmn.fcphHigh = FC_PH3;
821
822 phba->fc_stat.elsXmitPLOGI++;
823 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
824 spin_lock_irq(phba->host->host_lock);
825 ndlp->nlp_flag |= NLP_PLOGI_SND;
826 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
827 ndlp->nlp_flag &= ~NLP_PLOGI_SND;
828 spin_unlock_irq(phba->host->host_lock);
829 lpfc_els_free_iocb(phba, elsiocb);
830 return (1);
831 }
832 spin_unlock_irq(phba->host->host_lock);
833 return (0);
834}
835
836static void
837lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
838 struct lpfc_iocbq * rspiocb)
839{
840 IOCB_t *irsp;
841 struct lpfc_sli *psli;
842 struct lpfc_nodelist *ndlp;
843
844 psli = &phba->sli;
845 /* we pass cmdiocb to state machine which needs rspiocb as well */
846 cmdiocb->context_un.rsp_iocb = rspiocb;
847
848 irsp = &(rspiocb->iocb);
849 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
850 spin_lock_irq(phba->host->host_lock);
851 ndlp->nlp_flag &= ~NLP_PRLI_SND;
852 spin_unlock_irq(phba->host->host_lock);
853
854 /* PRLI completes to NPort <nlp_DID> */
855 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
856 "%d:0103 PRLI completes to NPort x%x "
857 "Data: x%x x%x x%x\n",
858 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
859 irsp->un.ulpWord[4], phba->num_disc_nodes);
860
861 phba->fc_prli_sent--;
862 /* Check to see if link went down during discovery */
863 if (lpfc_els_chk_latt(phba))
864 goto out;
865
866 if (irsp->ulpStatus) {
867 /* Check for retry */
868 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
869 /* ELS command is being retried */
870 goto out;
871 }
872 /* PRLI failed */
873 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
874 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
875 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
876 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
877 goto out;
878 }
879 else {
880 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
881 NLP_EVT_CMPL_PRLI);
882 }
883 } else {
884 /* Good status, call state machine */
885 lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI);
886 }
887
888out:
889 lpfc_els_free_iocb(phba, cmdiocb);
890 return;
891}
892
893int
894lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
895 uint8_t retry)
896{
897 PRLI *npr;
898 IOCB_t *icmd;
899 struct lpfc_iocbq *elsiocb;
900 struct lpfc_sli_ring *pring;
901 struct lpfc_sli *psli;
902 uint8_t *pcmd;
903 uint16_t cmdsize;
904
905 psli = &phba->sli;
906 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
907
908 cmdsize = (sizeof (uint32_t) + sizeof (PRLI));
909 if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
910 ndlp, ELS_CMD_PRLI)) == 0) {
911 return (1);
912 }
913
914 icmd = &elsiocb->iocb;
915 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
916
917 /* For PRLI request, remainder of payload is service parameters */
918 memset(pcmd, 0, (sizeof (PRLI) + sizeof (uint32_t)));
919 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
920 pcmd += sizeof (uint32_t);
921
922 /* For PRLI, remainder of payload is PRLI parameter page */
923 npr = (PRLI *) pcmd;
924 /*
925 * If our firmware version is 3.20 or later,
926 * set the following bits for FC-TAPE support.
927 */
928 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
929 npr->ConfmComplAllowed = 1;
930 npr->Retry = 1;
931 npr->TaskRetryIdReq = 1;
932 }
933 npr->estabImagePair = 1;
934 npr->readXferRdyDis = 1;
935
936 /* For FCP support */
937 npr->prliType = PRLI_FCP_TYPE;
938 npr->initiatorFunc = 1;
939
940 phba->fc_stat.elsXmitPRLI++;
941 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
942 spin_lock_irq(phba->host->host_lock);
943 ndlp->nlp_flag |= NLP_PRLI_SND;
944 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
945 ndlp->nlp_flag &= ~NLP_PRLI_SND;
946 spin_unlock_irq(phba->host->host_lock);
947 lpfc_els_free_iocb(phba, elsiocb);
948 return (1);
949 }
950 spin_unlock_irq(phba->host->host_lock);
951 phba->fc_prli_sent++;
952 return (0);
953}
954
955static void
956lpfc_more_adisc(struct lpfc_hba * phba)
957{
958 int sentadisc;
959
960 if (phba->num_disc_nodes)
961 phba->num_disc_nodes--;
962
963 /* Continue discovery with <num_disc_nodes> ADISCs to go */
964 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
965 "%d:0210 Continue discovery with %d ADISCs to go "
966 "Data: x%x x%x x%x\n",
967 phba->brd_no, phba->num_disc_nodes, phba->fc_adisc_cnt,
968 phba->fc_flag, phba->hba_state);
969
970 /* Check to see if there are more ADISCs to be sent */
971 if (phba->fc_flag & FC_NLP_MORE) {
972 lpfc_set_disctmo(phba);
973
974 /* go thru NPR list and issue any remaining ELS ADISCs */
975 sentadisc = lpfc_els_disc_adisc(phba);
976 }
977 return;
978}
979
980static void
981lpfc_rscn_disc(struct lpfc_hba * phba)
982{
983 /* RSCN discovery */
984 /* go thru NPR list and issue ELS PLOGIs */
985 if (phba->fc_npr_cnt) {
986 if (lpfc_els_disc_plogi(phba))
987 return;
988 }
989 if (phba->fc_flag & FC_RSCN_MODE) {
990 /* Check to see if more RSCNs came in while we were
991 * processing this one.
992 */
993 if ((phba->fc_rscn_id_cnt == 0) &&
994 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
995 spin_lock_irq(phba->host->host_lock);
996 phba->fc_flag &= ~FC_RSCN_MODE;
997 spin_unlock_irq(phba->host->host_lock);
998 } else {
999 lpfc_els_handle_rscn(phba);
1000 }
1001 }
1002}
1003
1004static void
1005lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1006 struct lpfc_iocbq * rspiocb)
1007{
1008 IOCB_t *irsp;
1009 struct lpfc_sli *psli;
1010 struct lpfc_nodelist *ndlp;
1011 LPFC_MBOXQ_t *mbox;
1012 int disc, rc;
1013
1014 psli = &phba->sli;
1015
1016 /* we pass cmdiocb to state machine which needs rspiocb as well */
1017 cmdiocb->context_un.rsp_iocb = rspiocb;
1018
1019 irsp = &(rspiocb->iocb);
1020 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1021 spin_lock_irq(phba->host->host_lock);
1022 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1023 spin_unlock_irq(phba->host->host_lock);
1024
1025 /* Since ndlp can be freed in the disc state machine, note if this node
1026 * is being used during discovery.
1027 */
1028 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1029
1030 /* ADISC completes to NPort <nlp_DID> */
1031 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1032 "%d:0104 ADISC completes to NPort x%x "
1033 "Data: x%x x%x x%x x%x\n",
1034 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
1035 irsp->un.ulpWord[4], disc, phba->num_disc_nodes);
1036
1037 /* Check to see if link went down during discovery */
1038 if (lpfc_els_chk_latt(phba)) {
1039 spin_lock_irq(phba->host->host_lock);
1040 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1041 spin_unlock_irq(phba->host->host_lock);
1042 goto out;
1043 }
1044
1045 if (irsp->ulpStatus) {
1046 /* Check for retry */
1047 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1048 /* ELS command is being retried */
1049 if (disc) {
1050 spin_lock_irq(phba->host->host_lock);
1051 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1052 spin_unlock_irq(phba->host->host_lock);
1053 lpfc_set_disctmo(phba);
1054 }
1055 goto out;
1056 }
1057 /* ADISC failed */
1058 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1059 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1060 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
1061 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
1062 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1063 }
1064 else {
1065 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
1066 NLP_EVT_CMPL_ADISC);
1067 }
1068 } else {
1069 /* Good status, call state machine */
1070 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
1071 NLP_EVT_CMPL_ADISC);
1072 }
1073
1074 if (disc && phba->num_disc_nodes) {
1075 /* Check to see if there are more ADISCs to be sent */
1076 lpfc_more_adisc(phba);
1077
1078 /* Check to see if we are done with ADISC authentication */
1079 if (phba->num_disc_nodes == 0) {
1080 lpfc_can_disctmo(phba);
1081 /* If we get here, there is nothing left to wait for */
1082 if ((phba->hba_state < LPFC_HBA_READY) &&
1083 (phba->hba_state != LPFC_CLEAR_LA)) {
1084 /* Link up discovery */
1085 if ((mbox = mempool_alloc(phba->mbox_mem_pool,
1086 GFP_KERNEL))) {
1087 phba->hba_state = LPFC_CLEAR_LA;
1088 lpfc_clear_la(phba, mbox);
1089 mbox->mbox_cmpl =
1090 lpfc_mbx_cmpl_clear_la;
1091 rc = lpfc_sli_issue_mbox
1092 (phba, mbox,
1093 (MBX_NOWAIT | MBX_STOP_IOCB));
1094 if (rc == MBX_NOT_FINISHED) {
1095 mempool_free(mbox,
1096 phba->mbox_mem_pool);
1097 lpfc_disc_flush_list(phba);
1098 psli->ring[(psli->ip_ring)].
1099 flag &=
1100 ~LPFC_STOP_IOCB_EVENT;
1101 psli->ring[(psli->fcp_ring)].
1102 flag &=
1103 ~LPFC_STOP_IOCB_EVENT;
1104 psli->ring[(psli->next_ring)].
1105 flag &=
1106 ~LPFC_STOP_IOCB_EVENT;
1107 phba->hba_state =
1108 LPFC_HBA_READY;
1109 }
1110 }
1111 } else {
1112 lpfc_rscn_disc(phba);
1113 }
1114 }
1115 }
1116 spin_lock_irq(phba->host->host_lock);
1117 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1118 spin_unlock_irq(phba->host->host_lock);
1119out:
1120 lpfc_els_free_iocb(phba, cmdiocb);
1121 return;
1122}
1123
1124int
1125lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
1126 uint8_t retry)
1127{
1128 ADISC *ap;
1129 IOCB_t *icmd;
1130 struct lpfc_iocbq *elsiocb;
1131 struct lpfc_sli_ring *pring;
1132 struct lpfc_sli *psli;
1133 uint8_t *pcmd;
1134 uint16_t cmdsize;
1135
1136 psli = &phba->sli;
1137 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1138
1139 cmdsize = (sizeof (uint32_t) + sizeof (ADISC));
1140 if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
1141 ndlp, ELS_CMD_ADISC)) == 0) {
1142 return (1);
1143 }
1144
1145 icmd = &elsiocb->iocb;
1146 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1147
1148 /* For ADISC request, remainder of payload is service parameters */
1149 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
1150 pcmd += sizeof (uint32_t);
1151
1152 /* Fill in ADISC payload */
1153 ap = (ADISC *) pcmd;
1154 ap->hardAL_PA = phba->fc_pref_ALPA;
1155 memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
1156 memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
1157 ap->DID = be32_to_cpu(phba->fc_myDID);
1158
1159 phba->fc_stat.elsXmitADISC++;
1160 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
1161 spin_lock_irq(phba->host->host_lock);
1162 ndlp->nlp_flag |= NLP_ADISC_SND;
1163 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1164 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1165 spin_unlock_irq(phba->host->host_lock);
1166 lpfc_els_free_iocb(phba, elsiocb);
1167 return (1);
1168 }
1169 spin_unlock_irq(phba->host->host_lock);
1170 return (0);
1171}
1172
1173static void
1174lpfc_cmpl_els_logo(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1175 struct lpfc_iocbq * rspiocb)
1176{
1177 IOCB_t *irsp;
1178 struct lpfc_sli *psli;
1179 struct lpfc_nodelist *ndlp;
1180
1181 psli = &phba->sli;
1182 /* we pass cmdiocb to state machine which needs rspiocb as well */
1183 cmdiocb->context_un.rsp_iocb = rspiocb;
1184
1185 irsp = &(rspiocb->iocb);
1186 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1187 spin_lock_irq(phba->host->host_lock);
1188 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1189 spin_unlock_irq(phba->host->host_lock);
1190
1191 /* LOGO completes to NPort <nlp_DID> */
1192 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1193 "%d:0105 LOGO completes to NPort x%x "
1194 "Data: x%x x%x x%x\n",
1195 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
1196 irsp->un.ulpWord[4], phba->num_disc_nodes);
1197
1198 /* Check to see if link went down during discovery */
1199 if (lpfc_els_chk_latt(phba))
1200 goto out;
1201
1202 if (irsp->ulpStatus) {
1203 /* Check for retry */
1204 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1205 /* ELS command is being retried */
1206 goto out;
1207 }
1208 /* LOGO failed */
1209 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1210 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1211 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
1212 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
1213 goto out;
1214 }
1215 else {
1216 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
1217 NLP_EVT_CMPL_LOGO);
1218 }
1219 } else {
1220 /* Good status, call state machine */
1221 lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
1222
1223 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1224 lpfc_unreg_rpi(phba, ndlp);
1225 }
1226 }
1227
1228out:
1229 lpfc_els_free_iocb(phba, cmdiocb);
1230 return;
1231}
1232
1233int
1234lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
1235 uint8_t retry)
1236{
1237 IOCB_t *icmd;
1238 struct lpfc_iocbq *elsiocb;
1239 struct lpfc_sli_ring *pring;
1240 struct lpfc_sli *psli;
1241 uint8_t *pcmd;
1242 uint16_t cmdsize;
1243
1244 psli = &phba->sli;
1245 pring = &psli->ring[LPFC_ELS_RING];
1246
1247 cmdsize = 2 * (sizeof (uint32_t) + sizeof (struct lpfc_name));
1248 if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
1249 ndlp, ELS_CMD_LOGO)) == 0) {
1250 return (1);
1251 }
1252
1253 icmd = &elsiocb->iocb;
1254 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1255 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
1256 pcmd += sizeof (uint32_t);
1257
1258 /* Fill in LOGO payload */
1259 *((uint32_t *) (pcmd)) = be32_to_cpu(phba->fc_myDID);
1260 pcmd += sizeof (uint32_t);
1261 memcpy(pcmd, &phba->fc_portname, sizeof (struct lpfc_name));
1262
1263 phba->fc_stat.elsXmitLOGO++;
1264 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
1265 spin_lock_irq(phba->host->host_lock);
1266 ndlp->nlp_flag |= NLP_LOGO_SND;
1267 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1268 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1269 spin_unlock_irq(phba->host->host_lock);
1270 lpfc_els_free_iocb(phba, elsiocb);
1271 return (1);
1272 }
1273 spin_unlock_irq(phba->host->host_lock);
1274 return (0);
1275}
1276
1277static void
1278lpfc_cmpl_els_cmd(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1279 struct lpfc_iocbq * rspiocb)
1280{
1281 IOCB_t *irsp;
1282
1283 irsp = &rspiocb->iocb;
1284
1285 /* ELS cmd tag <ulpIoTag> completes */
1286 lpfc_printf_log(phba,
1287 KERN_INFO,
1288 LOG_ELS,
1289 "%d:0106 ELS cmd tag x%x completes Data: x%x x%x\n",
1290 phba->brd_no,
1291 irsp->ulpIoTag, irsp->ulpStatus, irsp->un.ulpWord[4]);
1292
1293 /* Check to see if link went down during discovery */
1294 lpfc_els_chk_latt(phba);
1295 lpfc_els_free_iocb(phba, cmdiocb);
1296 return;
1297}
1298
1299int
1300lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1301{
1302 IOCB_t *icmd;
1303 struct lpfc_iocbq *elsiocb;
1304 struct lpfc_sli_ring *pring;
1305 struct lpfc_sli *psli;
1306 uint8_t *pcmd;
1307 uint16_t cmdsize;
1308 struct lpfc_nodelist *ndlp;
1309
1310 psli = &phba->sli;
1311 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1312 cmdsize = (sizeof (uint32_t) + sizeof (SCR));
1313 if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL)) == 0) {
1314 return (1);
1315 }
1316
1317 lpfc_nlp_init(phba, ndlp, nportid);
1318
1319 if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
1320 ndlp, ELS_CMD_SCR)) == 0) {
 1321 mempool_free(ndlp, phba->nlp_mem_pool);
1322 return (1);
1323 }
1324
1325 icmd = &elsiocb->iocb;
1326 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1327
1328 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
1329 pcmd += sizeof (uint32_t);
1330
1331 /* For SCR, remainder of payload is SCR parameter page */
1332 memset(pcmd, 0, sizeof (SCR));
1333 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
1334
1335 phba->fc_stat.elsXmitSCR++;
1336 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1337 spin_lock_irq(phba->host->host_lock);
1338 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1339 spin_unlock_irq(phba->host->host_lock);
 1340 mempool_free(ndlp, phba->nlp_mem_pool);
1341 lpfc_els_free_iocb(phba, elsiocb);
1342 return (1);
1343 }
1344 spin_unlock_irq(phba->host->host_lock);
 1345 mempool_free(ndlp, phba->nlp_mem_pool);
1346 return (0);
1347}
1348
1349static int
1350lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1351{
1352 IOCB_t *icmd;
1353 struct lpfc_iocbq *elsiocb;
1354 struct lpfc_sli_ring *pring;
1355 struct lpfc_sli *psli;
1356 FARP *fp;
1357 uint8_t *pcmd;
1358 uint32_t *lp;
1359 uint16_t cmdsize;
1360 struct lpfc_nodelist *ondlp;
1361 struct lpfc_nodelist *ndlp;
1362
1363 psli = &phba->sli;
1364 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1365 cmdsize = (sizeof (uint32_t) + sizeof (FARP));
1366 if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL)) == 0) {
1367 return (1);
1368 }
1369 lpfc_nlp_init(phba, ndlp, nportid);
1370
1371 if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
1372 ndlp, ELS_CMD_RNID)) == 0) {
1373		mempool_free(ndlp, phba->nlp_mem_pool);
1374 return (1);
1375 }
1376
1377 icmd = &elsiocb->iocb;
1378 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1379
1380 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
1381 pcmd += sizeof (uint32_t);
1382
1383 /* Fill in FARPR payload */
1384 fp = (FARP *) (pcmd);
1385 memset(fp, 0, sizeof (FARP));
1386 lp = (uint32_t *) pcmd;
1387 *lp++ = be32_to_cpu(nportid);
1388 *lp++ = be32_to_cpu(phba->fc_myDID);
1389 fp->Rflags = 0;
1390 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
1391
1392 memcpy(&fp->RportName, &phba->fc_portname, sizeof (struct lpfc_name));
1393 memcpy(&fp->RnodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
1394 if ((ondlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, nportid))) {
1395 memcpy(&fp->OportName, &ondlp->nlp_portname,
1396 sizeof (struct lpfc_name));
1397 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
1398 sizeof (struct lpfc_name));
1399 }
1400
1401 phba->fc_stat.elsXmitFARPR++;
1402 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1403 spin_lock_irq(phba->host->host_lock);
1404 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1405 spin_unlock_irq(phba->host->host_lock);
1406		mempool_free(ndlp, phba->nlp_mem_pool);
1407 lpfc_els_free_iocb(phba, elsiocb);
1408 return (1);
1409 }
1410 spin_unlock_irq(phba->host->host_lock);
1411	mempool_free(ndlp, phba->nlp_mem_pool);
1412 return (0);
1413}
1414
1415void
1416lpfc_els_retry_delay(unsigned long ptr)
1417{
1418 struct lpfc_nodelist *ndlp;
1419 struct lpfc_hba *phba;
1420 unsigned long iflag;
1421 struct lpfc_work_evt *evtp;
1422
1423 ndlp = (struct lpfc_nodelist *)ptr;
1424 phba = ndlp->nlp_phba;
1425 evtp = &ndlp->els_retry_evt;
1426
1427 spin_lock_irqsave(phba->host->host_lock, iflag);
1428 if (!list_empty(&evtp->evt_listp)) {
1429 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1430 return;
1431 }
1432
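	/* The retry timer fires in interrupt context, so queue an
	 * ELS-retry event here and let the worker thread perform the
	 * actual retransmit.
	 */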
1433 evtp->evt_arg1 = ndlp;
1434 evtp->evt = LPFC_EVT_ELS_RETRY;
1435 list_add_tail(&evtp->evt_listp, &phba->work_list);
1436 if (phba->work_wait)
1437 wake_up(phba->work_wait);
1438
1439 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1440 return;
1441}
1442
1443void
1444lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
1445{
1446 struct lpfc_hba *phba;
1447 uint32_t cmd;
1448 uint32_t did;
1449 uint8_t retry;
1450
1451 phba = ndlp->nlp_phba;
1452 spin_lock_irq(phba->host->host_lock);
1453 did = (uint32_t) (ndlp->nlp_DID);
1454 cmd = (uint32_t) (ndlp->nlp_last_elscmd);
1455
1456 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1457 spin_unlock_irq(phba->host->host_lock);
1458 return;
1459 }
1460
1461 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1462 spin_unlock_irq(phba->host->host_lock);
1463 retry = ndlp->nlp_retry;
1464
1465 switch (cmd) {
1466 case ELS_CMD_FLOGI:
1467 lpfc_issue_els_flogi(phba, ndlp, retry);
1468 break;
1469 case ELS_CMD_PLOGI:
1470 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1471 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1472 lpfc_issue_els_plogi(phba, ndlp, retry);
1473 break;
1474 case ELS_CMD_ADISC:
1475 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1476 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1477 lpfc_issue_els_adisc(phba, ndlp, retry);
1478 break;
1479 case ELS_CMD_PRLI:
1480 ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
1481 lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
1482 lpfc_issue_els_prli(phba, ndlp, retry);
1483 break;
1484 case ELS_CMD_LOGO:
1485 ndlp->nlp_state = NLP_STE_NPR_NODE;
1486 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1487 lpfc_issue_els_logo(phba, ndlp, retry);
1488 break;
1489 }
1490 return;
1491}
1492
1493static int
1494lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1495 struct lpfc_iocbq * rspiocb)
1496{
1497 IOCB_t *irsp;
1498 struct lpfc_dmabuf *pcmd;
1499 struct lpfc_nodelist *ndlp;
1500 uint32_t *elscmd;
1501 struct ls_rjt stat;
1502 int retry, maxretry;
1503 int delay;
1504 uint32_t cmd;
1505
1506 retry = 0;
1507 delay = 0;
1508 maxretry = lpfc_max_els_tries;
1509 irsp = &rspiocb->iocb;
1510 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1511 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1512 cmd = 0;
1513	/* Note: context2 may be NULL for an internal driver abort
1514	 * of a delayed ELS command.
1515	 */
1516
1517 if (pcmd && pcmd->virt) {
1518 elscmd = (uint32_t *) (pcmd->virt);
1519 cmd = *elscmd++;
1520 }
1521
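	/* Map the completion status onto a retry decision: whether to
	 * retry at all, whether the retry should be delayed, and how
	 * many attempts to allow for this command.
	 */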
1522 switch (irsp->ulpStatus) {
1523 case IOSTAT_FCP_RSP_ERROR:
1524 case IOSTAT_REMOTE_STOP:
1525 break;
1526
1527 case IOSTAT_LOCAL_REJECT:
1528 switch ((irsp->un.ulpWord[4] & 0xff)) {
1529 case IOERR_LOOP_OPEN_FAILURE:
1530 if (cmd == ELS_CMD_PLOGI) {
1531 if (cmdiocb->retry == 0) {
1532 delay = 1;
1533 }
1534 }
1535 retry = 1;
1536 break;
1537
1538 case IOERR_SEQUENCE_TIMEOUT:
1539 retry = 1;
1540 if ((cmd == ELS_CMD_FLOGI)
1541 && (phba->fc_topology != TOPOLOGY_LOOP)) {
1542 delay = 1;
1543 maxretry = 48;
1544 }
1545 break;
1546
1547 case IOERR_NO_RESOURCES:
1548 if (cmd == ELS_CMD_PLOGI) {
1549 delay = 1;
1550 }
1551 retry = 1;
1552 break;
1553
1554 case IOERR_INVALID_RPI:
1555 retry = 1;
1556 break;
1557 }
1558 break;
1559
1560 case IOSTAT_NPORT_RJT:
1561 case IOSTAT_FABRIC_RJT:
1562 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
1563 retry = 1;
1564 break;
1565 }
1566 break;
1567
1568 case IOSTAT_NPORT_BSY:
1569 case IOSTAT_FABRIC_BSY:
1570 retry = 1;
1571 break;
1572
1573 case IOSTAT_LS_RJT:
1574 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
1575		/* Added for vendor-specific support:
1576		 * just keep retrying for these Rsn / Exp codes.
1577		 */
1578 switch (stat.un.b.lsRjtRsnCode) {
1579 case LSRJT_UNABLE_TPC:
1580 if (stat.un.b.lsRjtRsnCodeExp ==
1581 LSEXP_CMD_IN_PROGRESS) {
1582 if (cmd == ELS_CMD_PLOGI) {
1583 delay = 1;
1584 maxretry = 48;
1585 }
1586 retry = 1;
1587 break;
1588 }
1589 if (cmd == ELS_CMD_PLOGI) {
1590 delay = 1;
1591 maxretry = lpfc_max_els_tries + 1;
1592 retry = 1;
1593 break;
1594 }
1595 break;
1596
1597 case LSRJT_LOGICAL_BSY:
1598 if (cmd == ELS_CMD_PLOGI) {
1599 delay = 1;
1600 maxretry = 48;
1601 }
1602 retry = 1;
1603 break;
1604 }
1605 break;
1606
1607 case IOSTAT_INTERMED_RSP:
1608 case IOSTAT_BA_RJT:
1609 break;
1610
1611 default:
1612 break;
1613 }
1614
1615 if (ndlp->nlp_DID == FDMI_DID) {
1616 retry = 1;
1617 }
1618
1619 if ((++cmdiocb->retry) >= maxretry) {
1620 phba->fc_stat.elsRetryExceeded++;
1621 retry = 0;
1622 }
1623
1624 if (retry) {
1625
1626 /* Retry ELS command <elsCmd> to remote NPORT <did> */
1627 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1628 "%d:0107 Retry ELS command x%x to remote "
1629 "NPORT x%x Data: x%x x%x\n",
1630 phba->brd_no,
1631 cmd, ndlp->nlp_DID, cmdiocb->retry, delay);
1632
1633 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) {
1634 /* If discovery / RSCN timer is running, reset it */
1635 if (timer_pending(&phba->fc_disctmo) ||
1636 (phba->fc_flag & FC_RSCN_MODE)) {
1637 lpfc_set_disctmo(phba);
1638 }
1639 }
1640
1641 phba->fc_stat.elsXmitRetry++;
1642 if (delay) {
1643 phba->fc_stat.elsDelayRetry++;
1644 ndlp->nlp_retry = cmdiocb->retry;
1645
1646 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
1647 ndlp->nlp_flag |= NLP_DELAY_TMO;
1648
1649 ndlp->nlp_state = NLP_STE_NPR_NODE;
1650 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1651 ndlp->nlp_last_elscmd = cmd;
1652
1653 return (1);
1654 }
1655 switch (cmd) {
1656 case ELS_CMD_FLOGI:
1657 lpfc_issue_els_flogi(phba, ndlp, cmdiocb->retry);
1658 return (1);
1659 case ELS_CMD_PLOGI:
1660 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1661 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1662 lpfc_issue_els_plogi(phba, ndlp, cmdiocb->retry);
1663 return (1);
1664 case ELS_CMD_ADISC:
1665 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1666 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1667 lpfc_issue_els_adisc(phba, ndlp, cmdiocb->retry);
1668 return (1);
1669 case ELS_CMD_PRLI:
1670 ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
1671 lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
1672 lpfc_issue_els_prli(phba, ndlp, cmdiocb->retry);
1673 return (1);
1674 case ELS_CMD_LOGO:
1675 ndlp->nlp_state = NLP_STE_NPR_NODE;
1676 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1677 lpfc_issue_els_logo(phba, ndlp, cmdiocb->retry);
1678 return (1);
1679 }
1680 }
1681
1682 /* No retry ELS command <elsCmd> to remote NPORT <did> */
1683 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1684 "%d:0108 No retry ELS command x%x to remote NPORT x%x "
1685 "Data: x%x x%x\n",
1686 phba->brd_no,
1687 cmd, ndlp->nlp_DID, cmdiocb->retry, ndlp->nlp_flag);
1688
1689 return (0);
1690}
1691
1692int
1693lpfc_els_free_iocb(struct lpfc_hba * phba, struct lpfc_iocbq * elsiocb)
1694{
1695 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
1696
1697 /* context2 = cmd, context2->next = rsp, context3 = bpl */
1698 if (elsiocb->context2) {
1699 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
1700 /* Free the response before processing the command. */
1701 if (!list_empty(&buf_ptr1->list)) {
1702 list_remove_head(&buf_ptr1->list, buf_ptr,
1703 struct lpfc_dmabuf,
1704 list);
1705 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1706 kfree(buf_ptr);
1707 }
1708 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
1709 kfree(buf_ptr1);
1710 }
1711
1712 if (elsiocb->context3) {
1713 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
1714 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1715 kfree(buf_ptr);
1716 }
1717 spin_lock_irq(phba->host->host_lock);
1718 list_add_tail(&elsiocb->list, &phba->lpfc_iocb_list);
1719 spin_unlock_irq(phba->host->host_lock);
1720 return 0;
1721}
1722
1723static void
1724lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1725 struct lpfc_iocbq * rspiocb)
1726{
1727 struct lpfc_nodelist *ndlp;
1728
1729 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1730
1731 /* ACC to LOGO completes to NPort <nlp_DID> */
1732 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1733 "%d:0109 ACC to LOGO completes to NPort x%x "
1734 "Data: x%x x%x x%x\n",
1735 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
1736 ndlp->nlp_state, ndlp->nlp_rpi);
1737
1738 spin_lock_irq(phba->host->host_lock);
1739 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
1740 spin_unlock_irq(phba->host->host_lock);
1741
1742 switch (ndlp->nlp_state) {
1743 case NLP_STE_UNUSED_NODE: /* node is just allocated */
1744 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1745 break;
1746 case NLP_STE_NPR_NODE: /* NPort Recovery mode */
1747 lpfc_unreg_rpi(phba, ndlp);
1748 break;
1749 default:
1750 break;
1751 }
1752 lpfc_els_free_iocb(phba, cmdiocb);
1753 return;
1754}
1755
1756static void
1757lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1758 struct lpfc_iocbq * rspiocb)
1759{
1760 struct lpfc_nodelist *ndlp;
1761 LPFC_MBOXQ_t *mbox = NULL;
1762
1763 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1764 if (cmdiocb->context_un.mbox)
1765 mbox = cmdiocb->context_un.mbox;
1766
1768 /* Check to see if link went down during discovery */
1769 if ((lpfc_els_chk_latt(phba)) || !ndlp) {
1770 if (mbox) {
1771			mempool_free(mbox, phba->mbox_mem_pool);
1772 }
1773 goto out;
1774 }
1775
1776 /* ELS response tag <ulpIoTag> completes */
1777 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1778 "%d:0110 ELS response tag x%x completes "
1779 "Data: x%x x%x x%x x%x x%x x%x\n",
1780 phba->brd_no,
1781 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
1782 rspiocb->iocb.un.ulpWord[4], ndlp->nlp_DID,
1783 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
1784
1785 if (mbox) {
1786 if ((rspiocb->iocb.ulpStatus == 0)
1787 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
1788			/* The set_slim mailbox command needs to execute
1789			 * first; queue this command to be processed later.
1790			 */
1791 lpfc_unreg_rpi(phba, ndlp);
1792 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1793 mbox->context2 = ndlp;
1794 ndlp->nlp_state = NLP_STE_REG_LOGIN_ISSUE;
1795 lpfc_nlp_list(phba, ndlp, NLP_REGLOGIN_LIST);
1796 if (lpfc_sli_issue_mbox(phba, mbox,
1797 (MBX_NOWAIT | MBX_STOP_IOCB))
1798 != MBX_NOT_FINISHED) {
1799 goto out;
1800 }
1801 /* NOTE: we should have messages for unsuccessful
1802 reglogin */
1803			mempool_free(mbox, phba->mbox_mem_pool);
1804 } else {
1805			mempool_free(mbox, phba->mbox_mem_pool);
1806 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1807 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1808 }
1809 }
1810 }
1811out:
1812 if (ndlp) {
1813 spin_lock_irq(phba->host->host_lock);
1814 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
1815 spin_unlock_irq(phba->host->host_lock);
1816 }
1817 lpfc_els_free_iocb(phba, cmdiocb);
1818 return;
1819}
1820
1821int
1822lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1823 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp,
1824 LPFC_MBOXQ_t * mbox, uint8_t newnode)
1825{
1826 IOCB_t *icmd;
1827 IOCB_t *oldcmd;
1828 struct lpfc_iocbq *elsiocb;
1829 struct lpfc_sli_ring *pring;
1830 struct lpfc_sli *psli;
1831 uint8_t *pcmd;
1832 uint16_t cmdsize;
1833 int rc;
1834
1835 psli = &phba->sli;
1836 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1837 oldcmd = &oldiocb->iocb;
1838
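	/* The flag selects the ACC format: a bare ACC, or (for PLOGI)
	 * an ACC that also carries our service parameters.
	 */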
1839 switch (flag) {
1840 case ELS_CMD_ACC:
1841 cmdsize = sizeof (uint32_t);
1842 if ((elsiocb =
1843 lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1844 ndlp, ELS_CMD_ACC)) == 0) {
1845 return (1);
1846 }
1847 icmd = &elsiocb->iocb;
1848 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1849 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1850 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1851 pcmd += sizeof (uint32_t);
1852 break;
1853 case ELS_CMD_PLOGI:
1854 cmdsize = (sizeof (struct serv_parm) + sizeof (uint32_t));
1855 if ((elsiocb =
1856 lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1857 ndlp, ELS_CMD_ACC)) == 0) {
1858 return (1);
1859 }
1860 icmd = &elsiocb->iocb;
1861 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1862 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1863
1864 if (mbox)
1865 elsiocb->context_un.mbox = mbox;
1866
1867 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1868 pcmd += sizeof (uint32_t);
1869 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
1870 break;
1871 default:
1872 return (1);
1873 }
1874
1875 if (newnode)
1876 elsiocb->context1 = NULL;
1877
1878 /* Xmit ELS ACC response tag <ulpIoTag> */
1879 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1880 "%d:0128 Xmit ELS ACC response tag x%x "
1881 "Data: x%x x%x x%x x%x x%x\n",
1882 phba->brd_no,
1883 elsiocb->iocb.ulpIoTag,
1884 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
1885 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
1886
1887 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
1888 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
1889 } else {
1890 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
1891 }
1892
1893 phba->fc_stat.elsXmitACC++;
1894 spin_lock_irq(phba->host->host_lock);
1895 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
1896 spin_unlock_irq(phba->host->host_lock);
1897 if (rc == IOCB_ERROR) {
1898 lpfc_els_free_iocb(phba, elsiocb);
1899 return (1);
1900 }
1901 return (0);
1902}
1903
1904int
1905lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
1906 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
1907{
1908 IOCB_t *icmd;
1909 IOCB_t *oldcmd;
1910 struct lpfc_iocbq *elsiocb;
1911 struct lpfc_sli_ring *pring;
1912 struct lpfc_sli *psli;
1913 uint8_t *pcmd;
1914 uint16_t cmdsize;
1915 int rc;
1916
1917 psli = &phba->sli;
1918 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1919
1920 cmdsize = 2 * sizeof (uint32_t);
1921 if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1922 ndlp, ELS_CMD_LS_RJT)) == 0) {
1923 return (1);
1924 }
1925
1926 icmd = &elsiocb->iocb;
1927 oldcmd = &oldiocb->iocb;
1928 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1929 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1930
1931 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
1932 pcmd += sizeof (uint32_t);
1933 *((uint32_t *) (pcmd)) = rejectError;
1934
1935 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
1936 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1937 "%d:0129 Xmit ELS RJT x%x response tag x%x "
1938 "Data: x%x x%x x%x x%x x%x\n",
1939 phba->brd_no,
1940 rejectError, elsiocb->iocb.ulpIoTag,
1941 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
1942 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
1943
1944 phba->fc_stat.elsXmitLSRJT++;
1945 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
1946 spin_lock_irq(phba->host->host_lock);
1947 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
1948 spin_unlock_irq(phba->host->host_lock);
1949 if (rc == IOCB_ERROR) {
1950 lpfc_els_free_iocb(phba, elsiocb);
1951 return (1);
1952 }
1953 return (0);
1954}
1955
1956int
1957lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
1958 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
1959{
1960 ADISC *ap;
1961 IOCB_t *icmd;
1962 IOCB_t *oldcmd;
1963 struct lpfc_iocbq *elsiocb;
1964 struct lpfc_sli_ring *pring;
1965 struct lpfc_sli *psli;
1966 uint8_t *pcmd;
1967 uint16_t cmdsize;
1968 int rc;
1969
1970 psli = &phba->sli;
1971 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1972
1973 cmdsize = sizeof (uint32_t) + sizeof (ADISC);
1974 if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1975 ndlp, ELS_CMD_ACC)) == 0) {
1976 return (1);
1977 }
1978
1979 /* Xmit ADISC ACC response tag <ulpIoTag> */
1980 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1981 "%d:0130 Xmit ADISC ACC response tag x%x "
1982 "Data: x%x x%x x%x x%x x%x\n",
1983 phba->brd_no,
1984 elsiocb->iocb.ulpIoTag,
1985 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
1986 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
1987
1988 icmd = &elsiocb->iocb;
1989 oldcmd = &oldiocb->iocb;
1990 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1991 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1992
1993 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1994 pcmd += sizeof (uint32_t);
1995
1996 ap = (ADISC *) (pcmd);
1997 ap->hardAL_PA = phba->fc_pref_ALPA;
1998 memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
1999 memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
2000 ap->DID = be32_to_cpu(phba->fc_myDID);
2001
2002 phba->fc_stat.elsXmitACC++;
2003 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2004 spin_lock_irq(phba->host->host_lock);
2005 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2006 spin_unlock_irq(phba->host->host_lock);
2007 if (rc == IOCB_ERROR) {
2008 lpfc_els_free_iocb(phba, elsiocb);
2009 return (1);
2010 }
2011 return (0);
2012}
2013
2014int
2015lpfc_els_rsp_prli_acc(struct lpfc_hba * phba,
2016 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
2017{
2018 PRLI *npr;
2019 lpfc_vpd_t *vpd;
2020 IOCB_t *icmd;
2021 IOCB_t *oldcmd;
2022 struct lpfc_iocbq *elsiocb;
2023 struct lpfc_sli_ring *pring;
2024 struct lpfc_sli *psli;
2025 uint8_t *pcmd;
2026 uint16_t cmdsize;
2027 int rc;
2028
2029 psli = &phba->sli;
2030 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2031
2032 cmdsize = sizeof (uint32_t) + sizeof (PRLI);
2033 if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
2034 ndlp,
2035 (ELS_CMD_ACC |
2036 (ELS_CMD_PRLI & ~ELS_RSP_MASK)))) ==
2037 0) {
2038 return (1);
2039 }
2040
2041 /* Xmit PRLI ACC response tag <ulpIoTag> */
2042 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2043 "%d:0131 Xmit PRLI ACC response tag x%x "
2044 "Data: x%x x%x x%x x%x x%x\n",
2045 phba->brd_no,
2046 elsiocb->iocb.ulpIoTag,
2047 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2048 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2049
2050 icmd = &elsiocb->iocb;
2051 oldcmd = &oldiocb->iocb;
2052 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2053 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2054
2055 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
2056 pcmd += sizeof (uint32_t);
2057
2058 /* For PRLI, remainder of payload is PRLI parameter page */
2059 memset(pcmd, 0, sizeof (PRLI));
2060
2061 npr = (PRLI *) pcmd;
2062 vpd = &phba->vpd;
2063 /*
2064 * If our firmware version is 3.20 or later,
2065 * set the following bits for FC-TAPE support.
2066 */
2067 if (vpd->rev.feaLevelHigh >= 0x02) {
2068 npr->ConfmComplAllowed = 1;
2069 npr->Retry = 1;
2070 npr->TaskRetryIdReq = 1;
2071 }
2072
2073 npr->acceptRspCode = PRLI_REQ_EXECUTED;
2074 npr->estabImagePair = 1;
2075 npr->readXferRdyDis = 1;
2076 npr->ConfmComplAllowed = 1;
2077
2078 npr->prliType = PRLI_FCP_TYPE;
2079 npr->initiatorFunc = 1;
2080
2081 phba->fc_stat.elsXmitACC++;
2082 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2083
2084 spin_lock_irq(phba->host->host_lock);
2085 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2086 spin_unlock_irq(phba->host->host_lock);
2087 if (rc == IOCB_ERROR) {
2088 lpfc_els_free_iocb(phba, elsiocb);
2089 return (1);
2090 }
2091 return (0);
2092}
2093
2094static int
2095lpfc_els_rsp_rnid_acc(struct lpfc_hba * phba,
2096 uint8_t format,
2097 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
2098{
2099 RNID *rn;
2100 IOCB_t *icmd;
2101 IOCB_t *oldcmd;
2102 struct lpfc_iocbq *elsiocb;
2103 struct lpfc_sli_ring *pring;
2104 struct lpfc_sli *psli;
2105 uint8_t *pcmd;
2106 uint16_t cmdsize;
2107 int rc;
2108
2109 psli = &phba->sli;
2110 pring = &psli->ring[LPFC_ELS_RING];
2111
2112 cmdsize = sizeof (uint32_t) + sizeof (uint32_t)
2113 + (2 * sizeof (struct lpfc_name));
2114 if (format)
2115 cmdsize += sizeof (RNID_TOP_DISC);
2116
2117 if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
2118 ndlp, ELS_CMD_ACC)) == 0) {
2119 return (1);
2120 }
2121
2122 /* Xmit RNID ACC response tag <ulpIoTag> */
2123 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2124 "%d:0132 Xmit RNID ACC response tag x%x "
2125 "Data: x%x\n",
2126 phba->brd_no,
2127 elsiocb->iocb.ulpIoTag,
2128 elsiocb->iocb.ulpContext);
2129
2130 icmd = &elsiocb->iocb;
2131 oldcmd = &oldiocb->iocb;
2132 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2133 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2134
2135 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2136 pcmd += sizeof (uint32_t);
2137
2138 memset(pcmd, 0, sizeof (RNID));
2139 rn = (RNID *) (pcmd);
2140 rn->Format = format;
2141 rn->CommonLen = (2 * sizeof (struct lpfc_name));
2142 memcpy(&rn->portName, &phba->fc_portname, sizeof (struct lpfc_name));
2143 memcpy(&rn->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
2144 switch (format) {
2145 case 0:
2146 rn->SpecificLen = 0;
2147 break;
2148 case RNID_TOPOLOGY_DISC:
2149 rn->SpecificLen = sizeof (RNID_TOP_DISC);
2150 memcpy(&rn->un.topologyDisc.portName,
2151 &phba->fc_portname, sizeof (struct lpfc_name));
2152 rn->un.topologyDisc.unitType = RNID_HBA;
2153 rn->un.topologyDisc.physPort = 0;
2154 rn->un.topologyDisc.attachedNodes = 0;
2155 break;
2156 default:
2157 rn->CommonLen = 0;
2158 rn->SpecificLen = 0;
2159 break;
2160 }
2161
2162 phba->fc_stat.elsXmitACC++;
2163 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2164 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
2165 * it could be freed */
2166
2167 spin_lock_irq(phba->host->host_lock);
2168 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2169 spin_unlock_irq(phba->host->host_lock);
2170 if (rc == IOCB_ERROR) {
2171 lpfc_els_free_iocb(phba, elsiocb);
2172 return (1);
2173 }
2174 return (0);
2175}
2176
2177int
2178lpfc_els_disc_adisc(struct lpfc_hba * phba)
2179{
2180 int sentadisc;
2181 struct lpfc_nodelist *ndlp, *next_ndlp;
2182
2183 sentadisc = 0;
2184	/* Go through the NPR list and issue any remaining ELS ADISCs */
2185 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
2186 nlp_listp) {
2187 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2188 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2189 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2190 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
2191 lpfc_nlp_list(phba, ndlp,
2192 NLP_ADISC_LIST);
2193 lpfc_issue_els_adisc(phba, ndlp, 0);
2194 sentadisc++;
2195 phba->num_disc_nodes++;
2196 if (phba->num_disc_nodes >=
2197 phba->cfg_discovery_threads) {
2198 spin_lock_irq(phba->host->host_lock);
2199 phba->fc_flag |= FC_NLP_MORE;
2200 spin_unlock_irq(phba->host->host_lock);
2201 break;
2202 }
2203 }
2204 }
2205 }
2206 if (sentadisc == 0) {
2207 spin_lock_irq(phba->host->host_lock);
2208 phba->fc_flag &= ~FC_NLP_MORE;
2209 spin_unlock_irq(phba->host->host_lock);
2210 }
2211 return(sentadisc);
2212}
2213
2214int
2215lpfc_els_disc_plogi(struct lpfc_hba * phba)
2216{
2217 int sentplogi;
2218 struct lpfc_nodelist *ndlp, *next_ndlp;
2219
2220 sentplogi = 0;
2221	/* Go through the NPR list and issue any remaining ELS PLOGIs */
2222 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
2223 nlp_listp) {
2224 if ((ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
2225 (!(ndlp->nlp_flag & NLP_DELAY_TMO))) {
2226 if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2227 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
2228 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
2229 lpfc_issue_els_plogi(phba, ndlp, 0);
2230 sentplogi++;
2231 phba->num_disc_nodes++;
2232 if (phba->num_disc_nodes >=
2233 phba->cfg_discovery_threads) {
2234 spin_lock_irq(phba->host->host_lock);
2235 phba->fc_flag |= FC_NLP_MORE;
2236 spin_unlock_irq(phba->host->host_lock);
2237 break;
2238 }
2239 }
2240 }
2241 }
2242 if (sentplogi == 0) {
2243 spin_lock_irq(phba->host->host_lock);
2244 phba->fc_flag &= ~FC_NLP_MORE;
2245 spin_unlock_irq(phba->host->host_lock);
2246 }
2247 return(sentplogi);
2248}
2249
2250int
2251lpfc_els_flush_rscn(struct lpfc_hba * phba)
2252{
2253 struct lpfc_dmabuf *mp;
2254 int i;
2255
2256 for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
2257 mp = phba->fc_rscn_id_list[i];
2258 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2259 kfree(mp);
2260 phba->fc_rscn_id_list[i] = NULL;
2261 }
2262 phba->fc_rscn_id_cnt = 0;
2263 spin_lock_irq(phba->host->host_lock);
2264 phba->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
2265 spin_unlock_irq(phba->host->host_lock);
2266 lpfc_can_disctmo(phba);
2267 return (0);
2268}
2269
2270int
2271lpfc_rscn_payload_check(struct lpfc_hba * phba, uint32_t did)
2272{
2273 D_ID ns_did;
2274 D_ID rscn_did;
2275 struct lpfc_dmabuf *mp;
2276 uint32_t *lp;
2277 uint32_t payload_len, cmd, i, match;
2278
2279 ns_did.un.word = did;
2280 match = 0;
2281
2282 /* Never match fabric nodes for RSCNs */
2283 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
2284 return(0);
2285
2286 /* If we are doing a FULL RSCN rediscovery, match everything */
2287 if (phba->fc_flag & FC_RSCN_DISCOVERY) {
2288 return (did);
2289 }
2290
2291 for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
2292 mp = phba->fc_rscn_id_list[i];
2293 lp = (uint32_t *) mp->virt;
2294 cmd = *lp++;
2295 payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
2296 payload_len -= sizeof (uint32_t); /* take off word 0 */
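		/* Walk each affected-address entry; the address format
		 * field selects how much of the D_ID (port, area, or
		 * domain) must match.
		 */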
2297 while (payload_len) {
2298 rscn_did.un.word = *lp++;
2299 rscn_did.un.word = be32_to_cpu(rscn_did.un.word);
2300 payload_len -= sizeof (uint32_t);
2301 switch (rscn_did.un.b.resv) {
2302			case 0:	/* Single N_Port ID affected */
2303 if (ns_did.un.word == rscn_did.un.word) {
2304 match = did;
2305 }
2306 break;
2307			case 1:	/* Whole N_Port Area affected */
2308 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
2309 && (ns_did.un.b.area == rscn_did.un.b.area))
2310 {
2311 match = did;
2312 }
2313 break;
2314			case 2:	/* Whole N_Port Domain affected */
2315 if (ns_did.un.b.domain == rscn_did.un.b.domain)
2316 {
2317 match = did;
2318 }
2319 break;
2320			case 3:	/* Whole Fabric affected */
2321 match = did;
2322 break;
2323 default:
2324 /* Unknown Identifier in RSCN list */
2325 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2326 "%d:0217 Unknown Identifier in "
2327 "RSCN payload Data: x%x\n",
2328 phba->brd_no, rscn_did.un.word);
2329 break;
2330 }
2331 if (match) {
2332 break;
2333 }
2334 }
2335 }
2336 return (match);
2337}
2338
2339static int
2340lpfc_rscn_recovery_check(struct lpfc_hba * phba)
2341{
2342 struct lpfc_nodelist *ndlp = NULL, *next_ndlp;
2343 struct list_head *listp;
2344 struct list_head *node_list[7];
2345 int i;
2346
2347	/* Look at all nodes affected by pending RSCNs and move
2348	 * them to the NPR list.
2349	 */
2350 node_list[0] = &phba->fc_npr_list; /* MUST do this list first */
2351 node_list[1] = &phba->fc_nlpmap_list;
2352 node_list[2] = &phba->fc_nlpunmap_list;
2353 node_list[3] = &phba->fc_prli_list;
2354 node_list[4] = &phba->fc_reglogin_list;
2355 node_list[5] = &phba->fc_adisc_list;
2356 node_list[6] = &phba->fc_plogi_list;
2357 for (i = 0; i < 7; i++) {
2358 listp = node_list[i];
2359 if (list_empty(listp))
2360 continue;
2361
2362 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
2363 if (!(lpfc_rscn_payload_check(phba, ndlp->nlp_DID)))
2364 continue;
2365
2366 lpfc_disc_state_machine(phba, ndlp, NULL,
2367 NLP_EVT_DEVICE_RECOVERY);
2368 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
2369 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2370 del_timer_sync(&ndlp->nlp_delayfunc);
2371 if (!list_empty(&ndlp->
2372 els_retry_evt.evt_listp))
2373 list_del_init(&ndlp->
2374 els_retry_evt.evt_listp);
2375 }
2376 }
2377 }
2378 return (0);
2379}
2380
2381static int
2382lpfc_els_rcv_rscn(struct lpfc_hba * phba,
2383 struct lpfc_iocbq * cmdiocb,
2384 struct lpfc_nodelist * ndlp, uint8_t newnode)
2385{
2386 struct lpfc_dmabuf *pcmd;
2387 uint32_t *lp;
2388 IOCB_t *icmd;
2389 uint32_t payload_len, cmd;
2390
2391 icmd = &cmdiocb->iocb;
2392 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2393 lp = (uint32_t *) pcmd->virt;
2394
2395 cmd = *lp++;
2396 payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
2397 payload_len -= sizeof (uint32_t); /* take off word 0 */
2398 cmd &= ELS_CMD_MASK;
2399
2400 /* RSCN received */
2401 lpfc_printf_log(phba,
2402 KERN_INFO,
2403 LOG_DISCOVERY,
2404 "%d:0214 RSCN received Data: x%x x%x x%x x%x\n",
2405 phba->brd_no,
2406 phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt);
2407
2408 /* If we are about to begin discovery, just ACC the RSCN.
2409 * Discovery processing will satisfy it.
2410 */
2411 if (phba->hba_state < LPFC_NS_QRY) {
2412 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
2413 newnode);
2414 return (0);
2415 }
2416
2417	/* If we are already processing an RSCN, save the received
2418	 * RSCN payload buffer (cmdiocb->context2) to process later.
2419	 */
2420 if (phba->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
2421 if ((phba->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) &&
2422 !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
2423 spin_lock_irq(phba->host->host_lock);
2424 phba->fc_flag |= FC_RSCN_MODE;
2425 spin_unlock_irq(phba->host->host_lock);
2426 phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
2427
2428			/* If we zero cmdiocb->context2, the calling
2429			 * routine will not try to free it.
2430			 */
2431 cmdiocb->context2 = NULL;
2432
2433 /* Deferred RSCN */
2434 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2435 "%d:0235 Deferred RSCN "
2436 "Data: x%x x%x x%x\n",
2437 phba->brd_no, phba->fc_rscn_id_cnt,
2438 phba->fc_flag, phba->hba_state);
2439 } else {
2440 spin_lock_irq(phba->host->host_lock);
2441 phba->fc_flag |= FC_RSCN_DISCOVERY;
2442 spin_unlock_irq(phba->host->host_lock);
2443 /* ReDiscovery RSCN */
2444 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2445 "%d:0234 ReDiscovery RSCN "
2446 "Data: x%x x%x x%x\n",
2447 phba->brd_no, phba->fc_rscn_id_cnt,
2448 phba->fc_flag, phba->hba_state);
2449 }
2450 /* Send back ACC */
2451 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
2452 newnode);
2453
2454 /* send RECOVERY event for ALL nodes that match RSCN payload */
2455 lpfc_rscn_recovery_check(phba);
2456 return (0);
2457 }
2458
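	/* Not already processing an RSCN: enter RSCN mode, stash the
	 * payload for lpfc_rscn_payload_check(), and process it now.
	 */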
2459 phba->fc_flag |= FC_RSCN_MODE;
2460 phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
2461 /*
2462	 * If we zero cmdiocb->context2, the calling routine will
2463	 * not try to free it.
2464 */
2465 cmdiocb->context2 = NULL;
2466
2467 lpfc_set_disctmo(phba);
2468
2469 /* Send back ACC */
2470 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode);
2471
2472 /* send RECOVERY event for ALL nodes that match RSCN payload */
2473 lpfc_rscn_recovery_check(phba);
2474
2475 return (lpfc_els_handle_rscn(phba));
2476}
2477
2478int
2479lpfc_els_handle_rscn(struct lpfc_hba * phba)
2480{
2481 struct lpfc_nodelist *ndlp;
2482
2483 /* Start timer for RSCN processing */
2484 lpfc_set_disctmo(phba);
2485
2486 /* RSCN processed */
2487 lpfc_printf_log(phba,
2488 KERN_INFO,
2489 LOG_DISCOVERY,
2490 "%d:0215 RSCN processed Data: x%x x%x x%x x%x\n",
2491 phba->brd_no,
2492 phba->fc_flag, 0, phba->fc_rscn_id_cnt,
2493 phba->hba_state);
2494
2495 /* To process RSCN, first compare RSCN data with NameServer */
2496 phba->fc_ns_retry = 0;
2497 if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
2498 NameServer_DID))) {
2499 /* Good ndlp, issue CT Request to NameServer */
2500 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 0) {
2501 /* Wait for NameServer query cmpl before we can
2502 continue */
2503 return (1);
2504 }
2505 } else {
2506		/* If no login to the NameServer exists, issue a PLOGI */
2507		/* to the NameServer to create one */
2508 if ((ndlp =
2509 lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID))) {
2510 /* Wait for NameServer login cmpl before we can
2511 continue */
2512 return (1);
2513 }
2514 if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL))
2515 == 0) {
2516 lpfc_els_flush_rscn(phba);
2517 return (0);
2518 } else {
2519 lpfc_nlp_init(phba, ndlp, NameServer_DID);
2520 ndlp->nlp_type |= NLP_FABRIC;
2521 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
2522 lpfc_issue_els_plogi(phba, ndlp, 0);
2523 /* Wait for NameServer login cmpl before we can
2524 continue */
2525 return (1);
2526 }
2527 }
2528
2529 lpfc_els_flush_rscn(phba);
2530 return (0);
2531}
2532
2533static int
2534lpfc_els_rcv_flogi(struct lpfc_hba * phba,
2535 struct lpfc_iocbq * cmdiocb,
2536 struct lpfc_nodelist * ndlp, uint8_t newnode)
2537{
2538 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2539 uint32_t *lp = (uint32_t *) pcmd->virt;
2540 IOCB_t *icmd = &cmdiocb->iocb;
2541 struct serv_parm *sp;
2542 LPFC_MBOXQ_t *mbox;
2543 struct ls_rjt stat;
2544 uint32_t cmd, did;
2545 int rc;
2546
2547 cmd = *lp++;
2548 sp = (struct serv_parm *) lp;
2549
2550 /* FLOGI received */
2551
2552 lpfc_set_disctmo(phba);
2553
2554 if (phba->fc_topology == TOPOLOGY_LOOP) {
2555 /* We should never receive a FLOGI in loop mode, ignore it */
2556 did = icmd->un.elsreq64.remoteID;
2557
2558 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
2559 Loop Mode */
2560 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
2561 "%d:0113 An FLOGI ELS command x%x was received "
2562 "from DID x%x in Loop Mode\n",
2563 phba->brd_no, cmd, did);
2564 return (1);
2565 }
2566
2567 did = Fabric_DID;
2568
2569 if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) {
2570		/* If we accept the FLOGI and our portname is greater
2571		 * than the remote portname, we initiate Nport login.
2572		 */
2573
2574 rc = memcmp(&phba->fc_portname, &sp->portName,
2575 sizeof (struct lpfc_name));
2576
2577 if (!rc) {
2578 if ((mbox = mempool_alloc(phba->mbox_mem_pool,
2579 GFP_KERNEL)) == 0) {
2580 return (1);
2581 }
2582 lpfc_linkdown(phba);
2583 lpfc_init_link(phba, mbox,
2584 phba->cfg_topology,
2585 phba->cfg_link_speed);
2586 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2587 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2588 rc = lpfc_sli_issue_mbox
2589 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
2590 if (rc == MBX_NOT_FINISHED) {
2591				mempool_free(mbox, phba->mbox_mem_pool);
2592 }
2593 return (1);
2594 }
2595 else if (rc > 0) { /* greater than */
2596 spin_lock_irq(phba->host->host_lock);
2597 phba->fc_flag |= FC_PT2PT_PLOGI;
2598 spin_unlock_irq(phba->host->host_lock);
2599 }
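		/* Either way we are now operating point-to-point,
		 * not on a fabric.
		 */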
2600 phba->fc_flag |= FC_PT2PT;
2601 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
2602 } else {
2603		/* Reject this request because of invalid parameters */
2604 stat.un.b.lsRjtRsvd0 = 0;
2605 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2606 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
2607 stat.un.b.vendorUnique = 0;
2608 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
2609 return (1);
2610 }
2611
2612 /* Send back ACC */
2613 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode);
2614
2615 return (0);
2616}
2617
2618static int
2619lpfc_els_rcv_rnid(struct lpfc_hba * phba,
2620 struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
2621{
2622 struct lpfc_dmabuf *pcmd;
2623 uint32_t *lp;
2624 IOCB_t *icmd;
2625 RNID *rn;
2626 struct ls_rjt stat;
2627 uint32_t cmd, did;
2628
2629 icmd = &cmdiocb->iocb;
2630 did = icmd->un.elsreq64.remoteID;
2631 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2632 lp = (uint32_t *) pcmd->virt;
2633
2634 cmd = *lp++;
2635 rn = (RNID *) lp;
2636
2637 /* RNID received */
2638
2639 switch (rn->Format) {
2640 case 0:
2641 case RNID_TOPOLOGY_DISC:
2642 /* Send back ACC */
2643 lpfc_els_rsp_rnid_acc(phba, rn->Format, cmdiocb, ndlp);
2644 break;
2645 default:
2646		/* Reject this request because the format is not supported */
2647 stat.un.b.lsRjtRsvd0 = 0;
2648 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2649 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
2650 stat.un.b.vendorUnique = 0;
2651 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
2652 }
2653 return (0);
2654}
2655
2656static int
2657lpfc_els_rcv_rrq(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2658 struct lpfc_nodelist * ndlp)
2659{
2660 struct lpfc_dmabuf *pcmd;
2661 uint32_t *lp;
2662 IOCB_t *icmd;
2663 struct lpfc_sli_ring *pring;
2664 struct lpfc_sli *psli;
2665 RRQ *rrq;
2666 uint32_t cmd, did;
2667
2668 psli = &phba->sli;
2669 pring = &psli->ring[LPFC_FCP_RING];
2670 icmd = &cmdiocb->iocb;
2671 did = icmd->un.elsreq64.remoteID;
2672 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2673 lp = (uint32_t *) pcmd->virt;
2674
2675 cmd = *lp++;
2676 rrq = (RRQ *) lp;
2677
2678 /* RRQ received */
2679 /* Get oxid / rxid from payload and abort it */
2680 spin_lock_irq(phba->host->host_lock);
2681 if ((rrq->SID == be32_to_cpu(phba->fc_myDID))) {
2682 lpfc_sli_abort_iocb(phba, pring, 0, 0, rrq->Oxid,
2683 LPFC_CTX_CTX);
2684 } else {
2685 lpfc_sli_abort_iocb(phba, pring, 0, 0, rrq->Rxid,
2686 LPFC_CTX_CTX);
2687 }
2688
2689 spin_unlock_irq(phba->host->host_lock);
2690	/* ACCEPT the RRQ request */
2691 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
2692
2693 return 0;
2694}
2695
2696static int
2697lpfc_els_rcv_farp(struct lpfc_hba * phba,
2698 struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
2699{
2700 struct lpfc_dmabuf *pcmd;
2701 uint32_t *lp;
2702 IOCB_t *icmd;
2703 FARP *fp;
2704 uint32_t cmd, cnt, did;
2705
2706 icmd = &cmdiocb->iocb;
2707 did = icmd->un.elsreq64.remoteID;
2708 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2709 lp = (uint32_t *) pcmd->virt;
2710
2711 cmd = *lp++;
2712 fp = (FARP *) lp;
2713
2714 /* FARP-REQ received from DID <did> */
2715 lpfc_printf_log(phba,
2716 KERN_INFO,
2717 LOG_IP,
2718 "%d:0601 FARP-REQ received from DID x%x\n",
2719 phba->brd_no, did);
2720
2721 /* We will only support match on WWPN or WWNN */
2722 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
2723 return (0);
2724 }
2725
2726 cnt = 0;
2727 /* If this FARP command is searching for my portname */
2728 if (fp->Mflags & FARP_MATCH_PORT) {
2729 if (memcmp(&fp->RportName, &phba->fc_portname,
2730 sizeof (struct lpfc_name)) == 0)
2731 cnt = 1;
2732 }
2733
2734 /* If this FARP command is searching for my nodename */
2735 if (fp->Mflags & FARP_MATCH_NODE) {
2736 if (memcmp(&fp->RnodeName, &phba->fc_nodename,
2737 sizeof (struct lpfc_name)) == 0)
2738 cnt = 1;
2739 }
2740
2741 if (cnt) {
2742 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
2743 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
2744 /* Log back into the node before sending the FARP. */
2745 if (fp->Rflags & FARP_REQUEST_PLOGI) {
2746 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
2747 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
2748 lpfc_issue_els_plogi(phba, ndlp, 0);
2749 }
2750
2751 /* Send a FARP response to that node */
2752 if (fp->Rflags & FARP_REQUEST_FARPR) {
2753 lpfc_issue_els_farpr(phba, did, 0);
2754 }
2755 }
2756 }
2757 return (0);
2758}
2759
2760static int
2761lpfc_els_rcv_farpr(struct lpfc_hba * phba,
2762 struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
2763{
2764 struct lpfc_dmabuf *pcmd;
2765 uint32_t *lp;
2766 IOCB_t *icmd;
2767 uint32_t cmd, did;
2768
2769 icmd = &cmdiocb->iocb;
2770 did = icmd->un.elsreq64.remoteID;
2771 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2772 lp = (uint32_t *) pcmd->virt;
2773
2774 cmd = *lp++;
2775 /* FARP-RSP received from DID <did> */
2776 lpfc_printf_log(phba,
2777 KERN_INFO,
2778 LOG_IP,
2779 "%d:0600 FARP-RSP received from DID x%x\n",
2780 phba->brd_no, did);
2781
2782	/* ACCEPT the FARP response */
2783 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
2784
2785 return 0;
2786}
2787
2788static int
2789lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2790 struct lpfc_nodelist * ndlp)
2791{
2792 struct lpfc_dmabuf *pcmd;
2793 uint32_t *lp;
2794 IOCB_t *icmd;
2795 FAN *fp;
2796 uint32_t cmd, did;
2797
2798 icmd = &cmdiocb->iocb;
2799 did = icmd->un.elsreq64.remoteID;
2800 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2801 lp = (uint32_t *) pcmd->virt;
2802
2803 cmd = *lp++;
2804 fp = (FAN *) lp;
2805
2806 /* FAN received */
2807
2808 /* ACCEPT the FAN request */
2809 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
2810
2811 if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
2812 /* The discovery state machine needs to take a different
2813 * action if this node has switched fabrics
2814 */
2815 if ((memcmp(&fp->FportName, &phba->fc_fabparam.portName,
2816 sizeof (struct lpfc_name)) != 0)
2817 ||
2818 (memcmp(&fp->FnodeName, &phba->fc_fabparam.nodeName,
2819 sizeof (struct lpfc_name)) != 0)) {
2820 /* This node has switched fabrics. An FLOGI is required
2821 * after the timeout
2822 */
2823 return (0);
2824 }
2825
2826 /* Start discovery */
2827 lpfc_disc_start(phba);
2828 }
2829
2830 return (0);
2831}
2832
2833void
2834lpfc_els_timeout(unsigned long ptr)
2835{
2836 struct lpfc_hba *phba;
2837 unsigned long iflag;
2838
2839 phba = (struct lpfc_hba *)ptr;
2840 if (phba == 0)
2841 return;
2842 spin_lock_irqsave(phba->host->host_lock, iflag);
2843 if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
2844 phba->work_hba_events |= WORKER_ELS_TMO;
2845 if (phba->work_wait)
2846 wake_up(phba->work_wait);
2847 }
2848 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2849 return;
2850}
2851
2852void
2853lpfc_els_timeout_handler(struct lpfc_hba *phba)
2854{
2855 struct lpfc_sli_ring *pring;
2856 struct lpfc_iocbq *tmp_iocb, *piocb;
2857 IOCB_t *cmd = NULL;
2858 struct lpfc_dmabuf *pcmd;
2859 struct list_head *dlp;
2860 uint32_t *elscmd;
2861 uint32_t els_command;
2862 uint32_t timeout;
2863 uint32_t remote_ID;
2864
2865 if (phba == 0)
2866 return;
2867 spin_lock_irq(phba->host->host_lock);
2868 /* If the timer is already canceled do nothing */
2869 if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
2870 spin_unlock_irq(phba->host->host_lock);
2871 return;
2872 }
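	/* Each outstanding ELS command is allowed twice the remote
	 * R_A_TOV before it is timed out and aborted.
	 */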
2873 timeout = (uint32_t)(phba->fc_ratov << 1);
2874
2875 pring = &phba->sli.ring[LPFC_ELS_RING];
2876 dlp = &pring->txcmplq;
2877
2878 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
2879 cmd = &piocb->iocb;
2880
2881 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
2882 continue;
2883 }
2884 pcmd = (struct lpfc_dmabuf *) piocb->context2;
2885 elscmd = (uint32_t *) (pcmd->virt);
2886 els_command = *elscmd;
2887
2888 if ((els_command == ELS_CMD_FARP)
2889 || (els_command == ELS_CMD_FARPR)) {
2890 continue;
2891 }
2892
2893 if (piocb->drvrTimeout > 0) {
2894 if (piocb->drvrTimeout >= timeout) {
2895 piocb->drvrTimeout -= timeout;
2896 } else {
2897 piocb->drvrTimeout = 0;
2898 }
2899 continue;
2900 }
2901
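		/* drvrTimeout has expired: pull the command off the
		 * completion queue and fail it back to its issuer.
		 */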
2902 list_del(&piocb->list);
2903 pring->txcmplq_cnt--;
2904
2905 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
2906 struct lpfc_nodelist *ndlp;
2907
2908 ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
2909			remote_ID = ndlp ? ndlp->nlp_DID : 0xffffffff;	/* ndlp may be NULL */
2910 if (cmd->un.elsreq64.bdl.ulpIoTag32) {
2911 lpfc_sli_issue_abort_iotag32(phba,
2912 pring, piocb);
2913 }
2914 } else {
2915 remote_ID = cmd->un.elsreq64.remoteID;
2916 }
2917
2918 lpfc_printf_log(phba,
2919 KERN_ERR,
2920 LOG_ELS,
2921 "%d:0127 ELS timeout Data: x%x x%x x%x x%x\n",
2922 phba->brd_no, els_command,
2923 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
2924
2925 /*
2926 * The iocb has timed out; abort it.
2927 */
2928 if (piocb->iocb_cmpl) {
2929 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2930 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2931 spin_unlock_irq(phba->host->host_lock);
2932 (piocb->iocb_cmpl) (phba, piocb, piocb);
2933 spin_lock_irq(phba->host->host_lock);
2934 } else {
2935 list_add_tail(&piocb->list, &phba->lpfc_iocb_list);
2936 }
2937 }
2938 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) {
2939 phba->els_tmofunc.expires = jiffies + HZ * timeout;
2940 add_timer(&phba->els_tmofunc);
2941 }
2942 spin_unlock_irq(phba->host->host_lock);
2943}
2944
2945void
2946lpfc_els_flush_cmd(struct lpfc_hba * phba)
2947{
2948 struct lpfc_sli_ring *pring;
2949 struct lpfc_iocbq *tmp_iocb, *piocb;
2950 IOCB_t *cmd = NULL;
2951 struct lpfc_dmabuf *pcmd;
2952 uint32_t *elscmd;
2953 uint32_t els_command;
2954 uint32_t remote_ID;
2955
2956 pring = &phba->sli.ring[LPFC_ELS_RING];
2957 spin_lock_irq(phba->host->host_lock);
2958 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
2959 cmd = &piocb->iocb;
2960
2961 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
2962 continue;
2963 }
2964
2965 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
2966 if ((cmd->ulpCommand == CMD_QUE_RING_BUF_CN) ||
2967 (cmd->ulpCommand == CMD_QUE_RING_BUF64_CN) ||
2968 (cmd->ulpCommand == CMD_CLOSE_XRI_CN) ||
2969 (cmd->ulpCommand == CMD_ABORT_XRI_CN)) {
2970 continue;
2971 }
2972
2973 pcmd = (struct lpfc_dmabuf *) piocb->context2;
2974 elscmd = (uint32_t *) (pcmd->virt);
2975 els_command = *elscmd;
2976
2977 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
2978 struct lpfc_nodelist *ndlp;
2979
2980 ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
2981			remote_ID = ndlp ? ndlp->nlp_DID : 0xffffffff;	/* ndlp may be NULL */
2982 if (phba->hba_state == LPFC_HBA_READY) {
2983 continue;
2984 }
2985 } else {
2986 remote_ID = cmd->un.elsreq64.remoteID;
2987 }
2988
2989 list_del(&piocb->list);
2990		pring->txq_cnt--;	/* this loop drains txq, not txcmplq */
2991
2992 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2993 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2994
2995 if (piocb->iocb_cmpl) {
2996 spin_unlock_irq(phba->host->host_lock);
2997 (piocb->iocb_cmpl) (phba, piocb, piocb);
2998 spin_lock_irq(phba->host->host_lock);
2999 }
3000 else
3001 list_add_tail(&piocb->list, &phba->lpfc_iocb_list);
3002 }
3003
3004 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
3005 cmd = &piocb->iocb;
3006
3007 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
3008 continue;
3009 }
3010 pcmd = (struct lpfc_dmabuf *) piocb->context2;
3011 elscmd = (uint32_t *) (pcmd->virt);
3012 els_command = *elscmd;
3013
3014 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
3015 struct lpfc_nodelist *ndlp;
3016
3017 ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
3018			remote_ID = ndlp ? ndlp->nlp_DID : 0xffffffff;	/* ndlp may be NULL */
3019 if (phba->hba_state == LPFC_HBA_READY) {
3020 continue;
3021 }
3022 } else {
3023 remote_ID = cmd->un.elsreq64.remoteID;
3024 }
3025
3026 list_del(&piocb->list);
3027 pring->txcmplq_cnt--;
3028
3029 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3030 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3031
3032 if (piocb->iocb_cmpl) {
3033 spin_unlock_irq(phba->host->host_lock);
3034 (piocb->iocb_cmpl) (phba, piocb, piocb);
3035 spin_lock_irq(phba->host->host_lock);
3036 }
3037 else
3038 list_add_tail(&piocb->list, &phba->lpfc_iocb_list);
3039 }
3040 spin_unlock_irq(phba->host->host_lock);
3041 return;
3042}
3043
3044void
3045lpfc_els_unsol_event(struct lpfc_hba * phba,
3046 struct lpfc_sli_ring * pring, struct lpfc_iocbq * elsiocb)
3047{
3048 struct lpfc_sli *psli;
3049 struct lpfc_nodelist *ndlp;
3050 struct lpfc_dmabuf *mp;
3051 uint32_t *lp;
3052 IOCB_t *icmd;
3053 struct ls_rjt stat;
3054 uint32_t cmd;
3055 uint32_t did;
3056 uint32_t newnode;
3057 uint32_t drop_cmd = 0; /* by default do NOT drop received cmd */
3058 uint32_t rjt_err = 0;
3059
3060 psli = &phba->sli;
3061 icmd = &elsiocb->iocb;
3062
3063 if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3064 ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
3065		/* Not enough posted buffers; try posting more buffers */
3066 phba->fc_stat.NoRcvBuf++;
3067 lpfc_post_buffer(phba, pring, 0, 1);
3068 return;
3069 }
3070
3071 /* If there are no BDEs associated with this IOCB,
3072 * there is nothing to do.
3073 */
3074 if (icmd->ulpBdeCount == 0)
3075 return;
3076
3077	/* The ELS command type is the first 32-bit word of the packet */
3078 mp = lpfc_sli_ringpostbuf_get(phba, pring, getPaddr(icmd->un.
3079 cont64[0].
3080 addrHigh,
3081 icmd->un.
3082 cont64[0].addrLow));
3083 if (mp == 0) {
3084 drop_cmd = 1;
3085 goto dropit;
3086 }
3087
3088 newnode = 0;
3089 lp = (uint32_t *) mp->virt;
3090 cmd = *lp++;
3091 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], 1, 1);
3092
3093 if (icmd->ulpStatus) {
3094 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3095 kfree(mp);
3096 drop_cmd = 1;
3097 goto dropit;
3098 }
3099
3100 /* Check to see if link went down during discovery */
3101 if (lpfc_els_chk_latt(phba)) {
3102 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3103 kfree(mp);
3104 drop_cmd = 1;
3105 goto dropit;
3106 }
3107
3108 did = icmd->un.rcvels.remoteID;
3109 if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did)) == 0) {
3110 /* Cannot find existing Fabric ndlp, so allocate a new one */
3111 if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL))
3112 == 0) {
3113 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3114 kfree(mp);
3115 drop_cmd = 1;
3116 goto dropit;
3117 }
3118
3119 lpfc_nlp_init(phba, ndlp, did);
3120 newnode = 1;
3121 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
3122 ndlp->nlp_type |= NLP_FABRIC;
3123 }
3124 }
3125
3126 phba->fc_stat.elsRcvFrame++;
3127 elsiocb->context1 = ndlp;
3128 elsiocb->context2 = mp;
3129
3130 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
3131 cmd &= ELS_CMD_MASK;
3132 }
3133 /* ELS command <elsCmd> received from NPORT <did> */
3134 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3135 "%d:0112 ELS command x%x received from NPORT x%x "
3136 "Data: x%x\n", phba->brd_no, cmd, did, phba->hba_state);
3137
3138 switch (cmd) {
3139 case ELS_CMD_PLOGI:
3140 phba->fc_stat.elsRcvPLOGI++;
3141 if (phba->hba_state < LPFC_DISC_AUTH) {
3142 rjt_err = LSEXP_NOTHING_MORE;
3143 break;
3144 }
3145 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
3146 break;
3147 case ELS_CMD_FLOGI:
3148 phba->fc_stat.elsRcvFLOGI++;
3149 lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
3150 if (newnode) {
3151			mempool_free(ndlp, phba->nlp_mem_pool);
3152 }
3153 break;
3154 case ELS_CMD_LOGO:
3155 phba->fc_stat.elsRcvLOGO++;
3156 if (phba->hba_state < LPFC_DISC_AUTH) {
3157 rjt_err = LSEXP_NOTHING_MORE;
3158 break;
3159 }
3160 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
3161 break;
3162 case ELS_CMD_PRLO:
3163 phba->fc_stat.elsRcvPRLO++;
3164 if (phba->hba_state < LPFC_DISC_AUTH) {
3165 rjt_err = LSEXP_NOTHING_MORE;
3166 break;
3167 }
3168 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
3169 break;
3170 case ELS_CMD_RSCN:
3171 phba->fc_stat.elsRcvRSCN++;
3172 lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
3173 if (newnode) {
3174			mempool_free(ndlp, phba->nlp_mem_pool);
3175 }
3176 break;
3177 case ELS_CMD_ADISC:
3178 phba->fc_stat.elsRcvADISC++;
3179 if (phba->hba_state < LPFC_DISC_AUTH) {
3180 rjt_err = LSEXP_NOTHING_MORE;
3181 break;
3182 }
3183 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_ADISC);
3184 break;
3185 case ELS_CMD_PDISC:
3186 phba->fc_stat.elsRcvPDISC++;
3187 if (phba->hba_state < LPFC_DISC_AUTH) {
3188 rjt_err = LSEXP_NOTHING_MORE;
3189 break;
3190 }
3191 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PDISC);
3192 break;
3193 case ELS_CMD_FARPR:
3194 phba->fc_stat.elsRcvFARPR++;
3195 lpfc_els_rcv_farpr(phba, elsiocb, ndlp);
3196 break;
3197 case ELS_CMD_FARP:
3198 phba->fc_stat.elsRcvFARP++;
3199 lpfc_els_rcv_farp(phba, elsiocb, ndlp);
3200 break;
3201 case ELS_CMD_FAN:
3202 phba->fc_stat.elsRcvFAN++;
3203 lpfc_els_rcv_fan(phba, elsiocb, ndlp);
3204 break;
3205 case ELS_CMD_RRQ:
3206 phba->fc_stat.elsRcvRRQ++;
3207 lpfc_els_rcv_rrq(phba, elsiocb, ndlp);
3208 break;
3209 case ELS_CMD_PRLI:
3210 phba->fc_stat.elsRcvPRLI++;
3211 if (phba->hba_state < LPFC_DISC_AUTH) {
3212 rjt_err = LSEXP_NOTHING_MORE;
3213 break;
3214 }
3215 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
3216 break;
3217 case ELS_CMD_RNID:
3218 phba->fc_stat.elsRcvRNID++;
3219 lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
3220 break;
3221 default:
3222 /* Unsupported ELS command, reject */
3223 rjt_err = LSEXP_NOTHING_MORE;
3224
3225 /* Unknown ELS command <elsCmd> received from NPORT <did> */
3226 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3227 "%d:0115 Unknown ELS command x%x received from "
3228 "NPORT x%x\n", phba->brd_no, cmd, did);
3229 if (newnode) {
3230			mempool_free(ndlp, phba->nlp_mem_pool);
3231 }
3232 break;
3233 }
3234
3235	/* Check whether we need to send an LS_RJT for the received ELS cmd */
3236 if (rjt_err) {
3237 stat.un.b.lsRjtRsvd0 = 0;
3238 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3239 stat.un.b.lsRjtRsnCodeExp = rjt_err;
3240 stat.un.b.vendorUnique = 0;
3241 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, elsiocb, ndlp);
3242 }
3243
3244 if (elsiocb->context2) {
3245 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3246 kfree(mp);
3247 }
3248dropit:
3249	/* Check whether we need to drop the received ELS cmd */
3250 if (drop_cmd == 1) {
3251 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3252 "%d:0111 Dropping received ELS cmd "
3253 "Data: x%x x%x\n", phba->brd_no,
3254 icmd->ulpStatus, icmd->un.ulpWord[4]);
3255 phba->fc_stat.elsRcvDrop++;
3256 }
3257 return;
3258}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
new file mode 100644
index 000000000000..d546206038bf
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -0,0 +1,2537 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_hbadisc.c 1.266 2005/04/13 11:59:06EDT sf_support Exp $
23 */
24
25#include <linux/blkdev.h>
26#include <linux/pci.h>
27#include <linux/kthread.h>
28#include <linux/interrupt.h>
29
30#include <scsi/scsi_device.h>
31#include <scsi/scsi_host.h>
32#include <scsi/scsi_transport_fc.h>
33
34#include "lpfc_hw.h"
35#include "lpfc_disc.h"
36#include "lpfc_sli.h"
37#include "lpfc_scsi.h"
38#include "lpfc.h"
39#include "lpfc_logmsg.h"
40#include "lpfc_crtn.h"
41
42/* AlpaArray for assignment of SCSI IDs for scan-down and bind_method */
43static uint8_t lpfcAlpaArray[] = {
44 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
45 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
46 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
47 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
48 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
49 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
50 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
51 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
52 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
53 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
54 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
55 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
56 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
57};
58
59static void lpfc_disc_timeout_handler(struct lpfc_hba *);
60
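/*
 * Nodev timeout handler, run from the worker thread: the remote node
 * did not reappear within the nodev timeout, so flush any outstanding
 * FCP I/O to its target and push the node through the DEVICE_RM event.
 */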
61static void
62lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
63{
64 if (!(ndlp->nlp_type & NLP_FABRIC)) {
65 /* Nodev timeout on NPort <nlp_DID> */
66 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
67 "%d:0203 Nodev timeout on NPort x%x "
68 "Data: x%x x%x x%x\n",
69 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
70 ndlp->nlp_state, ndlp->nlp_rpi);
71 }
72
73 spin_lock_irq(phba->host->host_lock);
74 if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) {
75 spin_unlock_irq(phba->host->host_lock);
76 return;
77 }
78
79 ndlp->nlp_flag &= ~NLP_NODEV_TMO;
80
81 if (ndlp->nlp_sid != NLP_NO_SID) {
82 /* flush the target */
83 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
84 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
85 }
86 spin_unlock_irq(phba->host->host_lock);
87
88 lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
89 return;
90}
91
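/*
 * Drain the deferred event list. The host lock is dropped while each
 * event handler runs and re-taken before the next event is removed
 * from the list.
 */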
92static void
93lpfc_work_list_done(struct lpfc_hba * phba)
94{
95 struct lpfc_work_evt *evtp = NULL;
96 struct lpfc_nodelist *ndlp;
97 int free_evt;
98
99 spin_lock_irq(phba->host->host_lock);
100 while(!list_empty(&phba->work_list)) {
101 list_remove_head((&phba->work_list), evtp, typeof(*evtp),
102 evt_listp);
103 spin_unlock_irq(phba->host->host_lock);
104 free_evt = 1;
105 switch(evtp->evt) {
106 case LPFC_EVT_NODEV_TMO:
107 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
108 lpfc_process_nodev_timeout(phba, ndlp);
109 free_evt = 0;
110 break;
111 case LPFC_EVT_ELS_RETRY:
112 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
113 lpfc_els_retry_delay_handler(ndlp);
114 free_evt = 0;
115 break;
116 case LPFC_EVT_ONLINE:
117 *(int *)(evtp->evt_arg1) = lpfc_online(phba);
118 complete((struct completion *)(evtp->evt_arg2));
119 break;
120 case LPFC_EVT_OFFLINE:
121 *(int *)(evtp->evt_arg1) = lpfc_offline(phba);
122 complete((struct completion *)(evtp->evt_arg2));
123 break;
124 }
125 if (free_evt)
126 kfree(evtp);
127 spin_lock_irq(phba->host->host_lock);
128 }
129 spin_unlock_irq(phba->host->host_lock);
130
131}
132
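/*
 * One pass of deferred HBA work: latched host-attention bits (error,
 * mailbox, link attention), expired worker timeouts, slow-path ring
 * events, and finally the deferred event list.
 */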
133static void
134lpfc_work_done(struct lpfc_hba * phba)
135{
136 struct lpfc_sli_ring *pring;
137 int i;
138 uint32_t ha_copy;
139 uint32_t control;
140 uint32_t work_hba_events;
141
142 spin_lock_irq(phba->host->host_lock);
143 ha_copy = phba->work_ha;
144 phba->work_ha = 0;
145 work_hba_events=phba->work_hba_events;
146 spin_unlock_irq(phba->host->host_lock);
147
148 if(ha_copy & HA_ERATT)
149 lpfc_handle_eratt(phba);
150
151 if(ha_copy & HA_MBATT)
152 lpfc_sli_handle_mb_event(phba);
153
154 if(ha_copy & HA_LATT)
155 lpfc_handle_latt(phba);
156
157 if (work_hba_events & WORKER_DISC_TMO)
158 lpfc_disc_timeout_handler(phba);
159
160 if (work_hba_events & WORKER_ELS_TMO)
161 lpfc_els_timeout_handler(phba);
162
163 if (work_hba_events & WORKER_MBOX_TMO)
164 lpfc_mbox_timeout_handler(phba);
165
166 if (work_hba_events & WORKER_FDMI_TMO)
167 lpfc_fdmi_tmo_handler(phba);
168
169 spin_lock_irq(phba->host->host_lock);
170 phba->work_hba_events &= ~work_hba_events;
171 spin_unlock_irq(phba->host->host_lock);
172
173 for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
174 pring = &phba->sli.ring[i];
175 if ((ha_copy & HA_RXATT)
176 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
177 if (pring->flag & LPFC_STOP_IOCB_MASK) {
178 pring->flag |= LPFC_DEFERRED_RING_EVENT;
179 } else {
180 lpfc_sli_handle_slow_ring_event(phba, pring,
181 (ha_copy &
182 HA_RXMASK));
183 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
184 }
185 /*
186 * Turn on Ring interrupts
187 */
188 spin_lock_irq(phba->host->host_lock);
189 control = readl(phba->HCregaddr);
190 control |= (HC_R0INT_ENA << i);
191 writel(control, phba->HCregaddr);
192 readl(phba->HCregaddr); /* flush */
193 spin_unlock_irq(phba->host->host_lock);
194 }
195 }
196
197 lpfc_work_list_done (phba);
198
199}
200
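/* Wake-up condition for the worker thread's wait_event below. */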
201static int
202check_work_wait_done(struct lpfc_hba *phba) {
203
204 spin_lock_irq(phba->host->host_lock);
205 if (phba->work_ha ||
206 phba->work_hba_events ||
207 (!list_empty(&phba->work_list)) ||
208 kthread_should_stop()) {
209 spin_unlock_irq(phba->host->host_lock);
210 return 1;
211 } else {
212 spin_unlock_irq(phba->host->host_lock);
213 return 0;
214 }
215}
216
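/*
 * Main loop of the per-HBA worker kthread: sleep until
 * check_work_wait_done() reports pending work or a stop request,
 * then process it via lpfc_work_done().
 */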
217int
218lpfc_do_work(void *p)
219{
220 struct lpfc_hba *phba = p;
221 int rc;
222 DECLARE_WAIT_QUEUE_HEAD(work_waitq);
223
224 set_user_nice(current, -20);
225 phba->work_wait = &work_waitq;
226
227 while (1) {
228
229 rc = wait_event_interruptible(work_waitq,
230 check_work_wait_done(phba));
231 BUG_ON(rc);
232
233 if (kthread_should_stop())
234 break;
235
236 lpfc_work_done(phba);
237
238 }
239 phba->work_wait = NULL;
240 return 0;
241}
242
243/*
244 * This is only called to handle FC worker events. Since this is a rare
245 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
246 * embedding it in the IOCB.
247 */
248int
249lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
250 uint32_t evt)
251{
252 struct lpfc_work_evt *evtp;
253
254 /*
255 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
256 * be queued to worker thread for processing
257 */
258 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
259 if (!evtp)
260 return 0;
261
262 evtp->evt_arg1 = arg1;
263 evtp->evt_arg2 = arg2;
264 evtp->evt = evt;
265
266	spin_lock_irq(phba->host->host_lock);
267	list_add_tail(&evtp->evt_listp, &phba->work_list);
268 if (phba->work_wait)
269 wake_up(phba->work_wait);
270 spin_unlock_irq(phba->host->host_lock);
271
272 return 1;
273}
274
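/*
 * Handle a link-down event: unregister the firmware default RPIs,
 * flush RSCN and ELS activity, issue DEVICE_RECOVERY to every
 * non-fabric node (fabric nodes other than Fabric_DID are removed
 * outright), and clear any pt2pt state.
 */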
275int
276lpfc_linkdown(struct lpfc_hba * phba)
277{
278 struct lpfc_sli *psli;
279 struct lpfc_nodelist *ndlp, *next_ndlp;
280 struct list_head *listp;
281 struct list_head *node_list[7];
282 LPFC_MBOXQ_t *mb;
283 int rc, i;
284
285 psli = &phba->sli;
286
287 spin_lock_irq(phba->host->host_lock);
288 phba->hba_state = LPFC_LINK_DOWN;
289 spin_unlock_irq(phba->host->host_lock);
290
291 /* Clean up any firmware default rpi's */
292 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
293 lpfc_unreg_did(phba, 0xffffffff, mb);
294 mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
295 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
296 == MBX_NOT_FINISHED) {
297 mempool_free( mb, phba->mbox_mem_pool);
298 }
299 }
300
301 /* Cleanup any outstanding RSCN activity */
302 lpfc_els_flush_rscn(phba);
303
304 /* Cleanup any outstanding ELS commands */
305 lpfc_els_flush_cmd(phba);
306
307 /* Issue a LINK DOWN event to all nodes */
308 node_list[0] = &phba->fc_npr_list; /* MUST do this list first */
309 node_list[1] = &phba->fc_nlpmap_list;
310 node_list[2] = &phba->fc_nlpunmap_list;
311 node_list[3] = &phba->fc_prli_list;
312 node_list[4] = &phba->fc_reglogin_list;
313 node_list[5] = &phba->fc_adisc_list;
314 node_list[6] = &phba->fc_plogi_list;
315 for (i = 0; i < 7; i++) {
316 listp = node_list[i];
317 if (list_empty(listp))
318 continue;
319
320 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
321			/* Fabric nodes are not handled through the state
322			   machine for link down */
323 if (ndlp->nlp_type & NLP_FABRIC) {
324 /* Remove ALL Fabric nodes except Fabric_DID */
325 if (ndlp->nlp_DID != Fabric_DID) {
326 /* Take it off current list and free */
327 lpfc_nlp_list(phba, ndlp,
328 NLP_NO_LIST);
329 }
330 }
331 else {
332
333 rc = lpfc_disc_state_machine(phba, ndlp, NULL,
334 NLP_EVT_DEVICE_RECOVERY);
335
336 /* Check config parameter use-adisc or FCP-2 */
337 if ((rc != NLP_STE_FREED_NODE) &&
338 (phba->cfg_use_adisc == 0) &&
339 !(ndlp->nlp_fcp_info &
340 NLP_FCP_2_DEVICE)) {
341 /* We know we will have to relogin, so
342 * unreglogin the rpi right now to fail
343 * any outstanding I/Os quickly.
344 */
345 lpfc_unreg_rpi(phba, ndlp);
346 }
347 }
348 }
349 }
350
351 /* free any ndlp's on unused list */
352 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
353 nlp_listp) {
354 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
355 }
356
357 /* Setup myDID for link up if we are in pt2pt mode */
358 if (phba->fc_flag & FC_PT2PT) {
359 phba->fc_myDID = 0;
360 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
361 lpfc_config_link(phba, mb);
362 mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
363 if (lpfc_sli_issue_mbox
364 (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
365 == MBX_NOT_FINISHED) {
366 mempool_free( mb, phba->mbox_mem_pool);
367 }
368 }
369 spin_lock_irq(phba->host->host_lock);
370 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
371 spin_unlock_irq(phba->host->host_lock);
372 }
373 spin_lock_irq(phba->host->host_lock);
374 phba->fc_flag &= ~FC_LBIT;
375 spin_unlock_irq(phba->host->host_lock);
376
377	/* Turn off discovery timer if it's running */
378 lpfc_can_disctmo(phba);
379
380 /* Must process IOCBs on all rings to handle ABORTed I/Os */
381 return (0);
382}
383
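/*
 * Handle a link-up event: mark discovery active, clear stale pt2pt
 * and RSCN flags, and drop old Fabric_DID logins left over from the
 * previous link session.
 */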
384static int
385lpfc_linkup(struct lpfc_hba * phba)
386{
387 struct lpfc_nodelist *ndlp, *next_ndlp;
388
389 spin_lock_irq(phba->host->host_lock);
390 phba->hba_state = LPFC_LINK_UP;
391 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
392 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
393 phba->fc_flag |= FC_NDISC_ACTIVE;
394 phba->fc_ns_retry = 0;
395 spin_unlock_irq(phba->host->host_lock);
396
397
398 /*
399 * Clean up old Fabric NLP_FABRIC logins.
400 */
401 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
402 nlp_listp) {
403 if (ndlp->nlp_DID == Fabric_DID) {
404 /* Take it off current list and free */
405 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
406 }
407 }
408
409 /* free any ndlp's on unused list */
410 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
411 nlp_listp) {
412 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
413 }
414
415 return 0;
416}
417
418/*
419 * This routine handles processing a CLEAR_LA mailbox
420 * command upon completion. It is setup in the LPFC_MBOXQ
421 * as the completion routine when the command is
422 * handed off to the SLI layer.
423 */
424void
425lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
426{
427 struct lpfc_sli *psli;
428 MAILBOX_t *mb;
429 uint32_t control;
430
431 psli = &phba->sli;
432 mb = &pmb->mb;
433 /* Since we don't do discovery right now, turn these off here */
434 psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
435 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
436 psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
437
438 /* Check for error */
439 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
440 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
441 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
442 "%d:0320 CLEAR_LA mbxStatus error x%x hba "
443 "state x%x\n",
444 phba->brd_no, mb->mbxStatus, phba->hba_state);
445
446 phba->hba_state = LPFC_HBA_ERROR;
447 goto out;
448 }
449
450 if (phba->fc_flag & FC_ABORT_DISCOVERY)
451 goto out;
452
453 phba->num_disc_nodes = 0;
454 /* go thru NPR list and issue ELS PLOGIs */
455 if (phba->fc_npr_cnt) {
456 lpfc_els_disc_plogi(phba);
457 }
458
459 if(!phba->num_disc_nodes) {
460 spin_lock_irq(phba->host->host_lock);
461 phba->fc_flag &= ~FC_NDISC_ACTIVE;
462 spin_unlock_irq(phba->host->host_lock);
463 }
464
465 phba->hba_state = LPFC_HBA_READY;
466
467out:
468 /* Device Discovery completes */
469 lpfc_printf_log(phba,
470 KERN_INFO,
471 LOG_DISCOVERY,
472 "%d:0225 Device Discovery completes\n",
473 phba->brd_no);
474
475 mempool_free( pmb, phba->mbox_mem_pool);
476
477 spin_lock_irq(phba->host->host_lock);
478 phba->fc_flag &= ~FC_ABORT_DISCOVERY;
479 if (phba->fc_flag & FC_ESTABLISH_LINK) {
480 phba->fc_flag &= ~FC_ESTABLISH_LINK;
481 }
482 spin_unlock_irq(phba->host->host_lock);
483
484 del_timer_sync(&phba->fc_estabtmo);
485
486 lpfc_can_disctmo(phba);
487
488 /* turn on Link Attention interrupts */
489 spin_lock_irq(phba->host->host_lock);
490 psli->sli_flag |= LPFC_PROCESS_LA;
491 control = readl(phba->HCregaddr);
492 control |= HC_LAINT_ENA;
493 writel(control, phba->HCregaddr);
494 readl(phba->HCregaddr); /* flush */
495 spin_unlock_irq(phba->host->host_lock);
496
497 return;
498}
499
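/*
 * Completion handler for the CONFIG_LINK mailbox command: either wait
 * for FAN (public loop with the L bit not set), start discovery with
 * FLOGI, or fall back to CLEAR_LA on error.
 */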
500static void
501lpfc_mbx_cmpl_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
502{
503 struct lpfc_sli *psli;
504 MAILBOX_t *mb;
505
506 psli = &phba->sli;
507 mb = &pmb->mb;
508 /* Check for error */
509 if (mb->mbxStatus) {
510 /* CONFIG_LINK mbox error <mbxStatus> state <hba_state> */
511 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
512 "%d:0306 CONFIG_LINK mbxStatus error x%x "
513 "HBA state x%x\n",
514 phba->brd_no, mb->mbxStatus, phba->hba_state);
515
516 lpfc_linkdown(phba);
517 phba->hba_state = LPFC_HBA_ERROR;
518 goto out;
519 }
520
521 if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
522 if (phba->fc_topology == TOPOLOGY_LOOP) {
523 /* If we are public loop and L bit was set */
524 if ((phba->fc_flag & FC_PUBLIC_LOOP) &&
525 !(phba->fc_flag & FC_LBIT)) {
526 /* Need to wait for FAN - use discovery timer
527 * for timeout. hba_state is identically
528 * LPFC_LOCAL_CFG_LINK while waiting for FAN
529 */
530 lpfc_set_disctmo(phba);
531 mempool_free( pmb, phba->mbox_mem_pool);
532 return;
533 }
534 }
535
536		/* Start discovery by sending a FLOGI. hba_state is identically
537 * LPFC_FLOGI while waiting for FLOGI cmpl
538 */
539 phba->hba_state = LPFC_FLOGI;
540 lpfc_set_disctmo(phba);
541 lpfc_initial_flogi(phba);
542 mempool_free( pmb, phba->mbox_mem_pool);
543 return;
544 }
545 if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
546 mempool_free( pmb, phba->mbox_mem_pool);
547 return;
548 }
549
550out:
551 /* CONFIG_LINK bad hba state <hba_state> */
552 lpfc_printf_log(phba,
553 KERN_ERR,
554 LOG_DISCOVERY,
555 "%d:0200 CONFIG_LINK bad hba state x%x\n",
556 phba->brd_no, phba->hba_state);
557
558 if (phba->hba_state != LPFC_CLEAR_LA) {
559 lpfc_clear_la(phba, pmb);
560 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
561 if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
562 == MBX_NOT_FINISHED) {
563 mempool_free( pmb, phba->mbox_mem_pool);
564 lpfc_disc_flush_list(phba);
565 psli->ring[(psli->ip_ring)].flag &=
566 ~LPFC_STOP_IOCB_EVENT;
567 psli->ring[(psli->fcp_ring)].flag &=
568 ~LPFC_STOP_IOCB_EVENT;
569 psli->ring[(psli->next_ring)].flag &=
570 ~LPFC_STOP_IOCB_EVENT;
571 phba->hba_state = LPFC_HBA_READY;
572 }
573 } else {
574 mempool_free( pmb, phba->mbox_mem_pool);
575 }
576 return;
577}
578
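/*
 * Completion handler for READ_SPARAM: copy the service parameters
 * into the hba and derive fc_nodename / fc_portname from them.
 */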
579static void
580lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
581{
582 struct lpfc_sli *psli = &phba->sli;
583 MAILBOX_t *mb = &pmb->mb;
584 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
585
586
587 /* Check for error */
588 if (mb->mbxStatus) {
589 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
590 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
591 "%d:0319 READ_SPARAM mbxStatus error x%x "
592				"hba state x%x\n",
593 phba->brd_no, mb->mbxStatus, phba->hba_state);
594
595 lpfc_linkdown(phba);
596 phba->hba_state = LPFC_HBA_ERROR;
597 goto out;
598 }
599
600 memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
601 sizeof (struct serv_parm));
602 memcpy((uint8_t *) & phba->fc_nodename,
603 (uint8_t *) & phba->fc_sparam.nodeName,
604 sizeof (struct lpfc_name));
605 memcpy((uint8_t *) & phba->fc_portname,
606 (uint8_t *) & phba->fc_sparam.portName,
607 sizeof (struct lpfc_name));
608 lpfc_mbuf_free(phba, mp->virt, mp->phys);
609 kfree(mp);
610 mempool_free( pmb, phba->mbox_mem_pool);
611 return;
612
613out:
614 pmb->context1 = NULL;
615 lpfc_mbuf_free(phba, mp->virt, mp->phys);
616 kfree(mp);
617 if (phba->hba_state != LPFC_CLEAR_LA) {
618 lpfc_clear_la(phba, pmb);
619 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
620 if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
621 == MBX_NOT_FINISHED) {
622 mempool_free( pmb, phba->mbox_mem_pool);
623 lpfc_disc_flush_list(phba);
624 psli->ring[(psli->ip_ring)].flag &=
625 ~LPFC_STOP_IOCB_EVENT;
626 psli->ring[(psli->fcp_ring)].flag &=
627 ~LPFC_STOP_IOCB_EVENT;
628 psli->ring[(psli->next_ring)].flag &=
629 ~LPFC_STOP_IOCB_EVENT;
630 phba->hba_state = LPFC_HBA_READY;
631 }
632 } else {
633 mempool_free( pmb, phba->mbox_mem_pool);
634 }
635 return;
636}
637
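/*
 * Act on a link-up attention: record link speed and topology, pick up
 * the granted AL_PA and loop map on loop topologies, then issue
 * READ_SPARAM and CONFIG_LINK mailbox commands to continue bring-up.
 */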
638static void
639lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
640{
641 int i;
642 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
643 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
644 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
645
646 spin_lock_irq(phba->host->host_lock);
647 switch(la->UlnkSpeed) {
648 case LA_1GHZ_LINK:
649 phba->fc_linkspeed = LA_1GHZ_LINK;
650 break;
651 case LA_2GHZ_LINK:
652 phba->fc_linkspeed = LA_2GHZ_LINK;
653 break;
654 case LA_4GHZ_LINK:
655 phba->fc_linkspeed = LA_4GHZ_LINK;
656 break;
657 default:
658 phba->fc_linkspeed = LA_UNKNW_LINK;
659 break;
660 }
661
662 phba->fc_topology = la->topology;
663
664 if (phba->fc_topology == TOPOLOGY_LOOP) {
665 /* Get Loop Map information */
666
667 if (la->il)
668 phba->fc_flag |= FC_LBIT;
669
670 phba->fc_myDID = la->granted_AL_PA;
671 i = la->un.lilpBde64.tus.f.bdeSize;
672
673 if (i == 0) {
674 phba->alpa_map[0] = 0;
675 } else {
676 if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
677 int numalpa, j, k;
678 union {
679 uint8_t pamap[16];
680 struct {
681 uint32_t wd1;
682 uint32_t wd2;
683 uint32_t wd3;
684 uint32_t wd4;
685 } pa;
686 } un;
687 numalpa = phba->alpa_map[0];
688 j = 0;
689 while (j < numalpa) {
690 memset(un.pamap, 0, 16);
691 for (k = 1; j < numalpa; k++) {
692 un.pamap[k - 1] =
693 phba->alpa_map[j + 1];
694 j++;
695 if (k == 16)
696 break;
697 }
698 /* Link Up Event ALPA map */
699 lpfc_printf_log(phba,
700 KERN_WARNING,
701 LOG_LINK_EVENT,
702 "%d:1304 Link Up Event "
703 "ALPA map Data: x%x "
704 "x%x x%x x%x\n",
705 phba->brd_no,
706 un.pa.wd1, un.pa.wd2,
707 un.pa.wd3, un.pa.wd4);
708 }
709 }
710 }
711 } else {
712 phba->fc_myDID = phba->fc_pref_DID;
713 phba->fc_flag |= FC_LBIT;
714 }
715 spin_unlock_irq(phba->host->host_lock);
716
717 lpfc_linkup(phba);
718 if (sparam_mbox) {
719 lpfc_read_sparam(phba, sparam_mbox);
720 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
721 lpfc_sli_issue_mbox(phba, sparam_mbox,
722 (MBX_NOWAIT | MBX_STOP_IOCB));
723 }
724
725 if (cfglink_mbox) {
726 phba->hba_state = LPFC_LOCAL_CFG_LINK;
727 lpfc_config_link(phba, cfglink_mbox);
728 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_config_link;
729 lpfc_sli_issue_mbox(phba, cfglink_mbox,
730 (MBX_NOWAIT | MBX_STOP_IOCB));
731 }
732}
733
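/* Take the link down and re-arm Link Attention interrupts. */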
734static void
735lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
736 uint32_t control;
737 struct lpfc_sli *psli = &phba->sli;
738
739 lpfc_linkdown(phba);
740
741 /* turn on Link Attention interrupts - no CLEAR_LA needed */
742 spin_lock_irq(phba->host->host_lock);
743 psli->sli_flag |= LPFC_PROCESS_LA;
744 control = readl(phba->HCregaddr);
745 control |= HC_LAINT_ENA;
746 writel(control, phba->HCregaddr);
747 readl(phba->HCregaddr); /* flush */
748 spin_unlock_irq(phba->host->host_lock);
749}
750
751/*
752 * This routine handles processing a READ_LA mailbox
753 * command upon completion. It is setup in the LPFC_MBOXQ
754 * as the completion routine when the command is
755 * handed off to the SLI layer.
756 */
757void
758lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
759{
760 READ_LA_VAR *la;
761 MAILBOX_t *mb = &pmb->mb;
762 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
763
764 /* Check for error */
765 if (mb->mbxStatus) {
766 lpfc_printf_log(phba,
767 KERN_INFO,
768 LOG_LINK_EVENT,
769 "%d:1307 READ_LA mbox error x%x state x%x\n",
770 phba->brd_no,
771 mb->mbxStatus, phba->hba_state);
772 lpfc_mbx_issue_link_down(phba);
773 phba->hba_state = LPFC_HBA_ERROR;
774 goto lpfc_mbx_cmpl_read_la_free_mbuf;
775 }
776
777 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
778
779 memcpy(&phba->alpa_map[0], mp->virt, 128);
780
781 if (((phba->fc_eventTag + 1) < la->eventTag) ||
782 (phba->fc_eventTag == la->eventTag)) {
783 phba->fc_stat.LinkMultiEvent++;
784 if (la->attType == AT_LINK_UP) {
785 if (phba->fc_eventTag != 0)
786 lpfc_linkdown(phba);
787 }
788 }
789
790 phba->fc_eventTag = la->eventTag;
791
792 if (la->attType == AT_LINK_UP) {
793 phba->fc_stat.LinkUp++;
794 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
795 "%d:1303 Link Up Event x%x received "
796 "Data: x%x x%x x%x x%x\n",
797 phba->brd_no, la->eventTag, phba->fc_eventTag,
798 la->granted_AL_PA, la->UlnkSpeed,
799 phba->alpa_map[0]);
800 lpfc_mbx_process_link_up(phba, la);
801 } else {
802 phba->fc_stat.LinkDown++;
803 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
804 "%d:1305 Link Down Event x%x received "
805 "Data: x%x x%x x%x\n",
806 phba->brd_no, la->eventTag, phba->fc_eventTag,
807 phba->hba_state, phba->fc_flag);
808 lpfc_mbx_issue_link_down(phba);
809 }
810
811lpfc_mbx_cmpl_read_la_free_mbuf:
812 lpfc_mbuf_free(phba, mp->virt, mp->phys);
813 kfree(mp);
814 mempool_free(pmb, phba->mbox_mem_pool);
815 return;
816}
817
818/*
819 * This routine handles processing a REG_LOGIN mailbox
820 * command upon completion. It is setup in the LPFC_MBOXQ
821 * as the completion routine when the command is
822 * handed off to the SLI layer.
823 */
824void
825lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
826{
827 struct lpfc_sli *psli;
828 MAILBOX_t *mb;
829 struct lpfc_dmabuf *mp;
830 struct lpfc_nodelist *ndlp;
831
832 psli = &phba->sli;
833 mb = &pmb->mb;
834
835 ndlp = (struct lpfc_nodelist *) pmb->context2;
836 mp = (struct lpfc_dmabuf *) (pmb->context1);
837
838 pmb->context1 = NULL;
839
840 /* Good status, call state machine */
841 lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
842 lpfc_mbuf_free(phba, mp->virt, mp->phys);
843 kfree(mp);
844 mempool_free( pmb, phba->mbox_mem_pool);
845
846 return;
847}
848
849/*
850 * This routine handles processing a Fabric REG_LOGIN mailbox
851 * command upon completion. It is setup in the LPFC_MBOXQ
852 * as the completion routine when the command is
853 * handed off to the SLI layer.
854 */
855void
856lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
857{
858 struct lpfc_sli *psli;
859 MAILBOX_t *mb;
860 struct lpfc_dmabuf *mp;
861 struct lpfc_nodelist *ndlp;
862 struct lpfc_nodelist *ndlp_fdmi;
863
864
865 psli = &phba->sli;
866 mb = &pmb->mb;
867
868 ndlp = (struct lpfc_nodelist *) pmb->context2;
869 mp = (struct lpfc_dmabuf *) (pmb->context1);
870
871 if (mb->mbxStatus) {
872 lpfc_mbuf_free(phba, mp->virt, mp->phys);
873 kfree(mp);
874 mempool_free( pmb, phba->mbox_mem_pool);
875 mempool_free( ndlp, phba->nlp_mem_pool);
876
877 /* FLOGI failed, so just use loop map to make discovery list */
878 lpfc_disc_list_loopmap(phba);
879
880 /* Start discovery */
881 lpfc_disc_start(phba);
882 return;
883 }
884
885 pmb->context1 = NULL;
886
887 if (ndlp->nlp_rpi != 0)
888 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
889 ndlp->nlp_rpi = mb->un.varWords[0];
890 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
891 ndlp->nlp_type |= NLP_FABRIC;
892 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
893 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
894
895 if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
896 /* This NPort has been assigned an NPort_ID by the fabric as a
897 * result of the completed fabric login. Issue a State Change
898 * Registration (SCR) ELS request to the fabric controller
899 * (SCR_DID) so that this NPort gets RSCN events from the
900 * fabric.
901 */
902 lpfc_issue_els_scr(phba, SCR_DID, 0);
903
904 /* Allocate a new node instance. If the pool is empty, just
905 * start the discovery process and skip the Nameserver login
906 * process. This is attempted again later on. Otherwise, issue
907 * a Port Login (PLOGI) to the NameServer
908 */
909 if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL))
910 == 0) {
911 lpfc_disc_start(phba);
912 } else {
913 lpfc_nlp_init(phba, ndlp, NameServer_DID);
914 ndlp->nlp_type |= NLP_FABRIC;
915 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
916 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
917 lpfc_issue_els_plogi(phba, ndlp, 0);
918 if (phba->cfg_fdmi_on) {
919 if ((ndlp_fdmi = mempool_alloc(
920 phba->nlp_mem_pool,
921 GFP_KERNEL))) {
922 lpfc_nlp_init(phba, ndlp_fdmi,
923 FDMI_DID);
924 ndlp_fdmi->nlp_type |= NLP_FABRIC;
925 ndlp_fdmi->nlp_state =
926 NLP_STE_PLOGI_ISSUE;
927 lpfc_issue_els_plogi(phba, ndlp_fdmi,
928 0);
929 }
930 }
931 }
932 }
933
934 lpfc_mbuf_free(phba, mp->virt, mp->phys);
935 kfree(mp);
936 mempool_free( pmb, phba->mbox_mem_pool);
937
938 return;
939}
940
941/*
942 * This routine handles processing a NameServer REG_LOGIN mailbox
943 * command upon completion. It is setup in the LPFC_MBOXQ
944 * as the completion routine when the command is
945 * handed off to the SLI layer.
946 */
947void
948lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
949{
950 struct lpfc_sli *psli;
951 MAILBOX_t *mb;
952 struct lpfc_dmabuf *mp;
953 struct lpfc_nodelist *ndlp;
954
955 psli = &phba->sli;
956 mb = &pmb->mb;
957
958 ndlp = (struct lpfc_nodelist *) pmb->context2;
959 mp = (struct lpfc_dmabuf *) (pmb->context1);
960
961 if (mb->mbxStatus) {
962 lpfc_mbuf_free(phba, mp->virt, mp->phys);
963 kfree(mp);
964 mempool_free( pmb, phba->mbox_mem_pool);
965 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
966
967 /* RegLogin failed, so just use loop map to make discovery
968 list */
969 lpfc_disc_list_loopmap(phba);
970
971 /* Start discovery */
972 lpfc_disc_start(phba);
973 return;
974 }
975
976 pmb->context1 = NULL;
977
978 if (ndlp->nlp_rpi != 0)
979 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
980 ndlp->nlp_rpi = mb->un.varWords[0];
981 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
982 ndlp->nlp_type |= NLP_FABRIC;
983 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
984 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
985
986 if (phba->hba_state < LPFC_HBA_READY) {
987		/* Link up discovery requires Fabric registration. */
988 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
989 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
990 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
991 }
992
993 phba->fc_ns_retry = 0;
994 /* Good status, issue CT Request to NameServer */
995 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
996 /* Cannot issue NameServer Query, so finish up discovery */
997 lpfc_disc_start(phba);
998 }
999
1000 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1001 kfree(mp);
1002 mempool_free( pmb, phba->mbox_mem_pool);
1003
1004 return;
1005}
1006
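/*
 * Register this node with the FC transport as a remote port; the
 * transport-assigned scsi_target_id, when valid, is cached in
 * nlp_sid.
 */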
1007static void
1008lpfc_register_remote_port(struct lpfc_hba * phba,
1009 struct lpfc_nodelist * ndlp)
1010{
1011 struct fc_rport *rport;
1012 struct lpfc_rport_data *rdata;
1013 struct fc_rport_identifiers rport_ids;
1014 uint64_t wwn;
1015
1016 /* Remote port has reappeared. Re-register w/ FC transport */
1017 memcpy(&wwn, &ndlp->nlp_nodename, sizeof(uint64_t));
1018 rport_ids.node_name = be64_to_cpu(wwn);
1019 memcpy(&wwn, &ndlp->nlp_portname, sizeof(uint64_t));
1020 rport_ids.port_name = be64_to_cpu(wwn);
1021 rport_ids.port_id = ndlp->nlp_DID;
1022 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1023 if (ndlp->nlp_type & NLP_FCP_TARGET)
1024 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
1025 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
1026 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1027
1028 ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
1029 if (!rport) {
1030 dev_printk(KERN_WARNING, &phba->pcidev->dev,
1031 "Warning: fc_remote_port_add failed\n");
1032 return;
1033 }
1034
1035 /* initialize static port data */
1036 rport->maxframe_size = ndlp->nlp_maxframe;
1037 rport->supported_classes = ndlp->nlp_class_sup;
1038 if ((rport->scsi_target_id != -1) &&
1039 (rport->scsi_target_id < MAX_FCP_TARGET)) {
1040 ndlp->nlp_sid = rport->scsi_target_id;
1041 }
1042 rdata = rport->dd_data;
1043 rdata->pnode = ndlp;
1044
1045 return;
1046}
1047
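/*
 * Move a node between the driver's per-state lists, keeping the
 * per-list counters, nodev/delay timers, and FC transport rport
 * state (block/unblock/add/remove) consistent with the move.
 */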
1048int
1049lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1050{
1051 enum { none, unmapped, mapped } rport_add = none, rport_del = none;
1052 struct lpfc_sli *psli;
1053
1054 psli = &phba->sli;
1055 /* Sanity check to ensure we are not moving to / from the same list */
1056 if ((nlp->nlp_flag & NLP_LIST_MASK) == list) {
1057 if (list != NLP_NO_LIST)
1058 return(0);
1059 }
1060
1061 switch(nlp->nlp_flag & NLP_LIST_MASK) {
1062 case NLP_NO_LIST: /* Not on any list */
1063 break;
1064 case NLP_UNUSED_LIST:
1065 phba->fc_unused_cnt--;
1066 list_del(&nlp->nlp_listp);
1067 break;
1068 case NLP_PLOGI_LIST:
1069 phba->fc_plogi_cnt--;
1070 list_del(&nlp->nlp_listp);
1071 break;
1072 case NLP_ADISC_LIST:
1073 phba->fc_adisc_cnt--;
1074 list_del(&nlp->nlp_listp);
1075 break;
1076 case NLP_REGLOGIN_LIST:
1077 phba->fc_reglogin_cnt--;
1078 list_del(&nlp->nlp_listp);
1079 break;
1080 case NLP_PRLI_LIST:
1081 phba->fc_prli_cnt--;
1082 list_del(&nlp->nlp_listp);
1083 break;
1084 case NLP_UNMAPPED_LIST:
1085 phba->fc_unmap_cnt--;
1086 list_del(&nlp->nlp_listp);
1087 spin_lock_irq(phba->host->host_lock);
1088 nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1089 nlp->nlp_type &= ~NLP_FC_NODE;
1090 spin_unlock_irq(phba->host->host_lock);
1091 phba->nport_event_cnt++;
1092 if (nlp->rport)
1093 rport_del = unmapped;
1094 break;
1095 case NLP_MAPPED_LIST:
1096 phba->fc_map_cnt--;
1097 list_del(&nlp->nlp_listp);
1098 phba->nport_event_cnt++;
1099 if (nlp->rport)
1100 rport_del = mapped;
1101 break;
1102 case NLP_NPR_LIST:
1103 phba->fc_npr_cnt--;
1104 list_del(&nlp->nlp_listp);
1105 /* Stop delay tmo if taking node off NPR list */
1106 if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
1107 (list != NLP_NPR_LIST)) {
1108 spin_lock_irq(phba->host->host_lock);
1109 nlp->nlp_flag &= ~NLP_DELAY_TMO;
1110 spin_unlock_irq(phba->host->host_lock);
1111 del_timer_sync(&nlp->nlp_delayfunc);
1112 if (!list_empty(&nlp->els_retry_evt.evt_listp))
1113 list_del_init(&nlp->els_retry_evt.evt_listp);
1114 }
1115 break;
1116 }
1117
1118 spin_lock_irq(phba->host->host_lock);
1119 nlp->nlp_flag &= ~NLP_LIST_MASK;
1120 spin_unlock_irq(phba->host->host_lock);
1121
1122 /* Add NPort <did> to <num> list */
1123 lpfc_printf_log(phba,
1124 KERN_INFO,
1125 LOG_NODE,
1126 "%d:0904 Add NPort x%x to %d list Data: x%x\n",
1127 phba->brd_no,
1128 nlp->nlp_DID, list, nlp->nlp_flag);
1129
1130 switch(list) {
1131 case NLP_NO_LIST: /* No list, just remove it */
1132 lpfc_nlp_remove(phba, nlp);
1133 break;
1134 case NLP_UNUSED_LIST:
1135 spin_lock_irq(phba->host->host_lock);
1136 nlp->nlp_flag |= list;
1137 spin_unlock_irq(phba->host->host_lock);
1138 /* Put it at the end of the unused list */
1139 list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
1140 phba->fc_unused_cnt++;
1141 break;
1142 case NLP_PLOGI_LIST:
1143 spin_lock_irq(phba->host->host_lock);
1144 nlp->nlp_flag |= list;
1145 spin_unlock_irq(phba->host->host_lock);
1146 /* Put it at the end of the plogi list */
1147 list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
1148 phba->fc_plogi_cnt++;
1149 break;
1150 case NLP_ADISC_LIST:
1151 spin_lock_irq(phba->host->host_lock);
1152 nlp->nlp_flag |= list;
1153 spin_unlock_irq(phba->host->host_lock);
1154 /* Put it at the end of the adisc list */
1155 list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
1156 phba->fc_adisc_cnt++;
1157 break;
1158 case NLP_REGLOGIN_LIST:
1159 spin_lock_irq(phba->host->host_lock);
1160 nlp->nlp_flag |= list;
1161 spin_unlock_irq(phba->host->host_lock);
1162 /* Put it at the end of the reglogin list */
1163 list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
1164 phba->fc_reglogin_cnt++;
1165 break;
1166 case NLP_PRLI_LIST:
1167 spin_lock_irq(phba->host->host_lock);
1168 nlp->nlp_flag |= list;
1169 spin_unlock_irq(phba->host->host_lock);
1170 /* Put it at the end of the prli list */
1171 list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
1172 phba->fc_prli_cnt++;
1173 break;
1174 case NLP_UNMAPPED_LIST:
1175 rport_add = unmapped;
1176 /* ensure all vestiges of "mapped" significance are gone */
1177 nlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1178 spin_lock_irq(phba->host->host_lock);
1179 nlp->nlp_flag |= list;
1180 spin_unlock_irq(phba->host->host_lock);
1181 /* Put it at the end of the unmap list */
1182 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
1183 phba->fc_unmap_cnt++;
1184 phba->nport_event_cnt++;
1185 /* stop nodev tmo if running */
1186 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1187 spin_lock_irq(phba->host->host_lock);
1188 nlp->nlp_flag &= ~NLP_NODEV_TMO;
1189 spin_unlock_irq(phba->host->host_lock);
1190 del_timer_sync(&nlp->nlp_tmofunc);
1191 if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
1192 list_del_init(&nlp->nodev_timeout_evt.
1193 evt_listp);
1194
1195 }
1196 nlp->nlp_type |= NLP_FC_NODE;
1197 break;
1198 case NLP_MAPPED_LIST:
1199 rport_add = mapped;
1200 spin_lock_irq(phba->host->host_lock);
1201 nlp->nlp_flag |= list;
1202 spin_unlock_irq(phba->host->host_lock);
1203 /* Put it at the end of the map list */
1204 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
1205 phba->fc_map_cnt++;
1206 phba->nport_event_cnt++;
1207 /* stop nodev tmo if running */
1208 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1209 nlp->nlp_flag &= ~NLP_NODEV_TMO;
1210 del_timer_sync(&nlp->nlp_tmofunc);
1211 if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
1212 list_del_init(&nlp->nodev_timeout_evt.
1213 evt_listp);
1214
1215 }
1216 break;
1217 case NLP_NPR_LIST:
1218 spin_lock_irq(phba->host->host_lock);
1219 nlp->nlp_flag |= list;
1220 spin_unlock_irq(phba->host->host_lock);
1221 /* Put it at the end of the npr list */
1222 list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
1223 phba->fc_npr_cnt++;
1224
1225 /*
1226 * Sanity check for Fabric entity.
1227		 * Set nodev_tmo for the NPR state; for Fabric nodes use 1 sec.
1228 */
1229 if (nlp->nlp_type & NLP_FABRIC) {
1230 mod_timer(&nlp->nlp_tmofunc, jiffies + HZ);
1231 }
1232 else {
1233 mod_timer(&nlp->nlp_tmofunc,
1234 jiffies + HZ * phba->cfg_nodev_tmo);
1235 }
1236 spin_lock_irq(phba->host->host_lock);
1237 nlp->nlp_flag |= NLP_NODEV_TMO;
1238 nlp->nlp_flag &= ~NLP_RCV_PLOGI;
1239 spin_unlock_irq(phba->host->host_lock);
1240 break;
1241 case NLP_JUST_DQ:
1242 break;
1243 }
1244
1245 /*
1246 * We make all the calls into the transport after we have
1247	 * moved the node between lists. This is so that we don't
1248 * release the lock while in-between lists.
1249 */
1250
1251 /* Don't upcall midlayer if we're unloading */
1252 if (!(phba->fc_flag & FC_UNLOADING)) {
1253 /*
1254 * We revalidate the rport pointer as the "add" function
1255 * may have removed the remote port.
1256 */
1257 if ((rport_del != none) && nlp->rport)
1258 fc_remote_port_block(nlp->rport);
1259
1260 if (rport_add != none) {
1261 /*
1262 * Tell the fc transport about the port, if we haven't
1263 * already. If we have, and it's a scsi entity, be
1264 * sure to unblock any attached scsi devices
1265 */
1266 if (!nlp->rport)
1267 lpfc_register_remote_port(phba, nlp);
1268 else
1269 fc_remote_port_unblock(nlp->rport);
1270
1271 /*
1272 * if we added to Mapped list, but the remote port
1273 * registration failed or assigned a target id outside
1274 * our presentable range - move the node to the
1275 * Unmapped List
1276 */
1277 if ((rport_add == mapped) &&
1278 ((!nlp->rport) ||
1279 (nlp->rport->scsi_target_id == -1) ||
1280 (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) {
1281 nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1282 spin_lock_irq(phba->host->host_lock);
1283 nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
1284 spin_unlock_irq(phba->host->host_lock);
1285 lpfc_nlp_list(phba, nlp, NLP_UNMAPPED_LIST);
1286 }
1287 }
1288 }
1289 return (0);
1290}
1291
1292/*
1293 * Start / ReStart rescue timer for Discovery / RSCN handling
1294 */
1295void
1296lpfc_set_disctmo(struct lpfc_hba * phba)
1297{
1298 uint32_t tmo;
1299
1300 tmo = ((phba->fc_ratov * 2) + 1);
1301
1302 mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
1303 spin_lock_irq(phba->host->host_lock);
1304 phba->fc_flag |= FC_DISC_TMO;
1305 spin_unlock_irq(phba->host->host_lock);
1306
1307 /* Start Discovery Timer state <hba_state> */
1308 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1309 "%d:0247 Start Discovery Timer state x%x "
1310 "Data: x%x x%lx x%x x%x\n",
1311 phba->brd_no,
1312 phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
1313 phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1314
1315 return;
1316}
1317
1318/*
1319 * Cancel rescue timer for Discovery / RSCN handling
1320 */
1321int
1322lpfc_can_disctmo(struct lpfc_hba * phba)
1323{
1324	/* Turn off discovery timer if it's running */
1325 if (phba->fc_flag & FC_DISC_TMO) {
1326 spin_lock_irq(phba->host->host_lock);
1327 phba->fc_flag &= ~FC_DISC_TMO;
1328 spin_unlock_irq(phba->host->host_lock);
1329 del_timer_sync(&phba->fc_disctmo);
1330 phba->work_hba_events &= ~WORKER_DISC_TMO;
1331 }
1332
1333 /* Cancel Discovery Timer state <hba_state> */
1334 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1335 "%d:0248 Cancel Discovery Timer state x%x "
1336 "Data: x%x x%x x%x\n",
1337 phba->brd_no, phba->hba_state, phba->fc_flag,
1338 phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1339
1340 return (0);
1341}
1342
1343/*
1344 * Check specified ring for outstanding IOCB on the SLI queue
1345 * Return true if iocb matches the specified nport
1346 */
1347int
1348lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1349 struct lpfc_sli_ring * pring,
1350 struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
1351{
1352 struct lpfc_sli *psli;
1353 IOCB_t *icmd;
1354
1355 psli = &phba->sli;
1356 icmd = &iocb->iocb;
1357 if (pring->ringno == LPFC_ELS_RING) {
1358 switch (icmd->ulpCommand) {
1359 case CMD_GEN_REQUEST64_CR:
1360 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
1361 return (1);
1362 case CMD_ELS_REQUEST64_CR:
1363 case CMD_XMIT_ELS_RSP64_CX:
1364 if (iocb->context1 == (uint8_t *) ndlp)
1365 return (1);
1366 }
1367 } else if (pring->ringno == psli->ip_ring) {
1368
1369 } else if (pring->ringno == psli->fcp_ring) {
1370 /* Skip match check if waiting to relogin to FCP target */
1371 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1372 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
1373 return (0);
1374 }
1375 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
1376 return (1);
1377 }
1378 } else if (pring->ringno == psli->next_ring) {
1379
1380 }
1381 return (0);
1382}
1383
1384/*
1385 * Free resources / clean up outstanding I/Os
1386 * associated with nlp_rpi in the LPFC_NODELIST entry.
1387 */
1388static int
1389lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1390{
1391 struct lpfc_sli *psli;
1392 struct lpfc_sli_ring *pring;
1393 struct lpfc_iocbq *iocb, *next_iocb;
1394 IOCB_t *icmd;
1395 uint32_t rpi, i;
1396
1397 /*
1398 * Everything that matches on txcmplq will be returned
1399 * by firmware with a no rpi error.
1400 */
1401 psli = &phba->sli;
1402 rpi = ndlp->nlp_rpi;
1403 if (rpi) {
1404 /* Now process each ring */
1405 for (i = 0; i < psli->num_rings; i++) {
1406 pring = &psli->ring[i];
1407
1408 spin_lock_irq(phba->host->host_lock);
1409 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
1410 list) {
1411 /*
1412 * Check to see if iocb matches the nport we are
1413 * looking for
1414 */
1415 if ((lpfc_check_sli_ndlp
1416 (phba, pring, iocb, ndlp))) {
1417				/* It matches, so dequeue it and call the
1418				   completion with an error */
1419 list_del(&iocb->list);
1420 pring->txq_cnt--;
1421 if (iocb->iocb_cmpl) {
1422 icmd = &iocb->iocb;
1423 icmd->ulpStatus =
1424 IOSTAT_LOCAL_REJECT;
1425 icmd->un.ulpWord[4] =
1426 IOERR_SLI_ABORTED;
1427 spin_unlock_irq(phba->host->
1428 host_lock);
1429 (iocb->iocb_cmpl) (phba,
1430 iocb, iocb);
1431 spin_lock_irq(phba->host->
1432 host_lock);
1433 } else {
1434 list_add_tail(&iocb->list,
1435 &phba->lpfc_iocb_list);
1436 }
1437 }
1438 }
1439 spin_unlock_irq(phba->host->host_lock);
1440
1441 }
1442 }
1443 return (0);
1444}
1445
1446/*
1447 * Free rpi associated with LPFC_NODELIST entry.
1448 * This routine is called from lpfc_freenode(), when we are removing
1449 * a LPFC_NODELIST entry. It is also called if the driver initiates a
1450 * LOGO that completes successfully, and we are waiting to PLOGI back
1451 * to the remote NPort. In addition, it is called after we receive
1452 * an unsolicited ELS cmd, send back a rsp, the rsp completes, and
1453 * we are waiting to PLOGI back to the remote NPort.
1454 */
1455int
1456lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1457{
1458 LPFC_MBOXQ_t *mbox;
1459 int rc;
1460
1461 if (ndlp->nlp_rpi) {
1462 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
1463 lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
1464 mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
1465 rc = lpfc_sli_issue_mbox
1466 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
1467 if (rc == MBX_NOT_FINISHED)
1468 mempool_free( mbox, phba->mbox_mem_pool);
1469 }
1470 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
1471 lpfc_no_rpi(phba, ndlp);
1472 ndlp->nlp_rpi = 0;
1473 return 1;
1474 }
1475 return 0;
1476}
1477
1478/*
1479 * Free resources associated with LPFC_NODELIST entry
1480 * so it can be freed.
1481 */
1482static int
1483lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1484{
1485 LPFC_MBOXQ_t *mb;
1486 LPFC_MBOXQ_t *nextmb;
1487 struct lpfc_dmabuf *mp;
1488 struct fc_rport *rport;
1489
1490 /* Cleanup node for NPort <nlp_DID> */
1491 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1492 "%d:0900 Cleanup node for NPort x%x "
1493 "Data: x%x x%x x%x\n",
1494 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
1495 ndlp->nlp_state, ndlp->nlp_rpi);
1496
1497 lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);
1498
1499 /*
1500 * if unloading the driver - just leave the remote port in place.
1501 * The driver unload will force the attached devices to detach
1502 * and flush cache's w/o generating flush errors.
1503 */
1504 if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
1505 rport = ndlp->rport;
1506 ndlp->rport = NULL;
1507 fc_remote_port_unblock(rport);
1508 fc_remote_port_delete(rport);
1509 ndlp->nlp_sid = NLP_NO_SID;
1510 }
1511
1512 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1513 if ((mb = phba->sli.mbox_active)) {
1514 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1515 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1516 mb->context2 = NULL;
1517 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1518 }
1519 }
1520 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1521 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1522 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1523 mp = (struct lpfc_dmabuf *) (mb->context1);
1524 if (mp) {
1525 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1526 kfree(mp);
1527 }
1528 list_del(&mb->list);
1529 mempool_free(mb, phba->mbox_mem_pool);
1530 }
1531 }
1532
1533 lpfc_els_abort(phba,ndlp,0);
1534 spin_lock_irq(phba->host->host_lock);
1535 ndlp->nlp_flag &= ~(NLP_NODEV_TMO|NLP_DELAY_TMO);
1536 spin_unlock_irq(phba->host->host_lock);
1537 del_timer_sync(&ndlp->nlp_tmofunc);
1538
1539 del_timer_sync(&ndlp->nlp_delayfunc);
1540
1541 if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1542 list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1543 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1544 list_del_init(&ndlp->els_retry_evt.evt_listp);
1545
1546 lpfc_unreg_rpi(phba, ndlp);
1547
1548 return (0);
1549}
1550
1551/*
1552 * Check to see if we can free the nlp back to the freelist.
1553 * If we are in the middle of using the nlp in the discovery state
1554 * machine, defer the free till we reach the end of the state machine.
1555 */
1556int
1557lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1558{
1559 if (ndlp->nlp_flag & NLP_NODEV_TMO) {
1560 spin_lock_irq(phba->host->host_lock);
1561 ndlp->nlp_flag &= ~NLP_NODEV_TMO;
1562 spin_unlock_irq(phba->host->host_lock);
1563 del_timer_sync(&ndlp->nlp_tmofunc);
1564 if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1565 list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1566
1567 }
1568
1569
1570 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1571 spin_lock_irq(phba->host->host_lock);
1572 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1573 spin_unlock_irq(phba->host->host_lock);
1574 del_timer_sync(&ndlp->nlp_delayfunc);
1575 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1576 list_del_init(&ndlp->els_retry_evt.evt_listp);
1577 }
1578
1579 if (ndlp->nlp_disc_refcnt) {
1580 spin_lock_irq(phba->host->host_lock);
1581 ndlp->nlp_flag |= NLP_DELAY_REMOVE;
1582 spin_unlock_irq(phba->host->host_lock);
1583 }
1584 else {
1585 lpfc_freenode(phba, ndlp);
1586 mempool_free( ndlp, phba->nlp_mem_pool);
1587 }
1588 return(0);
1589}
1590
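/*
 * DID match helper: returns 1 on a direct match, or on an area/domain
 * wildcard match once our own DID has non-zero domain and area fields.
 */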
1591static int
1592lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
1593{
1594 D_ID mydid;
1595 D_ID ndlpdid;
1596 D_ID matchdid;
1597
1598 if (did == Bcast_DID)
1599 return (0);
1600
1601 if (ndlp->nlp_DID == 0) {
1602 return (0);
1603 }
1604
1605 /* First check for Direct match */
1606 if (ndlp->nlp_DID == did)
1607 return (1);
1608
1609 /* Next check for area/domain identically equals 0 match */
1610 mydid.un.word = phba->fc_myDID;
1611 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
1612 return (0);
1613 }
1614
1615 matchdid.un.word = did;
1616 ndlpdid.un.word = ndlp->nlp_DID;
1617 if (matchdid.un.b.id == ndlpdid.un.b.id) {
1618 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
1619 (mydid.un.b.area == matchdid.un.b.area)) {
1620 if ((ndlpdid.un.b.domain == 0) &&
1621 (ndlpdid.un.b.area == 0)) {
1622 if (ndlpdid.un.b.id)
1623 return (1);
1624 }
1625 return (0);
1626 }
1627
1628 matchdid.un.word = ndlp->nlp_DID;
1629 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
1630 (mydid.un.b.area == ndlpdid.un.b.area)) {
1631 if ((matchdid.un.b.domain == 0) &&
1632 (matchdid.un.b.area == 0)) {
1633 if (matchdid.un.b.id)
1634 return (1);
1635 }
1636 }
1637 }
1638 return (0);
1639}
1640
1641/* Search for a nodelist entry on a specific list */
1642struct lpfc_nodelist *
1643lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1644{
1645 struct lpfc_nodelist *ndlp, *next_ndlp;
1646 uint32_t data1;
1647
1648 if (order & NLP_SEARCH_UNMAPPED) {
1649 list_for_each_entry_safe(ndlp, next_ndlp,
1650 &phba->fc_nlpunmap_list, nlp_listp) {
1651 if (lpfc_matchdid(phba, ndlp, did)) {
1652 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1653 ((uint32_t) ndlp->nlp_xri << 16) |
1654 ((uint32_t) ndlp->nlp_type << 8) |
1655 ((uint32_t) ndlp->nlp_rpi & 0xff));
1656 /* FIND node DID unmapped */
1657 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1658 "%d:0929 FIND node DID unmapped"
1659 " Data: x%p x%x x%x x%x\n",
1660 phba->brd_no,
1661 ndlp, ndlp->nlp_DID,
1662 ndlp->nlp_flag, data1);
1663 return (ndlp);
1664 }
1665 }
1666 }
1667
1668 if (order & NLP_SEARCH_MAPPED) {
1669 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
1670 nlp_listp) {
1671 if (lpfc_matchdid(phba, ndlp, did)) {
1672
1673 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1674 ((uint32_t) ndlp->nlp_xri << 16) |
1675 ((uint32_t) ndlp->nlp_type << 8) |
1676 ((uint32_t) ndlp->nlp_rpi & 0xff));
1677 /* FIND node DID mapped */
1678 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1679 "%d:0930 FIND node DID mapped "
1680 "Data: x%p x%x x%x x%x\n",
1681 phba->brd_no,
1682 ndlp, ndlp->nlp_DID,
1683 ndlp->nlp_flag, data1);
1684 return (ndlp);
1685 }
1686 }
1687 }
1688
1689 if (order & NLP_SEARCH_PLOGI) {
1690 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
1691 nlp_listp) {
1692 if (lpfc_matchdid(phba, ndlp, did)) {
1693
1694 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1695 ((uint32_t) ndlp->nlp_xri << 16) |
1696 ((uint32_t) ndlp->nlp_type << 8) |
1697 ((uint32_t) ndlp->nlp_rpi & 0xff));
1698 /* LOG change to PLOGI */
1699 /* FIND node DID plogi */
1700 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1701 "%d:0908 FIND node DID plogi "
1702 "Data: x%p x%x x%x x%x\n",
1703 phba->brd_no,
1704 ndlp, ndlp->nlp_DID,
1705 ndlp->nlp_flag, data1);
1706 return (ndlp);
1707 }
1708 }
1709 }
1710
1711 if (order & NLP_SEARCH_ADISC) {
1712 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1713 nlp_listp) {
1714 if (lpfc_matchdid(phba, ndlp, did)) {
1715
1716 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1717 ((uint32_t) ndlp->nlp_xri << 16) |
1718 ((uint32_t) ndlp->nlp_type << 8) |
1719 ((uint32_t) ndlp->nlp_rpi & 0xff));
1720 /* LOG change to ADISC */
1721 /* FIND node DID adisc */
1722 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1723 "%d:0931 FIND node DID adisc "
1724 "Data: x%p x%x x%x x%x\n",
1725 phba->brd_no,
1726 ndlp, ndlp->nlp_DID,
1727 ndlp->nlp_flag, data1);
1728 return (ndlp);
1729 }
1730 }
1731 }
1732
1733 if (order & NLP_SEARCH_REGLOGIN) {
1734 list_for_each_entry_safe(ndlp, next_ndlp,
1735 &phba->fc_reglogin_list, nlp_listp) {
1736 if (lpfc_matchdid(phba, ndlp, did)) {
1737
1738 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1739 ((uint32_t) ndlp->nlp_xri << 16) |
1740 ((uint32_t) ndlp->nlp_type << 8) |
1741 ((uint32_t) ndlp->nlp_rpi & 0xff));
1742 /* LOG change to REGLOGIN */
1743 /* FIND node DID reglogin */
1744 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1745 "%d:0931 FIND node DID reglogin"
1746 " Data: x%p x%x x%x x%x\n",
1747 phba->brd_no,
1748 ndlp, ndlp->nlp_DID,
1749 ndlp->nlp_flag, data1);
1750 return (ndlp);
1751 }
1752 }
1753 }
1754
1755 if (order & NLP_SEARCH_PRLI) {
1756 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
1757 nlp_listp) {
1758 if (lpfc_matchdid(phba, ndlp, did)) {
1759
1760 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1761 ((uint32_t) ndlp->nlp_xri << 16) |
1762 ((uint32_t) ndlp->nlp_type << 8) |
1763 ((uint32_t) ndlp->nlp_rpi & 0xff));
1764 /* LOG change to PRLI */
1765 /* FIND node DID prli */
1766 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1767 "%d:0931 FIND node DID prli "
1768 "Data: x%p x%x x%x x%x\n",
1769 phba->brd_no,
1770 ndlp, ndlp->nlp_DID,
1771 ndlp->nlp_flag, data1);
1772 return (ndlp);
1773 }
1774 }
1775 }
1776
1777 if (order & NLP_SEARCH_NPR) {
1778 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1779 nlp_listp) {
1780 if (lpfc_matchdid(phba, ndlp, did)) {
1781
1782 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1783 ((uint32_t) ndlp->nlp_xri << 16) |
1784 ((uint32_t) ndlp->nlp_type << 8) |
1785 ((uint32_t) ndlp->nlp_rpi & 0xff));
1786 /* LOG change to NPR */
1787 /* FIND node DID npr */
1788 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1789 "%d:0931 FIND node DID npr "
1790 "Data: x%p x%x x%x x%x\n",
1791 phba->brd_no,
1792 ndlp, ndlp->nlp_DID,
1793 ndlp->nlp_flag, data1);
1794 return (ndlp);
1795 }
1796 }
1797 }
1798
1799 if (order & NLP_SEARCH_UNUSED) {
1800		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
1801 nlp_listp) {
1802 if (lpfc_matchdid(phba, ndlp, did)) {
1803
1804 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1805 ((uint32_t) ndlp->nlp_xri << 16) |
1806 ((uint32_t) ndlp->nlp_type << 8) |
1807 ((uint32_t) ndlp->nlp_rpi & 0xff));
1808 /* LOG change to UNUSED */
1809 /* FIND node DID unused */
1810 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1811 "%d:0931 FIND node DID unused "
1812 "Data: x%p x%x x%x x%x\n",
1813 phba->brd_no,
1814 ndlp, ndlp->nlp_DID,
1815 ndlp->nlp_flag, data1);
1816 return (ndlp);
1817 }
1818 }
1819 }
1820
1821 /* FIND node did <did> NOT FOUND */
1822 lpfc_printf_log(phba,
1823 KERN_INFO,
1824 LOG_NODE,
1825 "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
1826 phba->brd_no, did, order);
1827
1828 /* no match found */
1829 return NULL;
1830}
1831
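/*
 * Ensure a node for <did> exists and is marked for discovery:
 * allocate a fresh NPR node if none is found, or re-queue an existing
 * one unless it is already mid-PLOGI/ADISC or is not named in the
 * current RSCN payload.
 */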
1832struct lpfc_nodelist *
1833lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
1834{
1835 struct lpfc_nodelist *ndlp;
1836 uint32_t flg;
1837
1838 if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did)) == 0) {
1839 if ((phba->hba_state == LPFC_HBA_READY) &&
1840 ((lpfc_rscn_payload_check(phba, did) == 0)))
1841 return NULL;
1842 ndlp = (struct lpfc_nodelist *)
1843 mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1844 if (!ndlp)
1845 return NULL;
1846 lpfc_nlp_init(phba, ndlp, did);
1847 ndlp->nlp_state = NLP_STE_NPR_NODE;
1848 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1849 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1850 return ndlp;
1851 }
1852 if ((phba->hba_state == LPFC_HBA_READY) &&
1853 (phba->fc_flag & FC_RSCN_MODE)) {
1854 if (lpfc_rscn_payload_check(phba, did)) {
1855 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1856 }
1857 else {
1858 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1859 ndlp = NULL;
1860 }
1861 }
1862 else {
1863 flg = ndlp->nlp_flag & NLP_LIST_MASK;
1864 if ((flg == NLP_ADISC_LIST) ||
1865 (flg == NLP_PLOGI_LIST)) {
1866 return NULL;
1867 }
1868 ndlp->nlp_state = NLP_STE_NPR_NODE;
1869 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1870 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1871 }
1872 return ndlp;
1873}
1874
1875/* Build a list of nodes to discover based on the loopmap */
1876void
1877lpfc_disc_list_loopmap(struct lpfc_hba * phba)
1878{
1879 int j;
1880 uint32_t alpa, index;
1881
1882 if (phba->hba_state <= LPFC_LINK_DOWN) {
1883 return;
1884 }
1885 if (phba->fc_topology != TOPOLOGY_LOOP) {
1886 return;
1887 }
1888
1889	/* Check whether a loop map is present */
1890 if (phba->alpa_map[0]) {
1891 for (j = 1; j <= phba->alpa_map[0]; j++) {
1892 alpa = phba->alpa_map[j];
1893
1894 if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
1895 continue;
1896 }
1897 lpfc_setup_disc_node(phba, alpa);
1898 }
1899 } else {
1900 /* No alpamap, so try all alpa's */
1901 for (j = 0; j < FC_MAXLOOP; j++) {
1902 /* If cfg_scan_down is set, start from highest
1903 * ALPA (0xef) to lowest (0x1).
1904 */
1905 if (phba->cfg_scan_down)
1906 index = j;
1907 else
1908 index = FC_MAXLOOP - j - 1;
1909 alpa = lpfcAlpaArray[index];
1910 if ((phba->fc_myDID & 0xff) == alpa) {
1911 continue;
1912 }
1913
1914 lpfc_setup_disc_node(phba, alpa);
1915 }
1916 }
1917 return;
1918}
1919
1920/* Start Link up / RSCN discovery on NPR list */
1921void
1922lpfc_disc_start(struct lpfc_hba * phba)
1923{
1924 struct lpfc_sli *psli;
1925 LPFC_MBOXQ_t *mbox;
1926 struct lpfc_nodelist *ndlp, *next_ndlp;
1927 uint32_t did_changed, num_sent;
1928 uint32_t clear_la_pending;
1929 int rc;
1930
1931 psli = &phba->sli;
1932
1933 if (phba->hba_state <= LPFC_LINK_DOWN) {
1934 return;
1935 }
1936 if (phba->hba_state == LPFC_CLEAR_LA)
1937 clear_la_pending = 1;
1938 else
1939 clear_la_pending = 0;
1940
1941 if (phba->hba_state < LPFC_HBA_READY) {
1942 phba->hba_state = LPFC_DISC_AUTH;
1943 }
1944 lpfc_set_disctmo(phba);
1945
1946 if (phba->fc_prevDID == phba->fc_myDID) {
1947 did_changed = 0;
1948 } else {
1949 did_changed = 1;
1950 }
1951 phba->fc_prevDID = phba->fc_myDID;
1952 phba->num_disc_nodes = 0;
1953
1954 /* Start Discovery state <hba_state> */
1955 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1956 "%d:0202 Start Discovery hba state x%x "
1957 "Data: x%x x%x x%x\n",
1958 phba->brd_no, phba->hba_state, phba->fc_flag,
1959 phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1960
1961 /* If our did changed, we MUST do PLOGI */
1962 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1963 nlp_listp) {
1964 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1965 if (did_changed) {
1966 spin_lock_irq(phba->host->host_lock);
1967 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1968 spin_unlock_irq(phba->host->host_lock);
1969 }
1970 }
1971 }
1972
1973 /* First do ADISCs - if any */
1974 num_sent = lpfc_els_disc_adisc(phba);
1975
1976 if (num_sent)
1977 return;
1978
1979 if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
1980 /* If we get here, there is nothing to ADISC */
1981 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
1982 phba->hba_state = LPFC_CLEAR_LA;
1983 lpfc_clear_la(phba, mbox);
1984 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
1985 rc = lpfc_sli_issue_mbox(phba, mbox,
1986 (MBX_NOWAIT | MBX_STOP_IOCB));
1987 if (rc == MBX_NOT_FINISHED) {
1988 mempool_free( mbox, phba->mbox_mem_pool);
1989 lpfc_disc_flush_list(phba);
1990 psli->ring[(psli->ip_ring)].flag &=
1991 ~LPFC_STOP_IOCB_EVENT;
1992 psli->ring[(psli->fcp_ring)].flag &=
1993 ~LPFC_STOP_IOCB_EVENT;
1994 psli->ring[(psli->next_ring)].flag &=
1995 ~LPFC_STOP_IOCB_EVENT;
1996 phba->hba_state = LPFC_HBA_READY;
1997 }
1998 }
1999 } else {
2000 /* Next do PLOGIs - if any */
2001 num_sent = lpfc_els_disc_plogi(phba);
2002
2003 if (num_sent)
2004 return;
2005
2006 if (phba->fc_flag & FC_RSCN_MODE) {
2007 /* Check to see if more RSCNs came in while we
2008 * were processing this one.
2009 */
2010 if ((phba->fc_rscn_id_cnt == 0) &&
2011 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
2012 spin_lock_irq(phba->host->host_lock);
2013 phba->fc_flag &= ~FC_RSCN_MODE;
2014 spin_unlock_irq(phba->host->host_lock);
2015 }
2016 else
2017 lpfc_els_handle_rscn(phba);
2018 }
2019 }
2020 return;
2021}
2022
2023/*
2024 * Ignore completions for all IOCBs on the tx and txcmpl queues of the
2025 * ELS ring that match the specified nodelist.
2026 */
2027static void
2028lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
2029{
2030 struct lpfc_sli *psli;
2031 IOCB_t *icmd;
2032 struct lpfc_iocbq *iocb, *next_iocb;
2033 struct lpfc_sli_ring *pring;
2034 struct lpfc_dmabuf *mp;
2035
2036 psli = &phba->sli;
2037 pring = &psli->ring[LPFC_ELS_RING];
2038
2039	/* Error out any matching iocbs on the txq or txcmplq.
2040	 * First check the txq.
2041 */
2042 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2043 if (iocb->context1 != ndlp) {
2044 continue;
2045 }
2046 icmd = &iocb->iocb;
2047 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2048 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2049
2050 list_del(&iocb->list);
2051 pring->txq_cnt--;
2052 lpfc_els_free_iocb(phba, iocb);
2053 }
2054 }
2055
2056 /* Next check the txcmplq */
2057 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2058 if (iocb->context1 != ndlp) {
2059 continue;
2060 }
2061 icmd = &iocb->iocb;
2062 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2063 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2064
2065 iocb->iocb_cmpl = NULL;
2066 /* context2 = cmd, context2->next = rsp, context3 =
2067 bpl */
2068 if (iocb->context2) {
2069 /* Free the response IOCB before handling the
2070 command. */
2071
2072 mp = (struct lpfc_dmabuf *) (iocb->context2);
2073 mp = list_get_first(&mp->list,
2074 struct lpfc_dmabuf,
2075 list);
2076 if (mp) {
2077 /* Delay before releasing rsp buffer to
2078 * give UNREG mbox a chance to take
2079 * effect.
2080 */
2081 list_add(&mp->list,
2082 &phba->freebufList);
2083 }
2084 lpfc_mbuf_free(phba,
2085 ((struct lpfc_dmabuf *)
2086 iocb->context2)->virt,
2087 ((struct lpfc_dmabuf *)
2088 iocb->context2)->phys);
2089 kfree(iocb->context2);
2090 }
2091
2092 if (iocb->context3) {
2093 lpfc_mbuf_free(phba,
2094 ((struct lpfc_dmabuf *)
2095 iocb->context3)->virt,
2096 ((struct lpfc_dmabuf *)
2097 iocb->context3)->phys);
2098 kfree(iocb->context3);
2099 }
2100 }
2101 }
2102
2103 return;
2104}
2105
2106void
2107lpfc_disc_flush_list(struct lpfc_hba * phba)
2108{
2109 struct lpfc_nodelist *ndlp, *next_ndlp;
2110
2111 if (phba->fc_plogi_cnt) {
2112 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
2113 nlp_listp) {
2114 lpfc_free_tx(phba, ndlp);
2115 lpfc_nlp_remove(phba, ndlp);
2116 }
2117 }
2118 if (phba->fc_adisc_cnt) {
2119 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
2120 nlp_listp) {
2121 lpfc_free_tx(phba, ndlp);
2122 lpfc_nlp_remove(phba, ndlp);
2123 }
2124 }
2125 return;
2126}
2127
2128/*****************************************************************************/
2129/*
2130 * NAME: lpfc_disc_timeout
2131 *
2132 * FUNCTION: Fibre Channel driver discovery timeout routine.
2133 *
2134 * EXECUTION ENVIRONMENT: interrupt only
2135 *
2136 * CALLED FROM:
2137 * Timer function
2138 *
2139 * RETURNS:
2140 * none
2141 */
2142/*****************************************************************************/
2143void
2144lpfc_disc_timeout(unsigned long ptr)
2145{
2146 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2147 unsigned long flags = 0;
2148
2149 if (unlikely(!phba))
2150 return;
2151
2152 spin_lock_irqsave(phba->host->host_lock, flags);
2153 if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
2154 phba->work_hba_events |= WORKER_DISC_TMO;
2155 if (phba->work_wait)
2156 wake_up(phba->work_wait);
2157 }
2158 spin_unlock_irqrestore(phba->host->host_lock, flags);
2159 return;
2160}
2161
2162static void
2163lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2164{
2165 struct lpfc_sli *psli;
2166 struct lpfc_nodelist *ndlp;
2167 LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
2168 int rc, clrlaerr = 0;
2169
2170 if (unlikely(!phba))
2171 return;
2172
2173 if (!(phba->fc_flag & FC_DISC_TMO))
2174 return;
2175
2176 psli = &phba->sli;
2177
2178 spin_lock_irq(phba->host->host_lock);
2179 phba->fc_flag &= ~FC_DISC_TMO;
2180 spin_unlock_irq(phba->host->host_lock);
2181
2182 switch (phba->hba_state) {
2183
2184 case LPFC_LOCAL_CFG_LINK:
2185 /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
2186 /* FAN timeout */
2187 lpfc_printf_log(phba,
2188 KERN_WARNING,
2189 LOG_DISCOVERY,
2190 "%d:0221 FAN timeout\n",
2191 phba->brd_no);
2192
2193 /* Forget about FAN, Start discovery by sending a FLOGI
2194 * hba_state is identically LPFC_FLOGI while waiting for FLOGI
2195 * cmpl
2196 */
2197 phba->hba_state = LPFC_FLOGI;
2198 lpfc_set_disctmo(phba);
2199 lpfc_initial_flogi(phba);
2200 break;
2201
2202 case LPFC_FLOGI:
2203 /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
2204 /* Initial FLOGI timeout */
2205 lpfc_printf_log(phba,
2206 KERN_ERR,
2207 LOG_DISCOVERY,
2208 "%d:0222 Initial FLOGI timeout\n",
2209 phba->brd_no);
2210
2211 /* Assume no Fabric and go on with discovery.
2212 * Check for outstanding ELS FLOGI to abort.
2213 */
2214
2215 /* FLOGI failed, so just use loop map to make discovery list */
2216 lpfc_disc_list_loopmap(phba);
2217
2218 /* Start discovery */
2219 lpfc_disc_start(phba);
2220 break;
2221
2222 case LPFC_FABRIC_CFG_LINK:
2223 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2224 NameServer login */
2225 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2226 "%d:0223 Timeout while waiting for NameServer "
2227 "login\n", phba->brd_no);
2228
2229 /* Next look for NameServer ndlp */
2230 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
2231 if (ndlp)
2232 lpfc_nlp_remove(phba, ndlp);
2233 /* Start discovery */
2234 lpfc_disc_start(phba);
2235 break;
2236
2237 case LPFC_NS_QRY:
2238 /* Check for wait for NameServer Rsp timeout */
2239 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2240 "%d:0224 NameServer Query timeout "
2241 "Data: x%x x%x\n",
2242 phba->brd_no,
2243 phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
2244
2245 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
2246 NameServer_DID);
2247 if (ndlp) {
2248 if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2249 /* Try it one more time */
2250 rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
2251 if (rc == 0)
2252 break;
2253 }
2254 phba->fc_ns_retry = 0;
2255 }
2256
2257 /* Nothing to authenticate, so CLEAR_LA right now */
2258 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2259 if (!clearlambox) {
2260 clrlaerr = 1;
2261 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2262 "%d:0226 Device Discovery "
2263 "completion error\n",
2264 phba->brd_no);
2265 phba->hba_state = LPFC_HBA_ERROR;
2266 break;
2267 }
2268
2269 phba->hba_state = LPFC_CLEAR_LA;
2270 lpfc_clear_la(phba, clearlambox);
2271 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2272 rc = lpfc_sli_issue_mbox(phba, clearlambox,
2273 (MBX_NOWAIT | MBX_STOP_IOCB));
2274 if (rc == MBX_NOT_FINISHED) {
2275 mempool_free(clearlambox, phba->mbox_mem_pool);
2276 clrlaerr = 1;
2277 break;
2278 }
2279
2280 /* Setup and issue mailbox INITIALIZE LINK command */
2281 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2282 if (!initlinkmbox) {
2283 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2284 "%d:0226 Device Discovery "
2285 "completion error\n",
2286 phba->brd_no);
2287 phba->hba_state = LPFC_HBA_ERROR;
2288 break;
2289 }
2290
2291 lpfc_linkdown(phba);
2292 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2293 phba->cfg_link_speed);
2294 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2295 rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
2296 (MBX_NOWAIT | MBX_STOP_IOCB));
2297 if (rc == MBX_NOT_FINISHED)
2298 mempool_free(initlinkmbox, phba->mbox_mem_pool);
2299
2300 break;
2301
2302 case LPFC_DISC_AUTH:
2303 /* Node Authentication timeout */
2304 lpfc_printf_log(phba,
2305 KERN_ERR,
2306 LOG_DISCOVERY,
2307 "%d:0227 Node Authentication timeout\n",
2308 phba->brd_no);
2309 lpfc_disc_flush_list(phba);
2310 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2311 if (!clearlambox) {
2312 clrlaerr = 1;
2313 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2314 "%d:0226 Device Discovery "
2315 "completion error\n",
2316 phba->brd_no);
2317 phba->hba_state = LPFC_HBA_ERROR;
2318 break;
2319 }
2320 phba->hba_state = LPFC_CLEAR_LA;
2321 lpfc_clear_la(phba, clearlambox);
2322 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2323 rc = lpfc_sli_issue_mbox(phba, clearlambox,
2324 (MBX_NOWAIT | MBX_STOP_IOCB));
2325 if (rc == MBX_NOT_FINISHED) {
2326 mempool_free(clearlambox, phba->mbox_mem_pool);
2327 clrlaerr = 1;
2328 }
2329 break;
2330
2331 case LPFC_CLEAR_LA:
2332 /* CLEAR LA timeout */
2333 lpfc_printf_log(phba,
2334 KERN_ERR,
2335 LOG_DISCOVERY,
2336 "%d:0228 CLEAR LA timeout\n",
2337 phba->brd_no);
2338 clrlaerr = 1;
2339 break;
2340
2341 case LPFC_HBA_READY:
2342 if (phba->fc_flag & FC_RSCN_MODE) {
2343 lpfc_printf_log(phba,
2344 KERN_ERR,
2345 LOG_DISCOVERY,
2346 "%d:0231 RSCN timeout Data: x%x x%x\n",
2347 phba->brd_no,
2348 phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
2349
2350 /* Cleanup any outstanding ELS commands */
2351 lpfc_els_flush_cmd(phba);
2352
2353 lpfc_els_flush_rscn(phba);
2354 lpfc_disc_flush_list(phba);
2355 }
2356 break;
2357 }
2358
2359 if (clrlaerr) {
2360 lpfc_disc_flush_list(phba);
2361 psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2362 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2363 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2364 phba->hba_state = LPFC_HBA_READY;
2365 }
2366
2367 return;
2368}
2369
2370static void
2371lpfc_nodev_timeout(unsigned long ptr)
2372{
2373 struct lpfc_hba *phba;
2374 struct lpfc_nodelist *ndlp;
2375 unsigned long iflag;
2376 struct lpfc_work_evt *evtp;
2377
2378 ndlp = (struct lpfc_nodelist *)ptr;
2379 phba = ndlp->nlp_phba;
2380 evtp = &ndlp->nodev_timeout_evt;
2381 spin_lock_irqsave(phba->host->host_lock, iflag);
2382
2383 if (!list_empty(&evtp->evt_listp)) {
2384 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2385 return;
2386 }
2387 evtp->evt_arg1 = ndlp;
2388 evtp->evt = LPFC_EVT_NODEV_TMO;
2389 list_add_tail(&evtp->evt_listp, &phba->work_list);
2390 if (phba->work_wait)
2391 wake_up(phba->work_wait);
2392
2393 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2394 return;
2395}
2396
2397
2398/*
2399 * This routine handles processing an FDMI REG_LOGIN mailbox
2400 * command upon completion. It is set up in the LPFC_MBOXQ
2401 * as the completion routine when the command is
2402 * handed off to the SLI layer.
2403 */
2404void
2405lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2406{
2407 struct lpfc_sli *psli;
2408 MAILBOX_t *mb;
2409 struct lpfc_dmabuf *mp;
2410 struct lpfc_nodelist *ndlp;
2411
2412 psli = &phba->sli;
2413 mb = &pmb->mb;
2414
2415 ndlp = (struct lpfc_nodelist *) pmb->context2;
2416 mp = (struct lpfc_dmabuf *) (pmb->context1);
2417
2418 pmb->context1 = NULL;
2419
2420 if (ndlp->nlp_rpi != 0)
2421 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
2422 ndlp->nlp_rpi = mb->un.varWords[0];
2423 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
2424 ndlp->nlp_type |= NLP_FABRIC;
2425 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
2426 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
2427
2428 /* Start issuing Fabric-Device Management Interface (FDMI)
2429 * command to 0xfffffa (FDMI well known port)
2430 */
2431 if (phba->cfg_fdmi_on == 1) {
2432 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
2433 } else {
2434 /*
2435 * Delay issuing FDMI command if fdmi-on=2
2436 * (supporting RPA/hostname)
2437 */
2438 mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
2439 }
2440
2441 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2442 kfree(mp);
2443 mempool_free( pmb, phba->mbox_mem_pool);
2444
2445 return;
2446}
2447
2448/*
2449 * This routine looks up the ndlp hash
2450 * table for the given RPI. If the RPI is found,
2451 * it returns the node list pointer;
2452 * otherwise it returns NULL.
2453 */
2454struct lpfc_nodelist *
2455lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
2456{
2457 struct lpfc_nodelist *ret;
2458
2459 ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
2460 while ((ret != 0) && (ret->nlp_rpi != rpi)) {
2461 ret = ret->nlp_rpi_hash_next;
2462 }
2463 return ret;
2464}
2465
2466/*
2467 * This routine looks up the ndlp hash table for the
2468 * given RPI. If the RPI is found, the entry is removed
2469 * from the hash table and the node list pointer is
2470 * returned; otherwise NULL is returned.
2471 */
2472struct lpfc_nodelist *
2473lpfc_findnode_remove_rpi(struct lpfc_hba * phba, uint16_t rpi)
2474{
2475 struct lpfc_nodelist *ret, *temp;
2476
2477 ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
2478 if (ret == 0)
2479 return NULL;
2480
2481 if (ret->nlp_rpi == rpi) {
2482 phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)] =
2483 ret->nlp_rpi_hash_next;
2484 ret->nlp_rpi_hash_next = NULL;
2485 return ret;
2486 }
2487
2488 while ((ret->nlp_rpi_hash_next != 0) &&
2489 (ret->nlp_rpi_hash_next->nlp_rpi != rpi)) {
2490 ret = ret->nlp_rpi_hash_next;
2491 }
2492
2493 if (ret->nlp_rpi_hash_next != 0) {
2494 temp = ret->nlp_rpi_hash_next;
2495 ret->nlp_rpi_hash_next = temp->nlp_rpi_hash_next;
2496 temp->nlp_rpi_hash_next = NULL;
2497 return temp;
2498 } else {
2499 return NULL;
2500 }
2501}
2502
2503/*
2504 * This routine adds the node list entry to the
2505 * ndlp hash table.
2506 */
2507void
2508lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
2509 uint16_t rpi)
2510{
2511
2512 uint32_t index;
2513
2514 index = LPFC_RPI_HASH_FUNC(rpi);
2515 ndlp->nlp_rpi_hash_next = phba->fc_nlplookup[index];
2516 phba->fc_nlplookup[index] = ndlp;
2517 return;
2518}
2519
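[Editor's note] The three RPI routines above implement a simple separate-chaining hash table keyed on RPI, with nlp_rpi_hash_next as the per-bucket chain link. A minimal standalone sketch of the same pattern follows; it is illustrative only — LPFC_RPI_HASH_FUNC and the real bucket count are defined elsewhere in this driver, so a hypothetical modulo hash stands in here.

	/* Editor's sketch of the chained-hash pattern used above; not driver
	 * code. NBUCKETS and hash() are stand-ins for LPFC_RPI_HASH_FUNC. */
	#define NBUCKETS 32

	struct node {
		unsigned short rpi;
		struct node *next;	/* plays the role of nlp_rpi_hash_next */
	};

	static struct node *buckets[NBUCKETS];

	static unsigned int hash(unsigned short rpi)
	{
		return rpi % NBUCKETS;
	}

	static void add_node(struct node *n)	/* cf. lpfc_addnode_rpi */
	{
		unsigned int i = hash(n->rpi);

		n->next = buckets[i];		/* push onto bucket chain */
		buckets[i] = n;
	}

	static struct node *find_node(unsigned short rpi) /* cf. lpfc_findnode_rpi */
	{
		struct node *p = buckets[hash(rpi)];

		while (p && p->rpi != rpi)
			p = p->next;
		return p;			/* NULL if not present */
	}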
2520void
2521lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
2522 uint32_t did)
2523{
2524 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2525 INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
2526 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2527 init_timer(&ndlp->nlp_tmofunc);
2528 ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
2529 ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
2530 init_timer(&ndlp->nlp_delayfunc);
2531 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2532 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2533 ndlp->nlp_DID = did;
2534 ndlp->nlp_phba = phba;
2535 ndlp->nlp_sid = NLP_NO_SID;
2536 return;
2537}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
new file mode 100644
index 000000000000..fc958a99dadb
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -0,0 +1,2687 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_hw.h 1.37 2005/03/29 19:51:45EST sf_support Exp $
23 */
24
25#define FDMI_DID 0xfffffaU
26#define NameServer_DID 0xfffffcU
27#define SCR_DID 0xfffffdU
28#define Fabric_DID 0xfffffeU
29#define Bcast_DID 0xffffffU
30#define Mask_DID 0xffffffU
31#define CT_DID_MASK 0xffff00U
32#define Fabric_DID_MASK 0xfff000U
33#define WELL_KNOWN_DID_MASK 0xfffff0U
34
35#define PT2PT_LocalID 1
36#define PT2PT_RemoteID 2
37
38#define FF_DEF_EDTOV 2000 /* Default E_D_TOV (2000ms) */
39#define FF_DEF_ALTOV 15 /* Default AL_TIME (15ms) */
40#define FF_DEF_RATOV 2 /* Default RA_TOV (2s) */
41#define FF_DEF_ARBTOV 1900 /* Default ARB_TOV (1900ms) */
42
43#define LPFC_BUF_RING0 64 /* Number of buffers to post to RING
44 0 */
45
46#define FCELSSIZE 1024 /* maximum ELS transfer size */
47
48#define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */
49#define LPFC_IP_RING 1 /* ring 1 for IP commands */
50#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
51#define LPFC_FCP_NEXT_RING 3
52
53#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
54#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
55#define SLI2_IOCB_CMD_R1_ENTRIES 4 /* SLI-2 IP command ring entries */
56#define SLI2_IOCB_RSP_R1_ENTRIES 4 /* SLI-2 IP response ring entries */
57#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36 /* SLI-2 extra FCP cmd ring entries */
58#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52 /* SLI-2 extra FCP rsp ring entries */
59#define SLI2_IOCB_CMD_R2_ENTRIES 20 /* SLI-2 ELS command ring entries */
60#define SLI2_IOCB_RSP_R2_ENTRIES 20 /* SLI-2 ELS response ring entries */
61#define SLI2_IOCB_CMD_R3_ENTRIES 0
62#define SLI2_IOCB_RSP_R3_ENTRIES 0
63#define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24
64#define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32
65
66/* Common Transport structures and definitions */
67
68union CtRevisionId {
69 /* Structure is in Big Endian format */
70 struct {
71 uint32_t Revision:8;
72 uint32_t InId:24;
73 } bits;
74 uint32_t word;
75};
76
77union CtCommandResponse {
78 /* Structure is in Big Endian format */
79 struct {
80 uint32_t CmdRsp:16;
81 uint32_t Size:16;
82 } bits;
83 uint32_t word;
84};
85
86struct lpfc_sli_ct_request {
87 /* Structure is in Big Endian format */
88 union CtRevisionId RevisionId;
89 uint8_t FsType;
90 uint8_t FsSubType;
91 uint8_t Options;
92 uint8_t Rsrvd1;
93 union CtCommandResponse CommandResponse;
94 uint8_t Rsrvd2;
95 uint8_t ReasonCode;
96 uint8_t Explanation;
97 uint8_t VendorUnique;
98
99 union {
100 uint32_t PortID;
101 struct gid {
102 uint8_t PortType; /* for GID_PT requests */
103 uint8_t DomainScope;
104 uint8_t AreaScope;
105 uint8_t Fc4Type; /* for GID_FT requests */
106 } gid;
107 struct rft {
108 uint32_t PortId; /* For RFT_ID requests */
109
110#ifdef __BIG_ENDIAN_BITFIELD
111 uint32_t rsvd0:16;
112 uint32_t rsvd1:7;
113 uint32_t fcpReg:1; /* Type 8 */
114 uint32_t rsvd2:2;
115 uint32_t ipReg:1; /* Type 5 */
116 uint32_t rsvd3:5;
117#else /* __LITTLE_ENDIAN_BITFIELD */
118 uint32_t rsvd0:16;
119 uint32_t fcpReg:1; /* Type 8 */
120 uint32_t rsvd1:7;
121 uint32_t rsvd3:5;
122 uint32_t ipReg:1; /* Type 5 */
123 uint32_t rsvd2:2;
124#endif
125
126 uint32_t rsvd[7];
127 } rft;
128 struct rnn {
129 uint32_t PortId; /* For RNN_ID requests */
130 uint8_t wwnn[8];
131 } rnn;
132 struct rsnn { /* For RSNN_ID requests */
133 uint8_t wwnn[8];
134 uint8_t len;
135 uint8_t symbname[255];
136 } rsnn;
137 } un;
138};
139
140#define SLI_CT_REVISION 1
141#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260)
142#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228)
143#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252)
144#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request))
145
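[Editor's note] The subtracted constants above trim the unused tail of the un union, whose size is fixed by its largest member (rsnn: 8 + 1 + 255 = 264 bytes). Assuming 4-byte alignment with no padding, the fixed CT header is 16 bytes, so the arithmetic works out as follows:

	/* Editor's worked example of the request-size arithmetic above:
	 *   sizeof(struct lpfc_sli_ct_request) = 16 (header) + 264 (union) = 280
	 *   GID_REQUEST_SZ  = 280 - 260 = 20  (header + 4-byte gid payload)
	 *   RFT_REQUEST_SZ  = 280 - 228 = 52  (header + 36-byte rft payload)
	 *   RNN_REQUEST_SZ  = 280 - 252 = 28  (header + 12-byte rnn payload)
	 *   RSNN_REQUEST_SZ = 280             (rsnn fills the whole union)
	 */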
146/*
147 * FsType Definitions
148 */
149
150#define SLI_CT_MANAGEMENT_SERVICE 0xFA
151#define SLI_CT_TIME_SERVICE 0xFB
152#define SLI_CT_DIRECTORY_SERVICE 0xFC
153#define SLI_CT_FABRIC_CONTROLLER_SERVICE 0xFD
154
155/*
156 * Directory Service Subtypes
157 */
158
159#define SLI_CT_DIRECTORY_NAME_SERVER 0x02
160
161/*
162 * Response Codes
163 */
164
165#define SLI_CT_RESPONSE_FS_RJT 0x8001
166#define SLI_CT_RESPONSE_FS_ACC 0x8002
167
168/*
169 * Reason Codes
170 */
171
172#define SLI_CT_NO_ADDITIONAL_EXPL 0x0
173#define SLI_CT_INVALID_COMMAND 0x01
174#define SLI_CT_INVALID_VERSION 0x02
175#define SLI_CT_LOGICAL_ERROR 0x03
176#define SLI_CT_INVALID_IU_SIZE 0x04
177#define SLI_CT_LOGICAL_BUSY 0x05
178#define SLI_CT_PROTOCOL_ERROR 0x07
179#define SLI_CT_UNABLE_TO_PERFORM_REQ 0x09
180#define SLI_CT_REQ_NOT_SUPPORTED 0x0b
181#define SLI_CT_HBA_INFO_NOT_REGISTERED 0x10
182#define SLI_CT_MULTIPLE_HBA_ATTR_OF_SAME_TYPE 0x11
183#define SLI_CT_INVALID_HBA_ATTR_BLOCK_LEN 0x12
184#define SLI_CT_HBA_ATTR_NOT_PRESENT 0x13
185#define SLI_CT_PORT_INFO_NOT_REGISTERED 0x20
186#define SLI_CT_MULTIPLE_PORT_ATTR_OF_SAME_TYPE 0x21
187#define SLI_CT_INVALID_PORT_ATTR_BLOCK_LEN 0x22
188#define SLI_CT_VENDOR_UNIQUE 0xff
189
190/*
191 * Name Server SLI_CT_UNABLE_TO_PERFORM_REQ Explanations
192 */
193
194#define SLI_CT_NO_PORT_ID 0x01
195#define SLI_CT_NO_PORT_NAME 0x02
196#define SLI_CT_NO_NODE_NAME 0x03
197#define SLI_CT_NO_CLASS_OF_SERVICE 0x04
198#define SLI_CT_NO_IP_ADDRESS 0x05
199#define SLI_CT_NO_IPA 0x06
200#define SLI_CT_NO_FC4_TYPES 0x07
201#define SLI_CT_NO_SYMBOLIC_PORT_NAME 0x08
202#define SLI_CT_NO_SYMBOLIC_NODE_NAME 0x09
203#define SLI_CT_NO_PORT_TYPE 0x0A
204#define SLI_CT_ACCESS_DENIED 0x10
205#define SLI_CT_INVALID_PORT_ID 0x11
206#define SLI_CT_DATABASE_EMPTY 0x12
207
208/*
209 * Name Server Command Codes
210 */
211
212#define SLI_CTNS_GA_NXT 0x0100
213#define SLI_CTNS_GPN_ID 0x0112
214#define SLI_CTNS_GNN_ID 0x0113
215#define SLI_CTNS_GCS_ID 0x0114
216#define SLI_CTNS_GFT_ID 0x0117
217#define SLI_CTNS_GSPN_ID 0x0118
218#define SLI_CTNS_GPT_ID 0x011A
219#define SLI_CTNS_GID_PN 0x0121
220#define SLI_CTNS_GID_NN 0x0131
221#define SLI_CTNS_GIP_NN 0x0135
222#define SLI_CTNS_GIPA_NN 0x0136
223#define SLI_CTNS_GSNN_NN 0x0139
224#define SLI_CTNS_GNN_IP 0x0153
225#define SLI_CTNS_GIPA_IP 0x0156
226#define SLI_CTNS_GID_FT 0x0171
227#define SLI_CTNS_GID_PT 0x01A1
228#define SLI_CTNS_RPN_ID 0x0212
229#define SLI_CTNS_RNN_ID 0x0213
230#define SLI_CTNS_RCS_ID 0x0214
231#define SLI_CTNS_RFT_ID 0x0217
232#define SLI_CTNS_RSPN_ID 0x0218
233#define SLI_CTNS_RPT_ID 0x021A
234#define SLI_CTNS_RIP_NN 0x0235
235#define SLI_CTNS_RIPA_NN 0x0236
236#define SLI_CTNS_RSNN_NN 0x0239
237#define SLI_CTNS_DA_ID 0x0300
238
239/*
240 * Port Types
241 */
242
243#define SLI_CTPT_N_PORT 0x01
244#define SLI_CTPT_NL_PORT 0x02
245#define SLI_CTPT_FNL_PORT 0x03
246#define SLI_CTPT_IP 0x04
247#define SLI_CTPT_FCP 0x08
248#define SLI_CTPT_NX_PORT 0x7F
249#define SLI_CTPT_F_PORT 0x81
250#define SLI_CTPT_FL_PORT 0x82
251#define SLI_CTPT_E_PORT 0x84
252
253#define SLI_CT_LAST_ENTRY 0x80000000
254
255/* Fibre Channel Service Parameter definitions */
256
257#define FC_PH_4_0 6 /* FC-PH version 4.0 */
258#define FC_PH_4_1 7 /* FC-PH version 4.1 */
259#define FC_PH_4_2 8 /* FC-PH version 4.2 */
260#define FC_PH_4_3 9 /* FC-PH version 4.3 */
261
262#define FC_PH_LOW 8 /* Lowest supported FC-PH version */
263#define FC_PH_HIGH 9 /* Highest supported FC-PH version */
264#define FC_PH3 0x20 /* FC-PH-3 version */
265
266#define FF_FRAME_SIZE 2048
267
268struct lpfc_name {
269#ifdef __BIG_ENDIAN_BITFIELD
270 uint8_t nameType:4; /* FC Word 0, bit 28:31 */
271 uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
272#else /* __LITTLE_ENDIAN_BITFIELD */
273 uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
274 uint8_t nameType:4; /* FC Word 0, bit 28:31 */
275#endif
276
277#define NAME_IEEE 0x1 /* IEEE name - nameType */
278#define NAME_IEEE_EXT 0x2 /* IEEE extended name */
279#define NAME_FC_TYPE 0x3 /* FC native name type */
280#define NAME_IP_TYPE 0x4 /* IP address */
281#define NAME_CCITT_TYPE 0xC
282#define NAME_CCITT_GR_TYPE 0xE
283 uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */
284 uint8_t IEEE[6]; /* FC IEEE address */
285};
286
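[Editor's note] struct lpfc_name packs a World Wide Name into 8 bytes (one type/extension-MSN byte, one extension-LSB byte, six IEEE address bytes). A hedged illustration of how those bytes map to the familiar colon-separated WWN string:

	/* Editor's illustration, assuming the 8-byte layout above;
	 * not driver code. */
	static void print_wwn(const struct lpfc_name *wwn)
	{
		const uint8_t *b = (const uint8_t *)wwn;

		printk("%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
		       b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
	}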
287struct csp {
288 uint8_t fcphHigh; /* FC Word 0, byte 0 */
289 uint8_t fcphLow;
290 uint8_t bbCreditMsb;
291 uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
292
293#ifdef __BIG_ENDIAN_BITFIELD
294 uint16_t increasingOffset:1; /* FC Word 1, bit 31 */
295 uint16_t randomOffset:1; /* FC Word 1, bit 30 */
296 uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */
297 uint16_t fPort:1; /* FC Word 1, bit 28 */
298 uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
299 uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
300 uint16_t multicast:1; /* FC Word 1, bit 25 */
301 uint16_t broadcast:1; /* FC Word 1, bit 24 */
302
303 uint16_t huntgroup:1; /* FC Word 1, bit 23 */
304 uint16_t simplex:1; /* FC Word 1, bit 22 */
305 uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
306 uint16_t dhd:1; /* FC Word 1, bit 18 */
307 uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
308 uint16_t payloadlength:1; /* FC Word 1, bit 16 */
309#else /* __LITTLE_ENDIAN_BITFIELD */
310 uint16_t broadcast:1; /* FC Word 1, bit 24 */
311 uint16_t multicast:1; /* FC Word 1, bit 25 */
312 uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
313 uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
314 uint16_t fPort:1; /* FC Word 1, bit 28 */
315 uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */
316 uint16_t randomOffset:1; /* FC Word 1, bit 30 */
317 uint16_t increasingOffset:1; /* FC Word 1, bit 31 */
318
319 uint16_t payloadlength:1; /* FC Word 1, bit 16 */
320 uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
321 uint16_t dhd:1; /* FC Word 1, bit 18 */
322 uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
323 uint16_t simplex:1; /* FC Word 1, bit 22 */
324 uint16_t huntgroup:1; /* FC Word 1, bit 23 */
325#endif
326
327 uint8_t bbRcvSizeMsb; /* Upper nibble is reserved */
328 uint8_t bbRcvSizeLsb; /* FC Word 1, byte 3 */
329 union {
330 struct {
331 uint8_t word2Reserved1; /* FC Word 2 byte 0 */
332
333 uint8_t totalConcurrSeq; /* FC Word 2 byte 1 */
334 uint8_t roByCategoryMsb; /* FC Word 2 byte 2 */
335
336 uint8_t roByCategoryLsb; /* FC Word 2 byte 3 */
337 } nPort;
338 uint32_t r_a_tov; /* R_A_TOV must be in B.E. format */
339 } w2;
340
341 uint32_t e_d_tov; /* E_D_TOV must be in B.E. format */
342};
343
344struct class_parms {
345#ifdef __BIG_ENDIAN_BITFIELD
346 uint8_t classValid:1; /* FC Word 0, bit 31 */
347 uint8_t intermix:1; /* FC Word 0, bit 30 */
348 uint8_t stackedXparent:1; /* FC Word 0, bit 29 */
349 uint8_t stackedLockDown:1; /* FC Word 0, bit 28 */
350 uint8_t seqDelivery:1; /* FC Word 0, bit 27 */
351 uint8_t word0Reserved1:3; /* FC Word 0, bit 24:26 */
352#else /* __LITTLE_ENDIAN_BITFIELD */
353 uint8_t word0Reserved1:3; /* FC Word 0, bit 24:26 */
354 uint8_t seqDelivery:1; /* FC Word 0, bit 27 */
355 uint8_t stackedLockDown:1; /* FC Word 0, bit 28 */
356 uint8_t stackedXparent:1; /* FC Word 0, bit 29 */
357 uint8_t intermix:1; /* FC Word 0, bit 30 */
358 uint8_t classValid:1; /* FC Word 0, bit 31 */
359
360#endif
361
362 uint8_t word0Reserved2; /* FC Word 0, bit 16:23 */
363
364#ifdef __BIG_ENDIAN_BITFIELD
365 uint8_t iCtlXidReAssgn:2; /* FC Word 0, Bit 14:15 */
366 uint8_t iCtlInitialPa:2; /* FC Word 0, bit 12:13 */
367 uint8_t iCtlAck0capable:1; /* FC Word 0, bit 11 */
368 uint8_t iCtlAckNcapable:1; /* FC Word 0, bit 10 */
369 uint8_t word0Reserved3:2; /* FC Word 0, bit 8: 9 */
370#else /* __LITTLE_ENDIAN_BITFIELD */
371 uint8_t word0Reserved3:2; /* FC Word 0, bit 8: 9 */
372 uint8_t iCtlAckNcapable:1; /* FC Word 0, bit 10 */
373 uint8_t iCtlAck0capable:1; /* FC Word 0, bit 11 */
374 uint8_t iCtlInitialPa:2; /* FC Word 0, bit 12:13 */
375 uint8_t iCtlXidReAssgn:2; /* FC Word 0, Bit 14:15 */
376#endif
377
378 uint8_t word0Reserved4; /* FC Word 0, bit 0: 7 */
379
380#ifdef __BIG_ENDIAN_BITFIELD
381 uint8_t rCtlAck0capable:1; /* FC Word 1, bit 31 */
382 uint8_t rCtlAckNcapable:1; /* FC Word 1, bit 30 */
383 uint8_t rCtlXidInterlck:1; /* FC Word 1, bit 29 */
384 uint8_t rCtlErrorPolicy:2; /* FC Word 1, bit 27:28 */
385 uint8_t word1Reserved1:1; /* FC Word 1, bit 26 */
386 uint8_t rCtlCatPerSeq:2; /* FC Word 1, bit 24:25 */
387#else /* __LITTLE_ENDIAN_BITFIELD */
388 uint8_t rCtlCatPerSeq:2; /* FC Word 1, bit 24:25 */
389 uint8_t word1Reserved1:1; /* FC Word 1, bit 26 */
390 uint8_t rCtlErrorPolicy:2; /* FC Word 1, bit 27:28 */
391 uint8_t rCtlXidInterlck:1; /* FC Word 1, bit 29 */
392 uint8_t rCtlAckNcapable:1; /* FC Word 1, bit 30 */
393 uint8_t rCtlAck0capable:1; /* FC Word 1, bit 31 */
394#endif
395
396 uint8_t word1Reserved2; /* FC Word 1, bit 16:23 */
397 uint8_t rcvDataSizeMsb; /* FC Word 1, bit 8:15 */
398 uint8_t rcvDataSizeLsb; /* FC Word 1, bit 0: 7 */
399
400 uint8_t concurrentSeqMsb; /* FC Word 2, bit 24:31 */
401 uint8_t concurrentSeqLsb; /* FC Word 2, bit 16:23 */
402 uint8_t EeCreditSeqMsb; /* FC Word 2, bit 8:15 */
403 uint8_t EeCreditSeqLsb; /* FC Word 2, bit 0: 7 */
404
405 uint8_t openSeqPerXchgMsb; /* FC Word 3, bit 24:31 */
406 uint8_t openSeqPerXchgLsb; /* FC Word 3, bit 16:23 */
407 uint8_t word3Reserved1; /* Fc Word 3, bit 8:15 */
408 uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */
409};
410
411struct serv_parm { /* Structure is in Big Endian format */
412 struct csp cmn;
413 struct lpfc_name portName;
414 struct lpfc_name nodeName;
415 struct class_parms cls1;
416 struct class_parms cls2;
417 struct class_parms cls3;
418 struct class_parms cls4;
419 uint8_t vendorVersion[16];
420};
421
422/*
423 * Extended Link Service LS_COMMAND codes (Payload Word 0)
424 */
425#ifdef __BIG_ENDIAN_BITFIELD
426#define ELS_CMD_MASK 0xffff0000
427#define ELS_RSP_MASK 0xff000000
428#define ELS_CMD_LS_RJT 0x01000000
429#define ELS_CMD_ACC 0x02000000
430#define ELS_CMD_PLOGI 0x03000000
431#define ELS_CMD_FLOGI 0x04000000
432#define ELS_CMD_LOGO 0x05000000
433#define ELS_CMD_ABTX 0x06000000
434#define ELS_CMD_RCS 0x07000000
435#define ELS_CMD_RES 0x08000000
436#define ELS_CMD_RSS 0x09000000
437#define ELS_CMD_RSI 0x0A000000
438#define ELS_CMD_ESTS 0x0B000000
439#define ELS_CMD_ESTC 0x0C000000
440#define ELS_CMD_ADVC 0x0D000000
441#define ELS_CMD_RTV 0x0E000000
442#define ELS_CMD_RLS 0x0F000000
443#define ELS_CMD_ECHO 0x10000000
444#define ELS_CMD_TEST 0x11000000
445#define ELS_CMD_RRQ 0x12000000
446#define ELS_CMD_PRLI 0x20100014
447#define ELS_CMD_PRLO 0x21100014
448#define ELS_CMD_PDISC 0x50000000
449#define ELS_CMD_FDISC 0x51000000
450#define ELS_CMD_ADISC 0x52000000
451#define ELS_CMD_FARP 0x54000000
452#define ELS_CMD_FARPR 0x55000000
453#define ELS_CMD_FAN 0x60000000
454#define ELS_CMD_RSCN 0x61040000
455#define ELS_CMD_SCR 0x62000000
456#define ELS_CMD_RNID 0x78000000
457#else /* __LITTLE_ENDIAN_BITFIELD */
458#define ELS_CMD_MASK 0xffff
459#define ELS_RSP_MASK 0xff
460#define ELS_CMD_LS_RJT 0x01
461#define ELS_CMD_ACC 0x02
462#define ELS_CMD_PLOGI 0x03
463#define ELS_CMD_FLOGI 0x04
464#define ELS_CMD_LOGO 0x05
465#define ELS_CMD_ABTX 0x06
466#define ELS_CMD_RCS 0x07
467#define ELS_CMD_RES 0x08
468#define ELS_CMD_RSS 0x09
469#define ELS_CMD_RSI 0x0A
470#define ELS_CMD_ESTS 0x0B
471#define ELS_CMD_ESTC 0x0C
472#define ELS_CMD_ADVC 0x0D
473#define ELS_CMD_RTV 0x0E
474#define ELS_CMD_RLS 0x0F
475#define ELS_CMD_ECHO 0x10
476#define ELS_CMD_TEST 0x11
477#define ELS_CMD_RRQ 0x12
478#define ELS_CMD_PRLI 0x14001020
479#define ELS_CMD_PRLO 0x14001021
480#define ELS_CMD_PDISC 0x50
481#define ELS_CMD_FDISC 0x51
482#define ELS_CMD_ADISC 0x52
483#define ELS_CMD_FARP 0x54
484#define ELS_CMD_FARPR 0x55
485#define ELS_CMD_FAN 0x60
486#define ELS_CMD_RSCN 0x0461
487#define ELS_CMD_SCR 0x62
488#define ELS_CMD_RNID 0x78
489#endif
490
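[Editor's note] These codes are laid out so the first payload word of a received ELS frame can be compared in host byte order without a swap. A minimal decode sketch (editor's illustration, not the driver's actual receive path):

	/* Editor's illustration: classifying an ELS payload by its first
	 * word. The constants above are already byte-order adjusted. */
	static int els_is_plogi(const uint32_t *payload)
	{
		return ((*payload & ELS_CMD_MASK) == ELS_CMD_PLOGI);
	}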
491/*
492 * LS_RJT Payload Definition
493 */
494
495struct ls_rjt { /* Structure is in Big Endian format */
496 union {
497 uint32_t lsRjtError;
498 struct {
499 uint8_t lsRjtRsvd0; /* FC Word 0, bit 24:31 */
500
501 uint8_t lsRjtRsnCode; /* FC Word 0, bit 16:23 */
502 /* LS_RJT reason codes */
503#define LSRJT_INVALID_CMD 0x01
504#define LSRJT_LOGICAL_ERR 0x03
505#define LSRJT_LOGICAL_BSY 0x05
506#define LSRJT_PROTOCOL_ERR 0x07
507#define LSRJT_UNABLE_TPC 0x09 /* Unable to perform command */
508#define LSRJT_CMD_UNSUPPORTED 0x0B
509#define LSRJT_VENDOR_UNIQUE 0xFF /* See Byte 3 */
510
511 uint8_t lsRjtRsnCodeExp; /* FC Word 0, bit 8:15 */
512 /* LS_RJT reason explanation */
513#define LSEXP_NOTHING_MORE 0x00
514#define LSEXP_SPARM_OPTIONS 0x01
515#define LSEXP_SPARM_ICTL 0x03
516#define LSEXP_SPARM_RCTL 0x05
517#define LSEXP_SPARM_RCV_SIZE 0x07
518#define LSEXP_SPARM_CONCUR_SEQ 0x09
519#define LSEXP_SPARM_CREDIT 0x0B
520#define LSEXP_INVALID_PNAME 0x0D
521#define LSEXP_INVALID_NNAME 0x0E
522#define LSEXP_INVALID_CSP 0x0F
523#define LSEXP_INVALID_ASSOC_HDR 0x11
524#define LSEXP_ASSOC_HDR_REQ 0x13
525#define LSEXP_INVALID_O_SID 0x15
526#define LSEXP_INVALID_OX_RX 0x17
527#define LSEXP_CMD_IN_PROGRESS 0x19
528#define LSEXP_INVALID_NPORT_ID 0x1F
529#define LSEXP_INVALID_SEQ_ID 0x21
530#define LSEXP_INVALID_XCHG 0x23
531#define LSEXP_INACTIVE_XCHG 0x25
532#define LSEXP_RQ_REQUIRED 0x27
533#define LSEXP_OUT_OF_RESOURCE 0x29
534#define LSEXP_CANT_GIVE_DATA 0x2A
535#define LSEXP_REQ_UNSUPPORTED 0x2C
536 uint8_t vendorUnique; /* FC Word 0, bit 0: 7 */
537 } b;
538 } un;
539};
540
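[Editor's note] A typical use of this layout is to stamp a reason code and explanation into a reject payload before it is transmitted in an ELS reject response; a minimal sketch (field names from the struct above, usage pattern assumed):

	/* Editor's sketch: filling in an LS_RJT reason before sending it. */
	static void set_rjt_reason(struct ls_rjt *stat)
	{
		stat->un.lsRjtError = 0;
		stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;	 /* unable to perform */
		stat->un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; /* no further detail */
	}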
541/*
542 * N_Port Logout (FLOGO/PLOGO Request) Payload Definition
543 */
544
545typedef struct _LOGO { /* Structure is in Big Endian format */
546 union {
547 uint32_t nPortId32; /* Access nPortId as a word */
548 struct {
549 uint8_t word1Reserved1; /* FC Word 1, bit 31:24 */
550 uint8_t nPortIdByte0; /* N_port ID bit 16:23 */
551 uint8_t nPortIdByte1; /* N_port ID bit 8:15 */
552 uint8_t nPortIdByte2; /* N_port ID bit 0: 7 */
553 } b;
554 } un;
555 struct lpfc_name portName; /* N_port name field */
556} LOGO;
557
558/*
559 * FCP Login (PRLI Request / ACC) Payload Definition
560 */
561
562#define PRLX_PAGE_LEN 0x10
563#define TPRLO_PAGE_LEN 0x14
564
565typedef struct _PRLI { /* Structure is in Big Endian format */
566 uint8_t prliType; /* FC Parm Word 0, bit 24:31 */
567
568#define PRLI_FCP_TYPE 0x08
569 uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */
570
571#ifdef __BIG_ENDIAN_BITFIELD
572 uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
573 uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
574 uint8_t estabImagePair:1; /* FC Parm Word 0, bit 13 */
575
576 /* ACC = imagePairEstablished */
577 uint8_t word0Reserved2:1; /* FC Parm Word 0, bit 12 */
578 uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
579#else /* __LITTLE_ENDIAN_BITFIELD */
580 uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
581 uint8_t word0Reserved2:1; /* FC Parm Word 0, bit 12 */
582 uint8_t estabImagePair:1; /* FC Parm Word 0, bit 13 */
583 uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
584 uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
585 /* ACC = imagePairEstablished */
586#endif
587
588#define PRLI_REQ_EXECUTED 0x1 /* acceptRspCode */
589#define PRLI_NO_RESOURCES 0x2
590#define PRLI_INIT_INCOMPLETE 0x3
591#define PRLI_NO_SUCH_PA 0x4
592#define PRLI_PREDEF_CONFIG 0x5
593#define PRLI_PARTIAL_SUCCESS 0x6
594#define PRLI_INVALID_PAGE_CNT 0x7
595 uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */
596
597 uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */
598
599 uint32_t respProcAssoc; /* FC Parm Word 2, bit 0:31 */
600
601 uint8_t word3Reserved1; /* FC Parm Word 3, bit 24:31 */
602 uint8_t word3Reserved2; /* FC Parm Word 3, bit 16:23 */
603
604#ifdef __BIG_ENDIAN_BITFIELD
605 uint16_t Word3bit15Resved:1; /* FC Parm Word 3, bit 15 */
606 uint16_t Word3bit14Resved:1; /* FC Parm Word 3, bit 14 */
607 uint16_t Word3bit13Resved:1; /* FC Parm Word 3, bit 13 */
608 uint16_t Word3bit12Resved:1; /* FC Parm Word 3, bit 12 */
609 uint16_t Word3bit11Resved:1; /* FC Parm Word 3, bit 11 */
610 uint16_t Word3bit10Resved:1; /* FC Parm Word 3, bit 10 */
611 uint16_t TaskRetryIdReq:1; /* FC Parm Word 3, bit 9 */
612 uint16_t Retry:1; /* FC Parm Word 3, bit 8 */
613 uint16_t ConfmComplAllowed:1; /* FC Parm Word 3, bit 7 */
614 uint16_t dataOverLay:1; /* FC Parm Word 3, bit 6 */
615 uint16_t initiatorFunc:1; /* FC Parm Word 3, bit 5 */
616 uint16_t targetFunc:1; /* FC Parm Word 3, bit 4 */
617 uint16_t cmdDataMixEna:1; /* FC Parm Word 3, bit 3 */
618 uint16_t dataRspMixEna:1; /* FC Parm Word 3, bit 2 */
619 uint16_t readXferRdyDis:1; /* FC Parm Word 3, bit 1 */
620 uint16_t writeXferRdyDis:1; /* FC Parm Word 3, bit 0 */
621#else /* __LITTLE_ENDIAN_BITFIELD */
622 uint16_t Retry:1; /* FC Parm Word 3, bit 8 */
623 uint16_t TaskRetryIdReq:1; /* FC Parm Word 3, bit 9 */
624 uint16_t Word3bit10Resved:1; /* FC Parm Word 3, bit 10 */
625 uint16_t Word3bit11Resved:1; /* FC Parm Word 3, bit 11 */
626 uint16_t Word3bit12Resved:1; /* FC Parm Word 3, bit 12 */
627 uint16_t Word3bit13Resved:1; /* FC Parm Word 3, bit 13 */
628 uint16_t Word3bit14Resved:1; /* FC Parm Word 3, bit 14 */
629 uint16_t Word3bit15Resved:1; /* FC Parm Word 3, bit 15 */
630 uint16_t writeXferRdyDis:1; /* FC Parm Word 3, bit 0 */
631 uint16_t readXferRdyDis:1; /* FC Parm Word 3, bit 1 */
632 uint16_t dataRspMixEna:1; /* FC Parm Word 3, bit 2 */
633 uint16_t cmdDataMixEna:1; /* FC Parm Word 3, bit 3 */
634 uint16_t targetFunc:1; /* FC Parm Word 3, bit 4 */
635 uint16_t initiatorFunc:1; /* FC Parm Word 3, bit 5 */
636 uint16_t dataOverLay:1; /* FC Parm Word 3, bit 6 */
637 uint16_t ConfmComplAllowed:1; /* FC Parm Word 3, bit 7 */
638#endif
639} PRLI;
640
641/*
642 * FCP Logout (PRLO Request / ACC) Payload Definition
643 */
644
645typedef struct _PRLO { /* Structure is in Big Endian format */
646 uint8_t prloType; /* FC Parm Word 0, bit 24:31 */
647
648#define PRLO_FCP_TYPE 0x08
649 uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */
650
651#ifdef __BIG_ENDIAN_BITFIELD
652 uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
653 uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
654 uint8_t word0Reserved2:2; /* FC Parm Word 0, bit 12:13 */
655 uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
656#else /* __LITTLE_ENDIAN_BITFIELD */
657 uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
658 uint8_t word0Reserved2:2; /* FC Parm Word 0, bit 12:13 */
659 uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
660 uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
661#endif
662
663#define PRLO_REQ_EXECUTED 0x1 /* acceptRspCode */
664#define PRLO_NO_SUCH_IMAGE 0x4
665#define PRLO_INVALID_PAGE_CNT 0x7
666
667 uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */
668
669 uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */
670
671 uint32_t respProcAssoc; /* FC Parm Word 2, bit 0:31 */
672
673 uint32_t word3Reserved1; /* FC Parm Word 3, bit 0:31 */
674} PRLO;
675
676typedef struct _ADISC { /* Structure is in Big Endian format */
677 uint32_t hardAL_PA;
678 struct lpfc_name portName;
679 struct lpfc_name nodeName;
680 uint32_t DID;
681} ADISC;
682
683typedef struct _FARP { /* Structure is in Big Endian format */
684 uint32_t Mflags:8;
685 uint32_t Odid:24;
686#define FARP_NO_ACTION 0 /* FARP information enclosed, no
687 action */
688#define FARP_MATCH_PORT 0x1 /* Match on Responder Port Name */
689#define FARP_MATCH_NODE 0x2 /* Match on Responder Node Name */
690#define FARP_MATCH_IP 0x4 /* Match on IP address, not supported */
691#define FARP_MATCH_IPV4 0x5 /* Match on IPV4 address, not
692 supported */
693#define FARP_MATCH_IPV6 0x6 /* Match on IPV6 address, not
694 supported */
695 uint32_t Rflags:8;
696 uint32_t Rdid:24;
697#define FARP_REQUEST_PLOGI 0x1 /* Request for PLOGI */
698#define FARP_REQUEST_FARPR 0x2 /* Request for FARP Response */
699 struct lpfc_name OportName;
700 struct lpfc_name OnodeName;
701 struct lpfc_name RportName;
702 struct lpfc_name RnodeName;
703 uint8_t Oipaddr[16];
704 uint8_t Ripaddr[16];
705} FARP;
706
707typedef struct _FAN { /* Structure is in Big Endian format */
708 uint32_t Fdid;
709 struct lpfc_name FportName;
710 struct lpfc_name FnodeName;
711} FAN;
712
713typedef struct _SCR { /* Structure is in Big Endian format */
714 uint8_t resvd1;
715 uint8_t resvd2;
716 uint8_t resvd3;
717 uint8_t Function;
718#define SCR_FUNC_FABRIC 0x01
719#define SCR_FUNC_NPORT 0x02
720#define SCR_FUNC_FULL 0x03
721#define SCR_CLEAR 0xff
722} SCR;
723
724typedef struct _RNID_TOP_DISC {
725 struct lpfc_name portName;
726 uint8_t resvd[8];
727 uint32_t unitType;
728#define RNID_HBA 0x7
729#define RNID_HOST 0xa
730#define RNID_DRIVER 0xd
731 uint32_t physPort;
732 uint32_t attachedNodes;
733 uint16_t ipVersion;
734#define RNID_IPV4 0x1
735#define RNID_IPV6 0x2
736 uint16_t UDPport;
737 uint8_t ipAddr[16];
738 uint16_t resvd1;
739 uint16_t flags;
740#define RNID_TD_SUPPORT 0x1
741#define RNID_LP_VALID 0x2
742} RNID_TOP_DISC;
743
744typedef struct _RNID { /* Structure is in Big Endian format */
745 uint8_t Format;
746#define RNID_TOPOLOGY_DISC 0xdf
747 uint8_t CommonLen;
748 uint8_t resvd1;
749 uint8_t SpecificLen;
750 struct lpfc_name portName;
751 struct lpfc_name nodeName;
752 union {
753 RNID_TOP_DISC topologyDisc; /* topology disc (0xdf) */
754 } un;
755} RNID;
756
757typedef struct _RRQ { /* Structure is in Big Endian format */
758 uint32_t SID;
759 uint16_t Oxid;
760 uint16_t Rxid;
761 uint8_t resv[32]; /* optional association hdr */
762} RRQ;
763
764/* This is used for RSCN command */
765typedef struct _D_ID { /* Structure is in Big Endian format */
766 union {
767 uint32_t word;
768 struct {
769#ifdef __BIG_ENDIAN_BITFIELD
770 uint8_t resv;
771 uint8_t domain;
772 uint8_t area;
773 uint8_t id;
774#else /* __LITTLE_ENDIAN_BITFIELD */
775 uint8_t id;
776 uint8_t area;
777 uint8_t domain;
778 uint8_t resv;
779#endif
780 } b;
781 } un;
782} D_ID;
783
784/*
785 * Structure to define all ELS Payload types
786 */
787
788typedef struct _ELS_PKT { /* Structure is in Big Endian format */
789 uint8_t elsCode; /* FC Word 0, bit 24:31 */
790 uint8_t elsByte1;
791 uint8_t elsByte2;
792 uint8_t elsByte3;
793 union {
794 struct ls_rjt lsRjt; /* Payload for LS_RJT ELS response */
795 struct serv_parm logi; /* Payload for PLOGI/FLOGI/PDISC/ACC */
796 LOGO logo; /* Payload for PLOGO/FLOGO/ACC */
797 PRLI prli; /* Payload for PRLI/ACC */
798 PRLO prlo; /* Payload for PRLO/ACC */
799 ADISC adisc; /* Payload for ADISC/ACC */
800 FARP farp; /* Payload for FARP/ACC */
801 FAN fan; /* Payload for FAN */
802 SCR scr; /* Payload for SCR/ACC */
803 RRQ rrq; /* Payload for RRQ */
804 RNID rnid; /* Payload for RNID */
805 uint8_t pad[128 - 4]; /* Pad out to payload of 128 bytes */
806 } un;
807} ELS_PKT;
808
809/*
810 * FDMI
811 * HBA Management Operations Command Codes
812 */
813#define SLI_MGMT_GRHL 0x100 /* Get registered HBA list */
814#define SLI_MGMT_GHAT 0x101 /* Get HBA attributes */
815#define SLI_MGMT_GRPL 0x102 /* Get registered Port list */
816#define SLI_MGMT_GPAT 0x110 /* Get Port attributes */
817#define SLI_MGMT_RHBA 0x200 /* Register HBA */
818#define SLI_MGMT_RHAT       0x201	/* Register HBA attributes */
819#define SLI_MGMT_RPRT 0x210 /* Register Port */
820#define SLI_MGMT_RPA 0x211 /* Register Port attributes */
821#define SLI_MGMT_DHBA 0x300 /* De-register HBA */
822#define SLI_MGMT_DPRT 0x310 /* De-register Port */
823
824/*
825 * Management Service Subtypes
826 */
827#define SLI_CT_FDMI_Subtypes 0x10
828
829/*
830 * HBA Management Service Reject Code
831 */
832#define REJECT_CODE 0x9 /* Unable to perform command request */
833
834/*
835 * HBA Management Service Reject Reason Code
836 * Please refer to the Reason Codes above
837 */
838
839/*
840 * HBA Attribute Types
841 */
842#define NODE_NAME 0x1
843#define MANUFACTURER 0x2
844#define SERIAL_NUMBER 0x3
845#define MODEL 0x4
846#define MODEL_DESCRIPTION 0x5
847#define HARDWARE_VERSION 0x6
848#define DRIVER_VERSION 0x7
849#define OPTION_ROM_VERSION 0x8
850#define FIRMWARE_VERSION 0x9
851#define OS_NAME_VERSION 0xa
852#define MAX_CT_PAYLOAD_LEN 0xb
853
854/*
855 * Port Attribute Types
856 */
857#define SUPPORTED_FC4_TYPES 0x1
858#define SUPPORTED_SPEED 0x2
859#define PORT_SPEED 0x3
860#define MAX_FRAME_SIZE 0x4
861#define OS_DEVICE_NAME 0x5
862#define HOST_NAME 0x6
863
864union AttributesDef {
865 /* Structure is in Big Endian format */
866 struct {
867 uint32_t AttrType:16;
868 uint32_t AttrLen:16;
869 } bits;
870 uint32_t word;
871};
872
873
874/*
875 * HBA Attribute Entry (8 - 260 bytes)
876 */
877typedef struct {
878 union AttributesDef ad;
879 union {
880 uint32_t VendorSpecific;
881 uint8_t Manufacturer[64];
882 uint8_t SerialNumber[64];
883 uint8_t Model[256];
884 uint8_t ModelDescription[256];
885 uint8_t HardwareVersion[256];
886 uint8_t DriverVersion[256];
887 uint8_t OptionROMVersion[256];
888 uint8_t FirmwareVersion[256];
889 struct lpfc_name NodeName;
890 uint8_t SupportFC4Types[32];
891 uint32_t SupportSpeed;
892 uint32_t PortSpeed;
893 uint32_t MaxFrameSize;
894 uint8_t OsDeviceName[256];
895 uint8_t OsNameVersion[256];
896 uint32_t MaxCTPayloadLen;
897 uint8_t HostName[256];
898 } un;
899} ATTRIBUTE_ENTRY;
900
901/*
902 * HBA Attribute Block
903 */
904typedef struct {
905 uint32_t EntryCnt; /* Number of HBA attribute entries */
906 ATTRIBUTE_ENTRY Entry; /* Variable-length array */
907} ATTRIBUTE_BLOCK;
908
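[Editor's note] Because Entry is declared as a single element but used as a variable-length array, an allocation must leave room for all entries. A hedged sizing sketch, where the entry count n is hypothetical; in practice each entry is itself variable length (per AttrLen), so real payloads pack tighter than this bound:

	/* Editor's illustration: upper-bounding the allocation for a block
	 * carrying n fixed-size entries, given the one-element array idiom. */
	size_t ab_size = sizeof(ATTRIBUTE_BLOCK) +
			 (n - 1) * sizeof(ATTRIBUTE_ENTRY);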
909/*
910 * Port Entry
911 */
912typedef struct {
913 struct lpfc_name PortName;
914} PORT_ENTRY;
915
916/*
917 * HBA Identifier
918 */
919typedef struct {
920 struct lpfc_name PortName;
921} HBA_IDENTIFIER;
922
923/*
924 * Registered Port List Format
925 */
926typedef struct {
927 uint32_t EntryCnt;
928 PORT_ENTRY pe; /* Variable-length array */
929} REG_PORT_LIST;
930
931/*
932 * Register HBA(RHBA)
933 */
934typedef struct {
935 HBA_IDENTIFIER hi;
936 REG_PORT_LIST rpl; /* variable-length array */
937/* ATTRIBUTE_BLOCK ab; */
938} REG_HBA;
939
940/*
941 * Register HBA Attributes (RHAT)
942 */
943typedef struct {
944 struct lpfc_name HBA_PortName;
945 ATTRIBUTE_BLOCK ab;
946} REG_HBA_ATTRIBUTE;
947
948/*
949 * Register Port Attributes (RPA)
950 */
951typedef struct {
952 struct lpfc_name PortName;
953 ATTRIBUTE_BLOCK ab;
954} REG_PORT_ATTRIBUTE;
955
956/*
957 * Get Registered HBA List (GRHL) Accept Payload Format
958 */
959typedef struct {
960 uint32_t HBA__Entry_Cnt; /* Number of Registered HBA Identifiers */
961 struct lpfc_name HBA_PortName; /* Variable-length array */
962} GRHL_ACC_PAYLOAD;
963
964/*
965 * Get Registered Port List (GRPL) Accept Payload Format
966 */
967typedef struct {
968 uint32_t RPL_Entry_Cnt; /* Number of Registered Port Entries */
969 PORT_ENTRY Reg_Port_Entry[1]; /* Variable-length array */
970} GRPL_ACC_PAYLOAD;
971
972/*
973 * Get Port Attributes (GPAT) Accept Payload Format
974 */
975
976typedef struct {
977 ATTRIBUTE_BLOCK pab;
978} GPAT_ACC_PAYLOAD;
979
980
981/*
982 * Begin HBA configuration parameters.
983 * The PCI configuration register BAR assignments are:
984 * BAR0, offset 0x10 - SLIM base memory address
985 * BAR1, offset 0x14 - SLIM base memory high address
986 * BAR2, offset 0x18 - REGISTER base memory address
987 * BAR3, offset 0x1c - REGISTER base memory high address
988 * BAR4, offset 0x20 - BIU I/O registers
989 * BAR5, offset 0x24 - REGISTER base io high address
990 */
991
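[Editor's note] During PCI probe a driver would resolve these apertures from config space; a minimal sketch of consuming the documented BAR layout (pdev is an assumed struct pci_dev *; FF_REG_AREA_SIZE is defined further down in this header; error handling omitted):

	/* Editor's sketch: resolving the SLIM and register apertures from
	 * the BAR layout documented above. Illustrative, not probe code. */
	unsigned long slim_base = pci_resource_start(pdev, 0); /* BAR0: SLIM      */
	unsigned long reg_base  = pci_resource_start(pdev, 2); /* BAR2: registers */
	void __iomem *slim = ioremap(slim_base, pci_resource_len(pdev, 0));
	void __iomem *regs = ioremap(reg_base, FF_REG_AREA_SIZE); /* 256 bytes */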
992/* Number of rings currently used and available. */
993#define MAX_CONFIGURED_RINGS 3
994#define MAX_RINGS 4
995
996/* IOCB / Mailbox is owned by FireFly */
997#define OWN_CHIP 1
998
999/* IOCB / Mailbox is owned by Host */
1000#define OWN_HOST 0
1001
1002/* Number of 4-byte words in an IOCB. */
1003#define IOCB_WORD_SZ 8
1004
1005/* defines for type field in fc header */
1006#define FC_ELS_DATA 0x1
1007#define FC_LLC_SNAP 0x5
1008#define FC_FCP_DATA 0x8
1009#define FC_COMMON_TRANSPORT_ULP 0x20
1010
1011/* defines for rctl field in fc header */
1012#define FC_DEV_DATA 0x0
1013#define FC_UNSOL_CTL 0x2
1014#define FC_SOL_CTL 0x3
1015#define FC_UNSOL_DATA 0x4
1016#define FC_FCP_CMND 0x6
1017#define FC_ELS_REQ 0x22
1018#define FC_ELS_RSP 0x23
1019
1020/* network headers for Dfctl field */
1021#define FC_NET_HDR 0x20
1022
1023/* Start FireFly Register definitions */
1024#define PCI_VENDOR_ID_EMULEX 0x10df
1025#define PCI_DEVICE_ID_FIREFLY 0x1ae5
1026#define PCI_DEVICE_ID_SUPERFLY 0xf700
1027#define PCI_DEVICE_ID_DRAGONFLY 0xf800
1028#define PCI_DEVICE_ID_RFLY 0xf095
1029#define PCI_DEVICE_ID_PFLY 0xf098
1030#define PCI_DEVICE_ID_TFLY 0xf0a5
1031#define PCI_DEVICE_ID_CENTAUR 0xf900
1032#define PCI_DEVICE_ID_PEGASUS 0xf980
1033#define PCI_DEVICE_ID_THOR 0xfa00
1034#define PCI_DEVICE_ID_VIPER 0xfb00
1035#define PCI_DEVICE_ID_HELIOS 0xfd00
1036#define PCI_DEVICE_ID_BMID 0xf0d5
1037#define PCI_DEVICE_ID_BSMB 0xf0d1
1038#define PCI_DEVICE_ID_ZEPHYR 0xfe00
1039#define PCI_DEVICE_ID_ZMID 0xf0e5
1040#define PCI_DEVICE_ID_ZSMB 0xf0e1
1041#define PCI_DEVICE_ID_LP101 0xf0a1
1042#define PCI_DEVICE_ID_LP10000S 0xfc00
1043
1044#define JEDEC_ID_ADDRESS 0x0080001c
1045#define FIREFLY_JEDEC_ID 0x1ACC
1046#define SUPERFLY_JEDEC_ID 0x0020
1047#define DRAGONFLY_JEDEC_ID 0x0021
1048#define DRAGONFLY_V2_JEDEC_ID 0x0025
1049#define CENTAUR_2G_JEDEC_ID 0x0026
1050#define CENTAUR_1G_JEDEC_ID 0x0028
1051#define PEGASUS_ORION_JEDEC_ID 0x0036
1052#define PEGASUS_JEDEC_ID 0x0038
1053#define THOR_JEDEC_ID 0x0012
1054#define HELIOS_JEDEC_ID 0x0364
1055#define ZEPHYR_JEDEC_ID 0x0577
1056#define VIPER_JEDEC_ID 0x4838
1057
1058#define JEDEC_ID_MASK 0x0FFFF000
1059#define JEDEC_ID_SHIFT 12
1060#define FC_JEDEC_ID(id) ((id & JEDEC_ID_MASK) >> JEDEC_ID_SHIFT)
1061
1062typedef struct { /* FireFly BIU registers */
1063 uint32_t hostAtt; /* See definitions for Host Attention
1064 register */
1065 uint32_t chipAtt; /* See definitions for Chip Attention
1066 register */
1067 uint32_t hostStatus; /* See definitions for Host Status register */
1068 uint32_t hostControl; /* See definitions for Host Control register */
1069 uint32_t buiConfig; /* See definitions for BIU configuration
1070 register */
1071} FF_REGS;
1072
1073/* IO Register size in bytes */
1074#define FF_REG_AREA_SIZE 256
1075
1076/* Host Attention Register */
1077
1078#define HA_REG_OFFSET 0 /* Byte offset from register base address */
1079
1080#define HA_R0RE_REQ 0x00000001 /* Bit 0 */
1081#define HA_R0CE_RSP 0x00000002 /* Bit 1 */
1082#define HA_R0ATT 0x00000008 /* Bit 3 */
1083#define HA_R1RE_REQ 0x00000010 /* Bit 4 */
1084#define HA_R1CE_RSP 0x00000020 /* Bit 5 */
1085#define HA_R1ATT 0x00000080 /* Bit 7 */
1086#define HA_R2RE_REQ 0x00000100 /* Bit 8 */
1087#define HA_R2CE_RSP 0x00000200 /* Bit 9 */
1088#define HA_R2ATT 0x00000800 /* Bit 11 */
1089#define HA_R3RE_REQ 0x00001000 /* Bit 12 */
1090#define HA_R3CE_RSP 0x00002000 /* Bit 13 */
1091#define HA_R3ATT 0x00008000 /* Bit 15 */
1092#define HA_LATT 0x20000000 /* Bit 29 */
1093#define HA_MBATT 0x40000000 /* Bit 30 */
1094#define HA_ERATT 0x80000000 /* Bit 31 */
1095
1096#define HA_RXRE_REQ 0x00000001 /* Bit 0 */
1097#define HA_RXCE_RSP 0x00000002 /* Bit 1 */
1098#define HA_RXATT 0x00000008 /* Bit 3 */
1099#define HA_RXMASK 0x0000000f
1100
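[Editor's note] The per-ring request/response/attention bits repeat every four bits (ring 0 in bits 0-3, ring 1 in bits 4-7, and so on), which is what the generic HA_RX* values and HA_RXMASK encode. A hedged extraction sketch:

	/* Editor's illustration: pulling one ring's attention nibble out of
	 * a Host Attention snapshot. ha_copy and ringno are assumed inputs. */
	uint32_t ring_status = (ha_copy >> (ringno * 4)) & HA_RXMASK;

	if (ring_status & HA_RXATT) {
		/* ... this ring has a response/attention event pending ... */
	}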
1101/* Chip Attention Register */
1102
1103#define CA_REG_OFFSET 4 /* Byte offset from register base address */
1104
1105#define CA_R0CE_REQ 0x00000001 /* Bit 0 */
1106#define CA_R0RE_RSP 0x00000002 /* Bit 1 */
1107#define CA_R0ATT 0x00000008 /* Bit 3 */
1108#define CA_R1CE_REQ 0x00000010 /* Bit 4 */
1109#define CA_R1RE_RSP 0x00000020 /* Bit 5 */
1110#define CA_R1ATT 0x00000080 /* Bit 7 */
1111#define CA_R2CE_REQ 0x00000100 /* Bit 8 */
1112#define CA_R2RE_RSP 0x00000200 /* Bit 9 */
1113#define CA_R2ATT 0x00000800 /* Bit 11 */
1114#define CA_R3CE_REQ 0x00001000 /* Bit 12 */
1115#define CA_R3RE_RSP 0x00002000 /* Bit 13 */
1116#define CA_R3ATT 0x00008000 /* Bit 15 */
1117#define CA_MBATT 0x40000000 /* Bit 30 */
1118
1119/* Host Status Register */
1120
1121#define HS_REG_OFFSET 8 /* Byte offset from register base address */
1122
1123#define HS_MBRDY 0x00400000 /* Bit 22 */
1124#define HS_FFRDY 0x00800000 /* Bit 23 */
1125#define HS_FFER8 0x01000000 /* Bit 24 */
1126#define HS_FFER7 0x02000000 /* Bit 25 */
1127#define HS_FFER6 0x04000000 /* Bit 26 */
1128#define HS_FFER5 0x08000000 /* Bit 27 */
1129#define HS_FFER4 0x10000000 /* Bit 28 */
1130#define HS_FFER3 0x20000000 /* Bit 29 */
1131#define HS_FFER2 0x40000000 /* Bit 30 */
1132#define HS_FFER1 0x80000000 /* Bit 31 */
1133#define HS_FFERM 0xFF000000 /* Mask for error bits 31:24 */
1134
1135/* Host Control Register */
1136
1137#define HC_REG_OFFSET 12 /* Word offset from register base address */
1138
1139#define HC_MBINT_ENA 0x00000001 /* Bit 0 */
1140#define HC_R0INT_ENA 0x00000002 /* Bit 1 */
1141#define HC_R1INT_ENA 0x00000004 /* Bit 2 */
1142#define HC_R2INT_ENA 0x00000008 /* Bit 3 */
1143#define HC_R3INT_ENA 0x00000010 /* Bit 4 */
1144#define HC_INITHBI 0x02000000 /* Bit 25 */
1145#define HC_INITMB 0x04000000 /* Bit 26 */
1146#define HC_INITFF 0x08000000 /* Bit 27 */
1147#define HC_LAINT_ENA 0x20000000 /* Bit 29 */
1148#define HC_ERINT_ENA 0x80000000 /* Bit 31 */
1149
1150/* Mailbox Commands */
1151#define MBX_SHUTDOWN 0x00 /* terminate testing */
1152#define MBX_LOAD_SM 0x01
1153#define MBX_READ_NV 0x02
1154#define MBX_WRITE_NV 0x03
1155#define MBX_RUN_BIU_DIAG 0x04
1156#define MBX_INIT_LINK 0x05
1157#define MBX_DOWN_LINK 0x06
1158#define MBX_CONFIG_LINK 0x07
1159#define MBX_CONFIG_RING 0x09
1160#define MBX_RESET_RING 0x0A
1161#define MBX_READ_CONFIG 0x0B
1162#define MBX_READ_RCONFIG 0x0C
1163#define MBX_READ_SPARM 0x0D
1164#define MBX_READ_STATUS 0x0E
1165#define MBX_READ_RPI 0x0F
1166#define MBX_READ_XRI 0x10
1167#define MBX_READ_REV 0x11
1168#define MBX_READ_LNK_STAT 0x12
1169#define MBX_REG_LOGIN 0x13
1170#define MBX_UNREG_LOGIN 0x14
1171#define MBX_READ_LA 0x15
1172#define MBX_CLEAR_LA 0x16
1173#define MBX_DUMP_MEMORY 0x17
1174#define MBX_DUMP_CONTEXT 0x18
1175#define MBX_RUN_DIAGS 0x19
1176#define MBX_RESTART 0x1A
1177#define MBX_UPDATE_CFG 0x1B
1178#define MBX_DOWN_LOAD 0x1C
1179#define MBX_DEL_LD_ENTRY 0x1D
1180#define MBX_RUN_PROGRAM 0x1E
1181#define MBX_SET_MASK 0x20
1182#define MBX_SET_SLIM 0x21
1183#define MBX_UNREG_D_ID 0x23
1184#define MBX_CONFIG_FARP 0x25
1185
1186#define MBX_LOAD_AREA 0x81
1187#define MBX_RUN_BIU_DIAG64 0x84
1188#define MBX_CONFIG_PORT 0x88
1189#define MBX_READ_SPARM64 0x8D
1190#define MBX_READ_RPI64 0x8F
1191#define MBX_REG_LOGIN64 0x93
1192#define MBX_READ_LA64 0x95
1193
1194#define MBX_FLASH_WR_ULA 0x98
1195#define MBX_SET_DEBUG 0x99
1196#define MBX_LOAD_EXP_ROM 0x9C
1197
1198#define MBX_MAX_CMDS 0x9D
1199#define MBX_SLI2_CMD_MASK 0x80
1200
1201/* IOCB Commands */
1202
1203#define CMD_RCV_SEQUENCE_CX 0x01
1204#define CMD_XMIT_SEQUENCE_CR 0x02
1205#define CMD_XMIT_SEQUENCE_CX 0x03
1206#define CMD_XMIT_BCAST_CN 0x04
1207#define CMD_XMIT_BCAST_CX 0x05
1208#define CMD_QUE_RING_BUF_CN 0x06
1209#define CMD_QUE_XRI_BUF_CX 0x07
1210#define CMD_IOCB_CONTINUE_CN 0x08
1211#define CMD_RET_XRI_BUF_CX 0x09
1212#define CMD_ELS_REQUEST_CR 0x0A
1213#define CMD_ELS_REQUEST_CX 0x0B
1214#define CMD_RCV_ELS_REQ_CX 0x0D
1215#define CMD_ABORT_XRI_CN 0x0E
1216#define CMD_ABORT_XRI_CX 0x0F
1217#define CMD_CLOSE_XRI_CN 0x10
1218#define CMD_CLOSE_XRI_CX 0x11
1219#define CMD_CREATE_XRI_CR 0x12
1220#define CMD_CREATE_XRI_CX 0x13
1221#define CMD_GET_RPI_CN 0x14
1222#define CMD_XMIT_ELS_RSP_CX 0x15
1223#define CMD_GET_RPI_CR 0x16
1224#define CMD_XRI_ABORTED_CX 0x17
1225#define CMD_FCP_IWRITE_CR 0x18
1226#define CMD_FCP_IWRITE_CX 0x19
1227#define CMD_FCP_IREAD_CR 0x1A
1228#define CMD_FCP_IREAD_CX 0x1B
1229#define CMD_FCP_ICMND_CR 0x1C
1230#define CMD_FCP_ICMND_CX 0x1D
1231
1232#define CMD_ADAPTER_MSG 0x20
1233#define CMD_ADAPTER_DUMP 0x22
1234
1235/* SLI_2 IOCB Command Set */
1236
1237#define CMD_RCV_SEQUENCE64_CX 0x81
1238#define CMD_XMIT_SEQUENCE64_CR 0x82
1239#define CMD_XMIT_SEQUENCE64_CX 0x83
1240#define CMD_XMIT_BCAST64_CN 0x84
1241#define CMD_XMIT_BCAST64_CX 0x85
1242#define CMD_QUE_RING_BUF64_CN 0x86
1243#define CMD_QUE_XRI_BUF64_CX 0x87
1244#define CMD_IOCB_CONTINUE64_CN 0x88
1245#define CMD_RET_XRI_BUF64_CX 0x89
1246#define CMD_ELS_REQUEST64_CR 0x8A
1247#define CMD_ELS_REQUEST64_CX 0x8B
1248#define CMD_ABORT_MXRI64_CN 0x8C
1249#define CMD_RCV_ELS_REQ64_CX 0x8D
1250#define CMD_XMIT_ELS_RSP64_CX 0x95
1251#define CMD_FCP_IWRITE64_CR 0x98
1252#define CMD_FCP_IWRITE64_CX 0x99
1253#define CMD_FCP_IREAD64_CR 0x9A
1254#define CMD_FCP_IREAD64_CX 0x9B
1255#define CMD_FCP_ICMND64_CR 0x9C
1256#define CMD_FCP_ICMND64_CX 0x9D
1257
1258#define CMD_GEN_REQUEST64_CR 0xC2
1259#define CMD_GEN_REQUEST64_CX 0xC3
1260
1261#define CMD_MAX_IOCB_CMD 0xE6
1262#define CMD_IOCB_MASK 0xff
1263
1264#define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG
1265 iocb */
1266#define LPFC_MAX_ADPTMSG 32 /* max msg data */
1267/*
1268 * Define Status
1269 */
1270#define MBX_SUCCESS 0
1271#define MBXERR_NUM_RINGS 1
1272#define MBXERR_NUM_IOCBS 2
1273#define MBXERR_IOCBS_EXCEEDED 3
1274#define MBXERR_BAD_RING_NUMBER 4
1275#define MBXERR_MASK_ENTRIES_RANGE 5
1276#define MBXERR_MASKS_EXCEEDED 6
1277#define MBXERR_BAD_PROFILE 7
1278#define MBXERR_BAD_DEF_CLASS 8
1279#define MBXERR_BAD_MAX_RESPONDER 9
1280#define MBXERR_BAD_MAX_ORIGINATOR 10
1281#define MBXERR_RPI_REGISTERED 11
1282#define MBXERR_RPI_FULL 12
1283#define MBXERR_NO_RESOURCES 13
1284#define MBXERR_BAD_RCV_LENGTH 14
1285#define MBXERR_DMA_ERROR 15
1286#define MBXERR_ERROR 16
1287#define MBX_NOT_FINISHED 255
1288
1289#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
1290#define MBX_TIMEOUT       0xfffffe /* time-out expired waiting for mailbox command */
1291
1292/*
1293 * Begin Structure Definitions for Mailbox Commands
1294 */
1295
1296typedef struct {
1297#ifdef __BIG_ENDIAN_BITFIELD
1298 uint8_t tval;
1299 uint8_t tmask;
1300 uint8_t rval;
1301 uint8_t rmask;
1302#else /* __LITTLE_ENDIAN_BITFIELD */
1303 uint8_t rmask;
1304 uint8_t rval;
1305 uint8_t tmask;
1306 uint8_t tval;
1307#endif
1308} RR_REG;
1309
1310struct ulp_bde {
1311 uint32_t bdeAddress;
1312#ifdef __BIG_ENDIAN_BITFIELD
1313 uint32_t bdeReserved:4;
1314 uint32_t bdeAddrHigh:4;
1315 uint32_t bdeSize:24;
1316#else /* __LITTLE_ENDIAN_BITFIELD */
1317 uint32_t bdeSize:24;
1318 uint32_t bdeAddrHigh:4;
1319 uint32_t bdeReserved:4;
1320#endif
1321};
1322
1323struct ulp_bde64 { /* SLI-2 */
1324 union ULP_BDE_TUS {
1325 uint32_t w;
1326 struct {
1327#ifdef __BIG_ENDIAN_BITFIELD
1328 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
1329 VALUE !! */
1330 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
1331#else /* __LITTLE_ENDIAN_BITFIELD */
1332 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
1333 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
1334 VALUE !! */
1335#endif
1336
1337#define BUFF_USE_RSVD 0x01 /* bdeFlags */
1338#define BUFF_USE_INTRPT 0x02 /* Not Implemented with LP6000 */
1339#define BUFF_USE_CMND 0x04 /* Optional, 1=cmd/rsp 0=data buffer */
1340#define BUFF_USE_RCV 0x08 /* "" "", 1=rcv buffer, 0=xmit
1341 buffer */
1342#define BUFF_TYPE_32BIT 0x10 /* "" "", 1=32 bit addr 0=64 bit
1343 addr */
1344#define BUFF_TYPE_SPECIAL 0x20 /* Not Implemented with LP6000 */
1345#define BUFF_TYPE_BDL 0x40 /* Optional, may be set in BDL */
1346#define BUFF_TYPE_INVALID 0x80 /* "" "" */
1347 } f;
1348 } tus;
1349 uint32_t addrLow;
1350 uint32_t addrHigh;
1351};
1352#define BDE64_SIZE_WORD 0
1353#define BPL64_SIZE_WORD 0x40
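
/*
 * Editor's illustration (not driver code; the sketch_* helper names here
 * and below are hypothetical): a minimal sketch of how a 64-bit DMA
 * address and byte count are packed into this descriptor. Assumes
 * `physaddr` comes from the DMA API and `len` fits in the 24-bit size
 * field.
 */
static inline void sketch_fill_bde64(struct ulp_bde64 *bde,
				     uint64_t physaddr, uint32_t len)
{
	bde->addrLow  = (uint32_t)(physaddr & 0xffffffffULL);
	bde->addrHigh = (uint32_t)(physaddr >> 32);
	bde->tus.f.bdeSize  = len;	/* 24-bit buffer length */
	bde->tus.f.bdeFlags = 0;	/* 0 is a supported value, per above */
}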
1354
1355typedef struct ULP_BDL { /* SLI-2 */
1356#ifdef __BIG_ENDIAN_BITFIELD
1357 uint32_t bdeFlags:8; /* BDL Flags */
1358 uint32_t bdeSize:24; /* Size of BDL array in host memory (bytes) */
1359#else /* __LITTLE_ENDIAN_BITFIELD */
1360 uint32_t bdeSize:24; /* Size of BDL array in host memory (bytes) */
1361 uint32_t bdeFlags:8; /* BDL Flags */
1362#endif
1363
1364 uint32_t addrLow; /* Address 0:31 */
1365 uint32_t addrHigh; /* Address 32:63 */
1366 uint32_t ulpIoTag32; /* Can be used for 32 bit I/O Tag */
1367} ULP_BDL;
1368
1369/* Structure for MB Command LOAD_SM and DOWN_LOAD */
1370
1371typedef struct {
1372#ifdef __BIG_ENDIAN_BITFIELD
1373 uint32_t rsvd2:25;
1374 uint32_t acknowledgment:1;
1375 uint32_t version:1;
1376 uint32_t erase_or_prog:1;
1377 uint32_t update_flash:1;
1378 uint32_t update_ram:1;
1379 uint32_t method:1;
1380 uint32_t load_cmplt:1;
1381#else /* __LITTLE_ENDIAN_BITFIELD */
1382 uint32_t load_cmplt:1;
1383 uint32_t method:1;
1384 uint32_t update_ram:1;
1385 uint32_t update_flash:1;
1386 uint32_t erase_or_prog:1;
1387 uint32_t version:1;
1388 uint32_t acknowledgment:1;
1389 uint32_t rsvd2:25;
1390#endif
1391
1392 uint32_t dl_to_adr_low;
1393 uint32_t dl_to_adr_high;
1394 uint32_t dl_len;
1395 union {
1396 uint32_t dl_from_mbx_offset;
1397 struct ulp_bde dl_from_bde;
1398 struct ulp_bde64 dl_from_bde64;
1399 } un;
1400
1401} LOAD_SM_VAR;
1402
1403/* Structure for MB Command READ_NVPARM (02) */
1404
1405typedef struct {
1406 uint32_t rsvd1[3]; /* Read as all one's */
1407 uint32_t rsvd2; /* Read as all zero's */
1408 uint32_t portname[2]; /* N_PORT name */
1409 uint32_t nodename[2]; /* NODE name */
1410
1411#ifdef __BIG_ENDIAN_BITFIELD
1412 uint32_t pref_DID:24;
1413 uint32_t hardAL_PA:8;
1414#else /* __LITTLE_ENDIAN_BITFIELD */
1415 uint32_t hardAL_PA:8;
1416 uint32_t pref_DID:24;
1417#endif
1418
1419 uint32_t rsvd3[21]; /* Read as all one's */
1420} READ_NV_VAR;
1421
1422/* Structure for MB Command WRITE_NVPARMS (03) */
1423
1424typedef struct {
1425 uint32_t rsvd1[3]; /* Must be all one's */
1426 uint32_t rsvd2; /* Must be all zero's */
1427 uint32_t portname[2]; /* N_PORT name */
1428 uint32_t nodename[2]; /* NODE name */
1429
1430#ifdef __BIG_ENDIAN_BITFIELD
1431 uint32_t pref_DID:24;
1432 uint32_t hardAL_PA:8;
1433#else /* __LITTLE_ENDIAN_BITFIELD */
1434 uint32_t hardAL_PA:8;
1435 uint32_t pref_DID:24;
1436#endif
1437
1438 uint32_t rsvd3[21]; /* Must be all one's */
1439} WRITE_NV_VAR;
1440
1441/* Structure for MB Command RUN_BIU_DIAG (04) */
1442/* Structure for MB Command RUN_BIU_DIAG64 (0x84) */
1443
1444typedef struct {
1445 uint32_t rsvd1;
1446 union {
1447 struct {
1448 struct ulp_bde xmit_bde;
1449 struct ulp_bde rcv_bde;
1450 } s1;
1451 struct {
1452 struct ulp_bde64 xmit_bde64;
1453 struct ulp_bde64 rcv_bde64;
1454 } s2;
1455 } un;
1456} BIU_DIAG_VAR;
1457
1458/* Structure for MB Command INIT_LINK (05) */
1459
1460typedef struct {
1461#ifdef __BIG_ENDIAN_BITFIELD
1462 uint32_t rsvd1:24;
1463 uint32_t lipsr_AL_PA:8; /* AL_PA to issue Lip Selective Reset to */
1464#else /* __LITTLE_ENDIAN_BITFIELD */
1465 uint32_t lipsr_AL_PA:8; /* AL_PA to issue Lip Selective Reset to */
1466 uint32_t rsvd1:24;
1467#endif
1468
1469#ifdef __BIG_ENDIAN_BITFIELD
1470 uint8_t fabric_AL_PA; /* If using a Fabric Assigned AL_PA */
1471 uint8_t rsvd2;
1472 uint16_t link_flags;
1473#else /* __LITTLE_ENDIAN_BITFIELD */
1474 uint16_t link_flags;
1475 uint8_t rsvd2;
1476 uint8_t fabric_AL_PA; /* If using a Fabric Assigned AL_PA */
1477#endif
1478
1479#define FLAGS_LOCAL_LB 0x01 /* link_flags (=1) ENDEC loopback */
1480#define FLAGS_TOPOLOGY_MODE_LOOP_PT 0x00 /* Attempt loop then pt-pt */
1481#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */
1482#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */
1483#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */
1484#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */
1485
1486#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */
1487#define FLAGS_LINK_SPEED 0x0800 /* Bit 11 */
1488
1489 uint32_t link_speed;
1490#define LINK_SPEED_AUTO 0 /* Auto selection */
1491#define LINK_SPEED_1G 1 /* 1 Gigabaud */
1492#define LINK_SPEED_2G 2 /* 2 Gigabaud */
1493#define LINK_SPEED_4G 4 /* 4 Gigabaud */
1494#define LINK_SPEED_8G 8 /* 8 Gigabaud */
1495#define LINK_SPEED_10G 16 /* 10 Gigabaud */
1496
1497} INIT_LINK_VAR;
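
/*
 * Editor's illustration: a hedged sketch of an INIT_LINK setup using the
 * values above ("try loop first, then point-to-point" at an
 * auto-negotiated speed). A fixed speed would also need the
 * FLAGS_LINK_SPEED bit set in link_flags.
 */
static inline void sketch_init_link_defaults(INIT_LINK_VAR *lnk)
{
	lnk->link_flags = FLAGS_TOPOLOGY_MODE_LOOP_PT;
	lnk->link_speed = LINK_SPEED_AUTO;
}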
1498
1499/* Structure for MB Command DOWN_LINK (06) */
1500
1501typedef struct {
1502 uint32_t rsvd1;
1503} DOWN_LINK_VAR;
1504
1505/* Structure for MB Command CONFIG_LINK (07) */
1506
1507typedef struct {
1508#ifdef __BIG_ENDIAN_BITFIELD
1509 uint32_t cr:1;
1510 uint32_t ci:1;
1511 uint32_t cr_delay:6;
1512 uint32_t cr_count:8;
1513 uint32_t rsvd1:8;
1514 uint32_t MaxBBC:8;
1515#else /* __LITTLE_ENDIAN_BITFIELD */
1516 uint32_t MaxBBC:8;
1517 uint32_t rsvd1:8;
1518 uint32_t cr_count:8;
1519 uint32_t cr_delay:6;
1520 uint32_t ci:1;
1521 uint32_t cr:1;
1522#endif
1523
1524 uint32_t myId;
1525 uint32_t rsvd2;
1526 uint32_t edtov;
1527 uint32_t arbtov;
1528 uint32_t ratov;
1529 uint32_t rttov;
1530 uint32_t altov;
1531 uint32_t crtov;
1532 uint32_t citov;
1533#ifdef __BIG_ENDIAN_BITFIELD
1534 uint32_t rrq_enable:1;
1535 uint32_t rrq_immed:1;
1536 uint32_t rsvd4:29;
1537 uint32_t ack0_enable:1;
1538#else /* __LITTLE_ENDIAN_BITFIELD */
1539 uint32_t ack0_enable:1;
1540 uint32_t rsvd4:29;
1541 uint32_t rrq_immed:1;
1542 uint32_t rrq_enable:1;
1543#endif
1544} CONFIG_LINK;
1545
1546/* Structure for MB Command PART_SLIM (08)
1547 * will be removed since SLI1 is no longer supported!
1548 */
1549typedef struct {
1550#ifdef __BIG_ENDIAN_BITFIELD
1551 uint16_t offCiocb;
1552 uint16_t numCiocb;
1553 uint16_t offRiocb;
1554 uint16_t numRiocb;
1555#else /* __LITTLE_ENDIAN_BITFIELD */
1556 uint16_t numCiocb;
1557 uint16_t offCiocb;
1558 uint16_t numRiocb;
1559 uint16_t offRiocb;
1560#endif
1561} RING_DEF;
1562
1563typedef struct {
1564#ifdef __BIG_ENDIAN_BITFIELD
1565 uint32_t unused1:24;
1566 uint32_t numRing:8;
1567#else /* __LITTLE_ENDIAN_BITFIELD */
1568 uint32_t numRing:8;
1569 uint32_t unused1:24;
1570#endif
1571
1572 RING_DEF ringdef[4];
1573 uint32_t hbainit;
1574} PART_SLIM_VAR;
1575
1576/* Structure for MB Command CONFIG_RING (09) */
1577
1578typedef struct {
1579#ifdef __BIG_ENDIAN_BITFIELD
1580 uint32_t unused2:6;
1581 uint32_t recvSeq:1;
1582 uint32_t recvNotify:1;
1583 uint32_t numMask:8;
1584 uint32_t profile:8;
1585 uint32_t unused1:4;
1586 uint32_t ring:4;
1587#else /* __LITTLE_ENDIAN_BITFIELD */
1588 uint32_t ring:4;
1589 uint32_t unused1:4;
1590 uint32_t profile:8;
1591 uint32_t numMask:8;
1592 uint32_t recvNotify:1;
1593 uint32_t recvSeq:1;
1594 uint32_t unused2:6;
1595#endif
1596
1597#ifdef __BIG_ENDIAN_BITFIELD
1598 uint16_t maxRespXchg;
1599 uint16_t maxOrigXchg;
1600#else /* __LITTLE_ENDIAN_BITFIELD */
1601 uint16_t maxOrigXchg;
1602 uint16_t maxRespXchg;
1603#endif
1604
1605 RR_REG rrRegs[6];
1606} CONFIG_RING_VAR;
1607
1608/* Structure for MB Command RESET_RING (10) */
1609
1610typedef struct {
1611 uint32_t ring_no;
1612} RESET_RING_VAR;
1613
1614/* Structure for MB Command READ_CONFIG (11) */
1615
1616typedef struct {
1617#ifdef __BIG_ENDIAN_BITFIELD
1618 uint32_t cr:1;
1619 uint32_t ci:1;
1620 uint32_t cr_delay:6;
1621 uint32_t cr_count:8;
1622 uint32_t InitBBC:8;
1623 uint32_t MaxBBC:8;
1624#else /* __LITTLE_ENDIAN_BITFIELD */
1625 uint32_t MaxBBC:8;
1626 uint32_t InitBBC:8;
1627 uint32_t cr_count:8;
1628 uint32_t cr_delay:6;
1629 uint32_t ci:1;
1630 uint32_t cr:1;
1631#endif
1632
1633#ifdef __BIG_ENDIAN_BITFIELD
1634 uint32_t topology:8;
1635 uint32_t myDid:24;
1636#else /* __LITTLE_ENDIAN_BITFIELD */
1637 uint32_t myDid:24;
1638 uint32_t topology:8;
1639#endif
1640
1641 /* Defines for topology (defined previously) */
1642#ifdef __BIG_ENDIAN_BITFIELD
1643 uint32_t AR:1;
1644 uint32_t IR:1;
1645 uint32_t rsvd1:29;
1646 uint32_t ack0:1;
1647#else /* __LITTLE_ENDIAN_BITFIELD */
1648 uint32_t ack0:1;
1649 uint32_t rsvd1:29;
1650 uint32_t IR:1;
1651 uint32_t AR:1;
1652#endif
1653
1654 uint32_t edtov;
1655 uint32_t arbtov;
1656 uint32_t ratov;
1657 uint32_t rttov;
1658 uint32_t altov;
1659 uint32_t lmt;
1660#define LMT_RESERVED 0x0 /* Not used */
1661#define LMT_266_10bit 0x1 /* 265.625 Mbaud 10 bit iface */
1662#define LMT_532_10bit 0x2 /* 531.25 Mbaud 10 bit iface */
1663#define LMT_1063_20bit 0x3 /* 1062.5 Mbaud 20 bit iface */
1664#define LMT_1063_10bit 0x4 /* 1062.5 Mbaud 10 bit iface */
1665#define LMT_2125_10bit 0x8 /* 2125 Mbaud 10 bit iface */
1666#define LMT_4250_10bit 0x40 /* 4250 Mbaud 10 bit iface */
1667
1668 uint32_t rsvd2;
1669 uint32_t rsvd3;
1670 uint32_t max_xri;
1671 uint32_t max_iocb;
1672 uint32_t max_rpi;
1673 uint32_t avail_xri;
1674 uint32_t avail_iocb;
1675 uint32_t avail_rpi;
1676 uint32_t default_rpi;
1677} READ_CONFIG_VAR;
1678
1679/* Structure for MB Command READ_RCONFIG (12) */
1680
1681typedef struct {
1682#ifdef __BIG_ENDIAN_BITFIELD
1683 uint32_t rsvd2:7;
1684 uint32_t recvNotify:1;
1685 uint32_t numMask:8;
1686 uint32_t profile:8;
1687 uint32_t rsvd1:4;
1688 uint32_t ring:4;
1689#else /* __LITTLE_ENDIAN_BITFIELD */
1690 uint32_t ring:4;
1691 uint32_t rsvd1:4;
1692 uint32_t profile:8;
1693 uint32_t numMask:8;
1694 uint32_t recvNotify:1;
1695 uint32_t rsvd2:7;
1696#endif
1697
1698#ifdef __BIG_ENDIAN_BITFIELD
1699 uint16_t maxResp;
1700 uint16_t maxOrig;
1701#else /* __LITTLE_ENDIAN_BITFIELD */
1702 uint16_t maxOrig;
1703 uint16_t maxResp;
1704#endif
1705
1706 RR_REG rrRegs[6];
1707
1708#ifdef __BIG_ENDIAN_BITFIELD
1709 uint16_t cmdRingOffset;
1710 uint16_t cmdEntryCnt;
1711 uint16_t rspRingOffset;
1712 uint16_t rspEntryCnt;
1713 uint16_t nextCmdOffset;
1714 uint16_t rsvd3;
1715 uint16_t nextRspOffset;
1716 uint16_t rsvd4;
1717#else /* __LITTLE_ENDIAN_BITFIELD */
1718 uint16_t cmdEntryCnt;
1719 uint16_t cmdRingOffset;
1720 uint16_t rspEntryCnt;
1721 uint16_t rspRingOffset;
1722 uint16_t rsvd3;
1723 uint16_t nextCmdOffset;
1724 uint16_t rsvd4;
1725 uint16_t nextRspOffset;
1726#endif
1727} READ_RCONF_VAR;
1728
1729/* Structure for MB Command READ_SPARM (13) */
1730/* Structure for MB Command READ_SPARM64 (0x8D) */
1731
1732typedef struct {
1733 uint32_t rsvd1;
1734 uint32_t rsvd2;
1735 union {
1736 struct ulp_bde sp; /* This BDE points to struct serv_parm
1737 structure */
1738 struct ulp_bde64 sp64;
1739 } un;
1740} READ_SPARM_VAR;
1741
1742/* Structure for MB Command READ_STATUS (14) */
1743
1744typedef struct {
1745#ifdef __BIG_ENDIAN_BITFIELD
1746 uint32_t rsvd1:31;
1747 uint32_t clrCounters:1;
1748 uint16_t activeXriCnt;
1749 uint16_t activeRpiCnt;
1750#else /* __LITTLE_ENDIAN_BITFIELD */
1751 uint32_t clrCounters:1;
1752 uint32_t rsvd1:31;
1753 uint16_t activeRpiCnt;
1754 uint16_t activeXriCnt;
1755#endif
1756
1757 uint32_t xmitByteCnt;
1758 uint32_t rcvByteCnt;
1759 uint32_t xmitFrameCnt;
1760 uint32_t rcvFrameCnt;
1761 uint32_t xmitSeqCnt;
1762 uint32_t rcvSeqCnt;
1763 uint32_t totalOrigExchanges;
1764 uint32_t totalRespExchanges;
1765 uint32_t rcvPbsyCnt;
1766 uint32_t rcvFbsyCnt;
1767} READ_STATUS_VAR;
1768
1769/* Structure for MB Command READ_RPI (15) */
1770/* Structure for MB Command READ_RPI64 (0x8F) */
1771
1772typedef struct {
1773#ifdef __BIG_ENDIAN_BITFIELD
1774 uint16_t nextRpi;
1775 uint16_t reqRpi;
1776 uint32_t rsvd2:8;
1777 uint32_t DID:24;
1778#else /* __LITTLE_ENDIAN_BITFIELD */
1779 uint16_t reqRpi;
1780 uint16_t nextRpi;
1781 uint32_t DID:24;
1782 uint32_t rsvd2:8;
1783#endif
1784
1785 union {
1786 struct ulp_bde sp;
1787 struct ulp_bde64 sp64;
1788 } un;
1789
1790} READ_RPI_VAR;
1791
1792/* Structure for MB Command READ_XRI (16) */
1793
1794typedef struct {
1795#ifdef __BIG_ENDIAN_BITFIELD
1796 uint16_t nextXri;
1797 uint16_t reqXri;
1798 uint16_t rsvd1;
1799 uint16_t rpi;
1800 uint32_t rsvd2:8;
1801 uint32_t DID:24;
1802 uint32_t rsvd3:8;
1803 uint32_t SID:24;
1804 uint32_t rsvd4;
1805 uint8_t seqId;
1806 uint8_t rsvd5;
1807 uint16_t seqCount;
1808 uint16_t oxId;
1809 uint16_t rxId;
1810 uint32_t rsvd6:30;
1811 uint32_t si:1;
1812 uint32_t exchOrig:1;
1813#else /* __LITTLE_ENDIAN_BITFIELD */
1814 uint16_t reqXri;
1815 uint16_t nextXri;
1816 uint16_t rpi;
1817 uint16_t rsvd1;
1818 uint32_t DID:24;
1819 uint32_t rsvd2:8;
1820 uint32_t SID:24;
1821 uint32_t rsvd3:8;
1822 uint32_t rsvd4;
1823 uint16_t seqCount;
1824 uint8_t rsvd5;
1825 uint8_t seqId;
1826 uint16_t rxId;
1827 uint16_t oxId;
1828 uint32_t exchOrig:1;
1829 uint32_t si:1;
1830 uint32_t rsvd6:30;
1831#endif
1832} READ_XRI_VAR;
1833
1834/* Structure for MB Command READ_REV (17) */
1835
1836typedef struct {
1837#ifdef __BIG_ENDIAN_BITFIELD
1838 uint32_t cv:1;
1839 uint32_t rr:1;
1840 uint32_t rsvd1:29;
1841 uint32_t rv:1;
1842#else /* __LITTLE_ENDIAN_BITFIELD */
1843 uint32_t rv:1;
1844 uint32_t rsvd1:29;
1845 uint32_t rr:1;
1846 uint32_t cv:1;
1847#endif
1848
1849 uint32_t biuRev;
1850 uint32_t smRev;
1851 union {
1852 uint32_t smFwRev;
1853 struct {
1854#ifdef __BIG_ENDIAN_BITFIELD
1855 uint8_t ProgType;
1856 uint8_t ProgId;
1857 uint16_t ProgVer:4;
1858 uint16_t ProgRev:4;
1859 uint16_t ProgFixLvl:2;
1860 uint16_t ProgDistType:2;
1861 uint16_t DistCnt:4;
1862#else /* __LITTLE_ENDIAN_BITFIELD */
1863 uint16_t DistCnt:4;
1864 uint16_t ProgDistType:2;
1865 uint16_t ProgFixLvl:2;
1866 uint16_t ProgRev:4;
1867 uint16_t ProgVer:4;
1868 uint8_t ProgId;
1869 uint8_t ProgType;
1870#endif
1871
1872 } b;
1873 } un;
1874 uint32_t endecRev;
1875#ifdef __BIG_ENDIAN_BITFIELD
1876 uint8_t feaLevelHigh;
1877 uint8_t feaLevelLow;
1878 uint8_t fcphHigh;
1879 uint8_t fcphLow;
1880#else /* __LITTLE_ENDIAN_BITFIELD */
1881 uint8_t fcphLow;
1882 uint8_t fcphHigh;
1883 uint8_t feaLevelLow;
1884 uint8_t feaLevelHigh;
1885#endif
1886
1887 uint32_t postKernRev;
1888 uint32_t opFwRev;
1889 uint8_t opFwName[16];
1890 uint32_t sli1FwRev;
1891 uint8_t sli1FwName[16];
1892 uint32_t sli2FwRev;
1893 uint8_t sli2FwName[16];
1894 uint32_t rsvd2;
1895 uint32_t RandomData[7];
1896} READ_REV_VAR;
1897
1898/* Structure for MB Command READ_LINK_STAT (18) */
1899
1900typedef struct {
1901 uint32_t rsvd1;
1902 uint32_t linkFailureCnt;
1903 uint32_t lossSyncCnt;
1904
1905 uint32_t lossSignalCnt;
1906 uint32_t primSeqErrCnt;
1907 uint32_t invalidXmitWord;
1908 uint32_t crcCnt;
1909 uint32_t primSeqTimeout;
1910 uint32_t elasticOverrun;
1911 uint32_t arbTimeout;
1912} READ_LNK_VAR;
1913
1914/* Structure for MB Command REG_LOGIN (19) */
1915/* Structure for MB Command REG_LOGIN64 (0x93) */
1916
1917typedef struct {
1918#ifdef __BIG_ENDIAN_BITFIELD
1919 uint16_t rsvd1;
1920 uint16_t rpi;
1921 uint32_t rsvd2:8;
1922 uint32_t did:24;
1923#else /* __LITTLE_ENDIAN_BITFIELD */
1924 uint16_t rpi;
1925 uint16_t rsvd1;
1926 uint32_t did:24;
1927 uint32_t rsvd2:8;
1928#endif
1929
1930 union {
1931 struct ulp_bde sp;
1932 struct ulp_bde64 sp64;
1933 } un;
1934
1935} REG_LOGIN_VAR;
1936
1937/* Word 30 contents for REG_LOGIN */
1938typedef union {
1939 struct {
1940#ifdef __BIG_ENDIAN_BITFIELD
1941 uint16_t rsvd1:12;
1942 uint16_t wd30_class:4;
1943 uint16_t xri;
1944#else /* __LITTLE_ENDIAN_BITFIELD */
1945 uint16_t xri;
1946 uint16_t wd30_class:4;
1947 uint16_t rsvd1:12;
1948#endif
1949 } f;
1950 uint32_t word;
1951} REG_WD30;
1952
1953/* Structure for MB Command UNREG_LOGIN (20) */
1954
1955typedef struct {
1956#ifdef __BIG_ENDIAN_BITFIELD
1957 uint16_t rsvd1;
1958 uint16_t rpi;
1959#else /* __LITTLE_ENDIAN_BITFIELD */
1960 uint16_t rpi;
1961 uint16_t rsvd1;
1962#endif
1963} UNREG_LOGIN_VAR;
1964
1965/* Structure for MB Command UNREG_D_ID (0x23) */
1966
1967typedef struct {
1968 uint32_t did;
1969} UNREG_D_ID_VAR;
1970
1971/* Structure for MB Command READ_LA (21) */
1972/* Structure for MB Command READ_LA64 (0x95) */
1973
1974typedef struct {
1975 uint32_t eventTag; /* Event tag */
1976#ifdef __BIG_ENDIAN_BITFIELD
1977 uint32_t rsvd1:22;
1978 uint32_t pb:1;
1979 uint32_t il:1;
1980 uint32_t attType:8;
1981#else /* __LITTLE_ENDIAN_BITFIELD */
1982 uint32_t attType:8;
1983 uint32_t il:1;
1984 uint32_t pb:1;
1985 uint32_t rsvd1:22;
1986#endif
1987
1988#define AT_RESERVED 0x00 /* Reserved - attType */
1989#define AT_LINK_UP 0x01 /* Link is up */
1990#define AT_LINK_DOWN 0x02 /* Link is down */
1991
1992#ifdef __BIG_ENDIAN_BITFIELD
1993 uint8_t granted_AL_PA;
1994 uint8_t lipAlPs;
1995 uint8_t lipType;
1996 uint8_t topology;
1997#else /* __LITTLE_ENDIAN_BITFIELD */
1998 uint8_t topology;
1999 uint8_t lipType;
2000 uint8_t lipAlPs;
2001 uint8_t granted_AL_PA;
2002#endif
2003
2004#define TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
2005#define TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
2006
2007 union {
 2008 struct ulp_bde lilpBde; /* This BDE points to a 128 byte
 2009 buffer used to store the LILP
 2010 AL_PA position map */
2011 struct ulp_bde64 lilpBde64;
2012 } un;
2013
2014#ifdef __BIG_ENDIAN_BITFIELD
2015 uint32_t Dlu:1;
2016 uint32_t Dtf:1;
2017 uint32_t Drsvd2:14;
2018 uint32_t DlnkSpeed:8;
2019 uint32_t DnlPort:4;
2020 uint32_t Dtx:2;
2021 uint32_t Drx:2;
2022#else /* __LITTLE_ENDIAN_BITFIELD */
2023 uint32_t Drx:2;
2024 uint32_t Dtx:2;
2025 uint32_t DnlPort:4;
2026 uint32_t DlnkSpeed:8;
2027 uint32_t Drsvd2:14;
2028 uint32_t Dtf:1;
2029 uint32_t Dlu:1;
2030#endif
2031
2032#ifdef __BIG_ENDIAN_BITFIELD
2033 uint32_t Ulu:1;
2034 uint32_t Utf:1;
2035 uint32_t Ursvd2:14;
2036 uint32_t UlnkSpeed:8;
2037 uint32_t UnlPort:4;
2038 uint32_t Utx:2;
2039 uint32_t Urx:2;
2040#else /* __LITTLE_ENDIAN_BITFIELD */
2041 uint32_t Urx:2;
2042 uint32_t Utx:2;
2043 uint32_t UnlPort:4;
2044 uint32_t UlnkSpeed:8;
2045 uint32_t Ursvd2:14;
2046 uint32_t Utf:1;
2047 uint32_t Ulu:1;
2048#endif
2049
2050#define LA_UNKNW_LINK 0x0 /* lnkSpeed */
2051#define LA_1GHZ_LINK 0x04 /* lnkSpeed */
2052#define LA_2GHZ_LINK 0x08 /* lnkSpeed */
2053#define LA_4GHZ_LINK 0x10 /* lnkSpeed */
2054#define LA_8GHZ_LINK 0x20 /* lnkSpeed */
2055#define LA_10GHZ_LINK 0x40 /* lnkSpeed */
2056
2057} READ_LA_VAR;
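
/*
 * Editor's illustration: decoding a completed READ_LA with the attType
 * and topology values defined above.
 */
static inline const char *sketch_read_la_desc(const READ_LA_VAR *la)
{
	if (la->attType == AT_LINK_DOWN)
		return "link down";
	return (la->topology == TOPOLOGY_LOOP) ?
		"link up (FC-AL loop)" : "link up (pt-pt or fabric)";
}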
2058
2059/* Structure for MB Command CLEAR_LA (22) */
2060
2061typedef struct {
2062 uint32_t eventTag; /* Event tag */
2063 uint32_t rsvd1;
2064} CLEAR_LA_VAR;
2065
2066/* Structure for MB Command DUMP */
2067
2068typedef struct {
2069#ifdef __BIG_ENDIAN_BITFIELD
2070 uint32_t rsvd:25;
2071 uint32_t ra:1;
2072 uint32_t co:1;
2073 uint32_t cv:1;
2074 uint32_t type:4;
2075 uint32_t entry_index:16;
2076 uint32_t region_id:16;
2077#else /* __LITTLE_ENDIAN_BITFIELD */
2078 uint32_t type:4;
2079 uint32_t cv:1;
2080 uint32_t co:1;
2081 uint32_t ra:1;
2082 uint32_t rsvd:25;
2083 uint32_t region_id:16;
2084 uint32_t entry_index:16;
2085#endif
2086
2087 uint32_t rsvd1;
2088 uint32_t word_cnt;
2089 uint32_t resp_offset;
2090} DUMP_VAR;
2091
2092#define DMP_MEM_REG 0x1
2093#define DMP_NV_PARAMS 0x2
2094
2095#define DMP_REGION_VPD 0xe
2096#define DMP_VPD_SIZE 0x400 /* maximum amount of VPD */
2097#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
2098#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
2099
2100/* Structure for MB Command CONFIG_PORT (0x88) */
2101
2102typedef struct {
2103 uint32_t pcbLen;
2104 uint32_t pcbLow; /* bit 31:0 of memory based port config block */
2105 uint32_t pcbHigh; /* bit 63:32 of memory based port config block */
2106 uint32_t hbainit[5];
2107} CONFIG_PORT_VAR;
2108
2109/* SLI-2 Port Control Block */
2110
2111/* SLIM POINTER */
2112#define SLIMOFF 0x30 /* WORD */
2113
2114typedef struct _SLI2_RDSC {
2115 uint32_t cmdEntries;
2116 uint32_t cmdAddrLow;
2117 uint32_t cmdAddrHigh;
2118
2119 uint32_t rspEntries;
2120 uint32_t rspAddrLow;
2121 uint32_t rspAddrHigh;
2122} SLI2_RDSC;
2123
2124typedef struct _PCB {
2125#ifdef __BIG_ENDIAN_BITFIELD
2126 uint32_t type:8;
2127#define TYPE_NATIVE_SLI2 0x01
2128 uint32_t feature:8;
2129#define FEATURE_INITIAL_SLI2 0x01
2130 uint32_t rsvd:12;
2131 uint32_t maxRing:4;
2132#else /* __LITTLE_ENDIAN_BITFIELD */
2133 uint32_t maxRing:4;
2134 uint32_t rsvd:12;
2135 uint32_t feature:8;
2136#define FEATURE_INITIAL_SLI2 0x01
2137 uint32_t type:8;
2138#define TYPE_NATIVE_SLI2 0x01
2139#endif
2140
2141 uint32_t mailBoxSize;
2142 uint32_t mbAddrLow;
2143 uint32_t mbAddrHigh;
2144
2145 uint32_t hgpAddrLow;
2146 uint32_t hgpAddrHigh;
2147
2148 uint32_t pgpAddrLow;
2149 uint32_t pgpAddrHigh;
2150 SLI2_RDSC rdsc[MAX_RINGS];
2151} PCB_t;
2152
2153/* NEW_FEATURE */
2154typedef struct {
2155#ifdef __BIG_ENDIAN_BITFIELD
2156 uint32_t rsvd0:27;
2157 uint32_t discardFarp:1;
2158 uint32_t IPEnable:1;
2159 uint32_t nodeName:1;
2160 uint32_t portName:1;
2161 uint32_t filterEnable:1;
2162#else /* __LITTLE_ENDIAN_BITFIELD */
2163 uint32_t filterEnable:1;
2164 uint32_t portName:1;
2165 uint32_t nodeName:1;
2166 uint32_t IPEnable:1;
2167 uint32_t discardFarp:1;
 2168 uint32_t rsvd0:27;
2169#endif
2170
2171 uint8_t portname[8]; /* Used to be struct lpfc_name */
2172 uint8_t nodename[8];
2173 uint32_t rsvd1;
2174 uint32_t rsvd2;
2175 uint32_t rsvd3;
2176 uint32_t IPAddress;
2177} CONFIG_FARP_VAR;
2178
2179/* Union of all Mailbox Command types */
2180#define MAILBOX_CMD_WSIZE 32
2181#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
2182
2183typedef union {
2184 uint32_t varWords[MAILBOX_CMD_WSIZE - 1];
2185 LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */
2186 READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */
2187 WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */
2188 BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
2189 INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
2190 DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */
2191 CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */
2192 PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */
2193 CONFIG_RING_VAR varCfgRing; /* cmd = 9 (CONFIG_RING) */
2194 RESET_RING_VAR varRstRing; /* cmd = 10 (RESET_RING) */
2195 READ_CONFIG_VAR varRdConfig; /* cmd = 11 (READ_CONFIG) */
2196 READ_RCONF_VAR varRdRConfig; /* cmd = 12 (READ_RCONFIG) */
2197 READ_SPARM_VAR varRdSparm; /* cmd = 13 (READ_SPARM(64)) */
2198 READ_STATUS_VAR varRdStatus; /* cmd = 14 (READ_STATUS) */
2199 READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */
2200 READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */
2201 READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */
2202 READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */
2203 REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */
2204 UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */
2205 READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */
2206 CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */
2207 DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */
2208 UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */
2209 CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP) NEW_FEATURE */
2210 CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
2211} MAILVARIANTS;
2212
2213/*
2214 * SLI-2 specific structures
2215 */
2216
2217typedef struct {
2218 uint32_t cmdPutInx;
2219 uint32_t rspGetInx;
2220} HGP;
2221
2222typedef struct {
2223 uint32_t cmdGetInx;
2224 uint32_t rspPutInx;
2225} PGP;
2226
2227typedef struct _SLI2_DESC {
2228 HGP host[MAX_RINGS];
2229 uint32_t unused1[16];
2230 PGP port[MAX_RINGS];
2231} SLI2_DESC;
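
/*
 * Editor's illustration: the HGP/PGP pairs form a simple
 * producer/consumer protocol: the host advances cmdPutInx as it queues
 * command IOCBs and rspGetInx as it consumes responses, while the port
 * advances the mirror-image indexes. Under the classic circular-buffer
 * convention, a ring-full test looks like this (`entries` is the number
 * of command slots on ring `i`):
 */
static inline int sketch_cmd_ring_full(const SLI2_DESC *s2, int i,
				       uint32_t entries)
{
	uint32_t next = (s2->host[i].cmdPutInx + 1) % entries;

	return next == s2->port[i].cmdGetInx;
}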
2232
2233typedef union {
2234 SLI2_DESC s2;
2235} SLI_VAR;
2236
2237typedef struct {
2238#ifdef __BIG_ENDIAN_BITFIELD
2239 uint16_t mbxStatus;
2240 uint8_t mbxCommand;
2241 uint8_t mbxReserved:6;
2242 uint8_t mbxHc:1;
2243 uint8_t mbxOwner:1; /* Low order bit first word */
2244#else /* __LITTLE_ENDIAN_BITFIELD */
2245 uint8_t mbxOwner:1; /* Low order bit first word */
2246 uint8_t mbxHc:1;
2247 uint8_t mbxReserved:6;
2248 uint8_t mbxCommand;
2249 uint16_t mbxStatus;
2250#endif
2251
2252 MAILVARIANTS un;
2253 SLI_VAR us;
2254} MAILBOX_t;
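
/*
 * Editor's illustration (not a driver helper): preparing a mailbox
 * command amounts to zeroing the 128-byte block, setting mbxCommand and
 * filling in the matching variant. MBX_READ_STATUS is assumed to be the
 * command code defined earlier in this header.
 */
static inline void sketch_prep_read_status(MAILBOX_t *mb)
{
	memset(mb, 0, MAILBOX_CMD_SIZE);
	mb->mbxCommand = MBX_READ_STATUS;	/* assumed defined above */
	mb->un.varRdStatus.clrCounters = 1;	/* zero counters after read */
}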
2255
2256/*
2257 * Begin Structure Definitions for IOCB Commands
2258 */
2259
2260typedef struct {
2261#ifdef __BIG_ENDIAN_BITFIELD
2262 uint8_t statAction;
2263 uint8_t statRsn;
2264 uint8_t statBaExp;
2265 uint8_t statLocalError;
2266#else /* __LITTLE_ENDIAN_BITFIELD */
2267 uint8_t statLocalError;
2268 uint8_t statBaExp;
2269 uint8_t statRsn;
2270 uint8_t statAction;
2271#endif
2272 /* statRsn P/F_RJT reason codes */
2273#define RJT_BAD_D_ID 0x01 /* Invalid D_ID field */
2274#define RJT_BAD_S_ID 0x02 /* Invalid S_ID field */
2275#define RJT_UNAVAIL_TEMP 0x03 /* N_Port unavailable temp. */
2276#define RJT_UNAVAIL_PERM 0x04 /* N_Port unavailable perm. */
2277#define RJT_UNSUP_CLASS 0x05 /* Class not supported */
2278#define RJT_DELIM_ERR 0x06 /* Delimiter usage error */
2279#define RJT_UNSUP_TYPE 0x07 /* Type not supported */
2280#define RJT_BAD_CONTROL 0x08 /* Invalid link control */
2281#define RJT_BAD_RCTL 0x09 /* R_CTL invalid */
2282#define RJT_BAD_FCTL 0x0A /* F_CTL invalid */
2283#define RJT_BAD_OXID 0x0B /* OX_ID invalid */
2284#define RJT_BAD_RXID 0x0C /* RX_ID invalid */
2285#define RJT_BAD_SEQID 0x0D /* SEQ_ID invalid */
2286#define RJT_BAD_DFCTL 0x0E /* DF_CTL invalid */
2287#define RJT_BAD_SEQCNT 0x0F /* SEQ_CNT invalid */
2288#define RJT_BAD_PARM 0x10 /* Param. field invalid */
2289#define RJT_XCHG_ERR 0x11 /* Exchange error */
2290#define RJT_PROT_ERR 0x12 /* Protocol error */
2291#define RJT_BAD_LENGTH 0x13 /* Invalid Length */
2292#define RJT_UNEXPECTED_ACK 0x14 /* Unexpected ACK */
2293#define RJT_LOGIN_REQUIRED 0x16 /* Login required */
2294#define RJT_TOO_MANY_SEQ 0x17 /* Excessive sequences */
2295#define RJT_XCHG_NOT_STRT 0x18 /* Exchange not started */
2296#define RJT_UNSUP_SEC_HDR 0x19 /* Security hdr not supported */
2297#define RJT_UNAVAIL_PATH 0x1A /* Fabric Path not available */
2298#define RJT_VENDOR_UNIQUE 0xFF /* Vendor unique error */
2299
2300#define IOERR_SUCCESS 0x00 /* statLocalError */
2301#define IOERR_MISSING_CONTINUE 0x01
2302#define IOERR_SEQUENCE_TIMEOUT 0x02
2303#define IOERR_INTERNAL_ERROR 0x03
2304#define IOERR_INVALID_RPI 0x04
2305#define IOERR_NO_XRI 0x05
2306#define IOERR_ILLEGAL_COMMAND 0x06
2307#define IOERR_XCHG_DROPPED 0x07
2308#define IOERR_ILLEGAL_FIELD 0x08
2309#define IOERR_BAD_CONTINUE 0x09
2310#define IOERR_TOO_MANY_BUFFERS 0x0A
2311#define IOERR_RCV_BUFFER_WAITING 0x0B
2312#define IOERR_NO_CONNECTION 0x0C
2313#define IOERR_TX_DMA_FAILED 0x0D
2314#define IOERR_RX_DMA_FAILED 0x0E
2315#define IOERR_ILLEGAL_FRAME 0x0F
2316#define IOERR_EXTRA_DATA 0x10
2317#define IOERR_NO_RESOURCES 0x11
2318#define IOERR_RESERVED 0x12
2319#define IOERR_ILLEGAL_LENGTH 0x13
2320#define IOERR_UNSUPPORTED_FEATURE 0x14
2321#define IOERR_ABORT_IN_PROGRESS 0x15
2322#define IOERR_ABORT_REQUESTED 0x16
2323#define IOERR_RECEIVE_BUFFER_TIMEOUT 0x17
2324#define IOERR_LOOP_OPEN_FAILURE 0x18
2325#define IOERR_RING_RESET 0x19
2326#define IOERR_LINK_DOWN 0x1A
2327#define IOERR_CORRUPTED_DATA 0x1B
2328#define IOERR_CORRUPTED_RPI 0x1C
2329#define IOERR_OUT_OF_ORDER_DATA 0x1D
2330#define IOERR_OUT_OF_ORDER_ACK 0x1E
2331#define IOERR_DUP_FRAME 0x1F
2332#define IOERR_LINK_CONTROL_FRAME 0x20 /* ACK_N received */
2333#define IOERR_BAD_HOST_ADDRESS 0x21
2334#define IOERR_RCV_HDRBUF_WAITING 0x22
2335#define IOERR_MISSING_HDR_BUFFER 0x23
2336#define IOERR_MSEQ_CHAIN_CORRUPTED 0x24
2337#define IOERR_ABORTMULT_REQUESTED 0x25
2338#define IOERR_BUFFER_SHORTAGE 0x28
2339#define IOERR_DEFAULT 0x29
2340#define IOERR_CNT 0x2A
2341
2342#define IOERR_DRVR_MASK 0x100
2343#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */
2344#define IOERR_SLI_BRESET 0x102
2345#define IOERR_SLI_ABORTED 0x103
2346} PARM_ERR;
2347
2348typedef union {
2349 struct {
2350#ifdef __BIG_ENDIAN_BITFIELD
2351 uint8_t Rctl; /* R_CTL field */
2352 uint8_t Type; /* TYPE field */
2353 uint8_t Dfctl; /* DF_CTL field */
2354 uint8_t Fctl; /* Bits 0-7 of IOCB word 5 */
2355#else /* __LITTLE_ENDIAN_BITFIELD */
2356 uint8_t Fctl; /* Bits 0-7 of IOCB word 5 */
2357 uint8_t Dfctl; /* DF_CTL field */
2358 uint8_t Type; /* TYPE field */
2359 uint8_t Rctl; /* R_CTL field */
2360#endif
2361
2362#define BC 0x02 /* Broadcast Received - Fctl */
2363#define SI 0x04 /* Sequence Initiative */
2364#define LA 0x08 /* Ignore Link Attention state */
2365#define LS 0x80 /* Last Sequence */
2366 } hcsw;
2367 uint32_t reserved;
2368} WORD5;
2369
2370/* IOCB Command template for a generic response */
2371typedef struct {
2372 uint32_t reserved[4];
2373 PARM_ERR perr;
2374} GENERIC_RSP;
2375
2376/* IOCB Command template for XMIT / XMIT_BCAST / RCV_SEQUENCE / XMIT_ELS */
2377typedef struct {
2378 struct ulp_bde xrsqbde[2];
2379 uint32_t xrsqRo; /* Starting Relative Offset */
2380 WORD5 w5; /* Header control/status word */
2381} XR_SEQ_FIELDS;
2382
2383/* IOCB Command template for ELS_REQUEST */
2384typedef struct {
2385 struct ulp_bde elsReq;
2386 struct ulp_bde elsRsp;
2387
2388#ifdef __BIG_ENDIAN_BITFIELD
2389 uint32_t word4Rsvd:7;
2390 uint32_t fl:1;
2391 uint32_t myID:24;
2392 uint32_t word5Rsvd:8;
2393 uint32_t remoteID:24;
2394#else /* __LITTLE_ENDIAN_BITFIELD */
2395 uint32_t myID:24;
2396 uint32_t fl:1;
2397 uint32_t word4Rsvd:7;
2398 uint32_t remoteID:24;
2399 uint32_t word5Rsvd:8;
2400#endif
2401} ELS_REQUEST;
2402
2403/* IOCB Command template for RCV_ELS_REQ */
2404typedef struct {
2405 struct ulp_bde elsReq[2];
2406 uint32_t parmRo;
2407
2408#ifdef __BIG_ENDIAN_BITFIELD
2409 uint32_t word5Rsvd:8;
2410 uint32_t remoteID:24;
2411#else /* __LITTLE_ENDIAN_BITFIELD */
2412 uint32_t remoteID:24;
2413 uint32_t word5Rsvd:8;
2414#endif
2415} RCV_ELS_REQ;
2416
2417/* IOCB Command template for ABORT / CLOSE_XRI */
2418typedef struct {
2419 uint32_t rsvd[3];
2420 uint32_t abortType;
2421#define ABORT_TYPE_ABTX 0x00000000
2422#define ABORT_TYPE_ABTS 0x00000001
2423 uint32_t parm;
2424#ifdef __BIG_ENDIAN_BITFIELD
2425 uint16_t abortContextTag; /* ulpContext from command to abort/close */
2426 uint16_t abortIoTag; /* ulpIoTag from command to abort/close */
2427#else /* __LITTLE_ENDIAN_BITFIELD */
2428 uint16_t abortIoTag; /* ulpIoTag from command to abort/close */
2429 uint16_t abortContextTag; /* ulpContext from command to abort/close */
2430#endif
2431} AC_XRI;
2432
2433/* IOCB Command template for ABORT_MXRI64 */
2434typedef struct {
2435 uint32_t rsvd[3];
2436 uint32_t abortType;
2437 uint32_t parm;
2438 uint32_t iotag32;
2439} A_MXRI64;
2440
2441/* IOCB Command template for GET_RPI */
2442typedef struct {
2443 uint32_t rsvd[4];
2444 uint32_t parmRo;
2445#ifdef __BIG_ENDIAN_BITFIELD
2446 uint32_t word5Rsvd:8;
2447 uint32_t remoteID:24;
2448#else /* __LITTLE_ENDIAN_BITFIELD */
2449 uint32_t remoteID:24;
2450 uint32_t word5Rsvd:8;
2451#endif
2452} GET_RPI;
2453
2454/* IOCB Command template for all FCP Initiator commands */
2455typedef struct {
2456 struct ulp_bde fcpi_cmnd; /* FCP_CMND payload descriptor */
2457 struct ulp_bde fcpi_rsp; /* Rcv buffer */
2458 uint32_t fcpi_parm;
2459 uint32_t fcpi_XRdy; /* transfer ready for IWRITE */
2460} FCPI_FIELDS;
2461
2462/* IOCB Command template for all FCP Target commands */
2463typedef struct {
2464 struct ulp_bde fcpt_Buffer[2]; /* FCP_CMND payload descriptor */
2465 uint32_t fcpt_Offset;
 2466 uint32_t fcpt_Length; /* transfer length */
2467} FCPT_FIELDS;
2468
2469/* SLI-2 IOCB structure definitions */
2470
2471/* IOCB Command template for 64 bit XMIT / XMIT_BCAST / XMIT_ELS */
2472typedef struct {
2473 ULP_BDL bdl;
2474 uint32_t xrsqRo; /* Starting Relative Offset */
2475 WORD5 w5; /* Header control/status word */
2476} XMT_SEQ_FIELDS64;
2477
2478/* IOCB Command template for 64 bit RCV_SEQUENCE64 */
2479typedef struct {
2480 struct ulp_bde64 rcvBde;
2481 uint32_t rsvd1;
2482 uint32_t xrsqRo; /* Starting Relative Offset */
2483 WORD5 w5; /* Header control/status word */
2484} RCV_SEQ_FIELDS64;
2485
2486/* IOCB Command template for ELS_REQUEST64 */
2487typedef struct {
2488 ULP_BDL bdl;
2489#ifdef __BIG_ENDIAN_BITFIELD
2490 uint32_t word4Rsvd:7;
2491 uint32_t fl:1;
2492 uint32_t myID:24;
2493 uint32_t word5Rsvd:8;
2494 uint32_t remoteID:24;
2495#else /* __LITTLE_ENDIAN_BITFIELD */
2496 uint32_t myID:24;
2497 uint32_t fl:1;
2498 uint32_t word4Rsvd:7;
2499 uint32_t remoteID:24;
2500 uint32_t word5Rsvd:8;
2501#endif
2502} ELS_REQUEST64;
2503
2504/* IOCB Command template for GEN_REQUEST64 */
2505typedef struct {
2506 ULP_BDL bdl;
2507 uint32_t xrsqRo; /* Starting Relative Offset */
2508 WORD5 w5; /* Header control/status word */
2509} GEN_REQUEST64;
2510
2511/* IOCB Command template for RCV_ELS_REQ64 */
2512typedef struct {
2513 struct ulp_bde64 elsReq;
2514 uint32_t rcvd1;
2515 uint32_t parmRo;
2516
2517#ifdef __BIG_ENDIAN_BITFIELD
2518 uint32_t word5Rsvd:8;
2519 uint32_t remoteID:24;
2520#else /* __LITTLE_ENDIAN_BITFIELD */
2521 uint32_t remoteID:24;
2522 uint32_t word5Rsvd:8;
2523#endif
2524} RCV_ELS_REQ64;
2525
2526/* IOCB Command template for all 64 bit FCP Initiator commands */
2527typedef struct {
2528 ULP_BDL bdl;
2529 uint32_t fcpi_parm;
2530 uint32_t fcpi_XRdy; /* transfer ready for IWRITE */
2531} FCPI_FIELDS64;
2532
2533/* IOCB Command template for all 64 bit FCP Target commands */
2534typedef struct {
2535 ULP_BDL bdl;
2536 uint32_t fcpt_Offset;
 2537 uint32_t fcpt_Length; /* transfer length */
2538} FCPT_FIELDS64;
2539
2540typedef struct _IOCB { /* IOCB structure */
2541 union {
2542 GENERIC_RSP grsp; /* Generic response */
2543 XR_SEQ_FIELDS xrseq; /* XMIT / BCAST / RCV_SEQUENCE cmd */
2544 struct ulp_bde cont[3]; /* up to 3 continuation bdes */
2545 RCV_ELS_REQ rcvels; /* RCV_ELS_REQ template */
2546 AC_XRI acxri; /* ABORT / CLOSE_XRI template */
2547 A_MXRI64 amxri; /* abort multiple xri command overlay */
2548 GET_RPI getrpi; /* GET_RPI template */
2549 FCPI_FIELDS fcpi; /* FCP Initiator template */
2550 FCPT_FIELDS fcpt; /* FCP target template */
2551
2552 /* SLI-2 structures */
2553
2554 struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation
2555 bde_64s */
2556 ELS_REQUEST64 elsreq64; /* ELS_REQUEST template */
2557 GEN_REQUEST64 genreq64; /* GEN_REQUEST template */
2558 RCV_ELS_REQ64 rcvels64; /* RCV_ELS_REQ template */
2559 XMT_SEQ_FIELDS64 xseq64; /* XMIT / BCAST cmd */
2560 FCPI_FIELDS64 fcpi64; /* FCP 64 bit Initiator template */
2561 FCPT_FIELDS64 fcpt64; /* FCP 64 bit target template */
2562
2563 uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */
2564 } un;
2565 union {
2566 struct {
2567#ifdef __BIG_ENDIAN_BITFIELD
2568 uint16_t ulpContext; /* High order bits word 6 */
2569 uint16_t ulpIoTag; /* Low order bits word 6 */
2570#else /* __LITTLE_ENDIAN_BITFIELD */
2571 uint16_t ulpIoTag; /* Low order bits word 6 */
2572 uint16_t ulpContext; /* High order bits word 6 */
2573#endif
2574 } t1;
2575 struct {
2576#ifdef __BIG_ENDIAN_BITFIELD
2577 uint16_t ulpContext; /* High order bits word 6 */
2578 uint16_t ulpIoTag1:2; /* Low order bits word 6 */
2579 uint16_t ulpIoTag0:14; /* Low order bits word 6 */
2580#else /* __LITTLE_ENDIAN_BITFIELD */
2581 uint16_t ulpIoTag0:14; /* Low order bits word 6 */
2582 uint16_t ulpIoTag1:2; /* Low order bits word 6 */
2583 uint16_t ulpContext; /* High order bits word 6 */
2584#endif
2585 } t2;
2586 } un1;
2587#define ulpContext un1.t1.ulpContext
2588#define ulpIoTag un1.t1.ulpIoTag
2589#define ulpIoTag0 un1.t2.ulpIoTag0
2590
2591#ifdef __BIG_ENDIAN_BITFIELD
2592 uint32_t ulpTimeout:8;
2593 uint32_t ulpXS:1;
2594 uint32_t ulpFCP2Rcvy:1;
2595 uint32_t ulpPU:2;
2596 uint32_t ulpIr:1;
2597 uint32_t ulpClass:3;
2598 uint32_t ulpCommand:8;
2599 uint32_t ulpStatus:4;
2600 uint32_t ulpBdeCount:2;
2601 uint32_t ulpLe:1;
2602 uint32_t ulpOwner:1; /* Low order bit word 7 */
2603#else /* __LITTLE_ENDIAN_BITFIELD */
2604 uint32_t ulpOwner:1; /* Low order bit word 7 */
2605 uint32_t ulpLe:1;
2606 uint32_t ulpBdeCount:2;
2607 uint32_t ulpStatus:4;
2608 uint32_t ulpCommand:8;
2609 uint32_t ulpClass:3;
2610 uint32_t ulpIr:1;
2611 uint32_t ulpPU:2;
2612 uint32_t ulpFCP2Rcvy:1;
2613 uint32_t ulpXS:1;
2614 uint32_t ulpTimeout:8;
2615#endif
2616
2617#define PARM_UNUSED 0 /* PU field (Word 4) not used */
2618#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */
2619#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */
2620#define CLASS1 0 /* Class 1 */
2621#define CLASS2 1 /* Class 2 */
2622#define CLASS3 2 /* Class 3 */
2623#define CLASS_FCP_INTERMIX 7 /* FCP Data->Cls 1, all else->Cls 2 */
2624
2625#define IOSTAT_SUCCESS 0x0 /* ulpStatus - HBA defined */
2626#define IOSTAT_FCP_RSP_ERROR 0x1
2627#define IOSTAT_REMOTE_STOP 0x2
2628#define IOSTAT_LOCAL_REJECT 0x3
2629#define IOSTAT_NPORT_RJT 0x4
2630#define IOSTAT_FABRIC_RJT 0x5
2631#define IOSTAT_NPORT_BSY 0x6
2632#define IOSTAT_FABRIC_BSY 0x7
2633#define IOSTAT_INTERMED_RSP 0x8
2634#define IOSTAT_LS_RJT 0x9
2635#define IOSTAT_BA_RJT 0xA
2636#define IOSTAT_RSVD1 0xB
2637#define IOSTAT_RSVD2 0xC
2638#define IOSTAT_RSVD3 0xD
2639#define IOSTAT_RSVD4 0xE
2640#define IOSTAT_RSVD5 0xF
2641#define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */
2642#define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */
2643#define IOSTAT_CNT 0x11
2644
2645} IOCB_t;
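
/*
 * Editor's illustration: a hedged sketch of populating an initiator FCP
 * command IOCB, to make the overlay above concrete. The tag and RPI
 * values are placeholders, and `bdl` is assumed to be a previously
 * built ULP_BDL describing the FCP_CMND/FCP_RSP buffer list.
 */
static inline void sketch_prep_fcp_icmnd64(IOCB_t *iocb, ULP_BDL bdl,
					   uint16_t iotag, uint16_t rpi)
{
	memset(iocb, 0, sizeof(*iocb));
	iocb->un.fcpi64.bdl = bdl;	/* FCP_CMND + FCP_RSP descriptors */
	iocb->ulpIoTag = iotag;		/* driver tag, echoed on completion */
	iocb->ulpContext = rpi;		/* RPI of the remote port */
	iocb->ulpCommand = CMD_FCP_ICMND64_CR;
	iocb->ulpClass = CLASS3;
	iocb->ulpBdeCount = 1;
	iocb->ulpLe = 1;
}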
2646
2647
2648#define SLI1_SLIM_SIZE (4 * 1024)
2649
2650/* Up to 498 IOCBs will fit into 16k:
2651 * 256 (MAILBOX_t) + 140 (PCB_t) + (32 (IOCB_t) * 498) = 16332 < 16384
2652 */
2653#define SLI2_SLIM_SIZE (16 * 1024)
2654
2655/* Maximum IOCBs that will fit in SLI2 slim */
2656#define MAX_SLI2_IOCB 498
2657
2658struct lpfc_sli2_slim {
2659 MAILBOX_t mbx;
2660 PCB_t pcb;
2661 IOCB_t IOCBs[MAX_SLI2_IOCB];
2662};
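
/*
 * Editor's illustration: the 498-IOCB arithmetic above can be checked
 * mechanically with the kernel's BUILD_BUG_ON.
 */
static inline void sketch_check_slim_size(void)
{
	BUILD_BUG_ON(sizeof(struct lpfc_sli2_slim) > SLI2_SLIM_SIZE);
}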
2663
2664/*******************************************************************
2665This function checks the PCI device ID to allow special handling for LC HBAs.
2666
2667Parameters:
2668device : struct pci_dev's device field
2669
2670return 1 => TRUE
2671 0 => FALSE
2672 *******************************************************************/
2673static inline int
2674lpfc_is_LC_HBA(unsigned short device)
2675{
2676 if ((device == PCI_DEVICE_ID_TFLY) ||
2677 (device == PCI_DEVICE_ID_PFLY) ||
2678 (device == PCI_DEVICE_ID_LP101) ||
2679 (device == PCI_DEVICE_ID_BMID) ||
2680 (device == PCI_DEVICE_ID_BSMB) ||
2681 (device == PCI_DEVICE_ID_ZMID) ||
2682 (device == PCI_DEVICE_ID_ZSMB) ||
2683 (device == PCI_DEVICE_ID_RFLY))
2684 return 1;
2685 else
2686 return 0;
2687}
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
new file mode 100644
index 000000000000..233c912b63ce
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -0,0 +1,1739 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_init.c 1.233 2005/04/13 11:59:09EDT sf_support Exp $
23 */
24
25#include <linux/blkdev.h>
26#include <linux/delay.h>
27#include <linux/dma-mapping.h>
28#include <linux/idr.h>
29#include <linux/interrupt.h>
30#include <linux/kthread.h>
31#include <linux/pci.h>
32#include <linux/spinlock.h>
33
34#include <scsi/scsi_device.h>
35#include <scsi/scsi_host.h>
36#include <scsi/scsi_transport_fc.h>
37
38#include "lpfc_hw.h"
39#include "lpfc_sli.h"
40#include "lpfc_disc.h"
41#include "lpfc_scsi.h"
42#include "lpfc.h"
43#include "lpfc_logmsg.h"
44#include "lpfc_crtn.h"
45#include "lpfc_version.h"
46
47static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *);
48static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
49static int lpfc_post_rcv_buf(struct lpfc_hba *);
50
51static struct scsi_transport_template *lpfc_transport_template = NULL;
52static DEFINE_IDR(lpfc_hba_index);
53
54/************************************************************************/
55/* */
56/* lpfc_config_port_prep */
57/* This routine will do LPFC initialization prior to the */
58/* CONFIG_PORT mailbox command. This will be initialized */
59/* as a SLI layer callback routine. */
60/* This routine returns 0 on success or -ERESTART if it wants */
61/* the SLI layer to reset the HBA and try again. Any */
62/* other return value indicates an error. */
63/* */
64/************************************************************************/
65int
66lpfc_config_port_prep(struct lpfc_hba * phba)
67{
68 lpfc_vpd_t *vp = &phba->vpd;
69 int i = 0, rc;
70 LPFC_MBOXQ_t *pmb;
71 MAILBOX_t *mb;
72 char *lpfc_vpd_data = NULL;
73 uint16_t offset = 0;
74 static char licensed[56] =
75 "key unlock for use with gnu public licensed code only\0";
76
77 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
78 if (!pmb) {
79 phba->hba_state = LPFC_HBA_ERROR;
80 return -ENOMEM;
81 }
82
83 mb = &pmb->mb;
84 phba->hba_state = LPFC_INIT_MBX_CMDS;
85
86 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
87 uint32_t *ptext = (uint32_t *) licensed;
88
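		/* Editor's note: the license string is converted to
		 * big-endian word order here before being copied into
		 * the READ_NVPARM request below. */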
89 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
90 *ptext = cpu_to_be32(*ptext);
91
92 lpfc_read_nv(phba, pmb);
93 memset((char*)mb->un.varRDnvp.rsvd3, 0,
94 sizeof (mb->un.varRDnvp.rsvd3));
95 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
96 sizeof (licensed));
97
98 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
99
100 if (rc != MBX_SUCCESS) {
101 lpfc_printf_log(phba,
102 KERN_ERR,
103 LOG_MBOX,
104 "%d:0324 Config Port initialization "
105 "error, mbxCmd x%x READ_NVPARM, "
106 "mbxStatus x%x\n",
107 phba->brd_no,
108 mb->mbxCommand, mb->mbxStatus);
109 mempool_free(pmb, phba->mbox_mem_pool);
110 return -ERESTART;
111 }
112 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
113 sizeof (mb->un.varRDnvp.nodename));
114 }
115
116 /* Setup and issue mailbox READ REV command */
117 lpfc_read_rev(phba, pmb);
118 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
119 if (rc != MBX_SUCCESS) {
120 lpfc_printf_log(phba,
121 KERN_ERR,
122 LOG_INIT,
123 "%d:0439 Adapter failed to init, mbxCmd x%x "
124 "READ_REV, mbxStatus x%x\n",
125 phba->brd_no,
126 mb->mbxCommand, mb->mbxStatus);
127 mempool_free( pmb, phba->mbox_mem_pool);
128 return -ERESTART;
129 }
130
131 /* The HBA's current state is provided by the ProgType and rr fields.
132 * Read and check the value of these fields before continuing to config
133 * this port.
134 */
135 if (mb->un.varRdRev.rr == 0 || mb->un.varRdRev.un.b.ProgType != 2) {
136 /* Old firmware */
137 vp->rev.rBit = 0;
138 lpfc_printf_log(phba,
139 KERN_ERR,
140 LOG_INIT,
141 "%d:0440 Adapter failed to init, mbxCmd x%x "
142 "READ_REV detected outdated firmware"
143 "Data: x%x\n",
144 phba->brd_no,
145 mb->mbxCommand, 0);
146 mempool_free(pmb, phba->mbox_mem_pool);
147 return -ERESTART;
148 } else {
149 vp->rev.rBit = 1;
150 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
151 memcpy(vp->rev.sli1FwName,
152 (char*)mb->un.varRdRev.sli1FwName, 16);
153 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
154 memcpy(vp->rev.sli2FwName,
155 (char *)mb->un.varRdRev.sli2FwName, 16);
156 }
157
158 /* Save information as VPD data */
159 vp->rev.biuRev = mb->un.varRdRev.biuRev;
160 vp->rev.smRev = mb->un.varRdRev.smRev;
161 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
162 vp->rev.endecRev = mb->un.varRdRev.endecRev;
163 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
164 vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
165 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
166 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
167 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
168 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
169
170 if (lpfc_is_LC_HBA(phba->pcidev->device))
171 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
172 sizeof (phba->RandomData));
173
174 /* Get the default values for Model Name and Description */
175 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
176
177 /* Get adapter VPD information */
178 pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
179 if (!pmb->context2)
180 goto out_free_mbox;
181 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
182 if (!lpfc_vpd_data)
183 goto out_free_context2;
184
185 do {
186 lpfc_dump_mem(phba, pmb, offset);
187 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
188
189 if (rc != MBX_SUCCESS) {
190 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
191 "%d:0441 VPD not present on adapter, "
192 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
193 phba->brd_no,
194 mb->mbxCommand, mb->mbxStatus);
195 kfree(lpfc_vpd_data);
196 lpfc_vpd_data = NULL;
197 break;
198 }
199
200 lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
201 mb->un.varDmp.word_cnt);
202 offset += mb->un.varDmp.word_cnt;
203 } while (mb->un.varDmp.word_cnt);
204 lpfc_parse_vpd(phba, lpfc_vpd_data);
205
206 kfree(lpfc_vpd_data);
207out_free_context2:
208 kfree(pmb->context2);
209out_free_mbox:
210 mempool_free(pmb, phba->mbox_mem_pool);
211 return 0;
212}
213
214/************************************************************************/
215/* */
216/* lpfc_config_port_post */
217/* This routine will do LPFC initialization after the */
218/* CONFIG_PORT mailbox command. This will be initialized */
219/* as a SLI layer callback routine. */
220/* This routine returns 0 on success. Any other return value */
221/* indicates an error. */
222/* */
223/************************************************************************/
224int
225lpfc_config_port_post(struct lpfc_hba * phba)
226{
227 LPFC_MBOXQ_t *pmb;
228 MAILBOX_t *mb;
229 struct lpfc_dmabuf *mp;
230 struct lpfc_sli *psli = &phba->sli;
231 uint32_t status, timeout;
232 int i, j, rc;
233
234 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
235 if (!pmb) {
236 phba->hba_state = LPFC_HBA_ERROR;
237 return -ENOMEM;
238 }
239 mb = &pmb->mb;
240
241 lpfc_config_link(phba, pmb);
242 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
243 if (rc != MBX_SUCCESS) {
244 lpfc_printf_log(phba,
245 KERN_ERR,
246 LOG_INIT,
247 "%d:0447 Adapter failed init, mbxCmd x%x "
248 "CONFIG_LINK mbxStatus x%x\n",
249 phba->brd_no,
250 mb->mbxCommand, mb->mbxStatus);
251 phba->hba_state = LPFC_HBA_ERROR;
252 mempool_free( pmb, phba->mbox_mem_pool);
253 return -EIO;
254 }
255
256 /* Get login parameters for NID. */
257 lpfc_read_sparam(phba, pmb);
258 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
259 lpfc_printf_log(phba,
260 KERN_ERR,
261 LOG_INIT,
262 "%d:0448 Adapter failed init, mbxCmd x%x "
263 "READ_SPARM mbxStatus x%x\n",
264 phba->brd_no,
265 mb->mbxCommand, mb->mbxStatus);
266 phba->hba_state = LPFC_HBA_ERROR;
267 mp = (struct lpfc_dmabuf *) pmb->context1;
268 mempool_free( pmb, phba->mbox_mem_pool);
269 lpfc_mbuf_free(phba, mp->virt, mp->phys);
270 kfree(mp);
271 return -EIO;
272 }
273
274 mp = (struct lpfc_dmabuf *) pmb->context1;
275
276 memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm));
277 lpfc_mbuf_free(phba, mp->virt, mp->phys);
278 kfree(mp);
279 pmb->context1 = NULL;
280
281 memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
282 sizeof (struct lpfc_name));
283 memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
284 sizeof (struct lpfc_name));
285 /* If no serial number in VPD data, use low 6 bytes of WWNN */
286 /* This should be consolidated into parse_vpd ? - mr */
287 if (phba->SerialNumber[0] == 0) {
288 uint8_t *outptr;
289
290 outptr = (uint8_t *) & phba->fc_nodename.IEEE[0];
291 for (i = 0; i < 12; i++) {
292 status = *outptr++;
293 j = ((status & 0xf0) >> 4);
294 if (j <= 9)
295 phba->SerialNumber[i] =
296 (char)((uint8_t) 0x30 + (uint8_t) j);
297 else
298 phba->SerialNumber[i] =
299 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
300 i++;
301 j = (status & 0xf);
302 if (j <= 9)
303 phba->SerialNumber[i] =
304 (char)((uint8_t) 0x30 + (uint8_t) j);
305 else
306 phba->SerialNumber[i] =
307 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
308 }
309 }
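	/* Editor's note: the loop above renders the six IEEE bytes of
	 * the WWNN as twelve lowercase hex digits; an equivalent
	 * compact form (assuming a 13-byte destination) would be:
	 *
	 *	snprintf(sn, sizeof(sn), "%02x%02x%02x%02x%02x%02x",
	 *		 ieee[0], ieee[1], ieee[2],
	 *		 ieee[3], ieee[4], ieee[5]);
	 */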
310
311 /* This should turn on DELAYED ABTS for ELS timeouts */
312 lpfc_set_slim(phba, pmb, 0x052198, 0x1);
313 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
314 phba->hba_state = LPFC_HBA_ERROR;
315 mempool_free( pmb, phba->mbox_mem_pool);
316 return -EIO;
317 }
318
319
320 lpfc_read_config(phba, pmb);
321 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
322 lpfc_printf_log(phba,
323 KERN_ERR,
324 LOG_INIT,
325 "%d:0453 Adapter failed to init, mbxCmd x%x "
326 "READ_CONFIG, mbxStatus x%x\n",
327 phba->brd_no,
328 mb->mbxCommand, mb->mbxStatus);
329 phba->hba_state = LPFC_HBA_ERROR;
330 mempool_free( pmb, phba->mbox_mem_pool);
331 return -EIO;
332 }
333
334 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
335 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
336 phba->cfg_hba_queue_depth =
337 mb->un.varRdConfig.max_xri + 1;
338
339 phba->lmt = mb->un.varRdConfig.lmt;
 340 /* If the HBA is not 4GB or 2GB capable, don't let the
 341 configured link speed ask for more than it supports */
342 if ((((phba->lmt & LMT_4250_10bit) != LMT_4250_10bit) &&
343 (phba->cfg_link_speed > LINK_SPEED_2G)) ||
344 (((phba->lmt & LMT_2125_10bit) != LMT_2125_10bit) &&
345 (phba->cfg_link_speed > LINK_SPEED_1G))) {
346 /* Reset link speed to auto. 1G/2GB HBA cfg'd for 4G */
347 lpfc_printf_log(phba,
348 KERN_WARNING,
349 LOG_LINK_EVENT,
350 "%d:1302 Invalid speed for this board: "
351 "Reset link speed to auto: x%x\n",
352 phba->brd_no,
353 phba->cfg_link_speed);
354 phba->cfg_link_speed = LINK_SPEED_AUTO;
355 }
356
357 phba->hba_state = LPFC_LINK_DOWN;
358
359 /* Only process IOCBs on ring 0 till hba_state is READY */
360 if (psli->ring[psli->ip_ring].cmdringaddr)
361 psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT;
362 if (psli->ring[psli->fcp_ring].cmdringaddr)
363 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
364 if (psli->ring[psli->next_ring].cmdringaddr)
365 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
366
367 /* Post receive buffers for desired rings */
368 lpfc_post_rcv_buf(phba);
369
370 /* Enable appropriate host interrupts */
371 spin_lock_irq(phba->host->host_lock);
372 status = readl(phba->HCregaddr);
373 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
374 if (psli->num_rings > 0)
375 status |= HC_R0INT_ENA;
376 if (psli->num_rings > 1)
377 status |= HC_R1INT_ENA;
378 if (psli->num_rings > 2)
379 status |= HC_R2INT_ENA;
380 if (psli->num_rings > 3)
381 status |= HC_R3INT_ENA;
382
383 writel(status, phba->HCregaddr);
384 readl(phba->HCregaddr); /* flush */
385 spin_unlock_irq(phba->host->host_lock);
386
387 /*
388 * Setup the ring 0 (els) timeout handler
389 */
390 timeout = phba->fc_ratov << 1;
391 phba->els_tmofunc.expires = jiffies + HZ * timeout;
392 add_timer(&phba->els_tmofunc);
393
394 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
395 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
396 if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) != MBX_SUCCESS) {
397 lpfc_printf_log(phba,
398 KERN_ERR,
399 LOG_INIT,
400 "%d:0454 Adapter failed to init, mbxCmd x%x "
401 "INIT_LINK, mbxStatus x%x\n",
402 phba->brd_no,
403 mb->mbxCommand, mb->mbxStatus);
404
405 /* Clear all interrupt enable conditions */
406 writel(0, phba->HCregaddr);
407 readl(phba->HCregaddr); /* flush */
408 /* Clear all pending interrupts */
409 writel(0xffffffff, phba->HAregaddr);
410 readl(phba->HAregaddr); /* flush */
411
412 phba->hba_state = LPFC_HBA_ERROR;
413 mempool_free(pmb, phba->mbox_mem_pool);
414 return -EIO;
415 }
416 /* MBOX buffer will be freed in mbox compl */
417
418 i = 0;
419 while ((phba->hba_state != LPFC_HBA_READY) ||
420 (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
421 ((phba->fc_map_cnt == 0) && (i<2)) ||
422 (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
423 /* Check every second for 30 retries. */
424 i++;
425 if (i > 30) {
426 break;
427 }
428 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
429 /* The link is down. Set linkdown timeout */
430 break;
431 }
432
433 /* Delay for 1 second to give discovery time to complete. */
434 msleep(1000);
435
436 }
437
438 /* Since num_disc_nodes keys off of PLOGI, delay a bit to let
 439 * any potential PRLIs flush through the SLI sub-system.
440 */
441 msleep(50);
442
443 return (0);
444}
445
446/************************************************************************/
447/* */
448/* lpfc_hba_down_prep */
449/* This routine will do LPFC uninitialization before the */
450/* HBA is reset when bringing down the SLI Layer. This will be */
451/* initialized as a SLI layer callback routine. */
452/* This routine returns 0 on success. Any other return value */
453/* indicates an error. */
454/* */
455/************************************************************************/
456int
457lpfc_hba_down_prep(struct lpfc_hba * phba)
458{
459 /* Disable interrupts */
460 writel(0, phba->HCregaddr);
461 readl(phba->HCregaddr); /* flush */
462
463 /* Cleanup potential discovery resources */
464 lpfc_els_flush_rscn(phba);
465 lpfc_els_flush_cmd(phba);
466 lpfc_disc_flush_list(phba);
467
468 return (0);
469}
470
471/************************************************************************/
472/* */
473/* lpfc_handle_eratt */
474/* This routine will handle processing a Host Attention */
475/* Error Status event. This will be initialized */
476/* as a SLI layer callback routine. */
477/* */
478/************************************************************************/
479void
480lpfc_handle_eratt(struct lpfc_hba * phba)
481{
482 struct lpfc_sli *psli = &phba->sli;
483 struct lpfc_sli_ring *pring;
484
485 /*
 486 * If a reset is sent to the HBA, restore the PCI configuration registers.
487 */
488 if ( phba->hba_state == LPFC_INIT_START ) {
489 mdelay(1);
490 readl(phba->HCregaddr); /* flush */
491 writel(0, phba->HCregaddr);
492 readl(phba->HCregaddr); /* flush */
493
494 /* Restore PCI cmd register */
495 pci_write_config_word(phba->pcidev,
496 PCI_COMMAND, phba->pci_cfg_value);
497 }
498
499 if (phba->work_hs & HS_FFER6) {
500 /* Re-establishing Link */
501 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
502 "%d:1301 Re-establishing Link "
503 "Data: x%x x%x x%x\n",
504 phba->brd_no, phba->work_hs,
505 phba->work_status[0], phba->work_status[1]);
506 spin_lock_irq(phba->host->host_lock);
507 phba->fc_flag |= FC_ESTABLISH_LINK;
508 spin_unlock_irq(phba->host->host_lock);
509
510 /*
 511 * The firmware stops when it triggers an error attention with
 512 * HS_FFER6, which can cause I/Os to be dropped by the firmware.
 513 * Error out the IOCBs (I/Os) on the txcmplq and let the SCSI
 514 * layer retry them after the link is re-established.
515 */
516 pring = &psli->ring[psli->fcp_ring];
517 lpfc_sli_abort_iocb_ring(phba, pring);
518
519
520 /*
521 * There was a firmware error. Take the hba offline and then
522 * attempt to restart it.
523 */
524 lpfc_offline(phba);
525 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
526 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
527 return;
528 }
529 } else {
 530 /* The if clause above forces this code path when the status
 531 * failure is a value other than FFER6. Do not take the HBA
 532 * offline twice. This is the adapter hardware error path.
533 */
534 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
535 "%d:0457 Adapter Hardware Error "
536 "Data: x%x x%x x%x\n",
537 phba->brd_no, phba->work_hs,
538 phba->work_status[0], phba->work_status[1]);
539
540 lpfc_offline(phba);
541
542 /*
543 * Restart all traffic to this host. Since the fc_transport
544 * block functions (future) were not called in lpfc_offline,
545 * don't call them here.
546 */
547 scsi_unblock_requests(phba->host);
548 }
549}
550
551/************************************************************************/
552/* */
553/* lpfc_handle_latt */
554/* This routine will handle processing a Host Attention */
555/* Link Status event. This will be initialized */
556/* as a SLI layer callback routine. */
557/* */
558/************************************************************************/
559void
560lpfc_handle_latt(struct lpfc_hba * phba)
561{
562 struct lpfc_sli *psli = &phba->sli;
563 LPFC_MBOXQ_t *pmb;
564 volatile uint32_t control;
565 struct lpfc_dmabuf *mp;
566 int rc = -ENOMEM;
567
568 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
569 if (!pmb)
570 goto lpfc_handle_latt_err_exit;
571
572 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
573 if (!mp)
574 goto lpfc_handle_latt_free_pmb;
575
576 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
577 if (!mp->virt)
578 goto lpfc_handle_latt_free_mp;
579
580 rc = -EIO;
581
582
583 psli->slistat.link_event++;
584 lpfc_read_la(phba, pmb, mp);
585 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
586 rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
587 if (rc == MBX_NOT_FINISHED)
588 goto lpfc_handle_latt_free_mp;
589
590 /* Clear Link Attention in HA REG */
591 spin_lock_irq(phba->host->host_lock);
592 writel(HA_LATT, phba->HAregaddr);
593 readl(phba->HAregaddr); /* flush */
594 spin_unlock_irq(phba->host->host_lock);
595
596 return;
597
598lpfc_handle_latt_free_mp:
599 kfree(mp);
600lpfc_handle_latt_free_pmb:
601 kfree(pmb);
602lpfc_handle_latt_err_exit:
603 /* Enable Link attention interrupts */
604 spin_lock_irq(phba->host->host_lock);
605 psli->sli_flag |= LPFC_PROCESS_LA;
606 control = readl(phba->HCregaddr);
607 control |= HC_LAINT_ENA;
608 writel(control, phba->HCregaddr);
609 readl(phba->HCregaddr); /* flush */
610
611 /* Clear Link Attention in HA REG */
612 writel(HA_LATT, phba->HAregaddr);
613 readl(phba->HAregaddr); /* flush */
614 spin_unlock_irq(phba->host->host_lock);
615 lpfc_linkdown(phba);
616 phba->hba_state = LPFC_HBA_ERROR;
617
618 /* The other case is an error from issue_mbox */
619 if (rc == -ENOMEM)
620 lpfc_printf_log(phba,
621 KERN_WARNING,
622 LOG_MBOX,
623 "%d:0300 READ_LA: no buffers\n",
624 phba->brd_no);
625
626 return;
627}
628
629/************************************************************************/
630/* */
631/* lpfc_parse_vpd */
632/* This routine will parse the VPD data */
633/* */
634/************************************************************************/
635static int
636lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd)
637{
638 uint8_t lenlo, lenhi;
639 uint32_t Length;
640 int i, j;
641 int finished = 0;
642 int index = 0;
643
644 if (!vpd)
645 return 0;
646
647 /* Vital Product */
648 lpfc_printf_log(phba,
649 KERN_INFO,
650 LOG_INIT,
651 "%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
652 phba->brd_no,
653 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
654 (uint32_t) vpd[3]);
655 do {
656 switch (vpd[index]) {
657 case 0x82:
658 index += 1;
659 lenlo = vpd[index];
660 index += 1;
661 lenhi = vpd[index];
662 index += 1;
663 i = ((((unsigned short)lenhi) << 8) + lenlo);
664 index += i;
665 break;
666 case 0x90:
667 index += 1;
668 lenlo = vpd[index];
669 index += 1;
670 lenhi = vpd[index];
671 index += 1;
672 Length = ((((unsigned short)lenhi) << 8) + lenlo);
673
674 while (Length > 0) {
675 /* Look for Serial Number */
676 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
677 index += 2;
678 i = vpd[index];
679 index += 1;
680 j = 0;
681 Length -= (3+i);
682 while(i--) {
683 phba->SerialNumber[j++] = vpd[index++];
684 if (j == 31)
685 break;
686 }
687 phba->SerialNumber[j] = 0;
688 continue;
689 }
690 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
691 phba->vpd_flag |= VPD_MODEL_DESC;
692 index += 2;
693 i = vpd[index];
694 index += 1;
695 j = 0;
696 Length -= (3+i);
697 while(i--) {
698 phba->ModelDesc[j++] = vpd[index++];
699 if (j == 255)
700 break;
701 }
702 phba->ModelDesc[j] = 0;
703 continue;
704 }
705 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
706 phba->vpd_flag |= VPD_MODEL_NAME;
707 index += 2;
708 i = vpd[index];
709 index += 1;
710 j = 0;
711 Length -= (3+i);
712 while(i--) {
713 phba->ModelName[j++] = vpd[index++];
714 if (j == 79)
715 break;
716 }
717 phba->ModelName[j] = 0;
718 continue;
719 }
720 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
721 phba->vpd_flag |= VPD_PROGRAM_TYPE;
722 index += 2;
723 i = vpd[index];
724 index += 1;
725 j = 0;
726 Length -= (3+i);
727 while(i--) {
728 phba->ProgramType[j++] = vpd[index++];
729 if (j == 255)
730 break;
731 }
732 phba->ProgramType[j] = 0;
733 continue;
734 }
735 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
736 phba->vpd_flag |= VPD_PORT;
737 index += 2;
738 i = vpd[index];
739 index += 1;
740 j = 0;
741 Length -= (3+i);
742 while(i--) {
743 phba->Port[j++] = vpd[index++];
744 if (j == 19)
745 break;
746 }
747 phba->Port[j] = 0;
748 continue;
749 }
750 else {
751 index += 2;
752 i = vpd[index];
753 index += 1;
754 index += i;
755 Length -= (3 + i);
756 }
757 }
758 finished = 0;
759 break;
760 case 0x78:
761 finished = 1;
762 break;
763 default:
764 index ++;
765 break;
766 }
767 } while (!finished && (index < 108));
768
769 return(1);
770}
771
772static void
773lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
774{
775 lpfc_vpd_t *vp;
776 uint32_t id;
777	 char str[16] = "unknown 1"; /* fallback if no device ID matches below */
778
779 vp = &phba->vpd;
780 pci_read_config_dword(phba->pcidev, PCI_VENDOR_ID, &id);
781
782 switch ((id >> 16) & 0xffff) {
783 case PCI_DEVICE_ID_SUPERFLY:
784 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
785 strcpy(str, "LP7000 1");
786 else
787 strcpy(str, "LP7000E 1");
788 break;
789 case PCI_DEVICE_ID_DRAGONFLY:
790 strcpy(str, "LP8000 1");
791 break;
792 case PCI_DEVICE_ID_CENTAUR:
793 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
794 strcpy(str, "LP9002 2");
795 else
796 strcpy(str, "LP9000 1");
797 break;
798 case PCI_DEVICE_ID_RFLY:
799 strcpy(str, "LP952 2");
800 break;
801 case PCI_DEVICE_ID_PEGASUS:
802 strcpy(str, "LP9802 2");
803 break;
804 case PCI_DEVICE_ID_THOR:
805 strcpy(str, "LP10000 2");
806 break;
807 case PCI_DEVICE_ID_VIPER:
808 strcpy(str, "LPX1000 10");
809 break;
810 case PCI_DEVICE_ID_PFLY:
811 strcpy(str, "LP982 2");
812 break;
813 case PCI_DEVICE_ID_TFLY:
814 strcpy(str, "LP1050 2");
815 break;
816 case PCI_DEVICE_ID_HELIOS:
817 strcpy(str, "LP11000 4");
818 break;
819 case PCI_DEVICE_ID_BMID:
820 strcpy(str, "LP1150 4");
821 break;
822 case PCI_DEVICE_ID_BSMB:
823 strcpy(str, "LP111 4");
824 break;
825 case PCI_DEVICE_ID_ZEPHYR:
826 strcpy(str, "LP11000e 4");
827 break;
828 case PCI_DEVICE_ID_ZMID:
829 strcpy(str, "LP1150e 4");
830 break;
831 case PCI_DEVICE_ID_ZSMB:
832 strcpy(str, "LP111e 4");
833 break;
834 case PCI_DEVICE_ID_LP101:
835 strcpy(str, "LP101 2");
836 break;
837 case PCI_DEVICE_ID_LP10000S:
838 strcpy(str, "LP10000-S 2");
839 break;
840 }
841 if (mdp)
842 sscanf(str, "%s", mdp);
843 if (descp)
844 sprintf(descp, "Emulex LightPulse %s Gigabit PCI Fibre "
845 "Channel Adapter", str);
846}
847
848/**************************************************/
849/* lpfc_post_buffer */
850/* */
851/* This routine will post count buffers to the */
852/* ring with the QUE_RING_BUF64_CN command. This */
853/* allows 2 buffers / command to be posted. */
854/* Returns the number of buffers NOT posted. */
855/**************************************************/
856int
857lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
858 int type)
859{
860 IOCB_t *icmd;
861 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
862 struct lpfc_iocbq *iocb = NULL;
863 struct lpfc_dmabuf *mp1, *mp2;
864
865 cnt += pring->missbufcnt;
866
867 /* While there are buffers to post */
868 while (cnt > 0) {
869 /* Allocate buffer for command iocb */
870 spin_lock_irq(phba->host->host_lock);
871 list_remove_head(lpfc_iocb_list, iocb, struct lpfc_iocbq, list);
872 spin_unlock_irq(phba->host->host_lock);
873 if (iocb == NULL) {
874 pring->missbufcnt = cnt;
875 return cnt;
876 }
877 memset(iocb, 0, sizeof (struct lpfc_iocbq));
878 icmd = &iocb->iocb;
879
880 /* 2 buffers can be posted per command */
881 /* Allocate buffer to post */
882 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
883 if (mp1)
884 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
885 &mp1->phys);
886	 if (!mp1 || !mp1->virt) {
887 if (mp1)
888 kfree(mp1);
889 spin_lock_irq(phba->host->host_lock);
890 list_add_tail(&iocb->list, lpfc_iocb_list);
891 spin_unlock_irq(phba->host->host_lock);
892 pring->missbufcnt = cnt;
893 return cnt;
894 }
895
896 INIT_LIST_HEAD(&mp1->list);
897 /* Allocate buffer to post */
898 if (cnt > 1) {
899 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
900 if (mp2)
901 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
902 &mp2->phys);
903	 if (!mp2 || !mp2->virt) {
904 if (mp2)
905 kfree(mp2);
906 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
907 kfree(mp1);
908 spin_lock_irq(phba->host->host_lock);
909 list_add_tail(&iocb->list, lpfc_iocb_list);
910 spin_unlock_irq(phba->host->host_lock);
911 pring->missbufcnt = cnt;
912 return cnt;
913 }
914
915 INIT_LIST_HEAD(&mp2->list);
916 } else {
917 mp2 = NULL;
918 }
919
920 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
921 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
922 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
923 icmd->ulpBdeCount = 1;
924 cnt--;
925 if (mp2) {
926 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
927 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
928 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
929 cnt--;
930 icmd->ulpBdeCount = 2;
931 }
932
933 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
934 icmd->ulpLe = 1;
935
936 spin_lock_irq(phba->host->host_lock);
937 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
938 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
939 kfree(mp1);
940 cnt++;
941 if (mp2) {
942 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
943 kfree(mp2);
944 cnt++;
945 }
946 list_add_tail(&iocb->list, lpfc_iocb_list);
947 pring->missbufcnt = cnt;
948 spin_unlock_irq(phba->host->host_lock);
949 return cnt;
950 }
951 spin_unlock_irq(phba->host->host_lock);
952 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
953 if (mp2) {
954 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
955 }
956 }
957 pring->missbufcnt = 0;
958 return 0;
959}
960
961/************************************************************************/
962/* */
963/* lpfc_post_rcv_buf */
964/* This routine posts initial rcv buffers to the configured rings */
965/* */
966/************************************************************************/
967static int
968lpfc_post_rcv_buf(struct lpfc_hba * phba)
969{
970 struct lpfc_sli *psli = &phba->sli;
971
972 /* Ring 0, ELS / CT buffers */
973 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
974 /* Ring 2 - FCP no buffers needed */
975
976 return 0;
977}
978
979#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
980
981/************************************************************************/
982/* */
983/* lpfc_sha_init */
984/* */
985/************************************************************************/
986static void
987lpfc_sha_init(uint32_t * HashResultPointer)
988{
989 HashResultPointer[0] = 0x67452301;
990 HashResultPointer[1] = 0xEFCDAB89;
991 HashResultPointer[2] = 0x98BADCFE;
992 HashResultPointer[3] = 0x10325476;
993 HashResultPointer[4] = 0xC3D2E1F0;
994}
995
996/************************************************************************/
997/* */
998/* lpfc_sha_iterate */
999/* */
1000/************************************************************************/
1001static void
1002lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1003{
1004 int t;
1005 uint32_t TEMP;
1006 uint32_t A, B, C, D, E;
1007 t = 16;
1008 do {
1009	 HashWorkingPointer[t] =
1010	 S(1,
1011	 HashWorkingPointer[t - 3] ^
1012	 HashWorkingPointer[t - 8] ^
1013	 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
1014 } while (++t <= 79);
1015 t = 0;
1016 A = HashResultPointer[0];
1017 B = HashResultPointer[1];
1018 C = HashResultPointer[2];
1019 D = HashResultPointer[3];
1020 E = HashResultPointer[4];
1021
1022 do {
1023 if (t < 20) {
1024 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
1025 } else if (t < 40) {
1026 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
1027 } else if (t < 60) {
1028 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
1029 } else {
1030 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
1031 }
1032 TEMP += S(5, A) + E + HashWorkingPointer[t];
1033 E = D;
1034 D = C;
1035 C = S(30, B);
1036 B = A;
1037 A = TEMP;
1038 } while (++t <= 79);
1039
1040 HashResultPointer[0] += A;
1041 HashResultPointer[1] += B;
1042 HashResultPointer[2] += C;
1043 HashResultPointer[3] += D;
1044 HashResultPointer[4] += E;
1045
1046}
1047
1048/************************************************************************/
1049/* */
1050/* lpfc_challenge_key */
1051/* */
1052/************************************************************************/
1053static void
1054lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
1055{
1056 *HashWorking = (*RandomChallenge ^ *HashWorking);
1057}
1058
1059/************************************************************************/
1060/* */
1061/* lpfc_hba_init */
1062/* */
1063/************************************************************************/
1064void
1065lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1066{
1067 int t;
1068 uint32_t *HashWorking;
1069 uint32_t *pwwnn = phba->wwnn;
1070
1071 HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
1072 if (!HashWorking)
1073 return;
1074
1075 memset(HashWorking, 0, (80 * sizeof(uint32_t)));
1076 HashWorking[0] = HashWorking[78] = *pwwnn++;
1077 HashWorking[1] = HashWorking[79] = *pwwnn;
1078
1079 for (t = 0; t < 7; t++)
1080 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
1081
1082 lpfc_sha_init(hbainit);
1083 lpfc_sha_iterate(hbainit, HashWorking);
1084 kfree(HashWorking);
1085}
1086
1087static void
1088lpfc_cleanup(struct lpfc_hba * phba, uint32_t save_bind)
1089{
1090 struct lpfc_nodelist *ndlp, *next_ndlp;
1091
1092 /* clean up phba - lpfc specific */
1093 lpfc_can_disctmo(phba);
1094 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
1095 nlp_listp) {
1096 lpfc_nlp_remove(phba, ndlp);
1097 }
1098
1099 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
1100 nlp_listp) {
1101 lpfc_nlp_remove(phba, ndlp);
1102 }
1103
1104 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
1105 nlp_listp) {
1106 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1107 }
1108
1109 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
1110 nlp_listp) {
1111 lpfc_nlp_remove(phba, ndlp);
1112 }
1113
1114 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1115 nlp_listp) {
1116 lpfc_nlp_remove(phba, ndlp);
1117 }
1118
1119 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_reglogin_list,
1120 nlp_listp) {
1121 lpfc_nlp_remove(phba, ndlp);
1122 }
1123
1124 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
1125 nlp_listp) {
1126 lpfc_nlp_remove(phba, ndlp);
1127 }
1128
1129 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1130 nlp_listp) {
1131 lpfc_nlp_remove(phba, ndlp);
1132 }
1133
1134 INIT_LIST_HEAD(&phba->fc_nlpmap_list);
1135 INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
1136 INIT_LIST_HEAD(&phba->fc_unused_list);
1137 INIT_LIST_HEAD(&phba->fc_plogi_list);
1138 INIT_LIST_HEAD(&phba->fc_adisc_list);
1139 INIT_LIST_HEAD(&phba->fc_reglogin_list);
1140 INIT_LIST_HEAD(&phba->fc_prli_list);
1141 INIT_LIST_HEAD(&phba->fc_npr_list);
1142
1143 phba->fc_map_cnt = 0;
1144 phba->fc_unmap_cnt = 0;
1145 phba->fc_plogi_cnt = 0;
1146 phba->fc_adisc_cnt = 0;
1147 phba->fc_reglogin_cnt = 0;
1148 phba->fc_prli_cnt = 0;
1149 phba->fc_npr_cnt = 0;
1150 phba->fc_unused_cnt= 0;
1151 return;
1152}
1153
1154static void
1155lpfc_establish_link_tmo(unsigned long ptr)
1156{
1157 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
1158 unsigned long iflag;
1159
1160
1161 /* Re-establishing Link, timer expired */
1162 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1163 "%d:1300 Re-establishing Link, timer expired "
1164 "Data: x%x x%x\n",
1165 phba->brd_no, phba->fc_flag, phba->hba_state);
1166 spin_lock_irqsave(phba->host->host_lock, iflag);
1167 phba->fc_flag &= ~FC_ESTABLISH_LINK;
1168 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1169}
1170
1171static int
1172lpfc_stop_timer(struct lpfc_hba * phba)
1173{
1174 struct lpfc_sli *psli = &phba->sli;
1175
1176 /* Instead of a timer, this has been converted to a
1177	 * deferred processing list.
1178 */
1179 while (!list_empty(&phba->freebufList)) {
1180
1181 struct lpfc_dmabuf *mp = NULL;
1182
1183 list_remove_head((&phba->freebufList), mp,
1184 struct lpfc_dmabuf, list);
1185 if (mp) {
1186 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1187 kfree(mp);
1188 }
1189 }
1190
1191 del_timer_sync(&phba->fc_estabtmo);
1192 del_timer_sync(&phba->fc_disctmo);
1193 del_timer_sync(&phba->fc_fdmitmo);
1194 del_timer_sync(&phba->els_tmofunc);
1195 psli = &phba->sli;
1196 del_timer_sync(&psli->mbox_tmo);
1197 return(1);
1198}
1199
1200int
1201lpfc_online(struct lpfc_hba * phba)
1202{
1203 if (!phba)
1204 return 0;
1205
1206 if (!(phba->fc_flag & FC_OFFLINE_MODE))
1207 return 0;
1208
1209 lpfc_printf_log(phba,
1210 KERN_WARNING,
1211 LOG_INIT,
1212 "%d:0458 Bring Adapter online\n",
1213 phba->brd_no);
1214
1215 if (!lpfc_sli_queue_setup(phba))
1216 return 1;
1217
1218 if (lpfc_sli_hba_setup(phba)) /* Initialize the HBA */
1219 return 1;
1220
1221 spin_lock_irq(phba->host->host_lock);
1222 phba->fc_flag &= ~FC_OFFLINE_MODE;
1223 spin_unlock_irq(phba->host->host_lock);
1224
1225 /*
1226 * Restart all traffic to this host. Since the fc_transport block
1227 * functions (future) were not called in lpfc_offline, don't call them
1228 * here.
1229 */
1230 scsi_unblock_requests(phba->host);
1231 return 0;
1232}
1233
1234int
1235lpfc_offline(struct lpfc_hba * phba)
1236{
1237 struct lpfc_sli_ring *pring;
1238 struct lpfc_sli *psli;
1239 unsigned long iflag;
1240 int i = 0;
1241
1242 if (!phba)
1243 return 0;
1244
1245 if (phba->fc_flag & FC_OFFLINE_MODE)
1246 return 0;
1247
1248 /*
1249 * Don't call the fc_transport block api (future). The device is
1250 * going offline and causing a timer to fire in the midlayer is
1251 * unproductive. Just block all new requests until the driver
1252 * comes back online.
1253 */
1254 scsi_block_requests(phba->host);
1255 psli = &phba->sli;
1256 pring = &psli->ring[psli->fcp_ring];
1257
1258 lpfc_linkdown(phba);
1259
1260 /* The linkdown event takes 30 seconds to timeout. */
1261 while (pring->txcmplq_cnt) {
1262 mdelay(10);
1263 if (i++ > 3000)
1264 break;
1265 }
1266
1267 /* stop all timers associated with this hba */
1268 lpfc_stop_timer(phba);
1269 phba->work_hba_events = 0;
1270
1271 lpfc_printf_log(phba,
1272 KERN_WARNING,
1273 LOG_INIT,
1274 "%d:0460 Bring Adapter offline\n",
1275 phba->brd_no);
1276
1277 /* Bring down the SLI Layer and cleanup. The HBA is offline
1278 now. */
1279 lpfc_sli_hba_down(phba);
1280 lpfc_cleanup(phba, 1);
1281 spin_lock_irqsave(phba->host->host_lock, iflag);
1282 phba->fc_flag |= FC_OFFLINE_MODE;
1283 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1284 return 0;
1285}
1286
1287/******************************************************************************
1288* Function name: lpfc_scsi_free
1289*
1290* Description: Called from lpfc_pci_remove_one to free internal driver resources
1291*
1292******************************************************************************/
1293static int
1294lpfc_scsi_free(struct lpfc_hba * phba)
1295{
1296 struct lpfc_scsi_buf *sb, *sb_next;
1297 struct lpfc_iocbq *io, *io_next;
1298
1299 spin_lock_irq(phba->host->host_lock);
1300 /* Release all the lpfc_scsi_bufs maintained by this host. */
1301 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
1302 list_del(&sb->list);
1303 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
1304 sb->dma_handle);
1305 kfree(sb);
1306 phba->total_scsi_bufs--;
1307 }
1308
1309 /* Release all the lpfc_iocbq entries maintained by this host. */
1310 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
1311 list_del(&io->list);
1312 kfree(io);
1313 phba->total_iocbq_bufs--;
1314 }
1315
1316 spin_unlock_irq(phba->host->host_lock);
1317
1318 return 0;
1319}
1320
1321
1322static int __devinit
1323lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1324{
1325 struct Scsi_Host *host;
1326 struct lpfc_hba *phba;
1327 struct lpfc_sli *psli;
1328 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
1329 unsigned long bar0map_len, bar2map_len;
1330 int error = -ENODEV, retval;
1331 int i;
1332 u64 wwname;
1333
1334 if (pci_enable_device(pdev))
1335 goto out;
1336 if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
1337 goto out_disable_device;
1338
1339 host = scsi_host_alloc(&lpfc_template,
1340 sizeof (struct lpfc_hba) + sizeof (unsigned long));
1341 if (!host)
1342 goto out_release_regions;
1343
1344 phba = (struct lpfc_hba*)host->hostdata;
1345 memset(phba, 0, sizeof (struct lpfc_hba));
1346 phba->link_stats = (void *)&phba[1];
1347 phba->host = host;
1348
1349 phba->fc_flag |= FC_LOADING;
1350 phba->pcidev = pdev;
1351
1352 /* Assign an unused board number */
1353 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
1354 goto out_put_host;
1355
1356 error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no);
1357 if (error)
1358 goto out_put_host;
1359
1360 host->unique_id = phba->brd_no;
1361
1362 INIT_LIST_HEAD(&phba->ctrspbuflist);
1363 INIT_LIST_HEAD(&phba->rnidrspbuflist);
1364 INIT_LIST_HEAD(&phba->freebufList);
1365
1366 /* Initialize timers used by driver */
1367 init_timer(&phba->fc_estabtmo);
1368 phba->fc_estabtmo.function = lpfc_establish_link_tmo;
1369 phba->fc_estabtmo.data = (unsigned long)phba;
1370 init_timer(&phba->fc_disctmo);
1371 phba->fc_disctmo.function = lpfc_disc_timeout;
1372 phba->fc_disctmo.data = (unsigned long)phba;
1373
1374 init_timer(&phba->fc_fdmitmo);
1375 phba->fc_fdmitmo.function = lpfc_fdmi_tmo;
1376 phba->fc_fdmitmo.data = (unsigned long)phba;
1377 init_timer(&phba->els_tmofunc);
1378 phba->els_tmofunc.function = lpfc_els_timeout;
1379 phba->els_tmofunc.data = (unsigned long)phba;
1380 psli = &phba->sli;
1381 init_timer(&psli->mbox_tmo);
1382 psli->mbox_tmo.function = lpfc_mbox_timeout;
1383 psli->mbox_tmo.data = (unsigned long)phba;
1384
1385 /*
1386 * Get all the module params for configuring this host and then
1387 * establish the host parameters.
1388 */
1389 lpfc_get_cfgparam(phba);
1390
1391 host->max_id = LPFC_MAX_TARGET;
1392 host->max_lun = phba->cfg_max_luns;
1393 host->this_id = -1;
1394
1395 /* Initialize all internally managed lists. */
1396 INIT_LIST_HEAD(&phba->fc_nlpmap_list);
1397 INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
1398 INIT_LIST_HEAD(&phba->fc_unused_list);
1399 INIT_LIST_HEAD(&phba->fc_plogi_list);
1400 INIT_LIST_HEAD(&phba->fc_adisc_list);
1401 INIT_LIST_HEAD(&phba->fc_reglogin_list);
1402 INIT_LIST_HEAD(&phba->fc_prli_list);
1403 INIT_LIST_HEAD(&phba->fc_npr_list);
1404
1405
1406 pci_set_master(pdev);
1407 retval = pci_set_mwi(pdev);
1408 if (retval)
1409 dev_printk(KERN_WARNING, &pdev->dev,
1410 "Warning: pci_set_mwi returned %d\n", retval);
1411
1412 if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
1413 if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
1414 goto out_idr_remove;
1415
1416 /*
1417 * Get the bus address of Bar0 and Bar2 and the number of bytes
1418 * required by each mapping.
1419 */
1420 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
1421 bar0map_len = pci_resource_len(phba->pcidev, 0);
1422
1423 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
1424 bar2map_len = pci_resource_len(phba->pcidev, 2);
1425
1426 /* Map HBA SLIM and Control Registers to a kernel virtual address. */
1427 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
1428 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
1429
1430 /* Allocate memory for SLI-2 structures */
1431 phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
1432 &phba->slim2p_mapping, GFP_KERNEL);
1433 if (!phba->slim2p)
1434 goto out_iounmap;
1435
1436
1437 /* Initialize the SLI Layer to run with lpfc HBAs. */
1438 lpfc_sli_setup(phba);
1439 lpfc_sli_queue_setup(phba);
1440
1441 error = lpfc_mem_alloc(phba);
1442 if (error)
1443 goto out_free_slim;
1444
1445 /* Initialize and populate the iocb list per host. */
1446 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
1447 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
1448 iocbq_entry = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
1449 if (iocbq_entry == NULL) {
1450 printk(KERN_ERR "%s: only allocated %d iocbs of "
1451 "expected %d count. Unloading driver.\n",
1452 __FUNCTION__, i, LPFC_IOCB_LIST_CNT);
1453 error = -ENOMEM;
1454 goto out_free_iocbq;
1455 }
1456
1457 memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq));
1458 spin_lock_irq(phba->host->host_lock);
1459 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
1460 phba->total_iocbq_bufs++;
1461 spin_unlock_irq(phba->host->host_lock);
1462 }
1463
1464 /* Initialize HBA structure */
1465 phba->fc_edtov = FF_DEF_EDTOV;
1466 phba->fc_ratov = FF_DEF_RATOV;
1467 phba->fc_altov = FF_DEF_ALTOV;
1468 phba->fc_arbtov = FF_DEF_ARBTOV;
1469
1470 INIT_LIST_HEAD(&phba->work_list);
1471 phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
1472 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
1473
1474 /* Startup the kernel thread for this host adapter. */
1475 phba->worker_thread = kthread_run(lpfc_do_work, phba,
1476 "lpfc_worker_%d", phba->brd_no);
1477 if (IS_ERR(phba->worker_thread)) {
1478 error = PTR_ERR(phba->worker_thread);
1479 goto out_free_iocbq;
1480 }
1481
1482 /* We can rely on a queue depth attribute only after SLI HBA setup */
1483 host->can_queue = phba->cfg_hba_queue_depth - 10;
1484
1485 /* Tell the midlayer we support 16 byte commands */
1486 host->max_cmd_len = 16;
1487
1488 /* Initialize the list of scsi buffers used by driver for scsi IO. */
1489 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
1490
1491 host->transportt = lpfc_transport_template;
1492 host->hostdata[0] = (unsigned long)phba;
1493 pci_set_drvdata(pdev, host);
1494 error = scsi_add_host(host, &pdev->dev);
1495 if (error)
1496 goto out_kthread_stop;
1497
1498 error = lpfc_alloc_sysfs_attr(phba);
1499 if (error)
1500 goto out_kthread_stop;
1501
1502 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ,
1503 LPFC_DRIVER_NAME, phba);
1504 if (error) {
1505 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1506 "%d:0451 Enable interrupt handler failed\n",
1507 phba->brd_no);
1508 goto out_free_sysfs_attr;
1509 }
1510 phba->MBslimaddr = phba->slim_memmap_p;
1511 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
1512 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
1513 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
1514 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
1515
1516 error = lpfc_sli_hba_setup(phba);
1517 if (error)
1518 goto out_free_irq;
1519
1520 /*
1521 * set fixed host attributes
1522	 * Must be done after lpfc_sli_hba_setup()
1523 */
1524
1525 memcpy(&wwname, &phba->fc_nodename, sizeof(u64));
1526 fc_host_node_name(host) = be64_to_cpu(wwname);
1527 memcpy(&wwname, &phba->fc_portname, sizeof(u64));
1528 fc_host_port_name(host) = be64_to_cpu(wwname);
1529 fc_host_supported_classes(host) = FC_COS_CLASS3;
1530
1531 memset(fc_host_supported_fc4s(host), 0,
1532 sizeof(fc_host_supported_fc4s(host)));
1533 fc_host_supported_fc4s(host)[2] = 1;
1534 fc_host_supported_fc4s(host)[7] = 1;
1535
1536 lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(host));
1537
1538 fc_host_supported_speeds(host) = 0;
1539 switch (FC_JEDEC_ID(phba->vpd.rev.biuRev)) {
1540 case VIPER_JEDEC_ID:
1541 fc_host_supported_speeds(host) |= FC_PORTSPEED_10GBIT;
1542 break;
1543 case HELIOS_JEDEC_ID:
1544 fc_host_supported_speeds(host) |= FC_PORTSPEED_4GBIT;
1545 /* Fall through */
1546 case CENTAUR_2G_JEDEC_ID:
1547 case PEGASUS_JEDEC_ID:
1548 case THOR_JEDEC_ID:
1549 fc_host_supported_speeds(host) |= FC_PORTSPEED_2GBIT;
1550 /* Fall through */
1551 default:
1552	 fc_host_supported_speeds(host) |= FC_PORTSPEED_1GBIT;
1553 }
1554
1555 fc_host_maxframe_size(host) =
1556 ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
1557 (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb);
1558
1559 /* This value is also unchanging */
1560 memset(fc_host_active_fc4s(host), 0,
1561 sizeof(fc_host_active_fc4s(host)));
1562 fc_host_active_fc4s(host)[2] = 1;
1563 fc_host_active_fc4s(host)[7] = 1;
1564
1565 spin_lock_irq(phba->host->host_lock);
1566 phba->fc_flag &= ~FC_LOADING;
1567 spin_unlock_irq(phba->host->host_lock);
1568 return 0;
1569
1570out_free_irq:
1571 lpfc_stop_timer(phba);
1572 phba->work_hba_events = 0;
1573 free_irq(phba->pcidev->irq, phba);
1574out_free_sysfs_attr:
1575 lpfc_free_sysfs_attr(phba);
1576out_kthread_stop:
1577 kthread_stop(phba->worker_thread);
1578out_free_iocbq:
1579 list_for_each_entry_safe(iocbq_entry, iocbq_next,
1580 &phba->lpfc_iocb_list, list) {
1581 spin_lock_irq(phba->host->host_lock);
1582 kfree(iocbq_entry);
1583 phba->total_iocbq_bufs--;
1584 spin_unlock_irq(phba->host->host_lock);
1585 }
1586 lpfc_mem_free(phba);
1587out_free_slim:
1588 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
1589 phba->slim2p_mapping);
1590out_iounmap:
1591 iounmap(phba->ctrl_regs_memmap_p);
1592 iounmap(phba->slim_memmap_p);
1593out_idr_remove:
1594 idr_remove(&lpfc_hba_index, phba->brd_no);
1595out_put_host:
1596 scsi_host_put(host);
1597out_release_regions:
1598 pci_release_regions(pdev);
1599out_disable_device:
1600 pci_disable_device(pdev);
1601out:
1602 return error;
1603}
1604
1605static void __devexit
1606lpfc_pci_remove_one(struct pci_dev *pdev)
1607{
1608 struct Scsi_Host *host = pci_get_drvdata(pdev);
1609 struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata[0];
1610 unsigned long iflag;
1611
1612 lpfc_free_sysfs_attr(phba);
1613
1614 spin_lock_irqsave(phba->host->host_lock, iflag);
1615 phba->fc_flag |= FC_UNLOADING;
1616
1617 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1618
1619 fc_remove_host(phba->host);
1620 scsi_remove_host(phba->host);
1621
1622 kthread_stop(phba->worker_thread);
1623
1624 /*
1625	 * Bring down the SLI Layer. This step disables all interrupts,
1626 * clears the rings, discards all mailbox commands, and resets
1627 * the HBA.
1628 */
1629 lpfc_sli_hba_down(phba);
1630
1631 /* Release the irq reservation */
1632 free_irq(phba->pcidev->irq, phba);
1633
1634 lpfc_cleanup(phba, 0);
1635 lpfc_stop_timer(phba);
1636 phba->work_hba_events = 0;
1637
1638 /*
1639 * Call scsi_free before mem_free since scsi bufs are released to their
1640 * corresponding pools here.
1641 */
1642 lpfc_scsi_free(phba);
1643 lpfc_mem_free(phba);
1644
1645 /* Free resources associated with SLI2 interface */
1646 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
1647 phba->slim2p, phba->slim2p_mapping);
1648
1649 /* unmap adapter SLIM and Control Registers */
1650 iounmap(phba->ctrl_regs_memmap_p);
1651 iounmap(phba->slim_memmap_p);
1652
1653 pci_release_regions(phba->pcidev);
1654 pci_disable_device(phba->pcidev);
1655
1656 idr_remove(&lpfc_hba_index, phba->brd_no);
1657 scsi_host_put(phba->host);
1658
1659 pci_set_drvdata(pdev, NULL);
1660}
1661
1662static struct pci_device_id lpfc_id_table[] = {
1663 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
1664 PCI_ANY_ID, PCI_ANY_ID, },
1665 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
1666 PCI_ANY_ID, PCI_ANY_ID, },
1667 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
1668 PCI_ANY_ID, PCI_ANY_ID, },
1669 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
1670 PCI_ANY_ID, PCI_ANY_ID, },
1671 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
1672 PCI_ANY_ID, PCI_ANY_ID, },
1673 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
1674 PCI_ANY_ID, PCI_ANY_ID, },
1675 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
1676 PCI_ANY_ID, PCI_ANY_ID, },
1677 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
1678 PCI_ANY_ID, PCI_ANY_ID, },
1679 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
1680 PCI_ANY_ID, PCI_ANY_ID, },
1681 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
1682 PCI_ANY_ID, PCI_ANY_ID, },
1683 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
1684 PCI_ANY_ID, PCI_ANY_ID, },
1685 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
1686 PCI_ANY_ID, PCI_ANY_ID, },
1687 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
1688 PCI_ANY_ID, PCI_ANY_ID, },
1689 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
1690 PCI_ANY_ID, PCI_ANY_ID, },
1691 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
1692 PCI_ANY_ID, PCI_ANY_ID, },
1693 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
1694 PCI_ANY_ID, PCI_ANY_ID, },
1695 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
1696 PCI_ANY_ID, PCI_ANY_ID, },
1697 { 0 }
1698};
1699
1700MODULE_DEVICE_TABLE(pci, lpfc_id_table);
1701
1702static struct pci_driver lpfc_driver = {
1703 .name = LPFC_DRIVER_NAME,
1704 .id_table = lpfc_id_table,
1705 .probe = lpfc_pci_probe_one,
1706 .remove = __devexit_p(lpfc_pci_remove_one),
1707};
1708
1709static int __init
1710lpfc_init(void)
1711{
1712 int error = 0;
1713
1714 printk(LPFC_MODULE_DESC "\n");
1715
1716 lpfc_transport_template =
1717 fc_attach_transport(&lpfc_transport_functions);
1718 if (!lpfc_transport_template)
1719 return -ENOMEM;
1720 error = pci_register_driver(&lpfc_driver);
1721 if (error)
1722 fc_release_transport(lpfc_transport_template);
1723
1724 return error;
1725}
1726
1727static void __exit
1728lpfc_exit(void)
1729{
1730 pci_unregister_driver(&lpfc_driver);
1731 fc_release_transport(lpfc_transport_template);
1732}
1733
1734module_init(lpfc_init);
1735module_exit(lpfc_exit);
1736MODULE_LICENSE("GPL");
1737MODULE_DESCRIPTION(LPFC_MODULE_DESC);
1738MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
1739MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
new file mode 100644
index 000000000000..a85268880fae
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -0,0 +1,41 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_logmsg.h 1.32 2005/01/25 17:52:01EST sf_support Exp $
23 */
24
25#define LOG_ELS 0x1 /* ELS events */
26#define LOG_DISCOVERY 0x2 /* Link discovery events */
27#define LOG_MBOX 0x4 /* Mailbox events */
28#define LOG_INIT 0x8 /* Initialization events */
29#define LOG_LINK_EVENT 0x10 /* Link events */
30#define LOG_IP 0x20 /* IP traffic history */
31#define LOG_FCP 0x40 /* FCP traffic history */
32#define LOG_NODE 0x80 /* Node table events */
33#define LOG_MISC 0x400 /* Miscellaneous events */
34#define LOG_SLI 0x800 /* SLI events */
35#define LOG_CHK_COND 0x1000 /* FCP Check condition flag */
36#define LOG_LIBDFC 0x2000 /* Libdfc events */
37#define LOG_ALL_MSG 0xffff /* LOG all messages */
38
39#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
40 { if (((mask) &(phba)->cfg_log_verbose) || (level[1] <= '3')) \
41 dev_printk(level, &((phba)->pcidev)->dev, fmt, ##arg); }
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
new file mode 100644
index 000000000000..8712a80fe747
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -0,0 +1,646 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_mbox.c 1.85 2005/04/13 11:59:11EDT sf_support Exp $
23 */
24
25#include <linux/blkdev.h>
26#include <linux/pci.h>
27#include <linux/interrupt.h>
28
29#include "lpfc_hw.h"
30#include "lpfc_sli.h"
31#include "lpfc_disc.h"
32#include "lpfc_scsi.h"
33#include "lpfc.h"
34#include "lpfc_logmsg.h"
35#include "lpfc_crtn.h"
36#include "lpfc_compat.h"
37
38/**********************************************/
39/* lpfc_dump_mem Issue a DUMP MEMORY */
40/* mailbox command */
41/**********************************************/
42void
43lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
44{
45 MAILBOX_t *mb;
46 void *ctx;
47
48 mb = &pmb->mb;
49 ctx = pmb->context2;
50
51 /* Setup to dump VPD region */
52 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
53 mb->mbxCommand = MBX_DUMP_MEMORY;
54 mb->un.varDmp.cv = 1;
55 mb->un.varDmp.type = DMP_NV_PARAMS;
56 mb->un.varDmp.entry_index = offset;
57 mb->un.varDmp.region_id = DMP_REGION_VPD;
58 mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
59 mb->un.varDmp.co = 0;
60 mb->un.varDmp.resp_offset = 0;
61 pmb->context2 = ctx;
62 mb->mbxOwner = OWN_HOST;
63 return;
64}
65
66/**********************************************/
67/* lpfc_read_nv Issue a READ NVPARAM */
68/* mailbox command */
69/**********************************************/
70void
71lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
72{
73 MAILBOX_t *mb;
74
75 mb = &pmb->mb;
76 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
77 mb->mbxCommand = MBX_READ_NV;
78 mb->mbxOwner = OWN_HOST;
79 return;
80}
81
82/**********************************************/
83/* lpfc_read_la Issue a READ LA */
84/* mailbox command */
85/**********************************************/
86int
87lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
88{
89 MAILBOX_t *mb;
90 struct lpfc_sli *psli;
91
92 psli = &phba->sli;
93 mb = &pmb->mb;
94 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
95
96 INIT_LIST_HEAD(&mp->list);
97 mb->mbxCommand = MBX_READ_LA64;
98 mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128;
99 mb->un.varReadLA.un.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
100 mb->un.varReadLA.un.lilpBde64.addrLow = putPaddrLow(mp->phys);
101
102 /* Save address for later completion and set the owner to host so that
103 * the FW knows this mailbox is available for processing.
104 */
105 pmb->context1 = (uint8_t *) mp;
106 mb->mbxOwner = OWN_HOST;
107 return (0);
108}
109
110/**********************************************/
111/* lpfc_clear_la Issue a CLEAR LA */
112/* mailbox command */
113/**********************************************/
114void
115lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
116{
117 MAILBOX_t *mb;
118
119 mb = &pmb->mb;
120 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
121
122 mb->un.varClearLA.eventTag = phba->fc_eventTag;
123 mb->mbxCommand = MBX_CLEAR_LA;
124 mb->mbxOwner = OWN_HOST;
125 return;
126}
127
128/**************************************************/
129/* lpfc_config_link Issue a CONFIG LINK */
130/* mailbox command */
131/**************************************************/
132void
133lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
134{
135 MAILBOX_t *mb = &pmb->mb;
136 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
137
138 /* NEW_FEATURE
139 * SLI-2, Coalescing Response Feature.
140 */
141 if (phba->cfg_cr_delay) {
142 mb->un.varCfgLnk.cr = 1;
143 mb->un.varCfgLnk.ci = 1;
144 mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
145 mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
146 }
147
148 mb->un.varCfgLnk.myId = phba->fc_myDID;
149 mb->un.varCfgLnk.edtov = phba->fc_edtov;
150 mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
151 mb->un.varCfgLnk.ratov = phba->fc_ratov;
152 mb->un.varCfgLnk.rttov = phba->fc_rttov;
153 mb->un.varCfgLnk.altov = phba->fc_altov;
154 mb->un.varCfgLnk.crtov = phba->fc_crtov;
155 mb->un.varCfgLnk.citov = phba->fc_citov;
156
157 if (phba->cfg_ack0)
158 mb->un.varCfgLnk.ack0_enable = 1;
159
160 mb->mbxCommand = MBX_CONFIG_LINK;
161 mb->mbxOwner = OWN_HOST;
162 return;
163}
164
165/**********************************************/
166/* lpfc_init_link Issue an INIT LINK */
167/* mailbox command */
168/**********************************************/
169void
170lpfc_init_link(struct lpfc_hba * phba,
171 LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
172{
173 lpfc_vpd_t *vpd;
174 struct lpfc_sli *psli;
175 MAILBOX_t *mb;
176
177 mb = &pmb->mb;
178 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
179
180 psli = &phba->sli;
181 switch (topology) {
182 case FLAGS_TOPOLOGY_MODE_LOOP_PT:
183 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
184 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
185 break;
186 case FLAGS_TOPOLOGY_MODE_PT_PT:
187 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
188 break;
189 case FLAGS_TOPOLOGY_MODE_LOOP:
190 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
191 break;
192 case FLAGS_TOPOLOGY_MODE_PT_LOOP:
193 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
194 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
195 break;
196 }
197
198 /* NEW_FEATURE
199 * Setting up the link speed
200 */
201 vpd = &phba->vpd;
202 if (vpd->rev.feaLevelHigh >= 0x02){
203 switch(linkspeed){
204 case LINK_SPEED_1G:
205 case LINK_SPEED_2G:
206 case LINK_SPEED_4G:
207 mb->un.varInitLnk.link_flags |=
208 FLAGS_LINK_SPEED;
209 mb->un.varInitLnk.link_speed = linkspeed;
210 break;
211 case LINK_SPEED_AUTO:
212 default:
213 mb->un.varInitLnk.link_speed =
214 LINK_SPEED_AUTO;
215 break;
216 }
217
218 }
219 else
220 mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
221
222 mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
223 mb->mbxOwner = OWN_HOST;
224 mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
225 return;
226}
227
228/**********************************************/
229/* lpfc_read_sparam Issue a READ SPARAM */
230/* mailbox command */
231/**********************************************/
232int
233lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
234{
235 struct lpfc_dmabuf *mp;
236 MAILBOX_t *mb;
237 struct lpfc_sli *psli;
238
239 psli = &phba->sli;
240 mb = &pmb->mb;
241 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
242
243 mb->mbxOwner = OWN_HOST;
244
245 /* Get a buffer to hold the HBAs Service Parameters */
246
247	 if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == NULL) ||
248	 ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == NULL)) {
249 if (mp)
250 kfree(mp);
251 mb->mbxCommand = MBX_READ_SPARM64;
252 /* READ_SPARAM: no buffers */
253 lpfc_printf_log(phba,
254 KERN_WARNING,
255 LOG_MBOX,
256 "%d:0301 READ_SPARAM: no buffers\n",
257 phba->brd_no);
258 return (1);
259 }
260 INIT_LIST_HEAD(&mp->list);
261 mb->mbxCommand = MBX_READ_SPARM64;
262 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
263 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
264 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
265
266 /* save address for completion */
267 pmb->context1 = mp;
268
269 return (0);
270}
271
272/********************************************/
273/* lpfc_unreg_did Issue a UNREG_DID */
274/* mailbox command */
275/********************************************/
276void
277lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
278{
279 MAILBOX_t *mb;
280
281 mb = &pmb->mb;
282 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
283
284 mb->un.varUnregDID.did = did;
285
286 mb->mbxCommand = MBX_UNREG_D_ID;
287 mb->mbxOwner = OWN_HOST;
288 return;
289}
290
291/***********************************************/
292/* lpfc_set_slim Issue a SET_SLIM mailbox */
293/* command to write slim */
294/***********************************************/
295void
296lpfc_set_slim(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t addr,
297 uint32_t value)
298{
299 MAILBOX_t *mb;
300
301 mb = &pmb->mb;
302 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
303
304 /* addr = 0x090597 is AUTO ABTS disable for ELS commands */
305 /* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
306
307 /*
308 * Always turn on DELAYED ABTS for ELS timeouts
309 */
310 if ((addr == 0x052198) && (value == 0))
311 value = 1;
312
313 mb->un.varWords[0] = addr;
314 mb->un.varWords[1] = value;
315
316 mb->mbxCommand = MBX_SET_SLIM;
317 mb->mbxOwner = OWN_HOST;
318 return;
319}
320
321/**********************************************/
322/* lpfc_read_config Issue a READ CONFIG */
323/* mailbox command */
324/**********************************************/
325void
326lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
327{
328 MAILBOX_t *mb;
329
330 mb = &pmb->mb;
331 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
332
333 mb->mbxCommand = MBX_READ_CONFIG;
334 mb->mbxOwner = OWN_HOST;
335 return;
336}
337
338/********************************************/
339/* lpfc_reg_login Issue a REG_LOGIN */
340/* mailbox command */
341/********************************************/
342int
343lpfc_reg_login(struct lpfc_hba * phba,
344 uint32_t did, uint8_t * param, LPFC_MBOXQ_t * pmb, uint32_t flag)
345{
346 uint8_t *sparam;
347 struct lpfc_dmabuf *mp;
348 MAILBOX_t *mb;
349 struct lpfc_sli *psli;
350
351 psli = &phba->sli;
352 mb = &pmb->mb;
353 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
354
355 mb->un.varRegLogin.rpi = 0;
356 mb->un.varRegLogin.did = did;
357 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
358
359 mb->mbxOwner = OWN_HOST;
360
361 /* Get a buffer to hold NPorts Service Parameters */
362 if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == NULL) ||
363	 ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == NULL)) {
364 if (mp)
365 kfree(mp);
366
367 mb->mbxCommand = MBX_REG_LOGIN64;
368 /* REG_LOGIN: no buffers */
369 lpfc_printf_log(phba,
370 KERN_WARNING,
371 LOG_MBOX,
372 "%d:0302 REG_LOGIN: no buffers Data x%x x%x\n",
373 phba->brd_no,
374 (uint32_t) did, (uint32_t) flag);
375 return (1);
376 }
377 INIT_LIST_HEAD(&mp->list);
378 sparam = mp->virt;
379
380 /* Copy param's into a new buffer */
381 memcpy(sparam, param, sizeof (struct serv_parm));
382
383 /* save address for completion */
384 pmb->context1 = (uint8_t *) mp;
385
386 mb->mbxCommand = MBX_REG_LOGIN64;
387 mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
388 mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
389 mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
390
391 return (0);
392}
393
394/**********************************************/
395/* lpfc_unreg_login Issue a UNREG_LOGIN */
396/* mailbox command */
397/**********************************************/
398void
399lpfc_unreg_login(struct lpfc_hba * phba, uint32_t rpi, LPFC_MBOXQ_t * pmb)
400{
401 MAILBOX_t *mb;
402
403 mb = &pmb->mb;
404 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
405
406 mb->un.varUnregLogin.rpi = (uint16_t) rpi;
407 mb->un.varUnregLogin.rsvd1 = 0;
408
409 mb->mbxCommand = MBX_UNREG_LOGIN;
410 mb->mbxOwner = OWN_HOST;
411 return;
412}
413
414static void
415lpfc_config_pcb_setup(struct lpfc_hba * phba)
416{
417 struct lpfc_sli *psli = &phba->sli;
418 struct lpfc_sli_ring *pring;
419 PCB_t *pcbp = &phba->slim2p->pcb;
420 dma_addr_t pdma_addr;
421 uint32_t offset;
422 uint32_t iocbCnt;
423 int i;
424
425 psli->MBhostaddr = (uint32_t *)&phba->slim2p->mbx;
426 pcbp->maxRing = (psli->num_rings - 1);
427
428 iocbCnt = 0;
429 for (i = 0; i < psli->num_rings; i++) {
430 pring = &psli->ring[i];
431 /* A ring MUST have both cmd and rsp entries defined to be
432 valid */
433 if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
434 pcbp->rdsc[i].cmdEntries = 0;
435 pcbp->rdsc[i].rspEntries = 0;
436 pcbp->rdsc[i].cmdAddrHigh = 0;
437 pcbp->rdsc[i].rspAddrHigh = 0;
438 pcbp->rdsc[i].cmdAddrLow = 0;
439 pcbp->rdsc[i].rspAddrLow = 0;
440 pring->cmdringaddr = NULL;
441 pring->rspringaddr = NULL;
442 continue;
443 }
444 /* Command ring setup for ring */
445 pring->cmdringaddr =
446 (void *)&phba->slim2p->IOCBs[iocbCnt];
447 pcbp->rdsc[i].cmdEntries = pring->numCiocb;
448
449 offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
450 (uint8_t *)phba->slim2p;
451 pdma_addr = phba->slim2p_mapping + offset;
452 pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
453 pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
454 iocbCnt += pring->numCiocb;
455
456 /* Response ring setup for ring */
457 pring->rspringaddr =
458 (void *)&phba->slim2p->IOCBs[iocbCnt];
459
460 pcbp->rdsc[i].rspEntries = pring->numRiocb;
461 offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
462 (uint8_t *)phba->slim2p;
463 pdma_addr = phba->slim2p_mapping + offset;
464 pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
465 pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
466 iocbCnt += pring->numRiocb;
467 }
468}
469
470void
471lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
472{
473 MAILBOX_t *mb;
474
475 mb = &pmb->mb;
476 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
477 mb->un.varRdRev.cv = 1;
478 mb->mbxCommand = MBX_READ_REV;
479 mb->mbxOwner = OWN_HOST;
480 return;
481}
482
483void
484lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
485{
486 int i;
487 MAILBOX_t *mb = &pmb->mb;
488 struct lpfc_sli *psli;
489 struct lpfc_sli_ring *pring;
490
491 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
492
493 mb->un.varCfgRing.ring = ring;
494 mb->un.varCfgRing.maxOrigXchg = 0;
495 mb->un.varCfgRing.maxRespXchg = 0;
496 mb->un.varCfgRing.recvNotify = 1;
497
498 psli = &phba->sli;
499 pring = &psli->ring[ring];
500 mb->un.varCfgRing.numMask = pring->num_mask;
501 mb->mbxCommand = MBX_CONFIG_RING;
502 mb->mbxOwner = OWN_HOST;
503
504 /* Is this ring configured for a specific profile */
505 if (pring->prt[0].profile) {
506 mb->un.varCfgRing.profile = pring->prt[0].profile;
507 return;
508 }
509
510 /* Otherwise we setup specific rctl / type masks for this ring */
511 for (i = 0; i < pring->num_mask; i++) {
512 mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
513 if (mb->un.varCfgRing.rrRegs[i].rval != FC_ELS_REQ)
514 mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
515 else
516 mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
517 mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
518 mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
519 }
520
521 return;
522}
523
524void
525lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
526{
527 MAILBOX_t *mb = &pmb->mb;
528 dma_addr_t pdma_addr;
529 uint32_t bar_low, bar_high;
530 size_t offset;
531 HGP hgp;
532 void __iomem *to_slim;
533
534 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
535 mb->mbxCommand = MBX_CONFIG_PORT;
536 mb->mbxOwner = OWN_HOST;
537
538 mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
539
540 offset = (uint8_t *)&phba->slim2p->pcb - (uint8_t *)phba->slim2p;
541 pdma_addr = phba->slim2p_mapping + offset;
542 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
543 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
544
545 /* Now setup pcb */
546 phba->slim2p->pcb.type = TYPE_NATIVE_SLI2;
547 phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2;
548
549 /* Setup Mailbox pointers */
550 phba->slim2p->pcb.mailBoxSize = sizeof(MAILBOX_t);
551 offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p;
552 pdma_addr = phba->slim2p_mapping + offset;
553 phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr);
554 phba->slim2p->pcb.mbAddrLow = putPaddrLow(pdma_addr);
555
556 /*
557 * Setup Host Group ring pointer.
558 *
559 * For efficiency reasons, the ring get/put pointers can be
560 * placed in adapter memory (SLIM) rather than in host memory.
561 * This allows firmware to avoid PCI reads/writes when updating
562 * and checking pointers.
563 *
564 * The firmware recognizes the use of SLIM memory by comparing
565 * the address of the get/put pointers structure with that of
566 * the SLIM BAR (BAR0).
567 *
568 * Caution: be sure to use the PCI config space value of BAR0/BAR1
569 * (the hardware's view of the base address), not the OS's
570 * value of pci_resource_start() as the OS value may be a cookie
571 * for ioremap/iomap.
572 */
573
574
575 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
576 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
577
578
579 /* mask off BAR0's flag bits 0 - 3 */
580 phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
581 (SLIMOFF*sizeof(uint32_t));
582 if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
583 phba->slim2p->pcb.hgpAddrHigh = bar_high;
584 else
585 phba->slim2p->pcb.hgpAddrHigh = 0;
586 /* write HGP data to SLIM at the required longword offset */
587 memset(&hgp, 0, sizeof(HGP));
588 to_slim = phba->MBslimaddr + (SLIMOFF*sizeof (uint32_t));
589 lpfc_memcpy_to_slim(to_slim, &hgp, sizeof (HGP));
590
591 /* Setup Port Group ring pointer */
592 offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port -
593 (uint8_t *)phba->slim2p;
594 pdma_addr = phba->slim2p_mapping + offset;
595 phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr);
596 phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr);
597
598	 /* Use callback routine to set up rings in the pcb */
599 lpfc_config_pcb_setup(phba);
600
601 /* special handling for LC HBAs */
602 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
603 uint32_t hbainit[5];
604
605 lpfc_hba_init(phba, hbainit);
606
607 memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
608 }
609
610 /* Swap PCB if needed */
611 lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb,
612 sizeof (PCB_t));
613
614 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
615 "%d:0405 Service Level Interface (SLI) 2 selected\n",
616 phba->brd_no);
617}
618
619void
620lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
621{
622 struct lpfc_sli *psli;
623
624 psli = &phba->sli;
625
626 list_add_tail(&mbq->list, &psli->mboxq);
627
628 psli->mboxq_cnt++;
629
630 return;
631}
632
633LPFC_MBOXQ_t *
634lpfc_mbox_get(struct lpfc_hba * phba)
635{
636 LPFC_MBOXQ_t *mbq = NULL;
637 struct lpfc_sli *psli = &phba->sli;
638
639 list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t,
640 list);
641 if (mbq) {
642 psli->mboxq_cnt--;
643 }
644
645 return mbq;
646}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
new file mode 100644
index 000000000000..4397e1160712
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -0,0 +1,179 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_mem.c 1.79 2005/04/13 14:25:50EDT sf_support Exp $
23 */
24
25#include <linux/mempool.h>
26#include <linux/pci.h>
27#include <linux/interrupt.h>
28
29#include "lpfc_hw.h"
30#include "lpfc_sli.h"
31#include "lpfc_disc.h"
32#include "lpfc_scsi.h"
33#include "lpfc.h"
34#include "lpfc_crtn.h"
35
36#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
37#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
38
39static void *
40lpfc_pool_kmalloc(unsigned int gfp_flags, void *data)
41{
42 return kmalloc((unsigned long)data, gfp_flags);
43}
44
45static void
46lpfc_pool_kfree(void *obj, void *data)
47{
48 kfree(obj);
49}
50
51int
52lpfc_mem_alloc(struct lpfc_hba * phba)
53{
54 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
55 int i;
56
57 phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
58 phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0);
59 if (!phba->lpfc_scsi_dma_buf_pool)
60 goto fail;
61
62 phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
63 LPFC_BPL_SIZE, 8,0);
64 if (!phba->lpfc_mbuf_pool)
65 goto fail_free_dma_buf_pool;
66
67 pool->elements = kmalloc(sizeof(struct lpfc_dmabuf) *
68 LPFC_MBUF_POOL_SIZE, GFP_KERNEL);
69 pool->max_count = 0;
70 pool->current_count = 0;
71 for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
72 pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool,
73 GFP_KERNEL, &pool->elements[i].phys);
74 if (!pool->elements[i].virt)
75 goto fail_free_mbuf_pool;
76 pool->max_count++;
77 pool->current_count++;
78 }
79
80 phba->mbox_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
81 lpfc_pool_kmalloc, lpfc_pool_kfree,
82 (void *)(unsigned long)sizeof(LPFC_MBOXQ_t));
83 if (!phba->mbox_mem_pool)
84 goto fail_free_mbuf_pool;
85
86 phba->nlp_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
87 lpfc_pool_kmalloc, lpfc_pool_kfree,
88 (void *)(unsigned long)sizeof(struct lpfc_nodelist));
89 if (!phba->nlp_mem_pool)
90 goto fail_free_mbox_pool;
91
92 return 0;
93
94 fail_free_mbox_pool:
95 mempool_destroy(phba->mbox_mem_pool);
96 fail_free_mbuf_pool:
 97 	while (i--)
98 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
99 pool->elements[i].phys);
100 kfree(pool->elements);
101 pci_pool_destroy(phba->lpfc_mbuf_pool);
102 fail_free_dma_buf_pool:
103 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
104 fail:
105 return -ENOMEM;
106}
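
lpfc_mem_alloc() unwinds through a ladder of goto labels, each failure jumping to the label that releases everything acquired before it (the pool->elements kmalloc itself is notably never checked for NULL in this version). A compact sketch of the same staged-cleanup idiom, with placeholder resources:

    /* Each acquisition gets a matching unwind label; a failure at
     * step N jumps to the label that frees steps N-1 .. 1. */
    #include <stdlib.h>

    struct ctx { void *a, *b, *c; };

    static int ctx_init(struct ctx *ctx)
    {
            ctx->a = malloc(16);
            if (!ctx->a)
                    goto fail;
            ctx->b = malloc(16);
            if (!ctx->b)
                    goto fail_free_a;
            ctx->c = malloc(16);
            if (!ctx->c)
                    goto fail_free_b;
            return 0;

    fail_free_b:
            free(ctx->b);
    fail_free_a:
            free(ctx->a);
    fail:
            return -1;
    }

    int main(void)
    {
            struct ctx ctx;

            return ctx_init(&ctx);
    }
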
107
108void
109lpfc_mem_free(struct lpfc_hba * phba)
110{
111 struct lpfc_sli *psli = &phba->sli;
112 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
113 LPFC_MBOXQ_t *mbox, *next_mbox;
114 struct lpfc_dmabuf *mp;
115 int i;
116
117 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
118 mp = (struct lpfc_dmabuf *) (mbox->context1);
119 if (mp) {
120 lpfc_mbuf_free(phba, mp->virt, mp->phys);
121 kfree(mp);
122 }
123 list_del(&mbox->list);
124 mempool_free(mbox, phba->mbox_mem_pool);
125 }
126
127 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
128 if (psli->mbox_active) {
129 mbox = psli->mbox_active;
130 mp = (struct lpfc_dmabuf *) (mbox->context1);
131 if (mp) {
132 lpfc_mbuf_free(phba, mp->virt, mp->phys);
133 kfree(mp);
134 }
135 mempool_free(mbox, phba->mbox_mem_pool);
136 psli->mbox_active = NULL;
137 }
138
139 for (i = 0; i < pool->current_count; i++)
140 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
141 pool->elements[i].phys);
142 kfree(pool->elements);
143 mempool_destroy(phba->nlp_mem_pool);
144 mempool_destroy(phba->mbox_mem_pool);
145
146 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
147 pci_pool_destroy(phba->lpfc_mbuf_pool);
148}
149
150void *
151lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
152{
153 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
154 void *ret;
155
156 ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
157
158 if (!ret && ( mem_flags & MEM_PRI) && pool->current_count) {
159 pool->current_count--;
160 ret = pool->elements[pool->current_count].virt;
161 *handle = pool->elements[pool->current_count].phys;
162 }
163 return ret;
164}
165
166void
167lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
168{
169 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
170
171 if (pool->current_count < pool->max_count) {
172 pool->elements[pool->current_count].virt = virt;
173 pool->elements[pool->current_count].phys = dma;
174 pool->current_count++;
175 } else {
176 pci_pool_free(phba->lpfc_mbuf_pool, virt, dma);
177 }
178 return;
179}
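
lpfc_mbuf_alloc() and lpfc_mbuf_free() wrap the DMA pool with a small pre-allocated reserve: an allocation that misses in the pool may dip into the reserve if the caller passed MEM_PRI, and frees refill the reserve before anything goes back to the pool. A userspace model of that safety-pool fallback (all names and sizes illustrative):

    #include <stdlib.h>

    #define POOL_MAX 4
    #define MEM_PRI  1

    static void *reserve[POOL_MAX];
    static int reserve_cnt;                       /* current_count */

    static void *buf_alloc(int flags)
    {
            void *p = malloc(64);                 /* stands in for pci_pool_alloc() */

            if (!p && (flags & MEM_PRI) && reserve_cnt)
                    p = reserve[--reserve_cnt];   /* dip into the reserve */
            return p;
    }

    static void buf_free(void *p)
    {
            if (reserve_cnt < POOL_MAX)
                    reserve[reserve_cnt++] = p;   /* refill the reserve first */
            else
                    free(p);                      /* stands in for pci_pool_free() */
    }

    int main(void)
    {
            buf_free(buf_alloc(MEM_PRI));         /* buffer ends up in the reserve */
            return reserve_cnt == 1 ? 0 : 1;
    }
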
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
new file mode 100644
index 000000000000..e7470a4738c5
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -0,0 +1,1842 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_nportdisc.c 1.179 2005/04/13 11:59:13EDT sf_support Exp $
23 */
24
25#include <linux/blkdev.h>
26#include <linux/pci.h>
27#include <linux/interrupt.h>
28
29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_fc.h>
32
33#include "lpfc_hw.h"
34#include "lpfc_sli.h"
35#include "lpfc_disc.h"
36#include "lpfc_scsi.h"
37#include "lpfc.h"
38#include "lpfc_logmsg.h"
39#include "lpfc_crtn.h"
40
41
42/* Called to verify a rcv'ed ADISC was intended for us. */
43static int
44lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
45 struct lpfc_name * nn, struct lpfc_name * pn)
46{
47 /* Compare the ADISC rsp WWNN / WWPN matches our internal node
48 * table entry for that node.
49 */
50 if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)) != 0)
51 return (0);
52
53 if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)) != 0)
54 return (0);
55
56 /* we match, return success */
57 return (1);
58}
59
60
61int
62lpfc_check_sparm(struct lpfc_hba * phba,
63 struct lpfc_nodelist * ndlp, struct serv_parm * sp,
64 uint32_t class)
65{
66 volatile struct serv_parm *hsp = &phba->fc_sparam;
67 /* First check for supported version */
68
69 /* Next check for class validity */
70 if (sp->cls1.classValid) {
71
72 if (sp->cls1.rcvDataSizeMsb > hsp->cls1.rcvDataSizeMsb)
73 sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
74 if (sp->cls1.rcvDataSizeLsb > hsp->cls1.rcvDataSizeLsb)
75 sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
76 } else if (class == CLASS1) {
77 return (0);
78 }
79
80 if (sp->cls2.classValid) {
81
82 if (sp->cls2.rcvDataSizeMsb > hsp->cls2.rcvDataSizeMsb)
83 sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
84 if (sp->cls2.rcvDataSizeLsb > hsp->cls2.rcvDataSizeLsb)
85 sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
86 } else if (class == CLASS2) {
87 return (0);
88 }
89
90 if (sp->cls3.classValid) {
91
92 if (sp->cls3.rcvDataSizeMsb > hsp->cls3.rcvDataSizeMsb)
93 sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
94 if (sp->cls3.rcvDataSizeLsb > hsp->cls3.rcvDataSizeLsb)
95 sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
96 } else if (class == CLASS3) {
97 return (0);
98 }
99
100 if (sp->cmn.bbRcvSizeMsb > hsp->cmn.bbRcvSizeMsb)
101 sp->cmn.bbRcvSizeMsb = hsp->cmn.bbRcvSizeMsb;
102 if (sp->cmn.bbRcvSizeLsb > hsp->cmn.bbRcvSizeLsb)
103 sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
104
105 /* If check is good, copy wwpn wwnn into ndlp */
106 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
107 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
108 return (1);
109}
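
lpfc_check_sparm() clamps each class's advertised receive size to our own limits field by field, because the 12-bit size travels as a split MSB/LSB byte pair (note that clamping the two bytes independently is not a strict min() of the combined 12-bit value). lpfc_rcv_plogi() below reassembles the same pair into nlp_maxframe; a one-line worked example of that reassembly:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t msb = 0x08, lsb = 0x00;   /* example wire values */
            uint16_t frame = (uint16_t)(((msb & 0x0F) << 8) | lsb);

            printf("max frame = %u bytes\n", frame);   /* 0x800 = 2048 */
            return 0;
    }
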
110
111static void *
112lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
113 struct lpfc_iocbq *cmdiocb,
114 struct lpfc_iocbq *rspiocb)
115{
116 struct lpfc_dmabuf *pcmd, *prsp;
117 uint32_t *lp;
118 void *ptr = NULL;
119 IOCB_t *irsp;
120
121 irsp = &rspiocb->iocb;
122 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
123
124 /* For lpfc_els_abort, context2 could be zero'ed to delay
125 * freeing associated memory till after ABTS completes.
126 */
127 if (pcmd) {
128 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
129 list);
130 if (prsp) {
131 lp = (uint32_t *) prsp->virt;
132 ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
133 }
134 }
135 else {
136 /* Force ulpStatus error since we are returning NULL ptr */
137 if (!(irsp->ulpStatus)) {
138 irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
139 irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
140 }
141 ptr = NULL;
142 }
143 return (ptr);
144}
145
146
147/*
148 * Free resources / clean up outstanding I/Os
149 * associated with a LPFC_NODELIST entry. This
150 * routine effectively results in a "software abort".
151 */
152int
153lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
154 int send_abts)
155{
156 struct lpfc_sli *psli;
157 struct lpfc_sli_ring *pring;
158 struct lpfc_iocbq *iocb, *next_iocb;
159 IOCB_t *icmd;
160 int found = 0;
161
162 /* Abort outstanding I/O on NPort <nlp_DID> */
163 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
164 "%d:0201 Abort outstanding I/O on NPort x%x "
165 "Data: x%x x%x x%x\n",
166 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
167 ndlp->nlp_state, ndlp->nlp_rpi);
168
169 psli = &phba->sli;
170 pring = &psli->ring[LPFC_ELS_RING];
171
172 /* First check the txq */
173 do {
174 found = 0;
175 spin_lock_irq(phba->host->host_lock);
176 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
177 /* Check to see if iocb matches the nport we are looking
178 for */
179 if ((lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))) {
180 found = 1;
 181 /* It matches, so dequeue and call compl with an
 182 error */
183 list_del(&iocb->list);
184 pring->txq_cnt--;
185 if (iocb->iocb_cmpl) {
186 icmd = &iocb->iocb;
187 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
188 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
189 spin_unlock_irq(phba->host->host_lock);
190 (iocb->iocb_cmpl) (phba, iocb, iocb);
191 spin_lock_irq(phba->host->host_lock);
192 } else {
193 list_add_tail(&iocb->list,
194 &phba->lpfc_iocb_list);
195 }
196 break;
197 }
198 }
199 spin_unlock_irq(phba->host->host_lock);
200 } while (found);
201
202 /* Everything on txcmplq will be returned by firmware
203 * with a no rpi / linkdown / abort error. For ring 0,
204 * ELS discovery, we want to get rid of it right here.
205 */
206 /* Next check the txcmplq */
207 do {
208 found = 0;
209 spin_lock_irq(phba->host->host_lock);
210 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
211 list) {
212 /* Check to see if iocb matches the nport we are looking
213 for */
214 if ((lpfc_check_sli_ndlp (phba, pring, iocb, ndlp))) {
215 found = 1;
 216 /* It matches, so dequeue and call compl with an
 217 error */
218 list_del(&iocb->list);
219 pring->txcmplq_cnt--;
220
221 icmd = &iocb->iocb;
222 /* If the driver is completing an ELS
223 * command early, flush it out of the firmware.
224 */
225 if (send_abts &&
226 (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) &&
227 (icmd->un.elsreq64.bdl.ulpIoTag32)) {
228 lpfc_sli_issue_abort_iotag32(phba,
229 pring, iocb);
230 }
231 if (iocb->iocb_cmpl) {
232 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
233 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
234 spin_unlock_irq(phba->host->host_lock);
235 (iocb->iocb_cmpl) (phba, iocb, iocb);
236 spin_lock_irq(phba->host->host_lock);
237 } else {
238 list_add_tail(&iocb->list,
239 &phba->lpfc_iocb_list);
240 }
241 break;
242 }
243 }
244 spin_unlock_irq(phba->host->host_lock);
245 } while(found);
246
247 /* If we are delaying issuing an ELS command, cancel it */
248 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
249 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
250 del_timer_sync(&ndlp->nlp_delayfunc);
251 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
252 list_del_init(&ndlp->els_retry_evt.evt_listp);
253 }
254 return (0);
255}
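
Both scans in lpfc_els_abort() follow the same locking discipline: the host lock cannot be held across an iocb_cmpl() callback, so each pass unlinks one match, drops the lock for the callback, and the outer do/while (found) restarts the walk from the head because the list may have changed while unlocked. A minimal model of that scan-restart pattern (the lock itself is reduced to comments):

    struct item { struct item *next; int match; int completed; };

    static void abort_matching(struct item **head)
    {
            int found;

            do {
                    struct item **pp = head;

                    found = 0;
                    /* lock(); */
                    while (*pp) {
                            struct item *it = *pp;

                            if (it->match) {
                                    *pp = it->next;    /* unlink under the lock */
                                    found = 1;
                                    /* unlock(); */
                                    it->completed = 1; /* stands in for iocb_cmpl() */
                                    /* lock(); */
                                    break;             /* list may have changed: rescan */
                            }
                            pp = &it->next;
                    }
                    /* unlock(); */
            } while (found);
    }

    int main(void)
    {
            struct item c = { 0, 0, 0 };
            struct item b = { &c, 1, 0 };
            struct item a = { &b, 0, 0 };
            struct item *head = &a;

            abort_matching(&head);
            return b.completed ? 0 : 1;
    }
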
256
257static int
258lpfc_rcv_plogi(struct lpfc_hba * phba,
259 struct lpfc_nodelist * ndlp,
260 struct lpfc_iocbq *cmdiocb)
261{
262 struct lpfc_dmabuf *pcmd;
263 uint32_t *lp;
264 IOCB_t *icmd;
265 struct serv_parm *sp;
266 LPFC_MBOXQ_t *mbox;
267 struct ls_rjt stat;
268 int rc;
269
270 memset(&stat, 0, sizeof (struct ls_rjt));
271 if (phba->hba_state <= LPFC_FLOGI) {
272 /* Before responding to PLOGI, check for pt2pt mode.
273 * If we are pt2pt, with an outstanding FLOGI, abort
274 * the FLOGI and resend it first.
275 */
276 if (phba->fc_flag & FC_PT2PT) {
277 lpfc_els_abort_flogi(phba);
278 if (!(phba->fc_flag & FC_PT2PT_PLOGI)) {
279 /* If the other side is supposed to initiate
280 * the PLOGI anyway, just ACC it now and
281 * move on with discovery.
282 */
283 phba->fc_edtov = FF_DEF_EDTOV;
284 phba->fc_ratov = FF_DEF_RATOV;
285 /* Start discovery - this should just do
286 CLEAR_LA */
287 lpfc_disc_start(phba);
288 }
289 else {
290 lpfc_initial_flogi(phba);
291 }
292 }
293 else {
294 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
295 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
296 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb,
297 ndlp);
298 return 0;
299 }
300 }
301 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
302 lp = (uint32_t *) pcmd->virt;
303 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
304 if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3) == 0)) {
305 /* Reject this request because invalid parameters */
306 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
307 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
308 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
309 return (0);
310 }
311 icmd = &cmdiocb->iocb;
312
313 /* PLOGI chkparm OK */
314 lpfc_printf_log(phba,
315 KERN_INFO,
316 LOG_ELS,
317 "%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
318 phba->brd_no,
319 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
320 ndlp->nlp_rpi);
321
322 if ((phba->cfg_fcp_class == 2) &&
323 (sp->cls2.classValid)) {
324 ndlp->nlp_fcp_info |= CLASS2;
325 } else {
326 ndlp->nlp_fcp_info |= CLASS3;
327 }
328 ndlp->nlp_class_sup = 0;
329 if (sp->cls1.classValid)
330 ndlp->nlp_class_sup |= FC_COS_CLASS1;
331 if (sp->cls2.classValid)
332 ndlp->nlp_class_sup |= FC_COS_CLASS2;
333 if (sp->cls3.classValid)
334 ndlp->nlp_class_sup |= FC_COS_CLASS3;
335 if (sp->cls4.classValid)
336 ndlp->nlp_class_sup |= FC_COS_CLASS4;
337 ndlp->nlp_maxframe =
338 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
339
340 /* no need to reg_login if we are already in one of these states */
341 switch(ndlp->nlp_state) {
342 case NLP_STE_NPR_NODE:
343 if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
344 break;
345 case NLP_STE_REG_LOGIN_ISSUE:
346 case NLP_STE_PRLI_ISSUE:
347 case NLP_STE_UNMAPPED_NODE:
348 case NLP_STE_MAPPED_NODE:
349 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
350 return (1);
351 }
352
353 if ((phba->fc_flag & FC_PT2PT)
354 && !(phba->fc_flag & FC_PT2PT_PLOGI)) {
355 /* rcv'ed PLOGI decides what our NPortId will be */
356 phba->fc_myDID = icmd->un.rcvels.parmRo;
357 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
358 if (mbox == NULL)
359 goto out;
360 lpfc_config_link(phba, mbox);
361 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
362 rc = lpfc_sli_issue_mbox
363 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
364 if (rc == MBX_NOT_FINISHED) {
365 mempool_free( mbox, phba->mbox_mem_pool);
366 goto out;
367 }
368
369 lpfc_can_disctmo(phba);
370 }
371 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
372 if (mbox == NULL)
373 goto out;
374
375 if (lpfc_reg_login(phba, icmd->un.rcvels.remoteID,
376 (uint8_t *) sp, mbox, 0)) {
377 mempool_free( mbox, phba->mbox_mem_pool);
378 goto out;
379 }
380
381 /* ACC PLOGI rsp command needs to execute first,
382 * queue this mbox command to be processed later.
383 */
384 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
385 mbox->context2 = ndlp;
386 ndlp->nlp_flag |= NLP_ACC_REGLOGIN;
387
388 /* If there is an outstanding PLOGI issued, abort it before
 389 * sending ACC rsp to PLOGI received.
390 */
391 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
392 /* software abort outstanding PLOGI */
393 lpfc_els_abort(phba, ndlp, 1);
394 }
395 ndlp->nlp_flag |= NLP_RCV_PLOGI;
396 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
397 return (1);
398
399out:
400 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
401 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
402 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
403 return (0);
404}
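
lpfc_rcv_plogi() preserves ordering by fully preparing the REG_LOGIN mailbox (completion handler and context set) but handing it to the ACC rather than issuing it, so REG_LOGIN is only submitted from the ACC's own completion path. A toy model of that completion-chaining; every name here is made up:

    #include <stdio.h>

    struct cmd {
            const char *name;
            void (*cmpl)(struct cmd *);
    };

    static void reg_login_cmpl(struct cmd *c)
    {
            printf("%s completed\n", c->name);
    }

    /* Completing the first command submits whatever was chained to it. */
    static void acc_cmpl(struct cmd *acc, struct cmd *chained)
    {
            printf("%s completed\n", acc->name);
            if (chained) {
                    printf("submitting %s\n", chained->name);
                    chained->cmpl(chained);      /* runs after the ACC */
            }
    }

    int main(void)
    {
            struct cmd reg = { "REG_LOGIN", reg_login_cmpl };
            struct cmd acc = { "PLOGI ACC", 0 };

            acc_cmpl(&acc, &reg);   /* ACC first, then REG_LOGIN */
            return 0;
    }
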
405
406static int
407lpfc_rcv_padisc(struct lpfc_hba * phba,
408 struct lpfc_nodelist * ndlp,
409 struct lpfc_iocbq *cmdiocb)
410{
411 struct lpfc_dmabuf *pcmd;
412 struct serv_parm *sp;
413 struct lpfc_name *pnn, *ppn;
414 struct ls_rjt stat;
415 ADISC *ap;
416 IOCB_t *icmd;
417 uint32_t *lp;
418 uint32_t cmd;
419
420 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
421 lp = (uint32_t *) pcmd->virt;
422
423 cmd = *lp++;
424 if (cmd == ELS_CMD_ADISC) {
425 ap = (ADISC *) lp;
426 pnn = (struct lpfc_name *) & ap->nodeName;
427 ppn = (struct lpfc_name *) & ap->portName;
428 } else {
429 sp = (struct serv_parm *) lp;
430 pnn = (struct lpfc_name *) & sp->nodeName;
431 ppn = (struct lpfc_name *) & sp->portName;
432 }
433
434 icmd = &cmdiocb->iocb;
435 if ((icmd->ulpStatus == 0) &&
436 (lpfc_check_adisc(phba, ndlp, pnn, ppn))) {
437 if (cmd == ELS_CMD_ADISC) {
438 lpfc_els_rsp_adisc_acc(phba, cmdiocb, ndlp);
439 }
440 else {
441 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp,
442 NULL, 0);
443 }
444 return (1);
445 }
446 /* Reject this request because invalid parameters */
447 stat.un.b.lsRjtRsvd0 = 0;
448 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
449 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
450 stat.un.b.vendorUnique = 0;
451 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
452
453 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
454 /* 1 sec timeout */
455 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
456
457 spin_lock_irq(phba->host->host_lock);
458 ndlp->nlp_flag |= NLP_DELAY_TMO;
459 spin_unlock_irq(phba->host->host_lock);
460 ndlp->nlp_state = NLP_STE_NPR_NODE;
461 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
462 return (0);
463}
464
465static int
466lpfc_rcv_logo(struct lpfc_hba * phba,
467 struct lpfc_nodelist * ndlp,
468 struct lpfc_iocbq *cmdiocb)
469{
470 /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
471 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
472 * PLOGIs during LOGO storms from a device.
473 */
474 ndlp->nlp_flag |= NLP_LOGO_ACC;
475 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
476
477 if (!(ndlp->nlp_type & NLP_FABRIC)) {
478 /* Only try to re-login if this is NOT a Fabric Node */
479 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
480 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
481 spin_lock_irq(phba->host->host_lock);
482 ndlp->nlp_flag |= NLP_DELAY_TMO;
483 spin_unlock_irq(phba->host->host_lock);
484 }
485
486 ndlp->nlp_state = NLP_STE_NPR_NODE;
487 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
488
489 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
490 /* The driver has to wait until the ACC completes before it continues
491 * processing the LOGO. The action will resume in
492 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
493 * unreg_login, the driver waits so the ACC does not get aborted.
494 */
495 return (0);
496}
497
498static void
499lpfc_rcv_prli(struct lpfc_hba * phba,
500 struct lpfc_nodelist * ndlp,
501 struct lpfc_iocbq *cmdiocb)
502{
503 struct lpfc_dmabuf *pcmd;
504 uint32_t *lp;
505 PRLI *npr;
506 struct fc_rport *rport = ndlp->rport;
507 u32 roles;
508
509 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
510 lp = (uint32_t *) pcmd->virt;
511 npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
512
513 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
514 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
515 if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
516 (npr->prliType == PRLI_FCP_TYPE)) {
517 if (npr->initiatorFunc)
518 ndlp->nlp_type |= NLP_FCP_INITIATOR;
519 if (npr->targetFunc)
520 ndlp->nlp_type |= NLP_FCP_TARGET;
521 if (npr->Retry)
522 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
523 }
524 if (rport) {
525 /* We need to update the rport role values */
526 roles = FC_RPORT_ROLE_UNKNOWN;
527 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
528 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
529 if (ndlp->nlp_type & NLP_FCP_TARGET)
530 roles |= FC_RPORT_ROLE_FCP_TARGET;
531 fc_remote_port_rolechg(rport, roles);
532 }
533}
534
535static uint32_t
536lpfc_disc_set_adisc(struct lpfc_hba * phba,
537 struct lpfc_nodelist * ndlp)
538{
539 /* Check config parameter use-adisc or FCP-2 */
540 if ((phba->cfg_use_adisc == 0) &&
541 !(phba->fc_flag & FC_RSCN_MODE)) {
542 if (!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
543 return (0);
544 }
545 spin_lock_irq(phba->host->host_lock);
546 ndlp->nlp_flag |= NLP_NPR_ADISC;
547 spin_unlock_irq(phba->host->host_lock);
548 return (1);
549}
550
551static uint32_t
552lpfc_disc_noop(struct lpfc_hba * phba,
553 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
554{
555 /* This routine does nothing, just return the current state */
556 return (ndlp->nlp_state);
557}
558
559static uint32_t
560lpfc_disc_illegal(struct lpfc_hba * phba,
561 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
562{
563 lpfc_printf_log(phba,
564 KERN_ERR,
565 LOG_DISCOVERY,
566 "%d:0253 Illegal State Transition: node x%x event x%x, "
567 "state x%x Data: x%x x%x\n",
568 phba->brd_no,
569 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
570 ndlp->nlp_flag);
571 return (ndlp->nlp_state);
572}
573
574/* Start of Discovery State Machine routines */
575
576static uint32_t
577lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba,
578 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
579{
580 struct lpfc_iocbq *cmdiocb;
581
582 cmdiocb = (struct lpfc_iocbq *) arg;
583
584 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
585 ndlp->nlp_state = NLP_STE_UNUSED_NODE;
586 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
587 return (ndlp->nlp_state);
588 }
589 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
590 return (NLP_STE_FREED_NODE);
591}
592
593static uint32_t
594lpfc_rcv_els_unused_node(struct lpfc_hba * phba,
595 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
596{
597 lpfc_issue_els_logo(phba, ndlp, 0);
598 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
599 return (ndlp->nlp_state);
600}
601
602static uint32_t
603lpfc_rcv_logo_unused_node(struct lpfc_hba * phba,
604 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
605{
606 struct lpfc_iocbq *cmdiocb;
607
608 cmdiocb = (struct lpfc_iocbq *) arg;
609
610 spin_lock_irq(phba->host->host_lock);
611 ndlp->nlp_flag |= NLP_LOGO_ACC;
612 spin_unlock_irq(phba->host->host_lock);
613 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
614 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
615
616 return (ndlp->nlp_state);
617}
618
619static uint32_t
620lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba,
621 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
622{
623 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
624 return (NLP_STE_FREED_NODE);
625}
626
627static uint32_t
628lpfc_device_rm_unused_node(struct lpfc_hba * phba,
629 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
630{
631 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
632 return (NLP_STE_FREED_NODE);
633}
634
635static uint32_t
636lpfc_rcv_plogi_plogi_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
637 void *arg, uint32_t evt)
638{
639 struct lpfc_iocbq *cmdiocb = arg;
640 struct lpfc_dmabuf *pcmd;
641 struct serv_parm *sp;
642 uint32_t *lp;
643 struct ls_rjt stat;
644 int port_cmp;
645
646 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
647 lp = (uint32_t *) pcmd->virt;
648 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
649
650 memset(&stat, 0, sizeof (struct ls_rjt));
651
652 /* For a PLOGI, we only accept if our portname is less
653 * than the remote portname.
654 */
655 phba->fc_stat.elsLogiCol++;
656 port_cmp = memcmp(&phba->fc_portname, &sp->portName,
657 sizeof (struct lpfc_name));
658
659 if (port_cmp >= 0) {
660 /* Reject this request because the remote node will accept
661 ours */
662 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
663 stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
664 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
665 }
666 else {
667 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
668 } /* if our portname was less */
669
670 return (ndlp->nlp_state);
671}
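
The collision rule above is a byte-wise WWPN comparison: when both ports PLOGI each other simultaneously, the side whose portname compares greater than or equal keeps its own PLOGI and rejects the incoming one; the lower side accepts. A tiny illustration with fabricated WWPNs:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t mine[8]   = { 0x10, 0, 0, 0, 0xc9, 0x11, 0x22, 0x33 };
            uint8_t theirs[8] = { 0x10, 0, 0, 0, 0xc9, 0xaa, 0xbb, 0xcc };

            if (memcmp(mine, theirs, sizeof(mine)) >= 0)
                    printf("reject incoming PLOGI; ours proceeds\n");
            else
                    printf("accept incoming PLOGI; abandon ours\n");
            return 0;
    }
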
672
673static uint32_t
674lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
675 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
676{
677 struct lpfc_iocbq *cmdiocb;
678
679 cmdiocb = (struct lpfc_iocbq *) arg;
680
681 /* software abort outstanding PLOGI */
682 lpfc_els_abort(phba, ndlp, 1);
683 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
684 spin_lock_irq(phba->host->host_lock);
685 ndlp->nlp_flag |= NLP_DELAY_TMO;
686 spin_unlock_irq(phba->host->host_lock);
687
688 if (evt == NLP_EVT_RCV_LOGO) {
689 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
690 }
691 else {
692 lpfc_issue_els_logo(phba, ndlp, 0);
693 }
694
 695 /* Put ndlp on npr list, set plogi timer for 1 sec */
696 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
697 ndlp->nlp_state = NLP_STE_NPR_NODE;
698 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
699
700 return (ndlp->nlp_state);
701}
702
703static uint32_t
704lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
705 struct lpfc_nodelist * ndlp, void *arg,
706 uint32_t evt)
707{
708 struct lpfc_iocbq *cmdiocb, *rspiocb;
709 struct lpfc_dmabuf *pcmd, *prsp;
710 uint32_t *lp;
711 IOCB_t *irsp;
712 struct serv_parm *sp;
713 LPFC_MBOXQ_t *mbox;
714
715 cmdiocb = (struct lpfc_iocbq *) arg;
716 rspiocb = cmdiocb->context_un.rsp_iocb;
717
718 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
719 return (ndlp->nlp_state);
720 }
721
722 irsp = &rspiocb->iocb;
723
724 if (irsp->ulpStatus)
725 goto out;
726
727 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
728
729 prsp = list_get_first(&pcmd->list,
730 struct lpfc_dmabuf,
731 list);
732 lp = (uint32_t *) prsp->virt;
733
734 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
735 if (!lpfc_check_sparm(phba, ndlp, sp, CLASS3))
736 goto out;
737
738 /* PLOGI chkparm OK */
739 lpfc_printf_log(phba,
740 KERN_INFO,
741 LOG_ELS,
742 "%d:0121 PLOGI chkparm OK "
743 "Data: x%x x%x x%x x%x\n",
744 phba->brd_no,
745 ndlp->nlp_DID, ndlp->nlp_state,
746 ndlp->nlp_flag, ndlp->nlp_rpi);
747
748 if ((phba->cfg_fcp_class == 2) &&
749 (sp->cls2.classValid)) {
750 ndlp->nlp_fcp_info |= CLASS2;
751 } else {
752 ndlp->nlp_fcp_info |= CLASS3;
753 }
754 ndlp->nlp_class_sup = 0;
755 if (sp->cls1.classValid)
756 ndlp->nlp_class_sup |= FC_COS_CLASS1;
757 if (sp->cls2.classValid)
758 ndlp->nlp_class_sup |= FC_COS_CLASS2;
759 if (sp->cls3.classValid)
760 ndlp->nlp_class_sup |= FC_COS_CLASS3;
761 if (sp->cls4.classValid)
762 ndlp->nlp_class_sup |= FC_COS_CLASS4;
763 ndlp->nlp_maxframe =
764 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
765 sp->cmn.bbRcvSizeLsb;
766
767 if (!(mbox = mempool_alloc(phba->mbox_mem_pool,
768 GFP_KERNEL)))
769 goto out;
770
771 lpfc_unreg_rpi(phba, ndlp);
772 if (lpfc_reg_login
773 (phba, irsp->un.elsreq64.remoteID,
774 (uint8_t *) sp, mbox, 0) == 0) {
775 /* set_slim mailbox command needs to
776 * execute first, queue this command to
777 * be processed later.
778 */
779 switch(ndlp->nlp_DID) {
780 case NameServer_DID:
781 mbox->mbox_cmpl =
782 lpfc_mbx_cmpl_ns_reg_login;
783 break;
784 case FDMI_DID:
785 mbox->mbox_cmpl =
786 lpfc_mbx_cmpl_fdmi_reg_login;
787 break;
788 default:
789 mbox->mbox_cmpl =
790 lpfc_mbx_cmpl_reg_login;
791 }
792 mbox->context2 = ndlp;
793 if (lpfc_sli_issue_mbox(phba, mbox,
794 (MBX_NOWAIT | MBX_STOP_IOCB))
795 != MBX_NOT_FINISHED) {
796 ndlp->nlp_state =
797 NLP_STE_REG_LOGIN_ISSUE;
798 lpfc_nlp_list(phba, ndlp,
799 NLP_REGLOGIN_LIST);
800 return (ndlp->nlp_state);
801 }
802 mempool_free(mbox, phba->mbox_mem_pool);
803 } else {
804 mempool_free(mbox, phba->mbox_mem_pool);
805 }
806
807
808 out:
809 /* Free this node since the driver cannot login or has the wrong
810 sparm */
811 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
812 return (NLP_STE_FREED_NODE);
813}
814
815static uint32_t
816lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
817 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
818{
819 /* software abort outstanding PLOGI */
820 lpfc_els_abort(phba, ndlp, 1);
821
822 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
823 return (NLP_STE_FREED_NODE);
824}
825
826static uint32_t
827lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
828 struct lpfc_nodelist * ndlp, void *arg,
829 uint32_t evt)
830{
831 /* software abort outstanding PLOGI */
832 lpfc_els_abort(phba, ndlp, 1);
833
834 ndlp->nlp_state = NLP_STE_NPR_NODE;
835 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
836 spin_lock_irq(phba->host->host_lock);
837 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
838 spin_unlock_irq(phba->host->host_lock);
839
840 return (ndlp->nlp_state);
841}
842
843static uint32_t
844lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
845 struct lpfc_nodelist * ndlp, void *arg,
846 uint32_t evt)
847{
848 struct lpfc_iocbq *cmdiocb;
849
850 /* software abort outstanding ADISC */
851 lpfc_els_abort(phba, ndlp, 1);
852
853 cmdiocb = (struct lpfc_iocbq *) arg;
854
855 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
856 return (ndlp->nlp_state);
857 }
858 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
859 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
860 lpfc_issue_els_plogi(phba, ndlp, 0);
861
862 return (ndlp->nlp_state);
863}
864
865static uint32_t
866lpfc_rcv_prli_adisc_issue(struct lpfc_hba * phba,
867 struct lpfc_nodelist * ndlp, void *arg,
868 uint32_t evt)
869{
870 struct lpfc_iocbq *cmdiocb;
871
872 cmdiocb = (struct lpfc_iocbq *) arg;
873
874 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
875 return (ndlp->nlp_state);
876}
877
878static uint32_t
879lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
880 struct lpfc_nodelist * ndlp, void *arg,
881 uint32_t evt)
882{
883 struct lpfc_iocbq *cmdiocb;
884
885 cmdiocb = (struct lpfc_iocbq *) arg;
886
887 /* software abort outstanding ADISC */
888 lpfc_els_abort(phba, ndlp, 0);
889
890 lpfc_rcv_logo(phba, ndlp, cmdiocb);
891 return (ndlp->nlp_state);
892}
893
894static uint32_t
895lpfc_rcv_padisc_adisc_issue(struct lpfc_hba * phba,
896 struct lpfc_nodelist * ndlp, void *arg,
897 uint32_t evt)
898{
899 struct lpfc_iocbq *cmdiocb;
900
901 cmdiocb = (struct lpfc_iocbq *) arg;
902
903 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
904 return (ndlp->nlp_state);
905}
906
907static uint32_t
908lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
909 struct lpfc_nodelist * ndlp, void *arg,
910 uint32_t evt)
911{
912 struct lpfc_iocbq *cmdiocb;
913
914 cmdiocb = (struct lpfc_iocbq *) arg;
915
916 /* Treat like rcv logo */
917 lpfc_rcv_logo(phba, ndlp, cmdiocb);
918 return (ndlp->nlp_state);
919}
920
921static uint32_t
922lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
923 struct lpfc_nodelist * ndlp, void *arg,
924 uint32_t evt)
925{
926 struct lpfc_iocbq *cmdiocb, *rspiocb;
927 IOCB_t *irsp;
928 ADISC *ap;
929
930 cmdiocb = (struct lpfc_iocbq *) arg;
931 rspiocb = cmdiocb->context_un.rsp_iocb;
932
933 ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
934 irsp = &rspiocb->iocb;
935
936 if ((irsp->ulpStatus) ||
937 (!lpfc_check_adisc(phba, ndlp, &ap->nodeName, &ap->portName))) {
938 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
939 /* 1 sec timeout */
940 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
941 spin_lock_irq(phba->host->host_lock);
942 ndlp->nlp_flag |= NLP_DELAY_TMO;
943 spin_unlock_irq(phba->host->host_lock);
944
945 memset(&ndlp->nlp_nodename, 0, sizeof (struct lpfc_name));
946 memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name));
947
948 ndlp->nlp_state = NLP_STE_NPR_NODE;
949 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
950 lpfc_unreg_rpi(phba, ndlp);
951 return (ndlp->nlp_state);
952 }
953 ndlp->nlp_state = NLP_STE_MAPPED_NODE;
954 lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
955 return (ndlp->nlp_state);
956}
957
958static uint32_t
959lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
960 struct lpfc_nodelist * ndlp, void *arg,
961 uint32_t evt)
962{
963 /* software abort outstanding ADISC */
964 lpfc_els_abort(phba, ndlp, 1);
965
966 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
967 return (NLP_STE_FREED_NODE);
968}
969
970static uint32_t
971lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
972 struct lpfc_nodelist * ndlp, void *arg,
973 uint32_t evt)
974{
975 /* software abort outstanding ADISC */
976 lpfc_els_abort(phba, ndlp, 1);
977
978 ndlp->nlp_state = NLP_STE_NPR_NODE;
979 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
980 spin_lock_irq(phba->host->host_lock);
981 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
982 spin_unlock_irq(phba->host->host_lock);
983
984 lpfc_disc_set_adisc(phba, ndlp);
985 return (ndlp->nlp_state);
986}
987
988static uint32_t
989lpfc_rcv_plogi_reglogin_issue(struct lpfc_hba * phba,
990 struct lpfc_nodelist * ndlp, void *arg,
991 uint32_t evt)
992{
993 struct lpfc_iocbq *cmdiocb;
994
995 cmdiocb = (struct lpfc_iocbq *) arg;
996
997 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
998 return (ndlp->nlp_state);
999}
1000
1001static uint32_t
1002lpfc_rcv_prli_reglogin_issue(struct lpfc_hba * phba,
1003 struct lpfc_nodelist * ndlp, void *arg,
1004 uint32_t evt)
1005{
1006 struct lpfc_iocbq *cmdiocb;
1007
1008 cmdiocb = (struct lpfc_iocbq *) arg;
1009
1010 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1011 return (ndlp->nlp_state);
1012}
1013
1014static uint32_t
1015lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
1016 struct lpfc_nodelist * ndlp, void *arg,
1017 uint32_t evt)
1018{
1019 struct lpfc_iocbq *cmdiocb;
1020
1021 cmdiocb = (struct lpfc_iocbq *) arg;
1022
1023 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1024 return (ndlp->nlp_state);
1025}
1026
1027static uint32_t
1028lpfc_rcv_padisc_reglogin_issue(struct lpfc_hba * phba,
1029 struct lpfc_nodelist * ndlp, void *arg,
1030 uint32_t evt)
1031{
1032 struct lpfc_iocbq *cmdiocb;
1033
1034 cmdiocb = (struct lpfc_iocbq *) arg;
1035
1036 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1037 return (ndlp->nlp_state);
1038}
1039
1040static uint32_t
1041lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
1042 struct lpfc_nodelist * ndlp, void *arg,
1043 uint32_t evt)
1044{
1045 struct lpfc_iocbq *cmdiocb;
1046
1047 cmdiocb = (struct lpfc_iocbq *) arg;
1048 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1049 return (ndlp->nlp_state);
1050}
1051
1052static uint32_t
1053lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1054 struct lpfc_nodelist * ndlp,
1055 void *arg, uint32_t evt)
1056{
1057 LPFC_MBOXQ_t *pmb;
1058 MAILBOX_t *mb;
1059 uint32_t did;
1060
1061 pmb = (LPFC_MBOXQ_t *) arg;
1062 mb = &pmb->mb;
1063 did = mb->un.varWords[1];
1064 if (mb->mbxStatus) {
1065 /* RegLogin failed */
1066 lpfc_printf_log(phba,
1067 KERN_ERR,
1068 LOG_DISCOVERY,
1069 "%d:0246 RegLogin failed Data: x%x x%x x%x\n",
1070 phba->brd_no,
1071 did, mb->mbxStatus, phba->hba_state);
1072
1073 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1074 spin_lock_irq(phba->host->host_lock);
1075 ndlp->nlp_flag |= NLP_DELAY_TMO;
1076 spin_unlock_irq(phba->host->host_lock);
1077
1078 lpfc_issue_els_logo(phba, ndlp, 0);
 1079 /* Put ndlp on npr list, set plogi timer for 1 sec */
1080 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
1081 ndlp->nlp_state = NLP_STE_NPR_NODE;
1082 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1083 return (ndlp->nlp_state);
1084 }
1085
1086 if (ndlp->nlp_rpi != 0)
1087 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
1088
1089 ndlp->nlp_rpi = mb->un.varWords[0];
1090 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
1091
1092 /* Only if we are not a fabric nport do we issue PRLI */
1093 if (!(ndlp->nlp_type & NLP_FABRIC)) {
1094 ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
1095 lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
1096 lpfc_issue_els_prli(phba, ndlp, 0);
1097 } else {
1098 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1099 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1100 }
1101 return (ndlp->nlp_state);
1102}
1103
1104static uint32_t
1105lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
1106 struct lpfc_nodelist * ndlp, void *arg,
1107 uint32_t evt)
1108{
1109 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1110 return (NLP_STE_FREED_NODE);
1111}
1112
1113static uint32_t
1114lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
1115 struct lpfc_nodelist * ndlp, void *arg,
1116 uint32_t evt)
1117{
1118 ndlp->nlp_state = NLP_STE_NPR_NODE;
1119 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1120 spin_lock_irq(phba->host->host_lock);
1121 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1122 spin_unlock_irq(phba->host->host_lock);
1123 return (ndlp->nlp_state);
1124}
1125
1126static uint32_t
1127lpfc_rcv_plogi_prli_issue(struct lpfc_hba * phba,
1128 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1129{
1130 struct lpfc_iocbq *cmdiocb;
1131
1132 cmdiocb = (struct lpfc_iocbq *) arg;
1133
1134 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1135 return (ndlp->nlp_state);
1136}
1137
1138static uint32_t
1139lpfc_rcv_prli_prli_issue(struct lpfc_hba * phba,
1140 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1141{
1142 struct lpfc_iocbq *cmdiocb;
1143
1144 cmdiocb = (struct lpfc_iocbq *) arg;
1145
1146 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1147 return (ndlp->nlp_state);
1148}
1149
1150static uint32_t
1151lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
1152 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1153{
1154 struct lpfc_iocbq *cmdiocb;
1155
1156 cmdiocb = (struct lpfc_iocbq *) arg;
1157
1158 /* Software abort outstanding PRLI before sending acc */
1159 lpfc_els_abort(phba, ndlp, 1);
1160
1161 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1162 return (ndlp->nlp_state);
1163}
1164
1165static uint32_t
1166lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
1167 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1168{
1169 struct lpfc_iocbq *cmdiocb;
1170
1171 cmdiocb = (struct lpfc_iocbq *) arg;
1172
1173 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1174 return (ndlp->nlp_state);
1175}
1176
 1177/* This routine is invoked when we rcv a PRLO request from a nport
1178 * we are logged into. We should send back a PRLO rsp setting the
1179 * appropriate bits.
1180 * NEXT STATE = PRLI_ISSUE
1181 */
1182static uint32_t
1183lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
1184 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1185{
1186 struct lpfc_iocbq *cmdiocb;
1187
1188 cmdiocb = (struct lpfc_iocbq *) arg;
1189 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1190 return (ndlp->nlp_state);
1191}
1192
1193static uint32_t
1194lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
1195 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1196{
1197 struct lpfc_iocbq *cmdiocb, *rspiocb;
1198 IOCB_t *irsp;
1199 PRLI *npr;
1200
1201 cmdiocb = (struct lpfc_iocbq *) arg;
1202 rspiocb = cmdiocb->context_un.rsp_iocb;
1203 npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1204
1205 irsp = &rspiocb->iocb;
1206 if (irsp->ulpStatus) {
1207 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1208 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1209 return (ndlp->nlp_state);
1210 }
1211
1212 /* Check out PRLI rsp */
1213 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1214 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
1215 if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
1216 (npr->prliType == PRLI_FCP_TYPE)) {
1217 if (npr->initiatorFunc)
1218 ndlp->nlp_type |= NLP_FCP_INITIATOR;
1219 if (npr->targetFunc)
1220 ndlp->nlp_type |= NLP_FCP_TARGET;
1221 if (npr->Retry)
1222 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1223 }
1224
1225 ndlp->nlp_state = NLP_STE_MAPPED_NODE;
1226 lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
1227 return (ndlp->nlp_state);
1228}
1229
1230/*! lpfc_device_rm_prli_issue
1231 *
1232 * \pre
1233 * \post
1234 * \param phba
1235 * \param ndlp
1236 * \param arg
1237 * \param evt
1238 * \return uint32_t
1239 *
1240 * \b Description:
 1241 * This routine is invoked when we receive a request to remove a nport we are in the
1242 * process of PRLIing. We should software abort outstanding prli, unreg
1243 * login, send a logout. We will change node state to UNUSED_NODE, put it
1244 * on plogi list so it can be freed when LOGO completes.
1245 *
1246 */
1247static uint32_t
1248lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
1249 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1250{
1251 /* software abort outstanding PRLI */
1252 lpfc_els_abort(phba, ndlp, 1);
1253
1254 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1255 return (NLP_STE_FREED_NODE);
1256}
1257
1258
1259/*! lpfc_device_recov_prli_issue
1260 *
1261 * \pre
1262 * \post
1263 * \param phba
1264 * \param ndlp
1265 * \param arg
1266 * \param evt
1267 * \return uint32_t
1268 *
1269 * \b Description:
 1270 * The routine is invoked when the state of a device is unknown, like
1271 * during a link down. We should remove the nodelist entry from the
1272 * unmapped list, issue a UNREG_LOGIN, do a software abort of the
1273 * outstanding PRLI command, then free the node entry.
1274 */
1275static uint32_t
1276lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
1277 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1278{
1279 /* software abort outstanding PRLI */
1280 lpfc_els_abort(phba, ndlp, 1);
1281
1282 ndlp->nlp_state = NLP_STE_NPR_NODE;
1283 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1284 spin_lock_irq(phba->host->host_lock);
1285 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1286 spin_unlock_irq(phba->host->host_lock);
1287 return (ndlp->nlp_state);
1288}
1289
1290static uint32_t
1291lpfc_rcv_plogi_unmap_node(struct lpfc_hba * phba,
1292 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1293{
1294 struct lpfc_iocbq *cmdiocb;
1295
1296 cmdiocb = (struct lpfc_iocbq *) arg;
1297
1298 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1299 return (ndlp->nlp_state);
1300}
1301
1302static uint32_t
1303lpfc_rcv_prli_unmap_node(struct lpfc_hba * phba,
1304 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1305{
1306 struct lpfc_iocbq *cmdiocb;
1307
1308 cmdiocb = (struct lpfc_iocbq *) arg;
1309
1310 lpfc_rcv_prli(phba, ndlp, cmdiocb);
1311 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1312 return (ndlp->nlp_state);
1313}
1314
1315static uint32_t
1316lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
1317 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1318{
1319 struct lpfc_iocbq *cmdiocb;
1320
1321 cmdiocb = (struct lpfc_iocbq *) arg;
1322
1323 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1324 return (ndlp->nlp_state);
1325}
1326
1327static uint32_t
1328lpfc_rcv_padisc_unmap_node(struct lpfc_hba * phba,
1329 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1330{
1331 struct lpfc_iocbq *cmdiocb;
1332
1333 cmdiocb = (struct lpfc_iocbq *) arg;
1334
1335 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1336 return (ndlp->nlp_state);
1337}
1338
1339static uint32_t
1340lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
1341 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1342{
1343 struct lpfc_iocbq *cmdiocb;
1344
1345 cmdiocb = (struct lpfc_iocbq *) arg;
1346
1347 /* Treat like rcv logo */
1348 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1349 return (ndlp->nlp_state);
1350}
1351
1352static uint32_t
1353lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
1354 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1355{
1356 ndlp->nlp_state = NLP_STE_NPR_NODE;
1357 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1358 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1359 lpfc_disc_set_adisc(phba, ndlp);
1360
1361 return (ndlp->nlp_state);
1362}
1363
1364static uint32_t
1365lpfc_rcv_plogi_mapped_node(struct lpfc_hba * phba,
1366 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1367{
1368 struct lpfc_iocbq *cmdiocb;
1369
1370 cmdiocb = (struct lpfc_iocbq *) arg;
1371
1372 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1373 return (ndlp->nlp_state);
1374}
1375
1376static uint32_t
1377lpfc_rcv_prli_mapped_node(struct lpfc_hba * phba,
1378 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1379{
1380 struct lpfc_iocbq *cmdiocb;
1381
1382 cmdiocb = (struct lpfc_iocbq *) arg;
1383
1384 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1385 return (ndlp->nlp_state);
1386}
1387
1388static uint32_t
1389lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
1390 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1391{
1392 struct lpfc_iocbq *cmdiocb;
1393
1394 cmdiocb = (struct lpfc_iocbq *) arg;
1395
1396 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1397 return (ndlp->nlp_state);
1398}
1399
1400static uint32_t
1401lpfc_rcv_padisc_mapped_node(struct lpfc_hba * phba,
1402 struct lpfc_nodelist * ndlp, void *arg,
1403 uint32_t evt)
1404{
1405 struct lpfc_iocbq *cmdiocb;
1406
1407 cmdiocb = (struct lpfc_iocbq *) arg;
1408
1409 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1410 return (ndlp->nlp_state);
1411}
1412
1413static uint32_t
1414lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
1415 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1416{
1417 struct lpfc_iocbq *cmdiocb;
1418
1419 cmdiocb = (struct lpfc_iocbq *) arg;
1420
1421 /* flush the target */
1422 spin_lock_irq(phba->host->host_lock);
1423 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
1424 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
1425 spin_unlock_irq(phba->host->host_lock);
1426
1427 /* Treat like rcv logo */
1428 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1429 return (ndlp->nlp_state);
1430}
1431
1432static uint32_t
1433lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
1434 struct lpfc_nodelist * ndlp, void *arg,
1435 uint32_t evt)
1436{
1437 ndlp->nlp_state = NLP_STE_NPR_NODE;
1438 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1439 spin_lock_irq(phba->host->host_lock);
1440 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1441 spin_unlock_irq(phba->host->host_lock);
1442 lpfc_disc_set_adisc(phba, ndlp);
1443 return (ndlp->nlp_state);
1444}
1445
1446static uint32_t
1447lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba,
1448 struct lpfc_nodelist * ndlp, void *arg,
1449 uint32_t evt)
1450{
1451 struct lpfc_iocbq *cmdiocb;
1452
1453 cmdiocb = (struct lpfc_iocbq *) arg;
1454
1455 /* Ignore PLOGI if we have an outstanding LOGO */
1456 if (ndlp->nlp_flag & NLP_LOGO_SND) {
1457 return (ndlp->nlp_state);
1458 }
1459
1460 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
1461 spin_lock_irq(phba->host->host_lock);
1462 ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
1463 spin_unlock_irq(phba->host->host_lock);
1464 return (ndlp->nlp_state);
1465 }
1466
1467 /* send PLOGI immediately, move to PLOGI issue state */
1468 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1469 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1470 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1471 lpfc_issue_els_plogi(phba, ndlp, 0);
1472 }
1473 return (ndlp->nlp_state);
1474}
1475
1476static uint32_t
1477lpfc_rcv_prli_npr_node(struct lpfc_hba * phba,
1478 struct lpfc_nodelist * ndlp, void *arg,
1479 uint32_t evt)
1480{
1481 struct lpfc_iocbq *cmdiocb;
1482 struct ls_rjt stat;
1483
1484 cmdiocb = (struct lpfc_iocbq *) arg;
1485
1486 memset(&stat, 0, sizeof (struct ls_rjt));
1487 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1488 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1489 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
1490
1491 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1492 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1493 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1494 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1495 lpfc_issue_els_adisc(phba, ndlp, 0);
1496 } else {
1497 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1498 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1499 lpfc_issue_els_plogi(phba, ndlp, 0);
1500 }
1501 }
1502 return (ndlp->nlp_state);
1503}
1504
1505static uint32_t
1506lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
1507 struct lpfc_nodelist * ndlp, void *arg,
1508 uint32_t evt)
1509{
1510 struct lpfc_iocbq *cmdiocb;
1511
1512 cmdiocb = (struct lpfc_iocbq *) arg;
1513
1514 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1515 return (ndlp->nlp_state);
1516}
1517
1518static uint32_t
1519lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
1520 struct lpfc_nodelist * ndlp, void *arg,
1521 uint32_t evt)
1522{
1523 struct lpfc_iocbq *cmdiocb;
1524
1525 cmdiocb = (struct lpfc_iocbq *) arg;
1526
1527 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1528
1529 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1530 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1531 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1532 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1533 lpfc_issue_els_adisc(phba, ndlp, 0);
1534 } else {
1535 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1536 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1537 lpfc_issue_els_plogi(phba, ndlp, 0);
1538 }
1539 }
1540 return (ndlp->nlp_state);
1541}
1542
1543static uint32_t
1544lpfc_rcv_prlo_npr_node(struct lpfc_hba * phba,
1545 struct lpfc_nodelist * ndlp, void *arg,
1546 uint32_t evt)
1547{
1548 struct lpfc_iocbq *cmdiocb;
1549
1550 cmdiocb = (struct lpfc_iocbq *) arg;
1551
1552 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1553
1554 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1555 if (ndlp->nlp_last_elscmd == (unsigned long)ELS_CMD_PLOGI) {
1556 return (ndlp->nlp_state);
1557 } else {
1558 spin_lock_irq(phba->host->host_lock);
1559 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1560 spin_unlock_irq(phba->host->host_lock);
1561 del_timer_sync(&ndlp->nlp_delayfunc);
1562 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1563 list_del_init(&ndlp->els_retry_evt.evt_listp);
1564 }
1565 }
1566
1567 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1568 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1569 lpfc_issue_els_plogi(phba, ndlp, 0);
1570 return (ndlp->nlp_state);
1571}
1572
1573static uint32_t
1574lpfc_cmpl_logo_npr_node(struct lpfc_hba * phba,
1575 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1576{
1577 lpfc_unreg_rpi(phba, ndlp);
1578 /* This routine does nothing, just return the current state */
1579 return (ndlp->nlp_state);
1580}
1581
1582static uint32_t
1583lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
1584 struct lpfc_nodelist * ndlp, void *arg,
1585 uint32_t evt)
1586{
1587 LPFC_MBOXQ_t *pmb;
1588 MAILBOX_t *mb;
1589
1590 pmb = (LPFC_MBOXQ_t *) arg;
1591 mb = &pmb->mb;
1592
1593 /* save rpi */
1594 if (ndlp->nlp_rpi != 0)
1595 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
1596
1597 ndlp->nlp_rpi = mb->un.varWords[0];
1598 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
1599
1600 return (ndlp->nlp_state);
1601}
1602
1603static uint32_t
1604lpfc_device_rm_npr_node(struct lpfc_hba * phba,
1605 struct lpfc_nodelist * ndlp, void *arg,
1606 uint32_t evt)
1607{
1608 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1609 return (NLP_STE_FREED_NODE);
1610}
1611
1612static uint32_t
1613lpfc_device_recov_npr_node(struct lpfc_hba * phba,
1614 struct lpfc_nodelist * ndlp, void *arg,
1615 uint32_t evt)
1616{
1617 spin_lock_irq(phba->host->host_lock);
1618 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1619 spin_unlock_irq(phba->host->host_lock);
1620 return (ndlp->nlp_state);
1621}
1622
1623
1624/* This next section defines the NPort Discovery State Machine */
1625
1626/* There are 4 different double linked lists nodelist entries can reside on.
1627 * The plogi list and adisc list are used when Link Up discovery or RSCN
1628 * processing is needed. Each list holds the nodes that we will send PLOGI
 1629 * or ADISC on. These lists will keep track of what nodes will be affected
 1630 * by an RSCN, or a Link Up (typically, all nodes are affected on Link Up).
1631 * The unmapped_list will contain all nodes that we have successfully logged
1632 * into at the Fibre Channel level. The mapped_list will contain all nodes
1633 * that are mapped FCP targets.
1634 */
1635/*
1636 * The bind list is a list of undiscovered (potentially non-existent) nodes
1637 * that we have saved binding information on. This information is used when
1638 * nodes transition from the unmapped to the mapped list.
1639 */
 1640/* For UNUSED_NODE state, the node has just been allocated.
1641 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
1642 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
1643 * and put on the unmapped list. For ADISC processing, the node is taken off
1644 * the ADISC list and placed on either the mapped or unmapped list (depending
1645 * on its previous state). Once on the unmapped list, a PRLI is issued and the
1646 * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
1647 * changed to UNMAPPED_NODE. If the completion indicates a mapped
1648 * node, the node is taken off the unmapped list. The binding list is checked
1649 * for a valid binding, or a binding is automatically assigned. If binding
1650 * assignment is unsuccessful, the node is left on the unmapped list. If
1651 * binding assignment is successful, the associated binding list entry (if
1652 * any) is removed, and the node is placed on the mapped list.
1653 */
1654/*
1655 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
1656 * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
 1657 * expire, all affected nodes will receive a DEVICE_RM event.
1658 */
1659/*
1660 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
1661 * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
1662 * check, additional nodes may be added or removed (via DEVICE_RM) to / from
1663 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
 1664 * we will first process the ADISC list. 32 entries are processed initially and
 1665 * ADISC is initiated for each one. Completions / Events for each node are
 1666 * funneled through the state machine. As each node finishes ADISC processing, it
 1667 * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
 1668 * waiting, and the ADISC list count is identically 0, then we are done. For
 1669 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
 1670 * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
 1671 * list. 32 entries are processed initially and PLOGI is initiated for each one.
 1672 * Completions / Events for each node are funneled through the state machine. As
 1673 * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
 1674 * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
 1675 * identically 0, then we are done. We have now completed discovery / RSCN
1676 * handling. Upon completion, ALL nodes should be on either the mapped or
1677 * unmapped lists.
1678 */
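
The windowed issuance described above (an initial burst of 32, each completion kicking off the next waiting node until the list count hits zero) reduces to a few lines; the window size and node count here are illustrative only:

    #include <stdio.h>

    #define WINDOW 4

    static int pending = 10;        /* nodes still waiting to be issued */
    static int inflight;

    static void issue_one(void)
    {
            if (pending > 0) {
                    pending--;
                    inflight++;
            }
    }

    static void complete_one(void)
    {
            inflight--;
            issue_one();            /* each completion kicks the next */
    }

    int main(void)
    {
            int i;

            for (i = 0; i < WINDOW; i++)    /* initial burst */
                    issue_one();
            while (inflight)
                    complete_one();
            printf("done: pending=%d inflight=%d\n", pending, inflight);
            return 0;
    }
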
1679
1680static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
1681 (struct lpfc_hba *, struct lpfc_nodelist *, void *, uint32_t) = {
1682 /* Action routine Event Current State */
1683 lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */
1684 lpfc_rcv_els_unused_node, /* RCV_PRLI */
1685 lpfc_rcv_logo_unused_node, /* RCV_LOGO */
1686 lpfc_rcv_els_unused_node, /* RCV_ADISC */
1687 lpfc_rcv_els_unused_node, /* RCV_PDISC */
1688 lpfc_rcv_els_unused_node, /* RCV_PRLO */
1689 lpfc_disc_illegal, /* CMPL_PLOGI */
1690 lpfc_disc_illegal, /* CMPL_PRLI */
1691 lpfc_cmpl_logo_unused_node, /* CMPL_LOGO */
1692 lpfc_disc_illegal, /* CMPL_ADISC */
1693 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1694 lpfc_device_rm_unused_node, /* DEVICE_RM */
1695 lpfc_disc_illegal, /* DEVICE_RECOVERY */
1696
1697 lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
1698 lpfc_rcv_els_plogi_issue, /* RCV_PRLI */
1699 lpfc_rcv_els_plogi_issue, /* RCV_LOGO */
1700 lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
1701 lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
1702 lpfc_rcv_els_plogi_issue, /* RCV_PRLO */
1703 lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */
1704 lpfc_disc_illegal, /* CMPL_PRLI */
1705 lpfc_disc_illegal, /* CMPL_LOGO */
1706 lpfc_disc_illegal, /* CMPL_ADISC */
1707 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1708 lpfc_device_rm_plogi_issue, /* DEVICE_RM */
1709 lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */
1710
1711 lpfc_rcv_plogi_adisc_issue, /* RCV_PLOGI ADISC_ISSUE */
1712 lpfc_rcv_prli_adisc_issue, /* RCV_PRLI */
1713 lpfc_rcv_logo_adisc_issue, /* RCV_LOGO */
1714 lpfc_rcv_padisc_adisc_issue, /* RCV_ADISC */
1715 lpfc_rcv_padisc_adisc_issue, /* RCV_PDISC */
1716 lpfc_rcv_prlo_adisc_issue, /* RCV_PRLO */
1717 lpfc_disc_illegal, /* CMPL_PLOGI */
1718 lpfc_disc_illegal, /* CMPL_PRLI */
1719 lpfc_disc_illegal, /* CMPL_LOGO */
1720 lpfc_cmpl_adisc_adisc_issue, /* CMPL_ADISC */
1721 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1722 lpfc_device_rm_adisc_issue, /* DEVICE_RM */
1723 lpfc_device_recov_adisc_issue, /* DEVICE_RECOVERY */
1724
1725 lpfc_rcv_plogi_reglogin_issue, /* RCV_PLOGI REG_LOGIN_ISSUE */
 1726 lpfc_rcv_prli_reglogin_issue, /* RCV_PRLI */
1727 lpfc_rcv_logo_reglogin_issue, /* RCV_LOGO */
1728 lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */
1729 lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */
1730 lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */
1731 lpfc_disc_illegal, /* CMPL_PLOGI */
1732 lpfc_disc_illegal, /* CMPL_PRLI */
1733 lpfc_disc_illegal, /* CMPL_LOGO */
1734 lpfc_disc_illegal, /* CMPL_ADISC */
1735 lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */
1736 lpfc_device_rm_reglogin_issue, /* DEVICE_RM */
1737 lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
1738
1739 lpfc_rcv_plogi_prli_issue, /* RCV_PLOGI PRLI_ISSUE */
1740 lpfc_rcv_prli_prli_issue, /* RCV_PRLI */
1741 lpfc_rcv_logo_prli_issue, /* RCV_LOGO */
1742 lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */
1743 lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */
1744 lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */
1745 lpfc_disc_illegal, /* CMPL_PLOGI */
1746 lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */
1747 lpfc_disc_illegal, /* CMPL_LOGO */
1748 lpfc_disc_illegal, /* CMPL_ADISC */
1749 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1750 lpfc_device_rm_prli_issue, /* DEVICE_RM */
1751 lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */
1752
1753 lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */
1754 lpfc_rcv_prli_unmap_node, /* RCV_PRLI */
1755 lpfc_rcv_logo_unmap_node, /* RCV_LOGO */
1756 lpfc_rcv_padisc_unmap_node, /* RCV_ADISC */
1757 lpfc_rcv_padisc_unmap_node, /* RCV_PDISC */
1758 lpfc_rcv_prlo_unmap_node, /* RCV_PRLO */
1759 lpfc_disc_illegal, /* CMPL_PLOGI */
1760 lpfc_disc_illegal, /* CMPL_PRLI */
1761 lpfc_disc_illegal, /* CMPL_LOGO */
1762 lpfc_disc_illegal, /* CMPL_ADISC */
1763 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1764 lpfc_disc_illegal, /* DEVICE_RM */
1765 lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */
1766
1767 lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */
1768 lpfc_rcv_prli_mapped_node, /* RCV_PRLI */
1769 lpfc_rcv_logo_mapped_node, /* RCV_LOGO */
1770 lpfc_rcv_padisc_mapped_node, /* RCV_ADISC */
1771 lpfc_rcv_padisc_mapped_node, /* RCV_PDISC */
1772 lpfc_rcv_prlo_mapped_node, /* RCV_PRLO */
1773 lpfc_disc_illegal, /* CMPL_PLOGI */
1774 lpfc_disc_illegal, /* CMPL_PRLI */
1775 lpfc_disc_illegal, /* CMPL_LOGO */
1776 lpfc_disc_illegal, /* CMPL_ADISC */
1777 lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1778 lpfc_disc_illegal, /* DEVICE_RM */
1779 lpfc_device_recov_mapped_node, /* DEVICE_RECOVERY */
1780
1781 lpfc_rcv_plogi_npr_node, /* RCV_PLOGI NPR_NODE */
1782 lpfc_rcv_prli_npr_node, /* RCV_PRLI */
1783 lpfc_rcv_logo_npr_node, /* RCV_LOGO */
1784 lpfc_rcv_padisc_npr_node, /* RCV_ADISC */
1785 lpfc_rcv_padisc_npr_node, /* RCV_PDISC */
1786 lpfc_rcv_prlo_npr_node, /* RCV_PRLO */
1787 lpfc_disc_noop, /* CMPL_PLOGI */
1788 lpfc_disc_noop, /* CMPL_PRLI */
1789 lpfc_cmpl_logo_npr_node, /* CMPL_LOGO */
1790 lpfc_disc_noop, /* CMPL_ADISC */
1791 lpfc_cmpl_reglogin_npr_node, /* CMPL_REG_LOGIN */
1792 lpfc_device_rm_npr_node, /* DEVICE_RM */
1793 lpfc_device_recov_npr_node, /* DEVICE_RECOVERY */
1794};
1795
1796int
1797lpfc_disc_state_machine(struct lpfc_hba * phba,
1798 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1799{
1800 uint32_t cur_state, rc;
1801 uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *,
1802 uint32_t);
1803
1804 ndlp->nlp_disc_refcnt++;
1805 cur_state = ndlp->nlp_state;
1806
1807 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
1808 lpfc_printf_log(phba,
1809 KERN_INFO,
1810 LOG_DISCOVERY,
1811 "%d:0211 DSM in event x%x on NPort x%x in state %d "
1812 "Data: x%x\n",
1813 phba->brd_no,
1814 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
1815
1816 func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
1817 rc = (func) (phba, ndlp, arg, evt);
1818
1819 /* DSM out state <rc> on NPort <nlp_DID> */
1820 lpfc_printf_log(phba,
1821 KERN_INFO,
1822 LOG_DISCOVERY,
1823 "%d:0212 DSM out state %d on NPort x%x Data: x%x\n",
1824 phba->brd_no,
1825 rc, ndlp->nlp_DID, ndlp->nlp_flag);
1826
1827 ndlp->nlp_disc_refcnt--;
1828
1829 /* Check to see if ndlp removal is deferred */
1830 if ((ndlp->nlp_disc_refcnt == 0)
1831 && (ndlp->nlp_flag & NLP_DELAY_REMOVE)) {
1832 spin_lock_irq(phba->host->host_lock);
1833 ndlp->nlp_flag &= ~NLP_DELAY_REMOVE;
1834 spin_unlock_irq(phba->host->host_lock);
1835 lpfc_nlp_remove(phba, ndlp);
1836 return (NLP_STE_FREED_NODE);
1837 }
1838 if (rc == NLP_STE_FREED_NODE)
1839 return (NLP_STE_FREED_NODE);
1840 ndlp->nlp_state = rc;
1841 return (rc);
1842}
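
/*
 * The table above is a flat array addressed as a two-dimensional
 * [state][event] grid: the handler for a given (state, event) pair lives
 * at index (state * NLP_EVT_MAX_EVENT) + event, which is exactly how
 * lpfc_disc_state_machine() selects `func`.  A minimal standalone sketch
 * of the same dispatch pattern (names and sizes here are illustrative,
 * not from the driver):
 */
#include <stdio.h>

#define DEMO_N_STATES	2
#define DEMO_N_EVENTS	3

typedef int (*demo_action)(void *ctx, int evt);

static int demo_noop(void *ctx, int evt)    { (void)ctx; return evt; }
static int demo_illegal(void *ctx, int evt) { (void)ctx; (void)evt; return -1; }

/* Flat [state][event] table, same layout as lpfc_disc_action[]. */
static demo_action demo_actions[DEMO_N_STATES * DEMO_N_EVENTS] = {
	demo_noop,    demo_illegal, demo_noop,		/* state 0, events 0..2 */
	demo_illegal, demo_noop,    demo_illegal,	/* state 1, events 0..2 */
};

static int demo_dispatch(int state, int evt, void *ctx)
{
	/* Mirrors: func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt]; */
	return demo_actions[(state * DEMO_N_EVENTS) + evt](ctx, evt);
}

int main(void)
{
	printf("%d\n", demo_dispatch(1, 1, NULL));	/* prints 1 */
	return 0;
}
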
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
new file mode 100644
index 000000000000..42fab03ad2ba
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -0,0 +1,1246 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_scsi.c 1.37 2005/04/13 14:27:09EDT sf_support Exp $
23 */
24
25#include <linux/pci.h>
26#include <linux/interrupt.h>
27
28#include <scsi/scsi.h>
29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_tcq.h>
32#include <scsi/scsi_transport_fc.h>
33
34#include "lpfc_version.h"
35#include "lpfc_hw.h"
36#include "lpfc_sli.h"
37#include "lpfc_disc.h"
38#include "lpfc_scsi.h"
39#include "lpfc.h"
40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h"
42
43#define LPFC_RESET_WAIT 2
44#define LPFC_ABORT_WAIT 2
45
46static inline void lpfc_put_lun(struct fcp_cmnd *fcmd, unsigned int lun)
47{
48 fcmd->fcpLunLsl = 0;
49 fcmd->fcpLunMsl = swab16((uint16_t)lun);
50}
51
52/*
53 * This routine allocates a scsi buffer, which contains all the necessary
54 * information needed to initiate a SCSI I/O. The non-DMAable buffer region
55 * contains information to build the IOCB. The DMAable region contains
 56 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
 57 * allocating memory, the FCP CMND and FCP RSP BDEs are set up in the BPL
58 * and the BPL BDE is setup in the IOCB.
59 */
60static struct lpfc_scsi_buf *
61lpfc_get_scsi_buf(struct lpfc_hba * phba)
62{
63 struct lpfc_scsi_buf *psb;
64 struct ulp_bde64 *bpl;
65 IOCB_t *iocb;
66 dma_addr_t pdma_phys;
67
68 psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
69 if (!psb)
70 return NULL;
71 memset(psb, 0, sizeof (struct lpfc_scsi_buf));
72 psb->scsi_hba = phba;
73
74 /*
75 * Get memory from the pci pool to map the virt space to pci bus space
76 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
77 * struct fcp_rsp and the number of bde's necessary to support the
78 * sg_tablesize.
79 */
80 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
81 &psb->dma_handle);
82 if (!psb->data) {
83 kfree(psb);
84 return NULL;
85 }
86
87 /* Initialize virtual ptrs to dma_buf region. */
88 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
89
90 psb->fcp_cmnd = psb->data;
91 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
92 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
93 sizeof(struct fcp_rsp);
94
95 /* Initialize local short-hand pointers. */
96 bpl = psb->fcp_bpl;
97 pdma_phys = psb->dma_handle;
98
99 /*
100 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
101 * list bdes. Initialize the first two and leave the rest for
102 * queuecommand.
103 */
104 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
105 bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
106 bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
107 bpl->tus.f.bdeFlags = BUFF_USE_CMND;
108 bpl->tus.w = le32_to_cpu(bpl->tus.w);
109 bpl++;
110
111 /* Setup the physical region for the FCP RSP */
112 pdma_phys += sizeof (struct fcp_cmnd);
113 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
114 bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
115 bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
116 bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
117 bpl->tus.w = le32_to_cpu(bpl->tus.w);
118
119 /*
120 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
121 * initialize it with all known data now.
122 */
123 pdma_phys += (sizeof (struct fcp_rsp));
124 iocb = &psb->cur_iocbq.iocb;
125 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
126 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
127 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
128 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
129 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
130 iocb->ulpBdeCount = 1;
131 iocb->ulpClass = CLASS3;
132
133 return psb;
134}
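
/*
 * One pci_pool allocation above is carved into three adjacent regions --
 * the FCP command, the FCP response, and the BPL -- so both the virtual
 * pointers and the bus addresses are derived by offsetting from a single
 * base.  A minimal userspace sketch of the same carve-up (plain calloc
 * stands in for pci_pool_alloc; struct names and sizes are illustrative):
 */
#include <stdlib.h>

struct demo_cmnd { unsigned char cdb[16]; unsigned int dl; };
struct demo_rsp  { unsigned int status; unsigned char sense[128]; };
struct demo_bde  { unsigned long long addr; unsigned int len; };

struct demo_buf {
	void *base;
	struct demo_cmnd *cmnd;
	struct demo_rsp  *rsp;
	struct demo_bde  *bpl;
};

static int demo_carve(struct demo_buf *b, unsigned int nr_bde)
{
	b->base = calloc(1, sizeof(struct demo_cmnd) + sizeof(struct demo_rsp)
			 + nr_bde * sizeof(struct demo_bde));
	if (!b->base)
		return -1;

	/* Same layout as psb->fcp_cmnd / psb->fcp_rsp / psb->fcp_bpl. */
	b->cmnd = b->base;
	b->rsp  = (struct demo_rsp *)((char *)b->base + sizeof(struct demo_cmnd));
	b->bpl  = (struct demo_bde *)((char *)b->rsp + sizeof(struct demo_rsp));
	return 0;
}
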
135
136static void
137lpfc_free_scsi_buf(struct lpfc_scsi_buf * psb)
138{
139 struct lpfc_hba *phba = psb->scsi_hba;
140
141 /*
142 * There are only two special cases to consider. (1) the scsi command
143 * requested scatter-gather usage or (2) the scsi command allocated
144 * a request buffer, but did not request use_sg. There is a third
145 * case, but it does not require resource deallocation.
146 */
147 if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
148 dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
149 psb->seg_cnt, psb->pCmd->sc_data_direction);
150 } else {
151 if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
152 dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
153 psb->pCmd->request_bufflen,
154 psb->pCmd->sc_data_direction);
155 }
156 }
157
158 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
159}
160
161static int
162lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
163{
164 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
165 struct scatterlist *sgel = NULL;
166 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
167 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
168 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
169 dma_addr_t physaddr;
170 uint32_t i, num_bde = 0;
171 int datadir = scsi_cmnd->sc_data_direction;
172 int dma_error;
173
174 /*
175 * There are three possibilities here - use scatter-gather segment, use
176 * the single mapping, or neither. Start the lpfc command prep by
177 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
178 * data bde entry.
179 */
180 bpl += 2;
181 if (scsi_cmnd->use_sg) {
182 /*
 183		 * The driver stores the segment count returned from dma_map_sg
 184		 * because this is a count of dma-mappings used to map the use_sg
185 * pages. They are not guaranteed to be the same for those
186 * architectures that implement an IOMMU.
187 */
188 sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
189 lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
190 scsi_cmnd->use_sg, datadir);
191 if (lpfc_cmd->seg_cnt == 0)
192 return 1;
193
194 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
195 printk(KERN_ERR "%s: Too many sg segments from "
196 "dma_map_sg. Config %d, seg_cnt %d",
197 __FUNCTION__, phba->cfg_sg_seg_cnt,
198 lpfc_cmd->seg_cnt);
199 dma_unmap_sg(&phba->pcidev->dev, sgel,
200 lpfc_cmd->seg_cnt, datadir);
201 return 1;
202 }
203
204 /*
205 * The driver established a maximum scatter-gather segment count
206 * during probe that limits the number of sg elements in any
207 * single scsi command. Just run through the seg_cnt and format
208 * the bde's.
209 */
210 for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
211 physaddr = sg_dma_address(sgel);
212 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
213 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
214 bpl->tus.f.bdeSize = sg_dma_len(sgel);
215 if (datadir == DMA_TO_DEVICE)
216 bpl->tus.f.bdeFlags = 0;
217 else
218 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
219 bpl->tus.w = le32_to_cpu(bpl->tus.w);
220 bpl++;
221 sgel++;
222 num_bde++;
223 }
224 } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
225 physaddr = dma_map_single(&phba->pcidev->dev,
226 scsi_cmnd->request_buffer,
227 scsi_cmnd->request_bufflen,
228 datadir);
229 dma_error = dma_mapping_error(physaddr);
230 if (dma_error) {
231 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
232 "%d:0718 Unable to dma_map_single "
233 "request_buffer: x%x\n",
234 phba->brd_no, dma_error);
235 return 1;
236 }
237
238 lpfc_cmd->nonsg_phys = physaddr;
239 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
240 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
241 bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
242 if (datadir == DMA_TO_DEVICE)
243 bpl->tus.f.bdeFlags = 0;
244 bpl->tus.w = le32_to_cpu(bpl->tus.w);
245 num_bde = 1;
246 bpl++;
247 }
248
249 /*
250 * Finish initializing those IOCB fields that are dependent on the
251 * scsi_cmnd request_buffer
252 */
253 iocb_cmd->un.fcpi64.bdl.bdeSize +=
254 (num_bde * sizeof (struct ulp_bde64));
255 iocb_cmd->ulpBdeCount = 1;
256 iocb_cmd->ulpLe = 1;
257 fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
258 return 0;
259}
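
/*
 * Worked example for the sizing above: a three-segment read leaves
 * num_bde == 3, so bdl.bdeSize ends up as (2 + 3) * sizeof(struct
 * ulp_bde64) -- the FCP_CMND and FCP_RSP BDEs prebuilt in
 * lpfc_get_scsi_buf() plus one BDE per mapped data segment.
 */
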
260
261static void
262lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
263{
264 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
265 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
266 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
267 struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
268 uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
269 uint32_t resp_info = fcprsp->rspStatus2;
270 uint32_t scsi_status = fcprsp->rspStatus3;
271 uint32_t host_status = DID_OK;
272 uint32_t rsplen = 0;
273
274 /*
275 * If this is a task management command, there is no
276 * scsi packet associated with this lpfc_cmd. The driver
277 * consumes it.
278 */
279 if (fcpcmd->fcpCntl2) {
280 scsi_status = 0;
281 goto out;
282 }
283
284 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
285 "%d:0730 FCP command failed: RSP "
286 "Data: x%x x%x x%x x%x x%x x%x\n",
287 phba->brd_no, resp_info, scsi_status,
288 be32_to_cpu(fcprsp->rspResId),
289 be32_to_cpu(fcprsp->rspSnsLen),
290 be32_to_cpu(fcprsp->rspRspLen),
291 fcprsp->rspInfo3);
292
293 if (resp_info & RSP_LEN_VALID) {
294 rsplen = be32_to_cpu(fcprsp->rspRspLen);
295 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
296 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
297 host_status = DID_ERROR;
298 goto out;
299 }
300 }
301
302 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
303 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
304 if (snslen > SCSI_SENSE_BUFFERSIZE)
305 snslen = SCSI_SENSE_BUFFERSIZE;
306
307 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
308 }
309
310 cmnd->resid = 0;
311 if (resp_info & RESID_UNDER) {
312 cmnd->resid = be32_to_cpu(fcprsp->rspResId);
313
314 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
315 "%d:0716 FCP Read Underrun, expected %d, "
316 "residual %d Data: x%x x%x x%x\n", phba->brd_no,
317 be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
318 fcpi_parm, cmnd->cmnd[0], cmnd->underflow);
319
320 /*
321 * The cmnd->underflow is the minimum number of bytes that must
 322		 * be transferred for this command. Provided a sense condition
323 * is not present, make sure the actual amount transferred is at
324 * least the underflow value or fail.
325 */
326 if (!(resp_info & SNS_LEN_VALID) &&
327 (scsi_status == SAM_STAT_GOOD) &&
328 (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
329 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
330 "%d:0717 FCP command x%x residual "
331 "underrun converted to error "
332 "Data: x%x x%x x%x\n", phba->brd_no,
333 cmnd->cmnd[0], cmnd->request_bufflen,
334 cmnd->resid, cmnd->underflow);
335
336 host_status = DID_ERROR;
337 }
338 } else if (resp_info & RESID_OVER) {
339 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
340 "%d:0720 FCP command x%x residual "
341 "overrun error. Data: x%x x%x \n",
342 phba->brd_no, cmnd->cmnd[0],
343 cmnd->request_bufflen, cmnd->resid);
344 host_status = DID_ERROR;
345
346 /*
347 * Check SLI validation that all the transfer was actually done
348 * (fcpi_parm should be zero). Apply check only to reads.
349 */
350 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
351 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
352 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
353 "%d:0734 FCP Read Check Error Data: "
354 "x%x x%x x%x x%x\n", phba->brd_no,
355 be32_to_cpu(fcpcmd->fcpDl),
356 be32_to_cpu(fcprsp->rspResId),
357 fcpi_parm, cmnd->cmnd[0]);
358 host_status = DID_ERROR;
359 cmnd->resid = cmnd->request_bufflen;
360 }
361
362 out:
363 cmnd->result = ScsiResult(host_status, scsi_status);
364}
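
/*
 * A minimal sketch of the RESID_UNDER policy above: an underrun becomes
 * an error only when no sense data accompanies it, the SCSI status is
 * GOOD, and the bytes actually transferred fall short of the midlayer's
 * underflow floor.  Parameter names are illustrative:
 */
static int demo_underrun_is_error(unsigned int bufflen, unsigned int resid,
				  unsigned int underflow, int sense_valid,
				  int status_good)
{
	unsigned int transferred = bufflen - resid;

	return !sense_valid && status_good && transferred < underflow;
}
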
365
366static void
367lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
368 struct lpfc_iocbq *pIocbOut)
369{
370 struct lpfc_scsi_buf *lpfc_cmd =
371 (struct lpfc_scsi_buf *) pIocbIn->context1;
372 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
373 struct lpfc_nodelist *pnode = rdata->pnode;
374 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
375 unsigned long iflag;
376
377 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
378 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
379
380 if (lpfc_cmd->status) {
381 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
382 (lpfc_cmd->result & IOERR_DRVR_MASK))
383 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
384 else if (lpfc_cmd->status >= IOSTAT_CNT)
385 lpfc_cmd->status = IOSTAT_DEFAULT;
386
387 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
388 "%d:0729 FCP cmd x%x failed <%d/%d> status: "
389 "x%x result: x%x Data: x%x x%x\n",
390 phba->brd_no, cmd->cmnd[0], cmd->device->id,
391 cmd->device->lun, lpfc_cmd->status,
392 lpfc_cmd->result, pIocbOut->iocb.ulpContext,
393 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
394
395 switch (lpfc_cmd->status) {
396 case IOSTAT_FCP_RSP_ERROR:
397 /* Call FCP RSP handler to determine result */
398 lpfc_handle_fcp_err(lpfc_cmd);
399 break;
400 case IOSTAT_NPORT_BSY:
401 case IOSTAT_FABRIC_BSY:
402 cmd->result = ScsiResult(DID_BUS_BUSY, 0);
403 break;
404 default:
405 cmd->result = ScsiResult(DID_ERROR, 0);
406 break;
407 }
408
409 if (pnode) {
410 if (pnode->nlp_state != NLP_STE_MAPPED_NODE)
411 cmd->result = ScsiResult(DID_BUS_BUSY,
412 SAM_STAT_BUSY);
413 }
414 else {
415 cmd->result = ScsiResult(DID_NO_CONNECT, 0);
416 }
417 } else {
418 cmd->result = ScsiResult(DID_OK, 0);
419 }
420
421 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
422 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
423
424 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
425 "%d:0710 Iodone <%d/%d> cmd %p, error x%x "
426 "SNS x%x x%x Data: x%x x%x\n",
427 phba->brd_no, cmd->device->id,
428 cmd->device->lun, cmd, cmd->result,
429 *lp, *(lp + 3), cmd->retries, cmd->resid);
430 }
431
432 spin_lock_irqsave(phba->host->host_lock, iflag);
433 lpfc_free_scsi_buf(lpfc_cmd);
434 cmd->host_scribble = NULL;
435 spin_unlock_irqrestore(phba->host->host_lock, iflag);
436
437 cmd->scsi_done(cmd);
438}
439
440static void
441lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
442 struct lpfc_nodelist *pnode)
443{
444 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
445 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
446 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
447 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
448 int datadir = scsi_cmnd->sc_data_direction;
449
450 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
451
452 lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);
453
454 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
455
456 if (scsi_cmnd->device->tagged_supported) {
457 switch (scsi_cmnd->tag) {
458 case HEAD_OF_QUEUE_TAG:
459 fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
460 break;
461 case ORDERED_QUEUE_TAG:
462 fcp_cmnd->fcpCntl1 = ORDERED_Q;
463 break;
464 default:
465 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
466 break;
467 }
468 } else
469 fcp_cmnd->fcpCntl1 = 0;
470
 471	/*
 472	 * There are three possibilities here - use scatter-gather segment, use
 473	 * the single mapping, or neither.  Select the IOCB command, the
 474	 * read/write control bits, and the READ_CHECK parameter to match the
 475	 * data direction of each case.
 476	 */
477 if (scsi_cmnd->use_sg) {
478 if (datadir == DMA_TO_DEVICE) {
479 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
480 iocb_cmd->un.fcpi.fcpi_parm = 0;
481 iocb_cmd->ulpPU = 0;
482 fcp_cmnd->fcpCntl3 = WRITE_DATA;
483 phba->fc4OutputRequests++;
484 } else {
485 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
486 iocb_cmd->ulpPU = PARM_READ_CHECK;
487 iocb_cmd->un.fcpi.fcpi_parm =
488 scsi_cmnd->request_bufflen;
489 fcp_cmnd->fcpCntl3 = READ_DATA;
490 phba->fc4InputRequests++;
491 }
492 } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
493 if (datadir == DMA_TO_DEVICE) {
494 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
495 iocb_cmd->un.fcpi.fcpi_parm = 0;
496 iocb_cmd->ulpPU = 0;
497 fcp_cmnd->fcpCntl3 = WRITE_DATA;
498 phba->fc4OutputRequests++;
499 } else {
500 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
501 iocb_cmd->ulpPU = PARM_READ_CHECK;
502 iocb_cmd->un.fcpi.fcpi_parm =
503 scsi_cmnd->request_bufflen;
504 fcp_cmnd->fcpCntl3 = READ_DATA;
505 phba->fc4InputRequests++;
506 }
507 } else {
508 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
509 iocb_cmd->un.fcpi.fcpi_parm = 0;
510 iocb_cmd->ulpPU = 0;
511 fcp_cmnd->fcpCntl3 = 0;
512 phba->fc4ControlRequests++;
513 }
514
515 /*
516 * Finish initializing those IOCB fields that are independent
517 * of the scsi_cmnd request_buffer
518 */
519 piocbq->iocb.ulpContext = pnode->nlp_rpi;
520 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
521 piocbq->iocb.ulpFCP2Rcvy = 1;
522
523 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
524 piocbq->context1 = lpfc_cmd;
525 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
526 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
527}
528
529static int
530lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
531 struct lpfc_scsi_buf *lpfc_cmd,
532 uint8_t task_mgmt_cmd)
533{
534 struct lpfc_sli *psli;
535 struct lpfc_iocbq *piocbq;
536 IOCB_t *piocb;
537 struct fcp_cmnd *fcp_cmnd;
538 struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device;
539 struct lpfc_rport_data *rdata = scsi_dev->hostdata;
540 struct lpfc_nodelist *ndlp = rdata->pnode;
541
 542	if (!ndlp || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
543 return 0;
544 }
545
546 psli = &phba->sli;
547 piocbq = &(lpfc_cmd->cur_iocbq);
548 piocb = &piocbq->iocb;
549
550 fcp_cmnd = lpfc_cmd->fcp_cmnd;
551 lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);
552 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
553
554 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
555
556 piocb->ulpContext = ndlp->nlp_rpi;
557 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
558 piocb->ulpFCP2Rcvy = 1;
559 }
560 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
561
562 /* ulpTimeout is only one byte */
563 if (lpfc_cmd->timeout > 0xff) {
564 /*
565 * Do not timeout the command at the firmware level.
566 * The driver will provide the timeout mechanism.
567 */
568 piocb->ulpTimeout = 0;
569 } else {
570 piocb->ulpTimeout = lpfc_cmd->timeout;
571 }
572
573 lpfc_cmd->rdata = rdata;
574
575 switch (task_mgmt_cmd) {
576 case FCP_LUN_RESET:
577 /* Issue LUN Reset to TGT <num> LUN <num> */
578 lpfc_printf_log(phba,
579 KERN_INFO,
580 LOG_FCP,
581 "%d:0703 Issue LUN Reset to TGT %d LUN %d "
582 "Data: x%x x%x\n",
583 phba->brd_no,
584 scsi_dev->id, scsi_dev->lun,
585 ndlp->nlp_rpi, ndlp->nlp_flag);
586
587 break;
588 case FCP_ABORT_TASK_SET:
589 /* Issue Abort Task Set to TGT <num> LUN <num> */
590 lpfc_printf_log(phba,
591 KERN_INFO,
592 LOG_FCP,
593 "%d:0701 Issue Abort Task Set to TGT %d LUN %d "
594 "Data: x%x x%x\n",
595 phba->brd_no,
596 scsi_dev->id, scsi_dev->lun,
597 ndlp->nlp_rpi, ndlp->nlp_flag);
598
599 break;
600 case FCP_TARGET_RESET:
601 /* Issue Target Reset to TGT <num> */
602 lpfc_printf_log(phba,
603 KERN_INFO,
604 LOG_FCP,
605 "%d:0702 Issue Target Reset to TGT %d "
606 "Data: x%x x%x\n",
607 phba->brd_no,
608 scsi_dev->id, ndlp->nlp_rpi,
609 ndlp->nlp_flag);
610 break;
611 }
612
613 return (1);
614}
615
616static int
617lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
618{
619 struct lpfc_iocbq *iocbq;
620 struct lpfc_iocbq *iocbqrsp = NULL;
621 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
622 int ret;
623
624 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
625 if (!ret)
626 return FAILED;
627
628 lpfc_cmd->scsi_hba = phba;
629 iocbq = &lpfc_cmd->cur_iocbq;
630 list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
631 if (!iocbqrsp)
632 return FAILED;
633 memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));
634
635 iocbq->iocb_flag |= LPFC_IO_POLL;
636 ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
637 &phba->sli.ring[phba->sli.fcp_ring],
638 iocbq, SLI_IOCB_HIGH_PRIORITY,
639 iocbqrsp,
640 lpfc_cmd->timeout);
641 if (ret != IOCB_SUCCESS) {
642 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
643 ret = FAILED;
644 } else {
645 ret = SUCCESS;
646 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
647 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
648 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
649 (lpfc_cmd->result & IOERR_DRVR_MASK))
650 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
651 }
652
653 /*
654 * All outstanding txcmplq I/Os should have been aborted by the target.
 655	 * Unfortunately, some targets do not abide by this, forcing the driver
656 * to double check.
657 */
658 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
659 lpfc_cmd->pCmd->device->id,
660 lpfc_cmd->pCmd->device->lun, 0, LPFC_CTX_TGT);
661
662 /* Return response IOCB to free list. */
663 list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
664 return ret;
665}
666
667static void
668lpfc_scsi_cmd_iocb_cleanup (struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
669 struct lpfc_iocbq *pIocbOut)
670{
671 unsigned long iflag;
672 struct lpfc_scsi_buf *lpfc_cmd =
673 (struct lpfc_scsi_buf *) pIocbIn->context1;
674
675 spin_lock_irqsave(phba->host->host_lock, iflag);
676 lpfc_free_scsi_buf(lpfc_cmd);
677 spin_unlock_irqrestore(phba->host->host_lock, iflag);
678}
679
680static void
681lpfc_scsi_cmd_iocb_cmpl_aborted(struct lpfc_hba *phba,
682 struct lpfc_iocbq *pIocbIn,
683 struct lpfc_iocbq *pIocbOut)
684{
685 struct scsi_cmnd *ml_cmd =
686 ((struct lpfc_scsi_buf *) pIocbIn->context1)->pCmd;
687
688 lpfc_scsi_cmd_iocb_cleanup (phba, pIocbIn, pIocbOut);
689 ml_cmd->host_scribble = NULL;
690}
691
692const char *
693lpfc_info(struct Scsi_Host *host)
694{
695 struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
696 int len;
697 static char lpfcinfobuf[384];
698
 699	memset(lpfcinfobuf, 0, 384);
 700	if (phba && phba->pcidev) {
701 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
702 len = strlen(lpfcinfobuf);
703 snprintf(lpfcinfobuf + len,
704 384-len,
705 " on PCI bus %02x device %02x irq %d",
706 phba->pcidev->bus->number,
707 phba->pcidev->devfn,
708 phba->pcidev->irq);
709 len = strlen(lpfcinfobuf);
710 if (phba->Port[0]) {
711 snprintf(lpfcinfobuf + len,
712 384-len,
713 " port %s",
714 phba->Port);
715 }
716 }
717 return lpfcinfobuf;
718}
719
720static int
721lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
722{
723 struct lpfc_hba *phba =
724 (struct lpfc_hba *) cmnd->device->host->hostdata[0];
725 struct lpfc_sli *psli = &phba->sli;
726 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
727 struct lpfc_nodelist *ndlp = rdata->pnode;
728 struct lpfc_scsi_buf *lpfc_cmd = NULL;
729 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
730 int err = 0;
731
732 /*
733 * The target pointer is guaranteed not to be NULL because the driver
734 * only clears the device->hostdata field in lpfc_slave_destroy. This
735 * approach guarantees no further IO calls on this target.
736 */
737 if (!ndlp) {
738 cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
739 goto out_fail_command;
740 }
741
742 /*
743 * A Fibre Channel target is present and functioning only when the node
744 * state is MAPPED. Any other state is a failure.
745 */
746 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
747 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
748 (ndlp->nlp_state == NLP_STE_UNUSED_NODE)) {
749 cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
750 goto out_fail_command;
751 }
752 /*
753 * The device is most likely recovered and the driver
754 * needs a bit more time to finish. Ask the midlayer
755 * to retry.
756 */
757 goto out_host_busy;
758 }
759
760 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
761 if (lpfc_cmd == NULL) {
762 printk(KERN_WARNING "%s: No buffer available - list empty, "
763 "total count %d\n", __FUNCTION__, phba->total_scsi_bufs);
764 goto out_host_busy;
765 }
766
767 /*
768 * Store the midlayer's command structure for the completion phase
769 * and complete the command initialization.
770 */
771 lpfc_cmd->pCmd = cmnd;
772 lpfc_cmd->rdata = rdata;
773 lpfc_cmd->timeout = 0;
774 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
775 cmnd->scsi_done = done;
776
777 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
778 if (err)
779 goto out_host_busy_free_buf;
780
781 lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);
782
783 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
784 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
785 if (err)
786 goto out_host_busy_free_buf;
787 return 0;
788
789 out_host_busy_free_buf:
790 lpfc_free_scsi_buf(lpfc_cmd);
791 cmnd->host_scribble = NULL;
792 out_host_busy:
793 return SCSI_MLQUEUE_HOST_BUSY;
794
795 out_fail_command:
796 done(cmnd);
797 return 0;
798}
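
/*
 * Return-code contract with the midlayer above: 0 means the command was
 * accepted (or failed and was already completed via done()), while
 * SCSI_MLQUEUE_HOST_BUSY asks the midlayer to hold off and requeue the
 * command later -- used here both for transient node states and for
 * scsi_buf exhaustion.
 */
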
799
800static int
801lpfc_abort_handler(struct scsi_cmnd *cmnd)
802{
803 struct lpfc_hba *phba =
804 (struct lpfc_hba *)cmnd->device->host->hostdata[0];
805 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
806 struct lpfc_iocbq *iocb, *next_iocb;
807 struct lpfc_iocbq *abtsiocb = NULL;
808 struct lpfc_scsi_buf *lpfc_cmd;
809 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
810 IOCB_t *cmd, *icmd;
811 unsigned long snum;
812 unsigned int id, lun;
813 unsigned int loop_count = 0;
814 int ret = IOCB_SUCCESS;
815
816 /*
817 * If the host_scribble data area is NULL, then the driver has already
818 * completed this command, but the midlayer did not see the completion
819 * before the eh fired. Just return SUCCESS.
820 */
821 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
822 if (!lpfc_cmd)
823 return SUCCESS;
824
825 /* save these now since lpfc_cmd can be freed */
826 id = lpfc_cmd->pCmd->device->id;
827 lun = lpfc_cmd->pCmd->device->lun;
828 snum = lpfc_cmd->pCmd->serial_number;
829
830 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
831 cmd = &iocb->iocb;
832 if (iocb->context1 != lpfc_cmd)
833 continue;
834
835 list_del_init(&iocb->list);
836 pring->txq_cnt--;
837 if (!iocb->iocb_cmpl) {
838 list_add_tail(&iocb->list, lpfc_iocb_list);
839 }
840 else {
841 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
842 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
843 lpfc_scsi_cmd_iocb_cmpl_aborted(phba, iocb, iocb);
844 }
845
846 goto out;
847 }
848
849 list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq, list);
850 if (abtsiocb == NULL)
851 return FAILED;
852
853 memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));
854
855 /*
856 * The scsi command was not in the txq. Check the txcmplq and if it is
857 * found, send an abort to the FW.
858 */
859 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
860 if (iocb->context1 != lpfc_cmd)
861 continue;
862
863 iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl_aborted;
864 cmd = &iocb->iocb;
865 icmd = &abtsiocb->iocb;
866 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
867 icmd->un.acxri.abortContextTag = cmd->ulpContext;
868 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
869
870 icmd->ulpLe = 1;
871 icmd->ulpClass = cmd->ulpClass;
872 if (phba->hba_state >= LPFC_LINK_UP)
873 icmd->ulpCommand = CMD_ABORT_XRI_CN;
874 else
875 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
876
877 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) ==
878 IOCB_ERROR) {
879 list_add_tail(&abtsiocb->list, lpfc_iocb_list);
880 ret = IOCB_ERROR;
881 break;
882 }
883
884 /* Wait for abort to complete */
885 while (cmnd->host_scribble)
886 {
887 spin_unlock_irq(phba->host->host_lock);
888 set_current_state(TASK_UNINTERRUPTIBLE);
889 schedule_timeout(LPFC_ABORT_WAIT*HZ);
890 spin_lock_irq(phba->host->host_lock);
891 if (++loop_count
892 > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
893 break;
894 }
895
 896		if (cmnd->host_scribble) {
897 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
898 "%d:0748 abort handler timed "
899 "out waiting for abort to "
900 "complete. Data: "
901 "x%x x%x x%x x%lx\n",
902 phba->brd_no, ret, id, lun, snum);
903 cmnd->host_scribble = NULL;
904 iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cleanup;
905 ret = IOCB_ERROR;
906 }
907
908 break;
909 }
910
911 out:
912 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
913 "%d:0749 SCSI layer issued abort device "
914 "Data: x%x x%x x%x x%lx\n",
915 phba->brd_no, ret, id, lun, snum);
916
917 return ret == IOCB_SUCCESS ? SUCCESS : FAILED;
918}
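
/*
 * The abort above is two-phase: a command still sitting on the txq was
 * never sent to the adapter, so it is simply pulled off the queue and
 * completed locally; a command found on the txcmplq is in flight, so an
 * ABORT_XRI (or CLOSE_XRI when the link is down) is issued and the
 * handler polls host_scribble until the completion routine clears it or
 * the wait loop times out.
 */
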
919
920static int
921lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
922{
923 struct Scsi_Host *shost = cmnd->device->host;
924 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
925 struct lpfc_sli *psli = &phba->sli;
926 struct lpfc_scsi_buf *lpfc_cmd = NULL;
927 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
928 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
929 struct lpfc_iocbq *iocbq, *iocbqrsp = NULL;
930 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
931 struct lpfc_nodelist *pnode = rdata->pnode;
932 int ret = FAILED;
933 int cnt, loopcnt;
934
935 /*
936 * If target is not in a MAPPED state, delay the reset until
937 * target is rediscovered or nodev timeout expires.
938 */
 939	while (1) {
940 if (!pnode)
941 break;
942
943 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
944 spin_unlock_irq(phba->host->host_lock);
945 set_current_state(TASK_UNINTERRUPTIBLE);
 946			schedule_timeout(HZ/2);
947 spin_lock_irq(phba->host->host_lock);
948 }
949 if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
950 break;
951 }
952
953 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
954 if (lpfc_cmd == NULL)
955 goto out;
956
957 lpfc_cmd->pCmd = cmnd;
958 lpfc_cmd->timeout = 60;
959 lpfc_cmd->scsi_hba = phba;
960
961 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
962 if (!ret)
963 goto out_free_scsi_buf;
964
965 iocbq = &lpfc_cmd->cur_iocbq;
966
967 /* get a buffer for this IOCB command response */
968 list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
969 if (iocbqrsp == NULL)
970 goto out_free_scsi_buf;
971
972 memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));
973
974 iocbq->iocb_flag |= LPFC_IO_POLL;
975 iocbq->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
976
977 ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
978 &phba->sli.ring[psli->fcp_ring],
979 iocbq, 0, iocbqrsp, 60);
980 if (ret == IOCB_SUCCESS)
981 ret = SUCCESS;
982
983 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
984 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
985 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT)
986 if (lpfc_cmd->result & IOERR_DRVR_MASK)
987 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
988
989 /*
990 * All outstanding txcmplq I/Os should have been aborted by the target.
 991	 * Unfortunately, some targets do not abide by this, forcing the driver
992 * to double check.
993 */
994 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
995 cmnd->device->id, cmnd->device->lun, 0,
996 LPFC_CTX_LUN);
997
998 loopcnt = 0;
999 while((cnt = lpfc_sli_sum_iocb(phba,
1000 &phba->sli.ring[phba->sli.fcp_ring],
1001 cmnd->device->id, cmnd->device->lun,
1002 LPFC_CTX_LUN))) {
1003 spin_unlock_irq(phba->host->host_lock);
1004 set_current_state(TASK_UNINTERRUPTIBLE);
1005 schedule_timeout(LPFC_RESET_WAIT*HZ);
1006 spin_lock_irq(phba->host->host_lock);
1007
1008 if (++loopcnt
1009 > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
1010 break;
1011 }
1012
1013 if (cnt) {
1014 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
1015 "%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
1016 phba->brd_no, cnt);
1017 }
1018
1019 list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
1020
1021out_free_scsi_buf:
1022 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1023 "%d:0713 SCSI layer issued LUN reset (%d, %d) "
1024 "Data: x%x x%x x%x\n",
1025 phba->brd_no, lpfc_cmd->pCmd->device->id,
1026 lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
1027 lpfc_cmd->result);
1028 lpfc_free_scsi_buf(lpfc_cmd);
1029out:
1030 return ret;
1031}
1032
1033/*
1034 * Note: midlayer calls this function with the host_lock held
1035 */
1036static int
1037lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1038{
1039 struct Scsi_Host *shost = cmnd->device->host;
1040 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
1041 struct lpfc_nodelist *ndlp = NULL;
1042 int match;
1043 int ret = FAILED, i, err_count = 0;
1044 int cnt, loopcnt;
1045 unsigned int midlayer_id = 0;
1046 struct lpfc_scsi_buf * lpfc_cmd = NULL;
1047 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
1048
1049 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
1050 if (lpfc_cmd == NULL)
1051 goto out;
1052
1053 /* The lpfc_cmd storage is reused. Set all loop invariants. */
1054 lpfc_cmd->timeout = 60;
1055 lpfc_cmd->pCmd = cmnd;
1056 lpfc_cmd->scsi_hba = phba;
1057
1058 /*
1059 * Since the driver manages a single bus device, reset all
1060 * targets known to the driver. Should any target reset
1061 * fail, this routine returns failure to the midlayer.
1062 */
1063 midlayer_id = cmnd->device->id;
1064 for (i = 0; i < MAX_FCP_TARGET; i++) {
1065 /* Search the mapped list for this target ID */
1066 match = 0;
1067 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
1068 if ((i == ndlp->nlp_sid) && ndlp->rport) {
1069 match = 1;
1070 break;
1071 }
1072 }
1073 if (!match)
1074 continue;
1075
1076 lpfc_cmd->pCmd->device->id = i;
1077 lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
1078 ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
1079 if (ret != SUCCESS) {
1080 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
1081 "%d:0713 Bus Reset on target %d failed\n",
1082 phba->brd_no, i);
1083 err_count++;
1084 }
1085 }
1086
1087 cmnd->device->id = midlayer_id;
1088 loopcnt = 0;
1089 while((cnt = lpfc_sli_sum_iocb(phba,
1090 &phba->sli.ring[phba->sli.fcp_ring],
1091 0, 0, LPFC_CTX_HOST))) {
1092 spin_unlock_irq(phba->host->host_lock);
1093 set_current_state(TASK_UNINTERRUPTIBLE);
1094 schedule_timeout(LPFC_RESET_WAIT*HZ);
1095 spin_lock_irq(phba->host->host_lock);
1096
1097 if (++loopcnt
1098 > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
1099 break;
1100 }
1101
1102 if (cnt) {
1103 /* flush all outstanding commands on the host */
1104 i = lpfc_sli_abort_iocb(phba,
1105 &phba->sli.ring[phba->sli.fcp_ring], 0, 0, 0,
1106 LPFC_CTX_HOST);
1107
1108 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
1109 "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
1110 phba->brd_no, cnt, i);
1111 }
1112
1113 if (!err_count)
1114 ret = SUCCESS;
1115
1116 lpfc_free_scsi_buf(lpfc_cmd);
1117 lpfc_printf_log(phba,
1118 KERN_ERR,
1119 LOG_FCP,
1120 "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
1121 phba->brd_no, ret);
1122out:
1123 return ret;
1124}
1125
1126static int
1127lpfc_slave_alloc(struct scsi_device *sdev)
1128{
1129 struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0];
1130 struct lpfc_nodelist *ndlp = NULL;
1131 int match = 0;
1132 struct lpfc_scsi_buf *scsi_buf = NULL;
1133 uint32_t total = 0, i;
1134 uint32_t num_to_alloc = 0;
1135 unsigned long flags;
1136 struct list_head *listp;
1137 struct list_head *node_list[6];
1138
1139 /*
1140 * Store the target pointer in the scsi_device hostdata pointer provided
1141 * the driver has already discovered the target id.
1142 */
1143
1144 /* Search the nlp lists other than unmap_list for this target ID */
1145 node_list[0] = &phba->fc_npr_list;
1146 node_list[1] = &phba->fc_nlpmap_list;
1147 node_list[2] = &phba->fc_prli_list;
1148 node_list[3] = &phba->fc_reglogin_list;
1149 node_list[4] = &phba->fc_adisc_list;
1150 node_list[5] = &phba->fc_plogi_list;
1151
1152 for (i = 0; i < 6 && !match; i++) {
1153 listp = node_list[i];
1154 if (list_empty(listp))
1155 continue;
1156 list_for_each_entry(ndlp, listp, nlp_listp) {
1157 if ((sdev->id == ndlp->nlp_sid) && ndlp->rport) {
1158 match = 1;
1159 break;
1160 }
1161 }
1162 }
1163
1164 if (!match)
1165 return -ENXIO;
1166
1167 sdev->hostdata = ndlp->rport->dd_data;
1168
1169 /*
1170 * Populate the cmds_per_lun count scsi_bufs into this host's globally
1171 * available list of scsi buffers. Don't allocate more than the
1172 * HBA limit conveyed to the midlayer via the host structure. Note
1173 * that this list of scsi bufs exists for the lifetime of the driver.
1174 */
1175 total = phba->total_scsi_bufs;
1176 num_to_alloc = LPFC_CMD_PER_LUN;
1177 if (total >= phba->cfg_hba_queue_depth) {
1178 printk(KERN_WARNING "%s, At config limitation of "
1179 "%d allocated scsi_bufs\n", __FUNCTION__, total);
1180 return 0;
1181 } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
1182 num_to_alloc = phba->cfg_hba_queue_depth - total;
1183 }
1184
1185 for (i = 0; i < num_to_alloc; i++) {
1186 scsi_buf = lpfc_get_scsi_buf(phba);
1187 if (!scsi_buf) {
1188 printk(KERN_ERR "%s, failed to allocate "
1189 "scsi_buf\n", __FUNCTION__);
1190 break;
1191 }
1192
1193 spin_lock_irqsave(phba->host->host_lock, flags);
1194 phba->total_scsi_bufs++;
1195 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
1196 spin_unlock_irqrestore(phba->host->host_lock, flags);
1197 }
1198 return 0;
1199}
1200
1201static int
1202lpfc_slave_configure(struct scsi_device *sdev)
1203{
1204 struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata[0];
1205 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1206
1207 if (sdev->tagged_supported)
1208 scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
1209 else
1210 scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);
1211
1212 /*
1213 * Initialize the fc transport attributes for the target
1214 * containing this scsi device. Also note that the driver's
1215 * target pointer is stored in the starget_data for the
1216 * driver's sysfs entry point functions.
1217 */
1218 rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;
1219
1220 return 0;
1221}
1222
1223static void
1224lpfc_slave_destroy(struct scsi_device *sdev)
1225{
1226 sdev->hostdata = NULL;
1227 return;
1228}
1229
1230struct scsi_host_template lpfc_template = {
1231 .module = THIS_MODULE,
1232 .name = LPFC_DRIVER_NAME,
1233 .info = lpfc_info,
1234 .queuecommand = lpfc_queuecommand,
1235 .eh_abort_handler = lpfc_abort_handler,
1236 .eh_device_reset_handler= lpfc_reset_lun_handler,
1237 .eh_bus_reset_handler = lpfc_reset_bus_handler,
1238 .slave_alloc = lpfc_slave_alloc,
1239 .slave_configure = lpfc_slave_configure,
1240 .slave_destroy = lpfc_slave_destroy,
1241 .this_id = -1,
1242 .sg_tablesize = LPFC_SG_SEG_CNT,
1243 .cmd_per_lun = LPFC_CMD_PER_LUN,
1244 .use_clustering = ENABLE_CLUSTERING,
1245 .shost_attrs = lpfc_host_attrs,
1246};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
new file mode 100644
index 000000000000..4aafba47628d
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -0,0 +1,157 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_scsi.h 1.83 2005/04/07 08:47:43EDT sf_support Exp $
23 */
24
25struct lpfc_hba;
26
27#define list_remove_head(list, entry, type, member) \
28 if (!list_empty(list)) { \
29 entry = list_entry((list)->next, type, member); \
30 list_del_init(&entry->member); \
31 }
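
/*
 * Note: list_remove_head() leaves `entry` untouched when the list is
 * empty, so callers initialize it to NULL beforehand and test for NULL
 * afterwards (see lpfc_queuecommand and lpfc_scsi_tgt_reset).
 */
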
32
33#define list_get_first(list, type, member) \
34 (list_empty(list)) ? NULL : \
35 list_entry((list)->next, type, member)
36
37/* per-port data that is allocated in the FC transport for us */
38struct lpfc_rport_data {
39 struct lpfc_nodelist *pnode; /* Pointer to the node structure. */
40};
41
42struct fcp_rsp {
43 uint32_t rspRsvd1; /* FC Word 0, byte 0:3 */
44 uint32_t rspRsvd2; /* FC Word 1, byte 0:3 */
45
46 uint8_t rspStatus0; /* FCP_STATUS byte 0 (reserved) */
47 uint8_t rspStatus1; /* FCP_STATUS byte 1 (reserved) */
48 uint8_t rspStatus2; /* FCP_STATUS byte 2 field validity */
49#define RSP_LEN_VALID 0x01 /* bit 0 */
50#define SNS_LEN_VALID 0x02 /* bit 1 */
51#define RESID_OVER 0x04 /* bit 2 */
52#define RESID_UNDER 0x08 /* bit 3 */
53 uint8_t rspStatus3; /* FCP_STATUS byte 3 SCSI status byte */
54
55 uint32_t rspResId; /* Residual xfer if residual count field set in
 56				   rspStatus2 */
57 /* Received in Big Endian format */
58 uint32_t rspSnsLen; /* Length of sense data in fcpSnsInfo */
59 /* Received in Big Endian format */
60 uint32_t rspRspLen; /* Length of FCP response data in fcpRspInfo */
61 /* Received in Big Endian format */
62
63 uint8_t rspInfo0; /* FCP_RSP_INFO byte 0 (reserved) */
64 uint8_t rspInfo1; /* FCP_RSP_INFO byte 1 (reserved) */
65 uint8_t rspInfo2; /* FCP_RSP_INFO byte 2 (reserved) */
66 uint8_t rspInfo3; /* FCP_RSP_INFO RSP_CODE byte 3 */
67
68#define RSP_NO_FAILURE 0x00
69#define RSP_DATA_BURST_ERR 0x01
70#define RSP_CMD_FIELD_ERR 0x02
71#define RSP_RO_MISMATCH_ERR 0x03
72#define RSP_TM_NOT_SUPPORTED 0x04 /* Task mgmt function not supported */
73#define RSP_TM_NOT_COMPLETED 0x05 /* Task mgmt function not performed */
74
75 uint32_t rspInfoRsvd; /* FCP_RSP_INFO bytes 4-7 (reserved) */
76
77 uint8_t rspSnsInfo[128];
78#define SNS_ILLEGAL_REQ 0x05 /* sense key is byte 3 ([2]) */
79#define SNSCOD_BADCMD 0x20 /* sense code is byte 13 ([12]) */
80};
81
82struct fcp_cmnd {
83 uint32_t fcpLunMsl; /* most significant lun word (32 bits) */
84 uint32_t fcpLunLsl; /* least significant lun word (32 bits) */
85 /* # of bits to shift lun id to end up in right
86 * payload word, little endian = 8, big = 16.
87 */
88#if __BIG_ENDIAN
89#define FC_LUN_SHIFT 16
90#define FC_ADDR_MODE_SHIFT 24
91#else /* __LITTLE_ENDIAN */
92#define FC_LUN_SHIFT 8
93#define FC_ADDR_MODE_SHIFT 0
94#endif
95
96 uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */
97 uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */
98#define SIMPLE_Q 0x00
99#define HEAD_OF_Q 0x01
100#define ORDERED_Q 0x02
101#define ACA_Q 0x04
102#define UNTAGGED 0x05
103 uint8_t fcpCntl2; /* FCP_CTL byte 2 task management codes */
104#define FCP_ABORT_TASK_SET 0x02 /* Bit 1 */
105#define FCP_CLEAR_TASK_SET 0x04 /* bit 2 */
106#define FCP_BUS_RESET 0x08 /* bit 3 */
107#define FCP_LUN_RESET 0x10 /* bit 4 */
108#define FCP_TARGET_RESET 0x20 /* bit 5 */
109#define FCP_CLEAR_ACA 0x40 /* bit 6 */
110#define FCP_TERMINATE_TASK 0x80 /* bit 7 */
111 uint8_t fcpCntl3;
112#define WRITE_DATA 0x01 /* Bit 0 */
113#define READ_DATA 0x02 /* Bit 1 */
114
115 uint8_t fcpCdb[16]; /* SRB cdb field is copied here */
116 uint32_t fcpDl; /* Total transfer length */
117
118};
119
120struct lpfc_scsi_buf {
121 struct list_head list;
122 struct scsi_cmnd *pCmd;
123 struct lpfc_hba *scsi_hba;
124 struct lpfc_rport_data *rdata;
125
126 uint32_t timeout;
127
128 uint16_t status; /* From IOCB Word 7- ulpStatus */
129 uint32_t result; /* From IOCB Word 4. */
130
131 uint32_t seg_cnt; /* Number of scatter-gather segments returned by
132 * dma_map_sg. The driver needs this for calls
133 * to dma_unmap_sg. */
134 dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */
135
136 /*
 137	 * data and dma_handle are the kernel virtual and bus address of the
138 * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
139 * gather bde list that supports the sg_tablesize value.
140 */
141 void *data;
142 dma_addr_t dma_handle;
143
144 struct fcp_cmnd *fcp_cmnd;
145 struct fcp_rsp *fcp_rsp;
146 struct ulp_bde64 *fcp_bpl;
147
148 /* cur_iocbq has phys of the dma-able buffer.
149 * Iotag is in here
150 */
151 struct lpfc_iocbq cur_iocbq;
152};
153
154#define LPFC_SCSI_DMA_EXT_SIZE 264
155#define LPFC_BPL_SIZE 1024
156
157#define MDAC_DIRECT_CMD 0x22
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
new file mode 100644
index 000000000000..8d14b28c80b9
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -0,0 +1,2885 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_sli.c 1.232 2005/04/13 11:59:16EDT sf_support Exp $
23 */
24
25#include <linux/blkdev.h>
26#include <linux/pci.h>
27#include <linux/interrupt.h>
28#include <linux/delay.h>
29
30#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_host.h>
33
34#include "lpfc_hw.h"
35#include "lpfc_sli.h"
36#include "lpfc_disc.h"
37#include "lpfc_scsi.h"
38#include "lpfc.h"
39#include "lpfc_crtn.h"
40#include "lpfc_logmsg.h"
41#include "lpfc_compat.h"
42
43/*
44 * Define macro to log: Mailbox command x%x cannot issue Data
45 * This allows multiple uses of lpfc_msgBlk0311
46 * w/o perturbing log msg utility.
47 */
48#define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \
49 lpfc_printf_log(phba, \
50 KERN_INFO, \
51 LOG_MBOX | LOG_SLI, \
52 "%d:0311 Mailbox command x%x cannot issue " \
53 "Data: x%x x%x x%x\n", \
54 phba->brd_no, \
55 mb->mbxCommand, \
56 phba->hba_state, \
57 psli->sli_flag, \
58 flag);
59
60
61/* There are only four IOCB completion types. */
62typedef enum _lpfc_iocb_type {
63 LPFC_UNKNOWN_IOCB,
64 LPFC_UNSOL_IOCB,
65 LPFC_SOL_IOCB,
66 LPFC_ABORT_IOCB
67} lpfc_iocb_type;
68
69/*
70 * Translate the iocb command to an iocb command type used to decide the final
71 * disposition of each completed IOCB.
72 */
73static lpfc_iocb_type
74lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
75{
76 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
77
78 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
 79		return LPFC_UNKNOWN_IOCB;
80
81 switch (iocb_cmnd) {
82 case CMD_XMIT_SEQUENCE_CR:
83 case CMD_XMIT_SEQUENCE_CX:
84 case CMD_XMIT_BCAST_CN:
85 case CMD_XMIT_BCAST_CX:
86 case CMD_ELS_REQUEST_CR:
87 case CMD_ELS_REQUEST_CX:
88 case CMD_CREATE_XRI_CR:
89 case CMD_CREATE_XRI_CX:
90 case CMD_GET_RPI_CN:
91 case CMD_XMIT_ELS_RSP_CX:
92 case CMD_GET_RPI_CR:
93 case CMD_FCP_IWRITE_CR:
94 case CMD_FCP_IWRITE_CX:
95 case CMD_FCP_IREAD_CR:
96 case CMD_FCP_IREAD_CX:
97 case CMD_FCP_ICMND_CR:
98 case CMD_FCP_ICMND_CX:
99 case CMD_ADAPTER_MSG:
100 case CMD_ADAPTER_DUMP:
101 case CMD_XMIT_SEQUENCE64_CR:
102 case CMD_XMIT_SEQUENCE64_CX:
103 case CMD_XMIT_BCAST64_CN:
104 case CMD_XMIT_BCAST64_CX:
105 case CMD_ELS_REQUEST64_CR:
106 case CMD_ELS_REQUEST64_CX:
107 case CMD_FCP_IWRITE64_CR:
108 case CMD_FCP_IWRITE64_CX:
109 case CMD_FCP_IREAD64_CR:
110 case CMD_FCP_IREAD64_CX:
111 case CMD_FCP_ICMND64_CR:
112 case CMD_FCP_ICMND64_CX:
113 case CMD_GEN_REQUEST64_CR:
114 case CMD_GEN_REQUEST64_CX:
115 case CMD_XMIT_ELS_RSP64_CX:
116 type = LPFC_SOL_IOCB;
117 break;
118 case CMD_ABORT_XRI_CN:
119 case CMD_ABORT_XRI_CX:
120 case CMD_CLOSE_XRI_CN:
121 case CMD_CLOSE_XRI_CX:
122 case CMD_XRI_ABORTED_CX:
123 case CMD_ABORT_MXRI64_CN:
124 type = LPFC_ABORT_IOCB;
125 break;
126 case CMD_RCV_SEQUENCE_CX:
127 case CMD_RCV_ELS_REQ_CX:
128 case CMD_RCV_SEQUENCE64_CX:
129 case CMD_RCV_ELS_REQ64_CX:
130 type = LPFC_UNSOL_IOCB;
131 break;
132 default:
133 type = LPFC_UNKNOWN_IOCB;
134 break;
135 }
136
137 return type;
138}
139
140static int
141lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb)
142{
143 struct lpfc_sli *psli = &phba->sli;
144 MAILBOX_t *pmbox = &pmb->mb;
145 int i, rc;
146
147 for (i = 0; i < psli->num_rings; i++) {
148 phba->hba_state = LPFC_INIT_MBX_CMDS;
149 lpfc_config_ring(phba, i, pmb);
150 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
151 if (rc != MBX_SUCCESS) {
152 lpfc_printf_log(phba,
153 KERN_ERR,
154 LOG_INIT,
155 "%d:0446 Adapter failed to init, "
156 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
157 "ring %d\n",
158 phba->brd_no,
159 pmbox->mbxCommand,
160 pmbox->mbxStatus,
161 i);
162 phba->hba_state = LPFC_HBA_ERROR;
163 return -ENXIO;
164 }
165 }
166 return 0;
167}
168
169static int
170lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
171 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
172{
173 uint16_t iotag;
174
175 list_add_tail(&piocb->list, &pring->txcmplq);
176 pring->txcmplq_cnt++;
177 if (unlikely(pring->ringno == LPFC_ELS_RING))
178 mod_timer(&phba->els_tmofunc,
179 jiffies + HZ * (phba->fc_ratov << 1));
180
181 if (pring->fast_lookup) {
182 /* Setup fast lookup based on iotag for completion */
183 iotag = piocb->iocb.ulpIoTag;
184 if (iotag && (iotag < pring->fast_iotag))
185 *(pring->fast_lookup + iotag) = piocb;
186 else {
187
 188			/* Cmd ring <ringno> put: iotag <iotag> greater than
189 configured max <fast_iotag> wd0 <icmd> */
190 lpfc_printf_log(phba,
191 KERN_ERR,
192 LOG_SLI,
193 "%d:0316 Cmd ring %d put: iotag x%x "
194 "greater then configured max x%x "
195 "wd0 x%x\n",
196 phba->brd_no,
197 pring->ringno, iotag,
198 pring->fast_iotag,
199 *(((uint32_t *)(&piocb->iocb)) + 7));
200 }
201 }
202 return (0);
203}
204
205static struct lpfc_iocbq *
206lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
207{
208 struct list_head *dlp;
209 struct lpfc_iocbq *cmd_iocb;
210
211 dlp = &pring->txq;
212 cmd_iocb = NULL;
213 list_remove_head((&pring->txq), cmd_iocb,
214 struct lpfc_iocbq,
215 list);
216 if (cmd_iocb) {
217 /* If the first ptr is not equal to the list header,
 218		 * dequeue the IOCBQ_t and return it.
219 */
220 pring->txq_cnt--;
221 }
222 return (cmd_iocb);
223}
224
225static IOCB_t *
226lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
227{
228 MAILBOX_t *mbox = (MAILBOX_t *)phba->sli.MBhostaddr;
229 PGP *pgp = (PGP *)&mbox->us.s2.port[pring->ringno];
230 uint32_t max_cmd_idx = pring->numCiocb;
231 IOCB_t *iocb = NULL;
232
233 if ((pring->next_cmdidx == pring->cmdidx) &&
234 (++pring->next_cmdidx >= max_cmd_idx))
235 pring->next_cmdidx = 0;
236
237 if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
238
239 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
240
241 if (unlikely(pring->local_getidx >= max_cmd_idx)) {
242 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
243 "%d:0315 Ring %d issue: portCmdGet %d "
244 "is bigger then cmd ring %d\n",
245 phba->brd_no, pring->ringno,
246 pring->local_getidx, max_cmd_idx);
247
248 phba->hba_state = LPFC_HBA_ERROR;
249 /*
250 * All error attention handlers are posted to
251 * worker thread
252 */
253 phba->work_ha |= HA_ERATT;
254 phba->work_hs = HS_FFER3;
255 if (phba->work_wait)
256 wake_up(phba->work_wait);
257
258 return NULL;
259 }
260
261 if (pring->local_getidx == pring->next_cmdidx)
262 return NULL;
263 }
264
265 iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);
266
267 return iocb;
268}
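
/*
 * A minimal sketch of the circular-ring occupancy test above: with a
 * producer (put) index and a consumer (get) index on a ring of `entries`
 * slots, the ring is full when advancing the producer would collide with
 * the consumer.  Names are illustrative:
 */
static int demo_ring_full(unsigned int put, unsigned int get,
			  unsigned int entries)
{
	return ((put + 1) % entries) == get;
}
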
269
270static uint32_t
271lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
272{
273 uint32_t search_start;
274
275 if (pring->fast_lookup == NULL) {
276 pring->iotag_ctr++;
277 if (pring->iotag_ctr >= pring->iotag_max)
278 pring->iotag_ctr = 1;
279 return pring->iotag_ctr;
280 }
281
282 search_start = pring->iotag_ctr;
283
284 do {
285 pring->iotag_ctr++;
286 if (pring->iotag_ctr >= pring->fast_iotag)
287 pring->iotag_ctr = 1;
288
289 if (*(pring->fast_lookup + pring->iotag_ctr) == NULL)
290 return pring->iotag_ctr;
291
292 } while (pring->iotag_ctr != search_start);
293
294 /*
295 * Outstanding I/O count for ring <ringno> is at max <fast_iotag>
296 */
297 lpfc_printf_log(phba,
298 KERN_ERR,
299 LOG_SLI,
300 "%d:0318 Outstanding I/O count for ring %d is at max x%x\n",
301 phba->brd_no,
302 pring->ringno,
303 pring->fast_iotag);
304 return (0);
305}
306
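/*
* Copy nextiocb into the free command slot 'iocb', stamp it with a
* fresh iotag, and advance cmdidx in SLIM so the HBA sees the entry.
* Iocbs that expect a response are parked on the txcmplq; fire-and-
* forget iocbs (iocb_cmpl == NULL) go straight back to the free list.
* Note that the iotag is drawn from the FCP ring's allocator here,
* whichever ring is passed in.
*/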
307static void
308lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
309 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
310{
311 /*
312 * Allocate and set up an iotag
313 */
314 nextiocb->iocb.ulpIoTag =
315 lpfc_sli_next_iotag(phba, &phba->sli.ring[phba->sli.fcp_ring]);
316
317 /*
318 * Issue iocb command to adapter
319 */
320 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t));
321 wmb();
322 pring->stats.iocb_cmd++;
323
324 /*
325 * If there is no completion routine to call, we can release the
326 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
327 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
328 */
329 if (nextiocb->iocb_cmpl)
330 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
331 else {
332 list_add_tail(&nextiocb->list, &phba->lpfc_iocb_list);
333 }
334
335 /*
336 * Let the HBA know what IOCB slot will be the next one the
337 * driver will put a command into.
338 */
339 pring->cmdidx = pring->next_cmdidx;
340 writeb(pring->cmdidx, phba->MBslimaddr
341 + (SLIMOFF + (pring->ringno * 2)) * 4);
342}
343
344static void
345lpfc_sli_update_full_ring(struct lpfc_hba * phba,
346 struct lpfc_sli_ring *pring)
347{
348 int ringno = pring->ringno;
349
350 pring->flag |= LPFC_CALL_RING_AVAILABLE;
351
352 wmb();
353
354 /*
355 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
356 * The HBA will tell us when an IOCB entry is available.
357 */
358 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
359 readl(phba->CAregaddr); /* flush */
360
361 pring->stats.iocb_cmd_full++;
362}
363
364static void
365lpfc_sli_update_ring(struct lpfc_hba * phba,
366 struct lpfc_sli_ring *pring)
367{
368 int ringno = pring->ringno;
369
370 /*
371 * Tell the HBA that there is work to do in this ring.
372 */
373 wmb();
374 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
375 readl(phba->CAregaddr); /* flush */
376}
377
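/*
* Drain the ring's txq into free command slots, provided the link is
* up, link-attention processing does not block the FCP ring, and no
* outstanding mailbox command has stopped the ring. If the ring fills
* first, ask the HBA for a ring-available interrupt instead.
*/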
378static void
379lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
380{
381 IOCB_t *iocb;
382 struct lpfc_iocbq *nextiocb;
383
384 /*
385 * Check to see if:
386 * (a) there is anything on the txq to send
387 * (b) link is up
388 * (c) link attention events can be processed (fcp ring only)
389 * (d) IOCB processing is not blocked by the outstanding mbox command.
390 */
391 if (pring->txq_cnt &&
392 (phba->hba_state > LPFC_LINK_DOWN) &&
393 (pring->ringno != phba->sli.fcp_ring ||
394 phba->sli.sli_flag & LPFC_PROCESS_LA) &&
395 !(pring->flag & LPFC_STOP_IOCB_MBX)) {
396
397 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
398 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
399 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
400
401 if (iocb)
402 lpfc_sli_update_ring(phba, pring);
403 else
404 lpfc_sli_update_full_ring(phba, pring);
405 }
406
407 return;
408}
409
410/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
411static void
412lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
413{
414 PGP *pgp =
415 ((PGP *) &
416 (((MAILBOX_t *)phba->sli.MBhostaddr)->us.s2.port[ringno]));
417
418 /* If the ring is active, flag it */
419 if (phba->sli.ring[ringno].cmdringaddr) {
420 if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
421 phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
422 /*
423 * Force update of the local copy of cmdGetInx
424 */
425 phba->sli.ring[ringno].local_getidx
426 = le32_to_cpu(pgp->cmdGetInx);
427 spin_lock_irq(phba->host->host_lock);
428 lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
429 spin_unlock_irq(phba->host->host_lock);
430 }
431 }
432}
433
434static int
435lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
436{
437 uint8_t ret;
438
439 switch (mbxCommand) {
440 case MBX_LOAD_SM:
441 case MBX_READ_NV:
442 case MBX_WRITE_NV:
443 case MBX_RUN_BIU_DIAG:
444 case MBX_INIT_LINK:
445 case MBX_DOWN_LINK:
446 case MBX_CONFIG_LINK:
447 case MBX_CONFIG_RING:
448 case MBX_RESET_RING:
449 case MBX_READ_CONFIG:
450 case MBX_READ_RCONFIG:
451 case MBX_READ_SPARM:
452 case MBX_READ_STATUS:
453 case MBX_READ_RPI:
454 case MBX_READ_XRI:
455 case MBX_READ_REV:
456 case MBX_READ_LNK_STAT:
457 case MBX_REG_LOGIN:
458 case MBX_UNREG_LOGIN:
459 case MBX_READ_LA:
460 case MBX_CLEAR_LA:
461 case MBX_DUMP_MEMORY:
462 case MBX_DUMP_CONTEXT:
463 case MBX_RUN_DIAGS:
464 case MBX_RESTART:
465 case MBX_UPDATE_CFG:
466 case MBX_DOWN_LOAD:
467 case MBX_DEL_LD_ENTRY:
468 case MBX_RUN_PROGRAM:
469 case MBX_SET_MASK:
470 case MBX_SET_SLIM:
471 case MBX_UNREG_D_ID:
472 case MBX_CONFIG_FARP:
473 case MBX_LOAD_AREA:
474 case MBX_RUN_BIU_DIAG64:
475 case MBX_CONFIG_PORT:
476 case MBX_READ_SPARM64:
477 case MBX_READ_RPI64:
478 case MBX_REG_LOGIN64:
479 case MBX_READ_LA64:
480 case MBX_FLASH_WR_ULA:
481 case MBX_SET_DEBUG:
482 case MBX_LOAD_EXP_ROM:
483 ret = mbxCommand;
484 break;
485 default:
486 ret = MBX_SHUTDOWN;
487 break;
488 }
489 return (ret);
490}
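
/*
* Mailbox completion handler used by the synchronous mailbox path:
* wake whichever thread is parked on the wait queue stashed in
* context1, unless the waiter already timed out and moved on.
*/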
491static void
492lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
493{
494 wait_queue_head_t *pdone_q;
495
496 /*
497 	 * If pdone_q is NULL, the driver thread gave up waiting and
498 * continued running.
499 */
500 pdone_q = (wait_queue_head_t *) pmboxq->context1;
501 if (pdone_q)
502 wake_up_interruptible(pdone_q);
503 return;
504}
505
506void
507lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
508{
509 struct lpfc_dmabuf *mp;
510 mp = (struct lpfc_dmabuf *) (pmb->context1);
511 if (mp) {
512 lpfc_mbuf_free(phba, mp->virt, mp->phys);
513 kfree(mp);
514 }
515 mempool_free( pmb, phba->mbox_mem_pool);
516 return;
517}
518
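/*
* Process a mailbox completion (SLI-2 only): confirm the host owns the
* mailbox (spinning briefly on stray interrupts), cancel the mailbox
* timeout, retry NO_RESOURCES failures, run the completion callback,
* then either launch the next queued mailbox command or, if none is
* waiting, re-enable IOCB processing on every ring and free any
* lpfc_dmabufs parked on the freebufList.
*/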
519int
520lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
521{
522 MAILBOX_t *mbox;
523 MAILBOX_t *pmbox;
524 LPFC_MBOXQ_t *pmb;
525 struct lpfc_sli *psli;
526 int i, rc;
527 uint32_t process_next;
528
529 psli = &phba->sli;
530 /* We should only get here if we are in SLI2 mode */
531 if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
532 return (1);
533 }
534
535 phba->sli.slistat.mbox_event++;
536
537 /* Get a Mailbox buffer to setup mailbox commands for callback */
538 if ((pmb = phba->sli.mbox_active)) {
539 pmbox = &pmb->mb;
540 mbox = (MAILBOX_t *) phba->sli.MBhostaddr;
541
542 /* First check out the status word */
543 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));
544
545 /* Sanity check to ensure the host owns the mailbox */
546 if (pmbox->mbxOwner != OWN_HOST) {
547 			/* Let's retry for a while */
548 for (i = 0; i < 10240; i++) {
549 /* First copy command data */
550 lpfc_sli_pcimem_bcopy(mbox, pmbox,
551 sizeof (uint32_t));
552 if (pmbox->mbxOwner == OWN_HOST)
553 goto mbout;
554 }
555 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
556 <status> */
557 lpfc_printf_log(phba,
558 KERN_ERR,
559 LOG_MBOX | LOG_SLI,
560 "%d:0304 Stray Mailbox Interrupt "
561 "mbxCommand x%x mbxStatus x%x\n",
562 phba->brd_no,
563 pmbox->mbxCommand,
564 pmbox->mbxStatus);
565
566 spin_lock_irq(phba->host->host_lock);
567 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
568 spin_unlock_irq(phba->host->host_lock);
569 return (1);
570 }
571
572 mbout:
573 del_timer_sync(&phba->sli.mbox_tmo);
574 phba->work_hba_events &= ~WORKER_MBOX_TMO;
575
576 /*
577 		 * It is a fatal error if an unknown mbox command completes.
578 */
579 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
580 MBX_SHUTDOWN) {
581
582 			/* Unknown mailbox command completion */
583 lpfc_printf_log(phba,
584 KERN_ERR,
585 LOG_MBOX | LOG_SLI,
586 "%d:0323 Unknown Mailbox command %x Cmpl\n",
587 phba->brd_no,
588 pmbox->mbxCommand);
589 phba->hba_state = LPFC_HBA_ERROR;
590 phba->work_hs = HS_FFER3;
591 lpfc_handle_eratt(phba);
592 return (0);
593 }
594
595 phba->sli.mbox_active = NULL;
596 if (pmbox->mbxStatus) {
597 phba->sli.slistat.mbox_stat_err++;
598 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
599 /* Mbox cmd cmpl error - RETRYing */
600 lpfc_printf_log(phba,
601 KERN_INFO,
602 LOG_MBOX | LOG_SLI,
603 "%d:0305 Mbox cmd cmpl error - "
604 "RETRYing Data: x%x x%x x%x x%x\n",
605 phba->brd_no,
606 pmbox->mbxCommand,
607 pmbox->mbxStatus,
608 pmbox->un.varWords[0],
609 phba->hba_state);
610 pmbox->mbxStatus = 0;
611 pmbox->mbxOwner = OWN_HOST;
612 spin_lock_irq(phba->host->host_lock);
613 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
614 spin_unlock_irq(phba->host->host_lock);
615 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
616 if (rc == MBX_SUCCESS)
617 return (0);
618 }
619 }
620
621 /* Mailbox cmd <cmd> Cmpl <cmpl> */
622 lpfc_printf_log(phba,
623 KERN_INFO,
624 LOG_MBOX | LOG_SLI,
625 "%d:0307 Mailbox cmd x%x Cmpl x%p "
626 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
627 phba->brd_no,
628 pmbox->mbxCommand,
629 pmb->mbox_cmpl,
630 *((uint32_t *) pmbox),
631 pmbox->un.varWords[0],
632 pmbox->un.varWords[1],
633 pmbox->un.varWords[2],
634 pmbox->un.varWords[3],
635 pmbox->un.varWords[4],
636 pmbox->un.varWords[5],
637 pmbox->un.varWords[6],
638 pmbox->un.varWords[7]);
639
640 if (pmb->mbox_cmpl) {
641 lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
642 pmb->mbox_cmpl(phba,pmb);
643 }
644 }
645
646
647 do {
648 process_next = 0; /* by default don't loop */
649 spin_lock_irq(phba->host->host_lock);
650 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
651
652 /* Process next mailbox command if there is one */
653 if ((pmb = lpfc_mbox_get(phba))) {
654 spin_unlock_irq(phba->host->host_lock);
655 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
656 if (rc == MBX_NOT_FINISHED) {
657 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
658 pmb->mbox_cmpl(phba,pmb);
659 process_next = 1;
660 continue; /* loop back */
661 }
662 } else {
663 spin_unlock_irq(phba->host->host_lock);
664 /* Turn on IOCB processing */
665 for (i = 0; i < phba->sli.num_rings; i++) {
666 lpfc_sli_turn_on_ring(phba, i);
667 }
668
669 /* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */
670 while (!list_empty(&phba->freebufList)) {
671 struct lpfc_dmabuf *mp;
672
673 mp = NULL;
674 list_remove_head((&phba->freebufList),
675 mp,
676 struct lpfc_dmabuf,
677 list);
678 if (mp) {
679 lpfc_mbuf_free(phba, mp->virt,
680 mp->phys);
681 kfree(mp);
682 }
683 }
684 }
685
686 } while (process_next);
687
688 return (0);
689}
690static int
691lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
692 struct lpfc_iocbq *saveq)
693{
694 IOCB_t * irsp;
695 WORD5 * w5p;
696 uint32_t Rctl, Type;
697 uint32_t match, i;
698
699 match = 0;
700 irsp = &(saveq->iocb);
701 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
702 || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
703 Rctl = FC_ELS_REQ;
704 Type = FC_ELS_DATA;
705 } else {
706 		w5p = (WORD5 *) &(saveq->iocb.un.ulpWord[5]);
709 Rctl = w5p->hcsw.Rctl;
710 Type = w5p->hcsw.Type;
711
712 /* Firmware Workaround */
713 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
714 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
715 Rctl = FC_ELS_REQ;
716 Type = FC_ELS_DATA;
717 w5p->hcsw.Rctl = Rctl;
718 w5p->hcsw.Type = Type;
719 }
720 }
721 	/* Unsolicited responses */
722 if (pring->prt[0].profile) {
723 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, saveq);
724 match = 1;
725 } else {
726 		/* We must search, based on rctl / type,
727 		   for the right routine */
728 		for (i = 0; i < pring->num_mask; i++) {
729 			if ((pring->prt[i].rctl == Rctl) &&
730 			    (pring->prt[i].type == Type)) {
731 				(pring->prt[i].lpfc_sli_rcv_unsol_event)
732 					(phba, pring, saveq);
733 				match = 1;
734 				break;
735 			}
736 		}
740 }
741 if (match == 0) {
742 /* Unexpected Rctl / Type received */
743 /* Ring <ringno> handler: unexpected
744 Rctl <Rctl> Type <Type> received */
745 lpfc_printf_log(phba,
746 KERN_WARNING,
747 LOG_SLI,
748 "%d:0313 Ring %d handler: unexpected Rctl x%x "
749 "Type x%x received \n",
750 phba->brd_no,
751 pring->ringno,
752 Rctl,
753 Type);
754 }
755 return(1);
756}
757
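/*
* Linear search of the txcmplq for the command iocb whose ulpIoTag
* matches the given response; the match is unlinked from the queue
* and returned (NULL if no command is found).
*/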
758static struct lpfc_iocbq *
759lpfc_sli_txcmpl_ring_search_slow(struct lpfc_sli_ring * pring,
760 struct lpfc_iocbq * prspiocb)
761{
762 IOCB_t *icmd = NULL;
763 IOCB_t *irsp = NULL;
764 struct lpfc_iocbq *cmd_iocb;
765 struct lpfc_iocbq *iocb, *next_iocb;
766 uint16_t iotag;
767
768 irsp = &prspiocb->iocb;
769 iotag = irsp->ulpIoTag;
770 cmd_iocb = NULL;
771
772 	/* Search through the txcmplq from the beginning */
773 list_for_each_entry_safe(iocb, next_iocb, &(pring->txcmplq), list) {
774 icmd = &iocb->iocb;
775 if (iotag == icmd->ulpIoTag) {
776 /* Found a match. */
777 cmd_iocb = iocb;
778 list_del(&iocb->list);
779 pring->txcmplq_cnt--;
780 break;
781 }
782 }
783
784 return (cmd_iocb);
785}
786
787static struct lpfc_iocbq *
788lpfc_sli_txcmpl_ring_iotag_lookup(struct lpfc_hba * phba,
789 struct lpfc_sli_ring * pring,
790 struct lpfc_iocbq * prspiocb)
791{
792 IOCB_t *irsp = NULL;
793 struct lpfc_iocbq *cmd_iocb = NULL;
794 uint16_t iotag;
795
796 if (unlikely(pring->fast_lookup == NULL))
797 return NULL;
798
799 /* Use fast lookup based on iotag for completion */
800 irsp = &prspiocb->iocb;
801 iotag = irsp->ulpIoTag;
802 if (iotag < pring->fast_iotag) {
803 cmd_iocb = *(pring->fast_lookup + iotag);
804 *(pring->fast_lookup + iotag) = NULL;
805 if (cmd_iocb) {
806 list_del(&cmd_iocb->list);
807 pring->txcmplq_cnt--;
808 return cmd_iocb;
809 } else {
810 /*
811 * This is clearly an error. A ring that uses iotags
812 			 * should never get an interrupt for a completion that
813 			 * is not on the ring. Return NULL and log an error.
814 */
815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
816 "%d:0327 Rsp ring %d error - command "
817 "completion for iotag x%x not found\n",
818 phba->brd_no, pring->ringno, iotag);
819 return NULL;
820 }
821 }
822
823 /*
824 	 * Rsp ring <ringno> get: iotag <iotag> greater than
825 * configured max <fast_iotag> wd0 <irsp>. This is an
826 * error. Just return NULL.
827 */
828 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
829 "%d:0317 Rsp ring %d get: iotag x%x greater then "
830 "configured max x%x wd0 x%x\n",
831 phba->brd_no, pring->ringno, iotag, pring->fast_iotag,
832 *(((uint32_t *) irsp) + 7));
833 return NULL;
834}
835
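/*
* Complete a solicited iocb: find the originating command on the
* txcmplq (slow search) and invoke its completion routine with the
* host lock dropped. Returns 0 when the iocb was issued as a polled
* command (LPFC_IO_POLL), so the poller consumes the completion, and
* 1 otherwise.
*/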
836static int
837lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
838 struct lpfc_iocbq *saveq)
839{
840 struct lpfc_iocbq * cmdiocbp;
841 int rc = 1;
842 unsigned long iflag;
843
844 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
845 spin_lock_irqsave(phba->host->host_lock, iflag);
846 cmdiocbp = lpfc_sli_txcmpl_ring_search_slow(pring, saveq);
847 if (cmdiocbp) {
848 if (cmdiocbp->iocb_cmpl) {
849 /*
850 * Post all ELS completions to the worker thread.
851 			 * All others are passed to the completion callback.
852 */
853 if (pring->ringno == LPFC_ELS_RING) {
854 spin_unlock_irqrestore(phba->host->host_lock,
855 iflag);
856 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
857 spin_lock_irqsave(phba->host->host_lock, iflag);
858 }
859 else {
860 if (cmdiocbp->iocb_flag & LPFC_IO_POLL)
861 rc = 0;
862
863 spin_unlock_irqrestore(phba->host->host_lock,
864 iflag);
865 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
866 spin_lock_irqsave(phba->host->host_lock, iflag);
867 }
868 } else {
869 list_add_tail(&cmdiocbp->list, &phba->lpfc_iocb_list);
870 }
871 } else {
872 /*
873 * Unknown initiating command based on the response iotag.
874 * This could be the case on the ELS ring because of
875 * lpfc_els_abort().
876 */
877 if (pring->ringno != LPFC_ELS_RING) {
878 /*
879 * Ring <ringno> handler: unexpected completion IoTag
880 * <IoTag>
881 */
882 lpfc_printf_log(phba,
883 KERN_WARNING,
884 LOG_SLI,
885 "%d:0322 Ring %d handler: unexpected "
886 "completion IoTag x%x Data: x%x x%x x%x x%x\n",
887 phba->brd_no,
888 pring->ringno,
889 saveq->iocb.ulpIoTag,
890 saveq->iocb.ulpStatus,
891 saveq->iocb.un.ulpWord[4],
892 saveq->iocb.ulpCommand,
893 saveq->iocb.ulpContext);
894 }
895 }
896 spin_unlock_irqrestore(phba->host->host_lock, iflag);
897 return rc;
898}
899
900/*
901 * This routine presumes LPFC_FCP_RING handling and doesn't bother
902 * to check it explicitly.
903 */
904static int
905lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
906 struct lpfc_sli_ring * pring, uint32_t mask)
907{
908 IOCB_t *irsp = NULL;
909 struct lpfc_iocbq *cmdiocbq = NULL;
910 struct lpfc_iocbq rspiocbq;
911 PGP *pgp;
912 uint32_t status;
913 uint32_t portRspPut, portRspMax;
914 int rc = 1;
915 lpfc_iocb_type type;
916 unsigned long iflag;
917 uint32_t rsp_cmpl = 0;
918 void __iomem *to_slim;
919
920 spin_lock_irqsave(phba->host->host_lock, iflag);
921 pring->stats.iocb_event++;
922
923 /* The driver assumes SLI-2 mode */
924 pgp = (PGP *) &((MAILBOX_t *) phba->sli.MBhostaddr)
925 ->us.s2.port[pring->ringno];
926
927 /*
928 * The next available response entry should never exceed the maximum
929 * entries. If it does, treat it as an adapter hardware error.
930 */
931 portRspMax = pring->numRiocb;
932 portRspPut = le32_to_cpu(pgp->rspPutInx);
933 if (unlikely(portRspPut >= portRspMax)) {
934 /*
935 		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
936 * rsp ring <portRspMax>
937 */
938 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
939 "%d:0312 Ring %d handler: portRspPut %d "
940 "is bigger then rsp ring %d\n",
941 phba->brd_no, pring->ringno, portRspPut,
942 portRspMax);
943
944 phba->hba_state = LPFC_HBA_ERROR;
945
946 /* All error attention handlers are posted to worker thread */
947 phba->work_ha |= HA_ERATT;
948 phba->work_hs = HS_FFER3;
949 if (phba->work_wait)
950 wake_up(phba->work_wait);
951
952 spin_unlock_irqrestore(phba->host->host_lock, iflag);
953 return 1;
954 }
955
956 rmb();
957 while (pring->rspidx != portRspPut) {
958 irsp = (IOCB_t *) IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
959 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
960 pring->stats.iocb_rsp++;
961 rsp_cmpl++;
962
963 if (unlikely(irsp->ulpStatus)) {
964 /* Rsp ring <ringno> error: IOCB */
965 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
966 "%d:0326 Rsp Ring %d error: IOCB Data: "
967 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
968 phba->brd_no, pring->ringno,
969 irsp->un.ulpWord[0], irsp->un.ulpWord[1],
970 irsp->un.ulpWord[2], irsp->un.ulpWord[3],
971 irsp->un.ulpWord[4], irsp->un.ulpWord[5],
972 *(((uint32_t *) irsp) + 6),
973 *(((uint32_t *) irsp) + 7));
974 }
975
976 switch (type) {
977 case LPFC_ABORT_IOCB:
978 case LPFC_SOL_IOCB:
979 /*
980 * Idle exchange closed via ABTS from port. No iocb
981 * resources need to be recovered.
982 */
983 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
984 printk(KERN_INFO "%s: IOCB cmd 0x%x processed. "
985 "Skipping completion\n", __FUNCTION__,
986 irsp->ulpCommand);
987 break;
988 }
989
990 rspiocbq.iocb.un.ulpWord[4] = irsp->un.ulpWord[4];
991 rspiocbq.iocb.ulpStatus = irsp->ulpStatus;
992 rspiocbq.iocb.ulpContext = irsp->ulpContext;
993 rspiocbq.iocb.ulpIoTag = irsp->ulpIoTag;
994 cmdiocbq = lpfc_sli_txcmpl_ring_iotag_lookup(phba,
995 pring,
996 &rspiocbq);
997 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
998 spin_unlock_irqrestore(
999 phba->host->host_lock, iflag);
1000 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1001 &rspiocbq);
1002 spin_lock_irqsave(phba->host->host_lock,
1003 iflag);
1004 }
1005 break;
1006 default:
1007 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1008 char adaptermsg[LPFC_MAX_ADPTMSG];
1009 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1010 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1011 MAX_MSG_DATA);
1012 dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
1013 phba->brd_no, adaptermsg);
1014 } else {
1015 /* Unknown IOCB command */
1016 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1017 "%d:0321 Unknown IOCB command "
1018 "Data: x%x, x%x x%x x%x x%x\n",
1019 phba->brd_no, type, irsp->ulpCommand,
1020 irsp->ulpStatus, irsp->ulpIoTag,
1021 irsp->ulpContext);
1022 }
1023 break;
1024 }
1025
1026 /*
1027 * The response IOCB has been processed. Update the ring
1028 * pointer in SLIM. If the port response put pointer has not
1029 * been updated, sync the pgp->rspPutInx and fetch the new port
1030 * response put pointer.
1031 */
1032 if (++pring->rspidx >= portRspMax)
1033 pring->rspidx = 0;
1034
1035 to_slim = phba->MBslimaddr +
1036 (SLIMOFF + (pring->ringno * 2) + 1) * 4;
1037 writeb(pring->rspidx, to_slim);
1038
1039 if (pring->rspidx == portRspPut)
1040 portRspPut = le32_to_cpu(pgp->rspPutInx);
1041 }
1042
1043 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
1044 pring->stats.iocb_rsp_full++;
1045 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1046 writel(status, phba->CAregaddr);
1047 readl(phba->CAregaddr);
1048 }
1049 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1050 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1051 pring->stats.iocb_cmd_empty++;
1052
1053 /* Force update of the local copy of cmdGetInx */
1054 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1055 lpfc_sli_resume_iocb(phba, pring);
1056
1057 if ((pring->lpfc_sli_cmd_available))
1058 (pring->lpfc_sli_cmd_available) (phba, pring);
1059
1060 }
1061
1062 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1063 return rc;
1064}
1065
1066
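/*
* Slow-path ring handler (ELS / CT): each response entry is copied
* into an iocbq from the free list and chained on iocb_continueq until
* the entry with ulpLe set closes the sequence; the chain is then
* routed by command type (solicited, unsolicited, abort, adapter
* message) and the iocbqs are recycled. Ring-full and ring-available
* attention conditions are acknowledged on the way out, as in the
* fast path above.
*/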
1067int
1068lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1069 struct lpfc_sli_ring * pring, uint32_t mask)
1070{
1071 IOCB_t *entry;
1072 IOCB_t *irsp = NULL;
1073 struct lpfc_iocbq *rspiocbp = NULL;
1074 struct lpfc_iocbq *next_iocb;
1075 struct lpfc_iocbq *cmdiocbp;
1076 struct lpfc_iocbq *saveq;
1077 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
1078 HGP *hgp;
1079 PGP *pgp;
1080 MAILBOX_t *mbox;
1081 uint8_t iocb_cmd_type;
1082 lpfc_iocb_type type;
1083 uint32_t status, free_saveq;
1084 uint32_t portRspPut, portRspMax;
1085 int rc = 1;
1086 unsigned long iflag;
1087 void __iomem *to_slim;
1088
1089 spin_lock_irqsave(phba->host->host_lock, iflag);
1090 pring->stats.iocb_event++;
1091
1092 /* The driver assumes SLI-2 mode */
1093 mbox = (MAILBOX_t *) phba->sli.MBhostaddr;
1094 pgp = (PGP *) & mbox->us.s2.port[pring->ringno];
1095 hgp = (HGP *) & mbox->us.s2.host[pring->ringno];
1096
1097 /*
1098 * The next available response entry should never exceed the maximum
1099 * entries. If it does, treat it as an adapter hardware error.
1100 */
1101 portRspMax = pring->numRiocb;
1102 portRspPut = le32_to_cpu(pgp->rspPutInx);
1103 if (portRspPut >= portRspMax) {
1104 /*
1105 		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
1106 * rsp ring <portRspMax>
1107 */
1108 lpfc_printf_log(phba,
1109 KERN_ERR,
1110 LOG_SLI,
1111 "%d:0312 Ring %d handler: portRspPut %d "
1112 "is bigger then rsp ring %d\n",
1113 phba->brd_no,
1114 pring->ringno, portRspPut, portRspMax);
1115
1116 phba->hba_state = LPFC_HBA_ERROR;
1117 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1118
1119 phba->work_hs = HS_FFER3;
1120 lpfc_handle_eratt(phba);
1121
1122 return 1;
1123 }
1124
1125 rmb();
1126 lpfc_iocb_list = &phba->lpfc_iocb_list;
1127 while (pring->rspidx != portRspPut) {
1128 /*
1129 * Build a completion list and call the appropriate handler.
1130 * The process is to get the next available response iocb, get
1131 * a free iocb from the list, copy the response data into the
1132 * free iocb, insert to the continuation list, and update the
1133 * next response index to slim. This process makes response
1134 		 * iocbs in the ring available to DMA as fast as possible but
1135 * pays a penalty for a copy operation. Since the iocb is
1136 * only 32 bytes, this penalty is considered small relative to
1137 * the PCI reads for register values and a slim write. When
1138 * the ulpLe field is set, the entire Command has been
1139 * received.
1140 */
1141 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
1142 list_remove_head(lpfc_iocb_list, rspiocbp, struct lpfc_iocbq,
1143 list);
1144 if (rspiocbp == NULL) {
1145 printk(KERN_ERR "%s: out of buffers! Failing "
1146 "completion.\n", __FUNCTION__);
1147 break;
1148 }
1149
1150 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t));
1151 irsp = &rspiocbp->iocb;
1152
1153 if (++pring->rspidx >= portRspMax)
1154 pring->rspidx = 0;
1155
1156 to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
1157 + 1) * 4;
1158 writeb(pring->rspidx, to_slim);
1159
1160 if (list_empty(&(pring->iocb_continueq))) {
1161 list_add(&rspiocbp->list, &(pring->iocb_continueq));
1162 } else {
1163 list_add_tail(&rspiocbp->list,
1164 &(pring->iocb_continueq));
1165 }
1166
1167 pring->iocb_continueq_cnt++;
1168 if (irsp->ulpLe) {
1169 /*
1170 * By default, the driver expects to free all resources
1171 * associated with this iocb completion.
1172 */
1173 free_saveq = 1;
1174 saveq = list_get_first(&pring->iocb_continueq,
1175 struct lpfc_iocbq, list);
1176 irsp = &(saveq->iocb);
1177 list_del_init(&pring->iocb_continueq);
1178 pring->iocb_continueq_cnt = 0;
1179
1180 pring->stats.iocb_rsp++;
1181
1182 if (irsp->ulpStatus) {
1183 /* Rsp ring <ringno> error: IOCB */
1184 lpfc_printf_log(phba,
1185 KERN_WARNING,
1186 LOG_SLI,
1187 "%d:0328 Rsp Ring %d error: IOCB Data: "
1188 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1189 phba->brd_no,
1190 pring->ringno,
1191 irsp->un.ulpWord[0],
1192 irsp->un.ulpWord[1],
1193 irsp->un.ulpWord[2],
1194 irsp->un.ulpWord[3],
1195 irsp->un.ulpWord[4],
1196 irsp->un.ulpWord[5],
1197 *(((uint32_t *) irsp) + 6),
1198 *(((uint32_t *) irsp) + 7));
1199 }
1200
1201 /*
1202 * Fetch the IOCB command type and call the correct
1203 * completion routine. Solicited and Unsolicited
1204 * IOCBs on the ELS ring get freed back to the
1205 * lpfc_iocb_list by the discovery kernel thread.
1206 */
1207 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
1208 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
1209 if (type == LPFC_SOL_IOCB) {
1210 spin_unlock_irqrestore(phba->host->host_lock,
1211 iflag);
1212 rc = lpfc_sli_process_sol_iocb(phba, pring,
1213 saveq);
1214 spin_lock_irqsave(phba->host->host_lock, iflag);
1215 } else if (type == LPFC_UNSOL_IOCB) {
1216 spin_unlock_irqrestore(phba->host->host_lock,
1217 iflag);
1218 rc = lpfc_sli_process_unsol_iocb(phba, pring,
1219 saveq);
1220 spin_lock_irqsave(phba->host->host_lock, iflag);
1221 } else if (type == LPFC_ABORT_IOCB) {
1222 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1223 ((cmdiocbp =
1224 lpfc_sli_txcmpl_ring_search_slow(pring,
1225 saveq)))) {
1226 /* Call the specified completion
1227 routine */
1228 if (cmdiocbp->iocb_cmpl) {
1229 spin_unlock_irqrestore(
1230 phba->host->host_lock,
1231 iflag);
1232 (cmdiocbp->iocb_cmpl) (phba,
1233 cmdiocbp, saveq);
1234 spin_lock_irqsave(
1235 phba->host->host_lock,
1236 iflag);
1237 } else {
1238 list_add_tail(&cmdiocbp->list,
1239 lpfc_iocb_list);
1240 }
1241 }
1242 } else if (type == LPFC_UNKNOWN_IOCB) {
1243 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1244
1245 char adaptermsg[LPFC_MAX_ADPTMSG];
1246
1247 memset(adaptermsg, 0,
1248 LPFC_MAX_ADPTMSG);
1249 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1250 MAX_MSG_DATA);
1251 dev_warn(&((phba->pcidev)->dev),
1252 "lpfc%d: %s",
1253 phba->brd_no, adaptermsg);
1254 } else {
1255 /* Unknown IOCB command */
1256 lpfc_printf_log(phba,
1257 KERN_ERR,
1258 LOG_SLI,
1259 "%d:0321 Unknown IOCB command "
1260 "Data: x%x x%x x%x x%x\n",
1261 phba->brd_no,
1262 irsp->ulpCommand,
1263 irsp->ulpStatus,
1264 irsp->ulpIoTag,
1265 irsp->ulpContext);
1266 }
1267 }
1268
1269 if (free_saveq) {
1270 if (!list_empty(&saveq->list)) {
1271 list_for_each_entry_safe(rspiocbp,
1272 next_iocb,
1273 &saveq->list,
1274 list) {
1275 list_add_tail(&rspiocbp->list,
1276 lpfc_iocb_list);
1277 }
1278 }
1279
1280 list_add_tail(&saveq->list, lpfc_iocb_list);
1281 }
1282 }
1283
1284 /*
1285 * If the port response put pointer has not been updated, sync
1286 		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
1287 * response put pointer.
1288 */
1289 if (pring->rspidx == portRspPut) {
1290 portRspPut = le32_to_cpu(pgp->rspPutInx);
1291 }
1292 } /* while (pring->rspidx != portRspPut) */
1293
1294 	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
1295 /* At least one response entry has been freed */
1296 pring->stats.iocb_rsp_full++;
1297 /* SET RxRE_RSP in Chip Att register */
1298 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1299 writel(status, phba->CAregaddr);
1300 readl(phba->CAregaddr); /* flush */
1301 }
1302 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1303 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1304 pring->stats.iocb_cmd_empty++;
1305
1306 /* Force update of the local copy of cmdGetInx */
1307 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1308 lpfc_sli_resume_iocb(phba, pring);
1309
1310 if ((pring->lpfc_sli_cmd_available))
1311 (pring->lpfc_sli_cmd_available) (phba, pring);
1312
1313 }
1314
1315 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1316 return rc;
1317}
1318
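/*
* Fail every iocb queued on the ring: txq entries are completed with
* IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED without ever reaching the
* HBA, then everything on the txcmplq is dequeued (clearing any
* fast_lookup slots) and completed the same way. Despite the running
* 'errcnt', this version always returns 0.
*/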
1319int
1320lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1321{
1322 struct lpfc_iocbq *iocb, *next_iocb;
1323 IOCB_t *icmd = NULL, *cmd = NULL;
1324 int errcnt;
1325 uint16_t iotag;
1326
1327 errcnt = 0;
1328
1329 /* Error everything on txq and txcmplq
1330 * First do the txq.
1331 */
1332 spin_lock_irq(phba->host->host_lock);
1333 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
1334 list_del_init(&iocb->list);
1335 if (iocb->iocb_cmpl) {
1336 icmd = &iocb->iocb;
1337 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1338 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1339 spin_unlock_irq(phba->host->host_lock);
1340 (iocb->iocb_cmpl) (phba, iocb, iocb);
1341 spin_lock_irq(phba->host->host_lock);
1342 } else {
1343 list_add_tail(&iocb->list, &phba->lpfc_iocb_list);
1344 }
1345 }
1346 pring->txq_cnt = 0;
1347 INIT_LIST_HEAD(&(pring->txq));
1348
1349 /* Next issue ABTS for everything on the txcmplq */
1350 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1351 cmd = &iocb->iocb;
1352
1353 /*
1354 		 * Immediately abort the IOCB: clear the fast_lookup entry,
1355 		 * if any, then dequeue it and call its completion routine.
1356 */
1357 iotag = cmd->ulpIoTag;
1358 if (iotag && pring->fast_lookup &&
1359 (iotag < pring->fast_iotag))
1360 pring->fast_lookup[iotag] = NULL;
1361
1362 list_del_init(&iocb->list);
1363 pring->txcmplq_cnt--;
1364
1365 if (iocb->iocb_cmpl) {
1366 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1367 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1368 spin_unlock_irq(phba->host->host_lock);
1369 (iocb->iocb_cmpl) (phba, iocb, iocb);
1370 spin_lock_irq(phba->host->host_lock);
1371 } else {
1372 list_add_tail(&iocb->list, &phba->lpfc_iocb_list);
1373 }
1374 }
1375
1376 INIT_LIST_HEAD(&pring->txcmplq);
1377 pring->txcmplq_cnt = 0;
1378 spin_unlock_irq(phba->host->host_lock);
1379
1380 return errcnt;
1381}
1382
1383/******************************************************************************
1384* lpfc_sli_send_reset
1385*
1386* Note: After returning from this function, the HBA cannot be accessed for
1387* 1 ms. Since we do not wish to delay in interrupt context, it is the
1388* responsibility of the caller to perform the mdelay(1) and flush via readl().
1389******************************************************************************/
1390static int
1391lpfc_sli_send_reset(struct lpfc_hba * phba, uint16_t skip_post)
1392{
1393 MAILBOX_t *swpmb;
1394 volatile uint32_t word0;
1395 void __iomem *to_slim;
1396 unsigned long flags = 0;
1397
1398 spin_lock_irqsave(phba->host->host_lock, flags);
1399
1400 /* A board reset must use REAL SLIM. */
1401 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
1402
1403 word0 = 0;
1404 swpmb = (MAILBOX_t *) & word0;
1405 swpmb->mbxCommand = MBX_RESTART;
1406 swpmb->mbxHc = 1;
1407
1408 to_slim = phba->MBslimaddr;
1409 writel(*(uint32_t *) swpmb, to_slim);
1410 readl(to_slim); /* flush */
1411
1412 /* Only skip post after fc_ffinit is completed */
1413 if (skip_post) {
1414 word0 = 1; /* This is really setting up word1 */
1415 } else {
1416 word0 = 0; /* This is really setting up word1 */
1417 }
1418 to_slim = phba->MBslimaddr + sizeof (uint32_t);
1419 writel(*(uint32_t *) swpmb, to_slim);
1420 readl(to_slim); /* flush */
1421
1422 /* Turn off parity checking and serr during the physical reset */
1423 pci_read_config_word(phba->pcidev, PCI_COMMAND, &phba->pci_cfg_value);
1424 pci_write_config_word(phba->pcidev, PCI_COMMAND,
1425 (phba->pci_cfg_value &
1426 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
1427
1428 writel(HC_INITFF, phba->HCregaddr);
1429
1430 phba->hba_state = LPFC_INIT_START;
1431 spin_unlock_irqrestore(phba->host->host_lock, flags);
1432
1433 return 0;
1434}
1435
1436static int
1437lpfc_sli_brdreset(struct lpfc_hba * phba, uint16_t skip_post)
1438{
1439 struct lpfc_sli_ring *pring;
1440 int i;
1441 struct lpfc_dmabuf *mp, *next_mp;
1442 unsigned long flags = 0;
1443
1444 lpfc_sli_send_reset(phba, skip_post);
1445 mdelay(1);
1446
1447 spin_lock_irqsave(phba->host->host_lock, flags);
1448 	/* Risk the write-on-flush case, i.e., no delay after the readl */
1449 readl(phba->HCregaddr); /* flush */
1450 /* Now toggle INITFF bit set by lpfc_sli_send_reset */
1451 writel(0, phba->HCregaddr);
1452 readl(phba->HCregaddr); /* flush */
1453
1454 /* Restore PCI cmd register */
1455 pci_write_config_word(phba->pcidev, PCI_COMMAND, phba->pci_cfg_value);
1456
1457 /* perform board reset */
1458 phba->fc_eventTag = 0;
1459 phba->fc_myDID = 0;
1460 phba->fc_prevDID = Mask_DID;
1461
1462 /* Reset HBA */
1463 lpfc_printf_log(phba,
1464 KERN_INFO,
1465 LOG_SLI,
1466 "%d:0325 Reset HBA Data: x%x x%x x%x\n",
1467 phba->brd_no,
1468 phba->hba_state,
1469 phba->sli.sli_flag,
1470 skip_post);
1471
1472 /* Initialize relevant SLI info */
1473 for (i = 0; i < phba->sli.num_rings; i++) {
1474 pring = &phba->sli.ring[i];
1475 pring->flag = 0;
1476 pring->rspidx = 0;
1477 pring->next_cmdidx = 0;
1478 pring->local_getidx = 0;
1479 pring->cmdidx = 0;
1480 pring->missbufcnt = 0;
1481 }
1482 spin_unlock_irqrestore(phba->host->host_lock, flags);
1483
1484 if (skip_post) {
1485 mdelay(100);
1486 } else {
1487 mdelay(2000);
1488 }
1489
1490 spin_lock_irqsave(phba->host->host_lock, flags);
1491 /* Cleanup preposted buffers on the ELS ring */
1492 pring = &phba->sli.ring[LPFC_ELS_RING];
1493 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
1494 list_del(&mp->list);
1495 pring->postbufq_cnt--;
1496 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1497 kfree(mp);
1498 }
1499 spin_unlock_irqrestore(phba->host->host_lock, flags);
1500
1501 for (i = 0; i < phba->sli.num_rings; i++)
1502 lpfc_sli_abort_iocb_ring(phba, &phba->sli.ring[i]);
1503
1504 return 0;
1505}
1506
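/*
* Wait for the adapter to post ready status (HS_FFRDY | HS_MBRDY),
* polling with escalating delays: 10ms for the first 5 tries, 500ms
* for the next 5, then 2.5s, with a board reset injected on try 15
* and a hard -ETIMEDOUT on try 20. HS_FFERM at any point is fatal.
* On success, all interrupt enables are cleared and every host
* attention bit is acknowledged.
*/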
1507static int
1508lpfc_sli_chipset_init(struct lpfc_hba *phba)
1509{
1510 uint32_t status, i = 0;
1511
1512 /* Read the HBA Host Status Register */
1513 status = readl(phba->HSregaddr);
1514
1515 /* Check status register to see what current state is */
1516 i = 0;
1517 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
1518
1519 		/* Check every 10ms for 5 retries, then every 500ms for 5, then
1520 		 * every 2.5 sec for 5, then reset the board and check every
1521 		 * 2.5 sec for 4 more retries.
1522 */
1523 if (i++ >= 20) {
1524 /* Adapter failed to init, timeout, status reg
1525 <status> */
1526 lpfc_printf_log(phba,
1527 KERN_ERR,
1528 LOG_INIT,
1529 "%d:0436 Adapter failed to init, "
1530 "timeout, status reg x%x\n",
1531 phba->brd_no,
1532 status);
1533 phba->hba_state = LPFC_HBA_ERROR;
1534 return -ETIMEDOUT;
1535 }
1536
1537 /* Check to see if any errors occurred during init */
1538 if (status & HS_FFERM) {
1539 /* ERROR: During chipset initialization */
1540 /* Adapter failed to init, chipset, status reg
1541 <status> */
1542 lpfc_printf_log(phba,
1543 KERN_ERR,
1544 LOG_INIT,
1545 "%d:0437 Adapter failed to init, "
1546 "chipset, status reg x%x\n",
1547 phba->brd_no,
1548 status);
1549 phba->hba_state = LPFC_HBA_ERROR;
1550 return -EIO;
1551 }
1552
1553 if (i <= 5) {
1554 msleep(10);
1555 } else if (i <= 10) {
1556 msleep(500);
1557 } else {
1558 msleep(2500);
1559 }
1560
1561 if (i == 15) {
1562 lpfc_sli_brdreset(phba, 0);
1563 }
1564 /* Read the HBA Host Status Register */
1565 status = readl(phba->HSregaddr);
1566 }
1567
1568 /* Check to see if any errors occurred during init */
1569 if (status & HS_FFERM) {
1570 /* ERROR: During chipset initialization */
1571 /* Adapter failed to init, chipset, status reg <status> */
1572 lpfc_printf_log(phba,
1573 KERN_ERR,
1574 LOG_INIT,
1575 "%d:0438 Adapter failed to init, chipset, "
1576 "status reg x%x\n",
1577 phba->brd_no,
1578 status);
1579 phba->hba_state = LPFC_HBA_ERROR;
1580 return -EIO;
1581 }
1582
1583 /* Clear all interrupt enable conditions */
1584 writel(0, phba->HCregaddr);
1585 readl(phba->HCregaddr); /* flush */
1586
1587 /* setup host attn register */
1588 writel(0xffffffff, phba->HAregaddr);
1589 readl(phba->HAregaddr); /* flush */
1590 return 0;
1591}
1592
1593int
1594lpfc_sli_hba_setup(struct lpfc_hba * phba)
1595{
1596 LPFC_MBOXQ_t *pmb;
1597 uint32_t resetcount = 0, rc = 0, done = 0;
1598
1599 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1600 if (!pmb) {
1601 phba->hba_state = LPFC_HBA_ERROR;
1602 return -ENOMEM;
1603 }
1604
1605 while (resetcount < 2 && !done) {
1606 phba->hba_state = 0;
1607 lpfc_sli_brdreset(phba, 0);
1608 msleep(2500);
1609 rc = lpfc_sli_chipset_init(phba);
1610 if (rc)
1611 break;
1612
1613 resetcount++;
1614
1615 		/* Call the pre-CONFIG_PORT mailbox command initialization. A value of 0
1616 * means the call was successful. Any other nonzero value is a failure,
1617 * but if ERESTART is returned, the driver may reset the HBA and try
1618 * again.
1619 */
1620 rc = lpfc_config_port_prep(phba);
1621 if (rc == -ERESTART) {
1622 phba->hba_state = 0;
1623 continue;
1624 } else if (rc) {
1625 break;
1626 }
1627
1628 phba->hba_state = LPFC_INIT_MBX_CMDS;
1629 lpfc_config_port(phba, pmb);
1630 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1631 if (rc == MBX_SUCCESS)
1632 done = 1;
1633 else {
1634 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1635 "%d:0442 Adapter failed to init, mbxCmd x%x "
1636 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
1637 phba->brd_no, pmb->mb.mbxCommand,
1638 pmb->mb.mbxStatus, 0);
1639 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
1640 }
1641 }
1642 if (!done)
1643 goto lpfc_sli_hba_setup_error;
1644
1645 rc = lpfc_sli_ring_map(phba, pmb);
1646
1647 if (rc)
1648 goto lpfc_sli_hba_setup_error;
1649
1650 phba->sli.sli_flag |= LPFC_PROCESS_LA;
1651
1652 rc = lpfc_config_port_post(phba);
1653 if (rc)
1654 goto lpfc_sli_hba_setup_error;
1655
1656 goto lpfc_sli_hba_setup_exit;
1657lpfc_sli_hba_setup_error:
1658 phba->hba_state = LPFC_HBA_ERROR;
1659lpfc_sli_hba_setup_exit:
1660 mempool_free(pmb, phba->mbox_mem_pool);
1661 return rc;
1662}
1663
1664static void
1665lpfc_mbox_abort(struct lpfc_hba * phba)
1666{
1667 LPFC_MBOXQ_t *pmbox;
1668 MAILBOX_t *mb;
1669
1670 if (phba->sli.mbox_active) {
1671 del_timer_sync(&phba->sli.mbox_tmo);
1672 phba->work_hba_events &= ~WORKER_MBOX_TMO;
1673 pmbox = phba->sli.mbox_active;
1674 mb = &pmbox->mb;
1675 phba->sli.mbox_active = NULL;
1676 if (pmbox->mbox_cmpl) {
1677 mb->mbxStatus = MBX_NOT_FINISHED;
1678 (pmbox->mbox_cmpl) (phba, pmbox);
1679 }
1680 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1681 }
1682
1683 /* Abort all the non active mailbox commands. */
1684 spin_lock_irq(phba->host->host_lock);
1685 pmbox = lpfc_mbox_get(phba);
1686 while (pmbox) {
1687 mb = &pmbox->mb;
1688 if (pmbox->mbox_cmpl) {
1689 mb->mbxStatus = MBX_NOT_FINISHED;
1690 spin_unlock_irq(phba->host->host_lock);
1691 (pmbox->mbox_cmpl) (phba, pmbox);
1692 spin_lock_irq(phba->host->host_lock);
1693 }
1694 pmbox = lpfc_mbox_get(phba);
1695 }
1696 spin_unlock_irq(phba->host->host_lock);
1697 return;
1698}
1699
1700/*! lpfc_mbox_timeout
1701 *
1702 * \pre
1703 * \post
1704  * \param ptr Pointer to the per-adapter struct lpfc_hba,
1705  *            passed as an unsigned long by the timer core.
1706 * \return
1707 * void
1708 *
1709 * \b Description:
1710 *
1711 * This routine handles mailbox timeout events at timer interrupt context.
1712 */
1713void
1714lpfc_mbox_timeout(unsigned long ptr)
1715{
1716 struct lpfc_hba *phba;
1717 unsigned long iflag;
1718
1719 phba = (struct lpfc_hba *)ptr;
1720 spin_lock_irqsave(phba->host->host_lock, iflag);
1721 if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
1722 phba->work_hba_events |= WORKER_MBOX_TMO;
1723 if (phba->work_wait)
1724 wake_up(phba->work_wait);
1725 }
1726 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1727}
1728
1729void
1730lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
1731{
1732 LPFC_MBOXQ_t *pmbox;
1733 MAILBOX_t *mb;
1734
1735 spin_lock_irq(phba->host->host_lock);
1736 if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
1737 spin_unlock_irq(phba->host->host_lock);
1738 return;
1739 }
1740
1741 pmbox = phba->sli.mbox_active;
1742 mb = &pmbox->mb;
1743
1744 /* Mbox cmd <mbxCommand> timeout */
1745 lpfc_printf_log(phba,
1746 KERN_ERR,
1747 LOG_MBOX | LOG_SLI,
1748 "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
1749 phba->brd_no,
1750 mb->mbxCommand,
1751 phba->hba_state,
1752 phba->sli.sli_flag,
1753 phba->sli.mbox_active);
1754
1755 if (phba->sli.mbox_active == pmbox) {
1756 phba->sli.mbox_active = NULL;
1757 if (pmbox->mbox_cmpl) {
1758 mb->mbxStatus = MBX_NOT_FINISHED;
1759 spin_unlock_irq(phba->host->host_lock);
1760 (pmbox->mbox_cmpl) (phba, pmbox);
1761 spin_lock_irq(phba->host->host_lock);
1762 }
1763 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1764 }
1765
1766 spin_unlock_irq(phba->host->host_lock);
1767 lpfc_mbox_abort(phba);
1768 return;
1769}
1770
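/*
* Issue a mailbox command. MBX_NOWAIT queues behind an active command
* (MBX_BUSY) and arms the mailbox timeout; MBX_POLL spins on the owner
* bit and HA_MBATT with an escalating mdelay, giving up after ~100
* iterations. In SLI-2 mode the command goes through host SLIM,
* otherwise through adapter SLIM, with CONFIG_PORT switching the
* driver over to host SLIM on the fly. An illustrative caller
* (a sketch only, not part of this patch) might do:
*
*	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
*	lpfc_read_config(phba, pmb);
*	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS)
*		... recover ...
*/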
1771int
1772lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
1773{
1774 MAILBOX_t *mbox;
1775 MAILBOX_t *mb;
1776 struct lpfc_sli *psli;
1777 uint32_t status, evtctr;
1778 uint32_t ha_copy;
1779 int i;
1780 unsigned long drvr_flag = 0;
1781 volatile uint32_t word0, ldata;
1782 void __iomem *to_slim;
1783
1784 psli = &phba->sli;
1785
1786 spin_lock_irqsave(phba->host->host_lock, drvr_flag);
1787
1788
1789 mb = &pmbox->mb;
1790 status = MBX_SUCCESS;
1791
1792 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
1793 /* Polling for a mbox command when another one is already active
1794 * is not allowed in SLI. Also, the driver must have established
1795 * SLI2 mode to queue and process multiple mbox commands.
1796 */
1797
1798 if (flag & MBX_POLL) {
1799 spin_unlock_irqrestore(phba->host->host_lock,
1800 drvr_flag);
1801
1802 /* Mbox command <mbxCommand> cannot issue */
1803 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
1804 return (MBX_NOT_FINISHED);
1805 }
1806
1807 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
1808 spin_unlock_irqrestore(phba->host->host_lock,
1809 drvr_flag);
1810 /* Mbox command <mbxCommand> cannot issue */
1811 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
1812 return (MBX_NOT_FINISHED);
1813 }
1814
1815 /* Handle STOP IOCB processing flag. This is only meaningful
1816 * if we are not polling for mbox completion.
1817 */
1818 if (flag & MBX_STOP_IOCB) {
1819 flag &= ~MBX_STOP_IOCB;
1820 /* Now flag each ring */
1821 for (i = 0; i < psli->num_rings; i++) {
1822 /* If the ring is active, flag it */
1823 if (psli->ring[i].cmdringaddr) {
1824 psli->ring[i].flag |=
1825 LPFC_STOP_IOCB_MBX;
1826 }
1827 }
1828 }
1829
1830 /* Another mailbox command is still being processed, queue this
1831 * command to be processed later.
1832 */
1833 lpfc_mbox_put(phba, pmbox);
1834
1835 /* Mbox cmd issue - BUSY */
1836 lpfc_printf_log(phba,
1837 KERN_INFO,
1838 LOG_MBOX | LOG_SLI,
1839 "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
1840 phba->brd_no,
1841 mb->mbxCommand,
1842 phba->hba_state,
1843 psli->sli_flag,
1844 flag);
1845
1846 psli->slistat.mbox_busy++;
1847 spin_unlock_irqrestore(phba->host->host_lock,
1848 drvr_flag);
1849
1850 return (MBX_BUSY);
1851 }
1852
1853 /* Handle STOP IOCB processing flag. This is only meaningful
1854 * if we are not polling for mbox completion.
1855 */
1856 if (flag & MBX_STOP_IOCB) {
1857 flag &= ~MBX_STOP_IOCB;
1858 if (flag == MBX_NOWAIT) {
1859 /* Now flag each ring */
1860 for (i = 0; i < psli->num_rings; i++) {
1861 /* If the ring is active, flag it */
1862 if (psli->ring[i].cmdringaddr) {
1863 psli->ring[i].flag |=
1864 LPFC_STOP_IOCB_MBX;
1865 }
1866 }
1867 }
1868 }
1869
1870 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
1871
1872 /* If we are not polling, we MUST be in SLI2 mode */
1873 if (flag != MBX_POLL) {
1874 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
1875 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1876 spin_unlock_irqrestore(phba->host->host_lock,
1877 drvr_flag);
1878 /* Mbox command <mbxCommand> cannot issue */
1879 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
1880 return (MBX_NOT_FINISHED);
1881 }
1882 /* timeout active mbox command */
1883 mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO);
1884 }
1885
1886 /* Mailbox cmd <cmd> issue */
1887 lpfc_printf_log(phba,
1888 KERN_INFO,
1889 LOG_MBOX | LOG_SLI,
1890 "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
1891 phba->brd_no,
1892 mb->mbxCommand,
1893 phba->hba_state,
1894 psli->sli_flag,
1895 flag);
1896
1897 psli->slistat.mbox_cmd++;
1898 evtctr = psli->slistat.mbox_event;
1899
1900 /* next set own bit for the adapter and copy over command word */
1901 mb->mbxOwner = OWN_CHIP;
1902
1903 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
1904
1905 /* First copy command data to host SLIM area */
1906 mbox = (MAILBOX_t *) psli->MBhostaddr;
1907 lpfc_sli_pcimem_bcopy(mb, mbox, MAILBOX_CMD_SIZE);
1908 } else {
1909 if (mb->mbxCommand == MBX_CONFIG_PORT) {
1910 /* copy command data into host mbox for cmpl */
1911 mbox = (MAILBOX_t *) psli->MBhostaddr;
1912 lpfc_sli_pcimem_bcopy(mb, mbox, MAILBOX_CMD_SIZE);
1913 }
1914
1915 /* First copy mbox command data to HBA SLIM, skip past first
1916 word */
1917 to_slim = phba->MBslimaddr + sizeof (uint32_t);
1918 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
1919 MAILBOX_CMD_SIZE - sizeof (uint32_t));
1920
1921 /* Next copy over first word, with mbxOwner set */
1922 ldata = *((volatile uint32_t *)mb);
1923 to_slim = phba->MBslimaddr;
1924 writel(ldata, to_slim);
1925 readl(to_slim); /* flush */
1926
1927 if (mb->mbxCommand == MBX_CONFIG_PORT) {
1928 /* switch over to host mailbox */
1929 psli->sli_flag |= LPFC_SLI2_ACTIVE;
1930 }
1931 }
1932
1933 wmb();
1934 	/* interrupt the board to do it right away */
1935 writel(CA_MBATT, phba->CAregaddr);
1936 readl(phba->CAregaddr); /* flush */
1937
1938 switch (flag) {
1939 case MBX_NOWAIT:
1940 /* Don't wait for it to finish, just return */
1941 psli->mbox_active = pmbox;
1942 break;
1943
1944 case MBX_POLL:
1945 i = 0;
1946 psli->mbox_active = NULL;
1947 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
1948 /* First read mbox status word */
1949 mbox = (MAILBOX_t *) psli->MBhostaddr;
1950 word0 = *((volatile uint32_t *)mbox);
1951 word0 = le32_to_cpu(word0);
1952 } else {
1953 /* First read mbox status word */
1954 word0 = readl(phba->MBslimaddr);
1955 }
1956
1957 /* Read the HBA Host Attention Register */
1958 ha_copy = readl(phba->HAregaddr);
1959
1960 /* Wait for command to complete */
1961 while (((word0 & OWN_CHIP) == OWN_CHIP)
1962 || !(ha_copy & HA_MBATT)) {
1963 if (i++ >= 100) {
1964 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1965 spin_unlock_irqrestore(phba->host->host_lock,
1966 drvr_flag);
1967 return (MBX_NOT_FINISHED);
1968 }
1969
1970 /* Check if we took a mbox interrupt while we were
1971 polling */
1972 if (((word0 & OWN_CHIP) != OWN_CHIP)
1973 && (evtctr != psli->slistat.mbox_event))
1974 break;
1975
1976 spin_unlock_irqrestore(phba->host->host_lock,
1977 drvr_flag);
1978
1979 /* Can be in interrupt context, do not sleep */
1980 /* (or might be called with interrupts disabled) */
1981 mdelay(i);
1982
1983 spin_lock_irqsave(phba->host->host_lock, drvr_flag);
1984
1985 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
1986 /* First copy command data */
1987 mbox = (MAILBOX_t *) psli->MBhostaddr;
1988 word0 = *((volatile uint32_t *)mbox);
1989 word0 = le32_to_cpu(word0);
1990 if (mb->mbxCommand == MBX_CONFIG_PORT) {
1991 MAILBOX_t *slimmb;
1992 volatile uint32_t slimword0;
1993 /* Check real SLIM for any errors */
1994 slimword0 = readl(phba->MBslimaddr);
1995 slimmb = (MAILBOX_t *) & slimword0;
1996 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
1997 && slimmb->mbxStatus) {
1998 psli->sli_flag &=
1999 ~LPFC_SLI2_ACTIVE;
2000 word0 = slimword0;
2001 }
2002 }
2003 } else {
2004 /* First copy command data */
2005 word0 = readl(phba->MBslimaddr);
2006 }
2007 /* Read the HBA Host Attention Register */
2008 ha_copy = readl(phba->HAregaddr);
2009 }
2010
2011 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2012 /* First copy command data */
2013 mbox = (MAILBOX_t *) psli->MBhostaddr;
2014 /* copy results back to user */
2015 lpfc_sli_pcimem_bcopy(mbox, mb, MAILBOX_CMD_SIZE);
2016 } else {
2017 /* First copy command data */
2018 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
2019 MAILBOX_CMD_SIZE);
2020 if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
2021 pmbox->context2) {
2022 lpfc_memcpy_from_slim((void *)pmbox->context2,
2023 phba->MBslimaddr + DMP_RSP_OFFSET,
2024 mb->un.varDmp.word_cnt);
2025 }
2026 }
2027
2028 writel(HA_MBATT, phba->HAregaddr);
2029 readl(phba->HAregaddr); /* flush */
2030
2031 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2032 status = mb->mbxStatus;
2033 }
2034
2035 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
2036 return (status);
2037}
2038
2039static int
2040lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
2041 struct lpfc_iocbq * piocb)
2042{
2043 /* Insert the caller's iocb in the txq tail for later processing. */
2044 list_add_tail(&piocb->list, &pring->txq);
2045 pring->txq_cnt++;
2046 return (0);
2047}
2048
2049static struct lpfc_iocbq *
2050lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2051 struct lpfc_iocbq ** piocb)
2052{
2053 struct lpfc_iocbq * nextiocb;
2054
2055 nextiocb = lpfc_sli_ringtx_get(phba, pring);
2056 if (!nextiocb) {
2057 nextiocb = *piocb;
2058 *piocb = NULL;
2059 }
2060
2061 return nextiocb;
2062}
2063
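/*
* Main entry point for sending an iocb. Rejects outright below the
* LINK_DOWN state; while the link is down only XRI create/close/abort
* and ring-buffer posts may go out. High-priority iocbs bypass the
* txq; otherwise the txq is drained ahead of the new iocb. If no slot
* is free, the iocb is parked on the txq (IOCB_SUCCESS) unless the
* caller set SLI_IOCB_RET_IOCB, in which case IOCB_BUSY hands it back.
*/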
2064int
2065lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2066 struct lpfc_iocbq *piocb, uint32_t flag)
2067{
2068 struct lpfc_iocbq *nextiocb;
2069 IOCB_t *iocb;
2070
2071 /*
2072 	 * We should never get an IOCB in a state below LINK_DOWN
2073 */
2074 if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
2075 return IOCB_ERROR;
2076
2077 /*
2078 	 * Check to see if we are blocking IOCB processing because of an
2079 * outstanding mbox command.
2080 */
2081 if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
2082 goto iocb_busy;
2083
2084 if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
2085 /*
2086 * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF
2087 * can be issued if the link is not up.
2088 */
2089 switch (piocb->iocb.ulpCommand) {
2090 case CMD_QUE_RING_BUF_CN:
2091 case CMD_QUE_RING_BUF64_CN:
2092 case CMD_CLOSE_XRI_CN:
2093 case CMD_ABORT_XRI_CN:
2094 /*
2095 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
2096 * completion, iocb_cmpl MUST be 0.
2097 */
2098 if (piocb->iocb_cmpl)
2099 piocb->iocb_cmpl = NULL;
2100 /*FALLTHROUGH*/
2101 case CMD_CREATE_XRI_CR:
2102 break;
2103 default:
2104 goto iocb_busy;
2105 }
2106
2107 /*
2108 * For FCP commands, we must be in a state where we can process link
2109 * attention events.
2110 */
2111 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
2112 !(phba->sli.sli_flag & LPFC_PROCESS_LA)))
2113 goto iocb_busy;
2114
2115 /*
2116 * Check to see if this is a high priority command.
2117 * If so bypass tx queue processing.
2118 */
2119 if (unlikely((flag & SLI_IOCB_HIGH_PRIORITY) &&
2120 (iocb = lpfc_sli_next_iocb_slot(phba, pring)))) {
2121 lpfc_sli_submit_iocb(phba, pring, iocb, piocb);
2122 piocb = NULL;
2123 }
2124
2125 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2126 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
2127 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2128
2129 if (iocb)
2130 lpfc_sli_update_ring(phba, pring);
2131 else
2132 lpfc_sli_update_full_ring(phba, pring);
2133
2134 if (!piocb)
2135 return IOCB_SUCCESS;
2136
2137 goto out_busy;
2138
2139 iocb_busy:
2140 pring->stats.iocb_cmd_delay++;
2141
2142 out_busy:
2143
2144 if (!(flag & SLI_IOCB_RET_IOCB)) {
2145 lpfc_sli_ringtx_put(phba, pring, piocb);
2146 return IOCB_SUCCESS;
2147 }
2148
2149 return IOCB_BUSY;
2150}
2151
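/*
* Static per-ring configuration: iocb entry counts for the FCP, IP and
* ELS/CT rings, iotag ranges, and the unsolicited-event match table
* (rctl / type -> handler) for the ELS ring. The combined entry count
* is sanity-checked against MAX_SLI2_IOCB.
*/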
2152int
2153lpfc_sli_setup(struct lpfc_hba *phba)
2154{
2155 int i, totiocb = 0;
2156 struct lpfc_sli *psli = &phba->sli;
2157 struct lpfc_sli_ring *pring;
2158
2159 psli->num_rings = MAX_CONFIGURED_RINGS;
2160 psli->sli_flag = 0;
2161 psli->fcp_ring = LPFC_FCP_RING;
2162 psli->next_ring = LPFC_FCP_NEXT_RING;
2163 psli->ip_ring = LPFC_IP_RING;
2164
2165 for (i = 0; i < psli->num_rings; i++) {
2166 pring = &psli->ring[i];
2167 switch (i) {
2168 case LPFC_FCP_RING: /* ring 0 - FCP */
2169 /* numCiocb and numRiocb are used in config_port */
2170 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
2171 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
2172 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2173 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2174 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2175 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2176 pring->iotag_ctr = 0;
2177 pring->iotag_max =
2178 (phba->cfg_hba_queue_depth * 2);
2179 pring->fast_iotag = pring->iotag_max;
2180 pring->num_mask = 0;
2181 break;
2182 case LPFC_IP_RING: /* ring 1 - IP */
2183 /* numCiocb and numRiocb are used in config_port */
2184 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
2185 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
2186 pring->num_mask = 0;
2187 break;
2188 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
2189 /* numCiocb and numRiocb are used in config_port */
2190 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
2191 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
2192 pring->fast_iotag = 0;
2193 pring->iotag_ctr = 0;
2194 pring->iotag_max = 4096;
2195 pring->num_mask = 4;
2196 pring->prt[0].profile = 0; /* Mask 0 */
2197 pring->prt[0].rctl = FC_ELS_REQ;
2198 pring->prt[0].type = FC_ELS_DATA;
2199 pring->prt[0].lpfc_sli_rcv_unsol_event =
2200 lpfc_els_unsol_event;
2201 pring->prt[1].profile = 0; /* Mask 1 */
2202 pring->prt[1].rctl = FC_ELS_RSP;
2203 pring->prt[1].type = FC_ELS_DATA;
2204 pring->prt[1].lpfc_sli_rcv_unsol_event =
2205 lpfc_els_unsol_event;
2206 pring->prt[2].profile = 0; /* Mask 2 */
2207 /* NameServer Inquiry */
2208 pring->prt[2].rctl = FC_UNSOL_CTL;
2209 /* NameServer */
2210 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
2211 pring->prt[2].lpfc_sli_rcv_unsol_event =
2212 lpfc_ct_unsol_event;
2213 pring->prt[3].profile = 0; /* Mask 3 */
2214 /* NameServer response */
2215 pring->prt[3].rctl = FC_SOL_CTL;
2216 /* NameServer */
2217 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
2218 pring->prt[3].lpfc_sli_rcv_unsol_event =
2219 lpfc_ct_unsol_event;
2220 break;
2221 }
2222 totiocb += (pring->numCiocb + pring->numRiocb);
2223 }
2224 if (totiocb > MAX_SLI2_IOCB) {
2225 /* Too many cmd / rsp ring entries in SLI2 SLIM */
2226 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2227 "%d:0462 Too many cmd / rsp ring entries in "
2228 "SLI2 SLIM Data: x%x x%x\n",
2229 phba->brd_no, totiocb, MAX_SLI2_IOCB);
2230 }
2231
2232 return 0;
2233}
2234
2235int
2236lpfc_sli_queue_setup(struct lpfc_hba * phba)
2237{
2238 struct lpfc_sli *psli;
2239 struct lpfc_sli_ring *pring;
2240 int i, cnt;
2241
2242 psli = &phba->sli;
2243 spin_lock_irq(phba->host->host_lock);
2244 INIT_LIST_HEAD(&psli->mboxq);
2245 /* Initialize list headers for txq and txcmplq as double linked lists */
2246 for (i = 0; i < psli->num_rings; i++) {
2247 pring = &psli->ring[i];
2248 pring->ringno = i;
2249 pring->next_cmdidx = 0;
2250 pring->local_getidx = 0;
2251 pring->cmdidx = 0;
2252 INIT_LIST_HEAD(&pring->txq);
2253 INIT_LIST_HEAD(&pring->txcmplq);
2254 INIT_LIST_HEAD(&pring->iocb_continueq);
2255 INIT_LIST_HEAD(&pring->postbufq);
2256 cnt = pring->fast_iotag;
2257 spin_unlock_irq(phba->host->host_lock);
2258 if (cnt) {
2259 pring->fast_lookup =
2260 kmalloc(cnt * sizeof (struct lpfc_iocbq *),
2261 GFP_KERNEL);
2262 			if (pring->fast_lookup == NULL) {
2263 return (0);
2264 }
2265 memset((char *)pring->fast_lookup, 0,
2266 cnt * sizeof (struct lpfc_iocbq *));
2267 }
2268 spin_lock_irq(phba->host->host_lock);
2269 }
2270 spin_unlock_irq(phba->host->host_lock);
2271 return (1);
2272}
2273
2274int
2275lpfc_sli_hba_down(struct lpfc_hba * phba)
2276{
2277 struct lpfc_sli *psli;
2278 struct lpfc_sli_ring *pring;
2279 LPFC_MBOXQ_t *pmb;
2280 struct lpfc_iocbq *iocb, *next_iocb;
2281 IOCB_t *icmd = NULL;
2282 int i;
2283 unsigned long flags = 0;
2284
2285 psli = &phba->sli;
2286 lpfc_hba_down_prep(phba);
2287
2288 spin_lock_irqsave(phba->host->host_lock, flags);
2289
2290 for (i = 0; i < psli->num_rings; i++) {
2291 pring = &psli->ring[i];
2292 pring->flag |= LPFC_DEFERRED_RING_EVENT;
2293
2294 /*
2295 * Error everything on the txq since these iocbs have not been
2296 * given to the FW yet.
2297 */
2298 pring->txq_cnt = 0;
2299
2300 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2301 list_del_init(&iocb->list);
2302 if (iocb->iocb_cmpl) {
2303 icmd = &iocb->iocb;
2304 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2305 icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2306 spin_unlock_irqrestore(phba->host->host_lock,
2307 flags);
2308 (iocb->iocb_cmpl) (phba, iocb, iocb);
2309 spin_lock_irqsave(phba->host->host_lock, flags);
2310 } else {
2311 list_add_tail(&iocb->list,
2312 &phba->lpfc_iocb_list);
2313 }
2314 }
2315
2316 INIT_LIST_HEAD(&(pring->txq));
2317
2318 if (pring->fast_lookup) {
2319 kfree(pring->fast_lookup);
2320 pring->fast_lookup = NULL;
2321 }
2322
2323 }
2324
2325 spin_unlock_irqrestore(phba->host->host_lock, flags);
2326
2327 /* Return any active mbox cmds */
2328 del_timer_sync(&psli->mbox_tmo);
2329 spin_lock_irqsave(phba->host->host_lock, flags);
2330 phba->work_hba_events &= ~WORKER_MBOX_TMO;
2331 if (psli->mbox_active) {
2332 pmb = psli->mbox_active;
2333 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
2334 if (pmb->mbox_cmpl) {
2335 spin_unlock_irqrestore(phba->host->host_lock, flags);
2336 pmb->mbox_cmpl(phba,pmb);
2337 spin_lock_irqsave(phba->host->host_lock, flags);
2338 }
2339 }
2340 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2341 psli->mbox_active = NULL;
2342
2343 /* Return any pending mbox cmds */
2344 while ((pmb = lpfc_mbox_get(phba)) != NULL) {
2345 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
2346 if (pmb->mbox_cmpl) {
2347 spin_unlock_irqrestore(phba->host->host_lock, flags);
2348 pmb->mbox_cmpl(phba,pmb);
2349 spin_lock_irqsave(phba->host->host_lock, flags);
2350 }
2351 }
2352
2353 INIT_LIST_HEAD(&psli->mboxq);
2354
2355 spin_unlock_irqrestore(phba->host->host_lock, flags);
2356
2357 /*
2358 	 * If the HBA is not already in an error state, reset it; it is no
2359 	 * longer capable of IO.
2360 */
2361 if (phba->hba_state != LPFC_HBA_ERROR) {
2362 phba->hba_state = LPFC_INIT_START;
2363 lpfc_sli_brdreset(phba, 1);
2364 }
2365
2366 return 1;
2367}
2368
2369void
2370lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
2371{
2372 uint32_t *src = srcp;
2373 uint32_t *dest = destp;
2374 uint32_t ldata;
2375 int i;
2376
2377 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
2378 ldata = *src;
2379 ldata = le32_to_cpu(ldata);
2380 *dest = ldata;
2381 src++;
2382 dest++;
2383 }
2384}
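lpfc_sli_pcimem_bcopy() copies SLIM data one 32-bit word at a time, converting each word from little-endian wire order to host order; note that cnt is a byte count while the pointers advance one word per pass. A standalone sketch of the same loop, using glibc's le32toh()/htole32() in place of the kernel's le32_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static void pcimem_bcopy_sketch(const void *srcp, void *destp, uint32_t cnt)
{
	const uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t i;

	/* cnt is in bytes; step a 32-bit word per iteration */
	for (i = 0; i < cnt; i += sizeof(uint32_t))
		*dest++ = le32toh(*src++);
}

int main(void)
{
	uint32_t slim[2] = { htole32(0x11223344), htole32(0xdeadbeef) };
	uint32_t host[2];

	pcimem_bcopy_sketch(slim, host, sizeof(slim));
	printf("%08x %08x\n", (unsigned)host[0], (unsigned)host[1]);
	/* prints: 11223344 deadbeef regardless of host endianness */
	return 0;
}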
2385
2386int
2387lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
2388 struct lpfc_dmabuf * mp)
2389{
2390 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
2391 later */
2392 list_add_tail(&mp->list, &pring->postbufq);
2393
2394 pring->postbufq_cnt++;
2395 return 0;
2396}
2397
2398
2399struct lpfc_dmabuf *
2400lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2401 dma_addr_t phys)
2402{
2403 struct lpfc_dmabuf *mp, *next_mp;
2404 struct list_head *slp = &pring->postbufq;
2405
2406 	/* Search postbufq, from the beginning, looking for a match on phys */
2407 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
2408 if (mp->phys == phys) {
2409 list_del_init(&mp->list);
2410 pring->postbufq_cnt--;
2411 return mp;
2412 }
2413 }
2414
2415 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2416 "%d:0410 Cannot find virtual addr for mapped buf on "
2417 "ring %d Data x%llx x%p x%p x%x\n",
2418 phba->brd_no, pring->ringno, (unsigned long long)phys,
2419 slp->next, slp->prev, pring->postbufq_cnt);
2420 return NULL;
2421}
2422
2423static void
2424lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2425 struct lpfc_iocbq * rspiocb)
2426{
2427 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
2428 /* Free the resources associated with the ELS_REQUEST64 IOCB the driver
2429 * just aborted.
2430 * In this case, context2 = cmd, context2->next = rsp, context3 = bpl
2431 */
2432 if (cmdiocb->context2) {
2433 buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2;
2434
2435 /* Free the response IOCB before completing the abort
2436 command. */
2437 buf_ptr = NULL;
2438 list_remove_head((&buf_ptr1->list), buf_ptr,
2439 struct lpfc_dmabuf, list);
2440 if (buf_ptr) {
2441 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2442 kfree(buf_ptr);
2443 }
2444 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
2445 kfree(buf_ptr1);
2446 }
2447
2448 if (cmdiocb->context3) {
2449 buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3;
2450 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2451 kfree(buf_ptr);
2452 }
2453
2454 list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
2455 return;
2456}
2457
2458int
2459lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
2460 struct lpfc_sli_ring * pring,
2461 struct lpfc_iocbq * cmdiocb)
2462{
2463 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
2464 struct lpfc_iocbq *abtsiocbp = NULL;
2465 IOCB_t *icmd = NULL;
2466 IOCB_t *iabt = NULL;
2467
2468 /* issue ABTS for this IOCB based on iotag */
2469 list_remove_head(lpfc_iocb_list, abtsiocbp, struct lpfc_iocbq, list);
2470 if (abtsiocbp == NULL)
2471 return 0;
2472 memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
2473
2474 iabt = &abtsiocbp->iocb;
2475 icmd = &cmdiocb->iocb;
2476 switch (icmd->ulpCommand) {
2477 case CMD_ELS_REQUEST64_CR:
2478 /* Even though we abort the ELS command, the firmware may access
2479 * the BPL or other resources before it processes our
2480 * ABORT_MXRI64. Thus we must delay reusing the cmdiocb
2481 * resources till the actual abort request completes.
2482 */
2483 abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand);
2484 abtsiocbp->context2 = cmdiocb->context2;
2485 abtsiocbp->context3 = cmdiocb->context3;
2486 cmdiocb->context2 = NULL;
2487 cmdiocb->context3 = NULL;
2488 abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
2489 break;
2490 default:
2491 list_add_tail(&abtsiocbp->list, lpfc_iocb_list);
2492 return 0;
2493 }
2494
2495 iabt->un.amxri.abortType = ABORT_TYPE_ABTS;
2496 iabt->un.amxri.iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32;
2497
2498 iabt->ulpLe = 1;
2499 iabt->ulpClass = CLASS3;
2500 iabt->ulpCommand = CMD_ABORT_MXRI64_CN;
2501
2502 if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
2503 list_add_tail(&abtsiocbp->list, lpfc_iocb_list);
2504 return 0;
2505 }
2506
2507 return 1;
2508}
2509
2510static int
2511lpfc_sli_validate_iocb_cmd(struct lpfc_scsi_buf *lpfc_cmd, uint16_t tgt_id,
2512 uint64_t lun_id, struct lpfc_iocbq *iocb,
2513 uint32_t ctx, lpfc_ctx_cmd ctx_cmd)
2514{
2515 int rc = 1;
2516
2517 if (lpfc_cmd == NULL)
2518 return rc;
2519
2520 switch (ctx_cmd) {
2521 case LPFC_CTX_LUN:
2522 if ((lpfc_cmd->pCmd->device->id == tgt_id) &&
2523 (lpfc_cmd->pCmd->device->lun == lun_id))
2524 rc = 0;
2525 break;
2526 case LPFC_CTX_TGT:
2527 if (lpfc_cmd->pCmd->device->id == tgt_id)
2528 rc = 0;
2529 break;
2530 	case LPFC_CTX_CTX:
2531 		if (iocb->iocb.ulpContext == ctx)
2532 			rc = 0;
		break;		/* do not fall through into LPFC_CTX_HOST */
2533 	case LPFC_CTX_HOST:
2534 rc = 0;
2535 break;
2536 default:
2537 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
2538 __FUNCTION__, ctx_cmd);
2539 break;
2540 }
2541
2542 return rc;
2543}
2544
2545int
2546lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2547 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
2548{
2549 struct lpfc_iocbq *iocb, *next_iocb;
2550 IOCB_t *cmd = NULL;
2551 struct lpfc_scsi_buf *lpfc_cmd;
2552 int sum = 0, ret_val = 0;
2553
2554 	/* Check the txcmplq */
2555 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2556 cmd = &iocb->iocb;
2557
2558 /* Must be a FCP command */
2559 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2560 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2561 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2562 continue;
2563 }
2564
2565 /* context1 MUST be a struct lpfc_scsi_buf */
2566 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
2567 ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
2568 NULL, 0, ctx_cmd);
2569 if (ret_val != 0)
2570 continue;
2571 sum++;
2572 }
2573 return sum;
2574}
2575
2576int
2577lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2578 uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
2579 lpfc_ctx_cmd abort_cmd)
2580{
2581 struct lpfc_iocbq *iocb, *next_iocb;
2582 struct lpfc_iocbq *abtsiocb = NULL;
2583 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
2584 IOCB_t *cmd = NULL;
2585 struct lpfc_scsi_buf *lpfc_cmd;
2586 int errcnt = 0, ret_val = 0;
2587
2588 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2589 cmd = &iocb->iocb;
2590
2591 /* Must be a FCP command */
2592 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2593 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2594 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2595 continue;
2596 }
2597
2598 /* context1 MUST be a struct lpfc_scsi_buf */
2599 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
2600 ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
2601 iocb, ctx, abort_cmd);
2602 if (ret_val != 0)
2603 continue;
2604
2605 /* issue ABTS for this IOCB based on iotag */
2606 list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq,
2607 list);
2608 if (abtsiocb == NULL) {
2609 errcnt++;
2610 continue;
2611 }
2612 memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));
2613
2614 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
2615 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
2616 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
2617 abtsiocb->iocb.ulpLe = 1;
2618 abtsiocb->iocb.ulpClass = cmd->ulpClass;
2619
2620 if (phba->hba_state >= LPFC_LINK_UP)
2621 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
2622 else
2623 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
2624
2625 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
2626 if (ret_val == IOCB_ERROR) {
2627 list_add_tail(&abtsiocb->list, lpfc_iocb_list);
2628 errcnt++;
2629 continue;
2630 }
2631 }
2632
2633 return errcnt;
2634}
2635
2636void
2637lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
2638 struct lpfc_iocbq * queue1,
2639 struct lpfc_iocbq * queue2)
2640{
2641 if (queue1->context2 && queue2)
2642 memcpy(queue1->context2, queue2, sizeof (struct lpfc_iocbq));
2643
2644 	/* The waiter polls for the LPFC_IO_HIPRI bit to be set as its
2645 	   signal to wake up */
2646 queue1->iocb_flag |= LPFC_IO_HIPRI;
2647 return;
2648}
2649
2650int
2651lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
2652 struct lpfc_sli_ring * pring,
2653 struct lpfc_iocbq * piocb,
2654 uint32_t flag,
2655 struct lpfc_iocbq * prspiocbq,
2656 uint32_t timeout)
2657{
2658 int j, delay_time, retval = IOCB_ERROR;
2659
2660 	/* The caller must leave context1 empty. */
2661 if (piocb->context_un.hipri_wait_queue != 0) {
2662 return IOCB_ERROR;
2663 }
2664
2665 /*
2666 * If the caller has provided a response iocbq buffer, context2 must
2667 	 * be NULL or it's an error.
2668 */
2669 if (prspiocbq && piocb->context2) {
2670 return IOCB_ERROR;
2671 }
2672
2673 piocb->context2 = prspiocbq;
2674
2675 /* Setup callback routine and issue the command. */
2676 piocb->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
2677 retval = lpfc_sli_issue_iocb(phba, pring, piocb,
2678 flag | SLI_IOCB_HIGH_PRIORITY);
2679 if (retval != IOCB_SUCCESS) {
2680 piocb->context2 = NULL;
2681 return IOCB_ERROR;
2682 }
2683
2684 /*
2685 * This high-priority iocb was sent out-of-band. Poll for its
2686 * completion rather than wait for a signal. Note that the host_lock
2687 * is held by the midlayer and must be released here to allow the
2688 * interrupt handlers to complete the IO and signal this routine via
2689 * the iocb_flag.
2690 * Also, the delay_time is computed to be one second longer than
2691 * the scsi command timeout to give the FW time to abort on
2692 * timeout rather than the driver just giving up. Typically,
2693 * the midlayer does not specify a time for this command so the
2694 * driver is free to enforce its own timeout.
2695 */
2696
2697 delay_time = ((timeout + 1) * 1000) >> 6;
2698 retval = IOCB_ERROR;
2699 spin_unlock_irq(phba->host->host_lock);
2700 for (j = 0; j < 64; j++) {
2701 msleep(delay_time);
2702 if (piocb->iocb_flag & LPFC_IO_HIPRI) {
2703 piocb->iocb_flag &= ~LPFC_IO_HIPRI;
2704 retval = IOCB_SUCCESS;
2705 break;
2706 }
2707 }
2708
2709 spin_lock_irq(phba->host->host_lock);
2710 piocb->context2 = NULL;
2711 return retval;
2712}
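The polling budget above works out as follows: each of the 64 passes sleeps ((timeout + 1) * 1000) >> 6 milliseconds, and since >> 6 is an integer divide by 64 the total is roughly (timeout + 1) seconds, i.e. one second longer than the SCSI command timeout, as the comment promises. A quick standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int timeout = 30;	/* seconds; example value only */
	unsigned int delay_ms = ((timeout + 1) * 1000) >> 6;

	printf("per-poll %u ms, total %u ms\n", delay_ms, delay_ms * 64);
	/* prints: per-poll 484 ms, total 30976 ms -- just under 31 s */
	return 0;
}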
2713int
2714lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
2715 uint32_t timeout)
2716{
2717 DECLARE_WAIT_QUEUE_HEAD(done_q);
2718 DECLARE_WAITQUEUE(wq_entry, current);
2719 uint32_t timeleft = 0;
2720 int retval;
2721
2722 /* The caller must leave context1 empty. */
2723 if (pmboxq->context1 != 0) {
2724 return (MBX_NOT_FINISHED);
2725 }
2726
2727 	/* set up the wake call as the mailbox completion callback */
2728 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
2729 /* setup context field to pass wait_queue pointer to wake function */
2730 pmboxq->context1 = &done_q;
2731
2732 	/* mark ourselves sleeping before issuing, so the wakeup cannot be missed */
2733 set_current_state(TASK_INTERRUPTIBLE);
2734 add_wait_queue(&done_q, &wq_entry);
2735
2736 /* now issue the command */
2737 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
2738
2739 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
2740 timeleft = schedule_timeout(timeout * HZ);
2741 pmboxq->context1 = NULL;
2742 /* if schedule_timeout returns 0, we timed out and were not
2743 woken up */
2744 if (timeleft == 0) {
2745 retval = MBX_TIMEOUT;
2746 } else {
2747 retval = MBX_SUCCESS;
2748 }
2749 }
2750
2751
2752 set_current_state(TASK_RUNNING);
2753 remove_wait_queue(&done_q, &wq_entry);
2754 return retval;
2755}
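The ordering in lpfc_sli_issue_mbox_wait() matters: the task marks itself sleeping and joins the wait queue before issuing the mailbox command, so a completion that fires immediately cannot slip in between the issue and schedule_timeout() and be lost. A userspace analogue of the same lost-wakeup discipline, sketched with POSIX primitives; all names here are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_q = PTHREAD_COND_INITIALIZER;
static int mbox_done;

static void *completer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	mbox_done = 1;			/* like mbox_cmpl() waking done_q */
	pthread_cond_signal(&done_q);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&lock);	/* "add_wait_queue" before issuing */
	pthread_create(&t, NULL, completer, NULL);	/* "issue the command" */
	while (!mbox_done)		/* predicate check closes the race window */
		pthread_cond_wait(&done_q, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	puts("mailbox completed");
	return 0;
}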
2756
2757irqreturn_t
2758lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
2759{
2760 struct lpfc_hba *phba;
2761 uint32_t ha_copy;
2762 uint32_t work_ha_copy;
2763 unsigned long status;
2764 int i;
2765 uint32_t control;
2766
2767 /*
2768 * Get the driver's phba structure from the dev_id and
2769 * assume the HBA is not interrupting.
2770 */
2771 phba = (struct lpfc_hba *) dev_id;
2772
2773 if (unlikely(!phba))
2774 return IRQ_NONE;
2775
2776 phba->sli.slistat.sli_intr++;
2777
2778 /*
2779 * Call the HBA to see if it is interrupting. If not, don't claim
2780 * the interrupt
2781 */
2782
2783 /* Ignore all interrupts during initialization. */
2784 if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
2785 return IRQ_NONE;
2786
2787 /*
2788 * Read host attention register to determine interrupt source
2789 * Clear Attention Sources, except Error Attention (to
2790 * preserve status) and Link Attention
2791 */
2792 spin_lock(phba->host->host_lock);
2793 ha_copy = readl(phba->HAregaddr);
2794 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
2795 readl(phba->HAregaddr); /* flush */
2796 spin_unlock(phba->host->host_lock);
2797
2798 if (unlikely(!ha_copy))
2799 return IRQ_NONE;
2800
2801 work_ha_copy = ha_copy & phba->work_ha_mask;
2802
2803 if (unlikely(work_ha_copy)) {
2804 if (work_ha_copy & HA_LATT) {
2805 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
2806 /*
2807 * Turn off Link Attention interrupts
2808 * until CLEAR_LA done
2809 */
2810 spin_lock(phba->host->host_lock);
2811 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
2812 control = readl(phba->HCregaddr);
2813 control &= ~HC_LAINT_ENA;
2814 writel(control, phba->HCregaddr);
2815 readl(phba->HCregaddr); /* flush */
2816 spin_unlock(phba->host->host_lock);
2817 }
2818 else
2819 work_ha_copy &= ~HA_LATT;
2820 }
2821
2822 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
2823 for (i = 0; i < phba->sli.num_rings; i++) {
2824 if (work_ha_copy & (HA_RXATT << (4*i))) {
2825 /*
2826 * Turn off Slow Rings interrupts
2827 */
2828 spin_lock(phba->host->host_lock);
2829 control = readl(phba->HCregaddr);
2830 control &= ~(HC_R0INT_ENA << i);
2831 writel(control, phba->HCregaddr);
2832 readl(phba->HCregaddr); /* flush */
2833 spin_unlock(phba->host->host_lock);
2834 }
2835 }
2836 }
2837
2838 if (work_ha_copy & HA_ERATT) {
2839 phba->hba_state = LPFC_HBA_ERROR;
2840 /*
2841 * There was a link/board error. Read the
2842 * status register to retrieve the error event
2843 * and process it.
2844 */
2845 phba->sli.slistat.err_attn_event++;
2846 /* Save status info */
2847 phba->work_hs = readl(phba->HSregaddr);
2848 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
2849 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
2850
2851 /* Clear Chip error bit */
2852 writel(HA_ERATT, phba->HAregaddr);
2853 readl(phba->HAregaddr); /* flush */
2854
2855 /*
2856 	 * Resetting the HBA is the only reliable way
2857 	 * to shut down interrupts when there is an
2858 	 * ERROR.
2859 */
2860 lpfc_sli_send_reset(phba, phba->hba_state);
2861 }
2862
2863 spin_lock(phba->host->host_lock);
2864 phba->work_ha |= work_ha_copy;
2865 if (phba->work_wait)
2866 wake_up(phba->work_wait);
2867 spin_unlock(phba->host->host_lock);
2868 }
2869
2870 ha_copy &= ~(phba->work_ha_mask);
2871
2872 /*
2873 * Process all events on FCP ring. Take the optimized path for
2874 * FCP IO. Any other IO is slow path and is handled by
2875 * the worker thread.
2876 */
2877 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
2878 status >>= (4*LPFC_FCP_RING);
2879 if (status & HA_RXATT)
2880 lpfc_sli_handle_fast_ring_event(phba,
2881 &phba->sli.ring[LPFC_FCP_RING],
2882 status);
2883 return IRQ_HANDLED;
2884
2885} /* lpfc_intr_handler */
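The interrupt handler's ring dispatch relies on the host-attention register giving each of the four rings its own 4-bit field, so ring i's bits are tested with (mask << (4 * i)) and extracted by shifting back down, as the FCP fast path does at the end above. A standalone sketch of that decoding; the SK_HA_* values are assumed placeholders, not the driver's real register definitions:

#include <stdint.h>
#include <stdio.h>

#define SK_HA_RXATT	0x8u	/* assumed receive-attention bit of a nibble */
#define SK_HA_RXMASK	0xfu	/* assumed full per-ring nibble mask */

int main(void)
{
	uint32_t ha_copy = SK_HA_RXATT << (4 * 2);	/* attention on ring 2 */
	int i;

	for (i = 0; i < 4; i++) {
		uint32_t status = (ha_copy & (SK_HA_RXMASK << (4 * i))) >> (4 * i);

		if (status & SK_HA_RXATT)
			printf("ring %d has receive attention\n", i);
	}
	return 0;
}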
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
new file mode 100644
index 000000000000..abd9a8c84e9e
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -0,0 +1,216 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_sli.h 1.42 2005/03/21 02:01:28EST sf_support Exp $
23 */
24
25/* forward declaration for LPFC_IOCB_t's use */
26struct lpfc_hba;
27
28/* Define the context types that SLI handles for abort and sums. */
29typedef enum _lpfc_ctx_cmd {
30 LPFC_CTX_LUN,
31 LPFC_CTX_TGT,
32 LPFC_CTX_CTX,
33 LPFC_CTX_HOST
34} lpfc_ctx_cmd;
35
36/* This structure is used to handle IOCB requests / responses */
37struct lpfc_iocbq {
38 	/* lpfc_iocbqs are used in doubly linked lists */
39 struct list_head list;
40 IOCB_t iocb; /* IOCB cmd */
41 uint8_t retry; /* retry counter for IOCB cmd - if needed */
42 uint8_t iocb_flag;
43#define LPFC_IO_POLL 1 /* Polling mode iocb */
44#define LPFC_IO_LIBDFC 2 /* libdfc iocb */
45#define LPFC_IO_WAIT 4
46#define LPFC_IO_HIPRI 8 /* High Priority Queue signal flag */
47
48 uint8_t abort_count;
49 uint8_t rsvd2;
50 uint32_t drvrTimeout; /* driver timeout in seconds */
51 void *context1; /* caller context information */
52 void *context2; /* caller context information */
53 void *context3; /* caller context information */
54 union {
55 wait_queue_head_t *hipri_wait_queue; /* High Priority Queue wait
56 queue */
57 struct lpfc_iocbq *rsp_iocb;
58 struct lpfcMboxq *mbox;
59 } context_un;
60
61 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
62 struct lpfc_iocbq *);
63
64};
65
66#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
67#define SLI_IOCB_HIGH_PRIORITY 2 /* High priority command */
68
69#define IOCB_SUCCESS 0
70#define IOCB_BUSY 1
71#define IOCB_ERROR 2
72#define IOCB_TIMEDOUT 3
73
74typedef struct lpfcMboxq {
75 	/* MBOXQs are used in singly linked lists */
76 struct list_head list; /* ptr to next mailbox command */
77 MAILBOX_t mb; /* Mailbox cmd */
78 void *context1; /* caller context information */
79 void *context2; /* caller context information */
80
81 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
82
83} LPFC_MBOXQ_t;
84
85#define MBX_POLL 1 /* poll mailbox till command done, then
86 return */
87#define MBX_NOWAIT 2 /* issue command then return immediately */
88#define MBX_STOP_IOCB 4 /* Stop iocb processing till mbox cmds
89 complete */
90
91#define LPFC_MAX_RING_MASK 4 /* max num of rctl/type masks allowed per
92 ring */
93#define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */
94
95struct lpfc_sli_ring;
96
97struct lpfc_sli_ring_mask {
98 uint8_t profile; /* profile associated with ring */
99 uint8_t rctl; /* rctl / type pair configured for ring */
100 uint8_t type; /* rctl / type pair configured for ring */
101 uint8_t rsvd;
102 /* rcv'd unsol event */
103 void (*lpfc_sli_rcv_unsol_event) (struct lpfc_hba *,
104 struct lpfc_sli_ring *,
105 struct lpfc_iocbq *);
106};
107
108
109/* Structure used to hold SLI statistical counters and info */
110struct lpfc_sli_ring_stat {
111 uint64_t iocb_event; /* IOCB event counters */
112 uint64_t iocb_cmd; /* IOCB cmd issued */
113 uint64_t iocb_rsp; /* IOCB rsp received */
114 uint64_t iocb_cmd_delay; /* IOCB cmd ring delay */
115 uint64_t iocb_cmd_full; /* IOCB cmd ring full */
116 uint64_t iocb_cmd_empty; /* IOCB cmd ring is now empty */
117 uint64_t iocb_rsp_full; /* IOCB rsp ring full */
118};
119
120/* Structure used to hold SLI ring information */
121struct lpfc_sli_ring {
122 uint16_t flag; /* ring flags */
123#define LPFC_DEFERRED_RING_EVENT 0x001 /* Deferred processing a ring event */
124#define LPFC_CALL_RING_AVAILABLE 0x002 /* indicates cmd was full */
125#define LPFC_STOP_IOCB_MBX 0x010 /* Stop processing IOCB cmds mbox */
126#define LPFC_STOP_IOCB_EVENT 0x020 /* Stop processing IOCB cmds event */
127#define LPFC_STOP_IOCB_MASK 0x030 /* Stop processing IOCB cmds mask */
128 uint16_t abtsiotag; /* tracks next iotag to use for ABTS */
129
130 uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */
131 uint32_t next_cmdidx; /* next_cmd index */
132 uint8_t rsvd;
133 uint8_t ringno; /* ring number */
134 uint8_t rspidx; /* current index in response ring */
135 uint8_t cmdidx; /* current index in command ring */
136 uint16_t numCiocb; /* number of command iocb's per ring */
137 uint16_t numRiocb; /* number of rsp iocb's per ring */
138
139 uint32_t fast_iotag; /* max fastlookup based iotag */
140 uint32_t iotag_ctr; /* keeps track of the next iotag to use */
141 uint32_t iotag_max; /* max iotag value to use */
142 struct lpfc_iocbq ** fast_lookup; /* array of IOCB ptrs indexed by
143 iotag */
144 struct list_head txq;
145 uint16_t txq_cnt; /* current length of queue */
146 uint16_t txq_max; /* max length */
147 struct list_head txcmplq;
148 uint16_t txcmplq_cnt; /* current length of queue */
149 uint16_t txcmplq_max; /* max length */
150 uint32_t *cmdringaddr; /* virtual address for cmd rings */
151 uint32_t *rspringaddr; /* virtual address for rsp rings */
152 uint32_t missbufcnt; /* keep track of buffers to post */
153 struct list_head postbufq;
154 uint16_t postbufq_cnt; /* current length of queue */
155 uint16_t postbufq_max; /* max length */
156 struct list_head iocb_continueq;
157 uint16_t iocb_continueq_cnt; /* current length of queue */
158 uint16_t iocb_continueq_max; /* max length */
159
160 struct lpfc_sli_ring_mask prt[LPFC_MAX_RING_MASK];
161 uint32_t num_mask; /* number of mask entries in prt array */
162
163 struct lpfc_sli_ring_stat stats; /* SLI statistical info */
164
165 /* cmd ring available */
166 void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
167 struct lpfc_sli_ring *);
168};
169
170/* Structure used to hold SLI statistical counters and info */
171struct lpfc_sli_stat {
172 uint64_t mbox_stat_err; /* Mbox cmds completed status error */
173 uint64_t mbox_cmd; /* Mailbox commands issued */
174 uint64_t sli_intr; /* Count of Host Attention interrupts */
175 uint32_t err_attn_event; /* Error Attn event counters */
176 uint32_t link_event; /* Link event counters */
177 uint32_t mbox_event; /* Mailbox event counters */
178 uint32_t mbox_busy; /* Mailbox cmd busy */
179};
180
181/* Structure used to hold SLI information */
182struct lpfc_sli {
183 uint32_t num_rings;
184 uint32_t sli_flag;
185
186 /* Additional sli_flags */
187#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
188#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */
189#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
190
191 struct lpfc_sli_ring ring[LPFC_MAX_RING];
192 int fcp_ring; /* ring used for FCP initiator commands */
193 int next_ring;
194
195 int ip_ring; /* ring used for IP network drv cmds */
196
197 struct lpfc_sli_stat slistat; /* SLI statistical info */
198 struct list_head mboxq;
199 uint16_t mboxq_cnt; /* current length of queue */
200 uint16_t mboxq_max; /* max length */
201 LPFC_MBOXQ_t *mbox_active; /* active mboxq information */
202
203 struct timer_list mbox_tmo; /* Hold clk to timeout active mbox
204 cmd */
205
206 uint32_t *MBhostaddr; /* virtual address for mbox cmds */
207};
208
209/* Given a pointer to the start of the ring, and the slot number of
210 * the desired iocb entry, calc a pointer to that entry.
211 * (assume iocb entry size is 32 bytes, or 8 words)
212 */
213#define IOCB_ENTRY(ring,slot) ((IOCB_t *)(((char *)(ring)) + ((slot) * 32)))
214
215#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
216 command */
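IOCB_ENTRY() above is plain slot arithmetic: with 32-byte ring entries, slot n lives at ring base + n * 32. A minimal check of the offset math; SK_IOCB_ENTRY is a stand-in so the sketch compiles without IOCB_t:

#include <stddef.h>
#include <stdio.h>

#define SK_IOCB_ENTRY(ring, slot) ((void *)(((char *)(ring)) + ((slot) * 32)))

int main(void)
{
	char ring[32 * 4];	/* a 4-slot ring */

	printf("slot 3 offset = %td\n",
	       (char *)SK_IOCB_ENTRY(ring, 3) - ring);	/* prints 96 */
	return 0;
}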
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
new file mode 100644
index 000000000000..dfacd8d82097
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -0,0 +1,32 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
7 * www.emulex.com *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
20
21/*
22 * $Id: lpfc_version.h 1.49 2005/04/13 15:07:19EDT sf_support Exp $
23 */
24
25#define LPFC_DRIVER_VERSION "8.0.28"
26
27#define LPFC_DRIVER_NAME "lpfc"
28
29#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
30 LPFC_DRIVER_VERSION
31
32#define DFC_API_VERSION "0.0.0"
diff --git a/drivers/scsi/pci2000.c b/drivers/scsi/pci2000.c
index d58f303127f5..377a4666b568 100644
--- a/drivers/scsi/pci2000.c
+++ b/drivers/scsi/pci2000.c
@@ -209,7 +209,7 @@ static int BuildSgList (Scsi_Cmnd *SCpnt, PADAPTER2000 padapter, PDEV2000 pdev)
209 if ( SCpnt->use_sg ) 209 if ( SCpnt->use_sg )
210 { 210 {
211 sg = (struct scatterlist *)SCpnt->request_buffer; 211 sg = (struct scatterlist *)SCpnt->request_buffer;
212 zc = pci_map_sg (padapter->pdev, sg, SCpnt->use_sg, scsi_to_pci_dma_dir (SCpnt->sc_data_direction)); 212 zc = pci_map_sg (padapter->pdev, sg, SCpnt->use_sg, SCpnt->sc_data_direction);
213 for ( z = 0; z < zc; z++ ) 213 for ( z = 0; z < zc; z++ )
214 { 214 {
215 pdev->scatGath[z].address = cpu_to_le32 (sg_dma_address (sg)); 215 pdev->scatGath[z].address = cpu_to_le32 (sg_dma_address (sg));
@@ -225,7 +225,9 @@ static int BuildSgList (Scsi_Cmnd *SCpnt, PADAPTER2000 padapter, PDEV2000 pdev)
225 outl (0, padapter->mb3); 225 outl (0, padapter->mb3);
226 return TRUE; 226 return TRUE;
227 } 227 }
228 SCpnt->SCp.have_data_in = pci_map_single (padapter->pdev, SCpnt->request_buffer, SCpnt->request_bufflen, scsi_to_pci_dma_dir (SCpnt->sc_data_direction)); 228 SCpnt->SCp.have_data_in = pci_map_single (padapter->pdev,
229 SCpnt->request_buffer, SCpnt->request_bufflen,
230 SCpnt->sc_data_direction);
229 outl (SCpnt->SCp.have_data_in, padapter->mb2); 231 outl (SCpnt->SCp.have_data_in, padapter->mb2);
230 outl (SCpnt->request_bufflen, padapter->mb3); 232 outl (SCpnt->request_bufflen, padapter->mb3);
231 return TRUE; 233 return TRUE;
@@ -340,11 +342,11 @@ unmapProceed:;
340 } 342 }
341 } 343 }
342 if ( SCpnt->SCp.have_data_in ) 344 if ( SCpnt->SCp.have_data_in )
343 pci_unmap_single (padapter->pdev, SCpnt->SCp.have_data_in, SCpnt->request_bufflen, scsi_to_pci_dma_dir(SCpnt->sc_data_direction)); 345 pci_unmap_single (padapter->pdev, SCpnt->SCp.have_data_in, SCpnt->request_bufflen, SCpnt->sc_data_direction);
344 else 346 else
345 { 347 {
346 if ( SCpnt->use_sg ) 348 if ( SCpnt->use_sg )
347 pci_unmap_sg (padapter->pdev, (struct scatterlist *)SCpnt->request_buffer, SCpnt->use_sg, scsi_to_pci_dma_dir(SCpnt->sc_data_direction)); 349 pci_unmap_sg (padapter->pdev, (struct scatterlist *)SCpnt->request_buffer, SCpnt->use_sg, SCpnt->sc_data_direction);
348 } 350 }
349 351
350irqProceed:; 352irqProceed:;
@@ -495,7 +497,7 @@ int Pci2000_QueueCommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
495 else 497 else
496 { 498 {
497 SCpnt->SCp.have_data_in = pci_map_single (padapter->pdev, SCpnt->request_buffer, SCpnt->request_bufflen, 499 SCpnt->SCp.have_data_in = pci_map_single (padapter->pdev, SCpnt->request_buffer, SCpnt->request_bufflen,
498 scsi_to_pci_dma_dir(SCpnt->sc_data_direction)); 500 SCpnt->sc_data_direction);
499 outl (SCpnt->SCp.have_data_in, padapter->mb2); 501 outl (SCpnt->SCp.have_data_in, padapter->mb2);
500 } 502 }
501 outl (cdb[5], padapter->mb0); 503 outl (cdb[5], padapter->mb0);
@@ -511,13 +513,13 @@ int Pci2000_QueueCommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
511 SCpnt->SCp.have_data_in = pci_map_single (padapter->pdev, 513 SCpnt->SCp.have_data_in = pci_map_single (padapter->pdev,
512 ((struct scatterlist *)SCpnt->request_buffer)->address, 514 ((struct scatterlist *)SCpnt->request_buffer)->address,
513 SCpnt->request_bufflen, 515 SCpnt->request_bufflen,
514 scsi_to_pci_dma_dir (SCpnt->sc_data_direction)); 516 SCpnt->sc_data_direction);
515 } 517 }
516 else 518 else
517 { 519 {
518 SCpnt->SCp.have_data_in = pci_map_single (padapter->pdev, SCpnt->request_buffer, 520 SCpnt->SCp.have_data_in = pci_map_single (padapter->pdev, SCpnt->request_buffer,
519 SCpnt->request_bufflen, 521 SCpnt->request_bufflen,
520 scsi_to_pci_dma_dir (SCpnt->sc_data_direction)); 522 SCpnt->sc_data_direction);
521 } 523 }
522 outl (SCpnt->SCp.have_data_in, padapter->mb2); 524 outl (SCpnt->SCp.have_data_in, padapter->mb2);
523 outl (SCpnt->request_bufflen, padapter->mb3); 525 outl (SCpnt->request_bufflen, padapter->mb3);
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index f7a247defba6..48fdd406c075 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,7 +1,7 @@
1EXTRA_CFLAGS += -DUNIQUE_FW_NAME 1EXTRA_CFLAGS += -DUNIQUE_FW_NAME
2 2
3qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ 3qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
4 qla_dbg.o qla_sup.o qla_rscn.o 4 qla_dbg.o qla_sup.o qla_rscn.o qla_attr.o
5 5
6qla2100-y := ql2100.o ql2100_fw.o 6qla2100-y := ql2100.o ql2100_fw.o
7qla2200-y := ql2200.o ql2200_fw.o 7qla2200-y := ql2200.o ql2200_fw.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
new file mode 100644
index 000000000000..2240a0cde583
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -0,0 +1,338 @@
1/*
2 * QLOGIC LINUX SOFTWARE
3 *
4 * QLogic ISP2x00 device driver for Linux 2.6.x
5 * Copyright (C) 2003-2005 QLogic Corporation
6 * (www.qlogic.com)
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2, or (at your option) any
11 * later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 */
19#include "qla_def.h"
20
21#include <linux/vmalloc.h>
22#include <scsi/scsi_transport_fc.h>
23
24/* SYSFS attributes --------------------------------------------------------- */
25
26static ssize_t
27qla2x00_sysfs_read_fw_dump(struct kobject *kobj, char *buf, loff_t off,
28 size_t count)
29{
30 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
31 struct device, kobj)));
32
33 if (ha->fw_dump_reading == 0)
34 return 0;
35 if (off > ha->fw_dump_buffer_len)
36 return 0;
37 if (off + count > ha->fw_dump_buffer_len)
38 count = ha->fw_dump_buffer_len - off;
39
40 memcpy(buf, &ha->fw_dump_buffer[off], count);
41
42 return (count);
43}
44
45static ssize_t
46qla2x00_sysfs_write_fw_dump(struct kobject *kobj, char *buf, loff_t off,
47 size_t count)
48{
49 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
50 struct device, kobj)));
51 int reading;
52 uint32_t dump_size;
53
54 if (off != 0)
55 return (0);
56
57 reading = simple_strtol(buf, NULL, 10);
58 switch (reading) {
59 case 0:
60 if (ha->fw_dump_reading == 1) {
61 qla_printk(KERN_INFO, ha,
62 "Firmware dump cleared on (%ld).\n",
63 ha->host_no);
64
65 vfree(ha->fw_dump_buffer);
66 free_pages((unsigned long)ha->fw_dump,
67 ha->fw_dump_order);
68
69 ha->fw_dump_reading = 0;
70 ha->fw_dump_buffer = NULL;
71 ha->fw_dump = NULL;
72 }
73 break;
74 case 1:
75 if (ha->fw_dump != NULL && !ha->fw_dump_reading) {
76 ha->fw_dump_reading = 1;
77
78 dump_size = FW_DUMP_SIZE_1M;
79 if (ha->fw_memory_size < 0x20000)
80 dump_size = FW_DUMP_SIZE_128K;
81 else if (ha->fw_memory_size < 0x80000)
82 dump_size = FW_DUMP_SIZE_512K;
83 ha->fw_dump_buffer = (char *)vmalloc(dump_size);
84 if (ha->fw_dump_buffer == NULL) {
85 qla_printk(KERN_WARNING, ha,
86 "Unable to allocate memory for firmware "
87 "dump buffer (%d).\n", dump_size);
88
89 ha->fw_dump_reading = 0;
90 return (count);
91 }
92 qla_printk(KERN_INFO, ha,
93 "Firmware dump ready for read on (%ld).\n",
94 ha->host_no);
95 memset(ha->fw_dump_buffer, 0, dump_size);
96 if (IS_QLA2100(ha) || IS_QLA2200(ha))
97 qla2100_ascii_fw_dump(ha);
98 else
99 qla2300_ascii_fw_dump(ha);
100 ha->fw_dump_buffer_len = strlen(ha->fw_dump_buffer);
101 }
102 break;
103 }
104 return (count);
105}
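The dump-buffer sizing above picks the smallest buffer that can hold the firmware's memory: below 0x20000 the 128K buffer, below 0x80000 the 512K buffer, otherwise the 1M buffer. A hedged sketch of the same selection, with byte sizes standing in for the FW_DUMP_SIZE_* constants:

#include <stdint.h>
#include <stdio.h>

static uint32_t dump_size_for(uint32_t fw_memory_size)
{
	if (fw_memory_size < 0x20000)
		return 128 * 1024;	/* stands in for FW_DUMP_SIZE_128K */
	if (fw_memory_size < 0x80000)
		return 512 * 1024;	/* stands in for FW_DUMP_SIZE_512K */
	return 1024 * 1024;		/* stands in for FW_DUMP_SIZE_1M */
}

int main(void)
{
	printf("%u\n", dump_size_for(0x40000));	/* prints 524288 */
	return 0;
}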
106
107static struct bin_attribute sysfs_fw_dump_attr = {
108 .attr = {
109 .name = "fw_dump",
110 .mode = S_IRUSR | S_IWUSR,
111 .owner = THIS_MODULE,
112 },
113 .size = 0,
114 .read = qla2x00_sysfs_read_fw_dump,
115 .write = qla2x00_sysfs_write_fw_dump,
116};
117
118static ssize_t
119qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf, loff_t off,
120 size_t count)
121{
122 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
123 struct device, kobj)));
124 uint16_t *witer;
125 unsigned long flags;
126 uint16_t cnt;
127
128 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != sizeof(nvram_t))
129 return 0;
130
131 /* Read NVRAM. */
132 spin_lock_irqsave(&ha->hardware_lock, flags);
133 qla2x00_lock_nvram_access(ha);
134 witer = (uint16_t *)buf;
135 for (cnt = 0; cnt < count / 2; cnt++) {
136 *witer = cpu_to_le16(qla2x00_get_nvram_word(ha,
137 cnt+ha->nvram_base));
138 witer++;
139 }
140 qla2x00_unlock_nvram_access(ha);
141 spin_unlock_irqrestore(&ha->hardware_lock, flags);
142
143 return (count);
144}
145
146static ssize_t
147qla2x00_sysfs_write_nvram(struct kobject *kobj, char *buf, loff_t off,
148 size_t count)
149{
150 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
151 struct device, kobj)));
152 uint8_t *iter;
153 uint16_t *witer;
154 unsigned long flags;
155 uint16_t cnt;
156 uint8_t chksum;
157
158 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != sizeof(nvram_t))
159 return 0;
160
161 /* Checksum NVRAM. */
162 iter = (uint8_t *)buf;
163 chksum = 0;
164 for (cnt = 0; cnt < count - 1; cnt++)
165 chksum += *iter++;
166 chksum = ~chksum + 1;
167 *iter = chksum;
168
169 /* Write NVRAM. */
170 spin_lock_irqsave(&ha->hardware_lock, flags);
171 qla2x00_lock_nvram_access(ha);
172 qla2x00_release_nvram_protection(ha);
173 witer = (uint16_t *)buf;
174 for (cnt = 0; cnt < count / 2; cnt++) {
175 qla2x00_write_nvram_word(ha, cnt+ha->nvram_base,
176 cpu_to_le16(*witer));
177 witer++;
178 }
179 qla2x00_unlock_nvram_access(ha);
180 spin_unlock_irqrestore(&ha->hardware_lock, flags);
181
182 return (count);
183}
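The NVRAM write path recomputes the checksum so the image self-validates: the last byte is set to the two's complement of the sum of the others (~sum + 1), which makes the whole buffer sum to zero modulo 256. A standalone check of that property:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t nv[8] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0x00 };
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < sizeof(nv) - 1; i++)
		sum += nv[i];
	nv[sizeof(nv) - 1] = (uint8_t)(~sum + 1);	/* two's complement */

	sum = 0;
	for (i = 0; i < sizeof(nv); i++)
		sum += nv[i];
	printf("total mod 256 = %u\n", (unsigned)sum);	/* prints 0 */
	return 0;
}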
184
185static struct bin_attribute sysfs_nvram_attr = {
186 .attr = {
187 .name = "nvram",
188 .mode = S_IRUSR | S_IWUSR,
189 .owner = THIS_MODULE,
190 },
191 .size = sizeof(nvram_t),
192 .read = qla2x00_sysfs_read_nvram,
193 .write = qla2x00_sysfs_write_nvram,
194};
195
196void
197qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
198{
199 struct Scsi_Host *host = ha->host;
200
201 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr);
202 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
203}
204
205void
206qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
207{
208 struct Scsi_Host *host = ha->host;
209
210 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr);
211 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
212}
213
214/* Host attributes. */
215
216static void
217qla2x00_get_host_port_id(struct Scsi_Host *shost)
218{
219 scsi_qla_host_t *ha = to_qla_host(shost);
220
221 fc_host_port_id(shost) = ha->d_id.b.domain << 16 |
222 ha->d_id.b.area << 8 | ha->d_id.b.al_pa;
223}
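fc_host_port_id above is the usual 24-bit Fibre Channel D_ID packing, one byte each for domain, area and AL_PA. A tiny sketch of the packing with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t domain = 0x01, area = 0x02, al_pa = 0xef;
	uint32_t port_id = (uint32_t)domain << 16 | area << 8 | al_pa;

	printf("D_ID = %06x\n", port_id);	/* prints 0102ef */
	return 0;
}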
224
225static void
226qla2x00_get_starget_node_name(struct scsi_target *starget)
227{
228 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
229 scsi_qla_host_t *ha = to_qla_host(host);
230 fc_port_t *fcport;
231 uint64_t node_name = 0;
232
233 list_for_each_entry(fcport, &ha->fcports, list) {
234 if (starget->id == fcport->os_target_id) {
235 node_name = *(uint64_t *)fcport->node_name;
236 break;
237 }
238 }
239
240 fc_starget_node_name(starget) = be64_to_cpu(node_name);
241}
242
243static void
244qla2x00_get_starget_port_name(struct scsi_target *starget)
245{
246 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
247 scsi_qla_host_t *ha = to_qla_host(host);
248 fc_port_t *fcport;
249 uint64_t port_name = 0;
250
251 list_for_each_entry(fcport, &ha->fcports, list) {
252 if (starget->id == fcport->os_target_id) {
253 port_name = *(uint64_t *)fcport->port_name;
254 break;
255 }
256 }
257
258 fc_starget_port_name(starget) = be64_to_cpu(port_name);
259}
260
261static void
262qla2x00_get_starget_port_id(struct scsi_target *starget)
263{
264 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
265 scsi_qla_host_t *ha = to_qla_host(host);
266 fc_port_t *fcport;
267 uint32_t port_id = ~0U;
268
269 list_for_each_entry(fcport, &ha->fcports, list) {
270 if (starget->id == fcport->os_target_id) {
271 port_id = fcport->d_id.b.domain << 16 |
272 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
273 break;
274 }
275 }
276
277 fc_starget_port_id(starget) = port_id;
278}
279
280static void
281qla2x00_get_rport_loss_tmo(struct fc_rport *rport)
282{
283 struct Scsi_Host *host = rport_to_shost(rport);
284 scsi_qla_host_t *ha = to_qla_host(host);
285
286 rport->dev_loss_tmo = ha->port_down_retry_count + 5;
287}
288
289static void
290qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
291{
292 struct Scsi_Host *host = rport_to_shost(rport);
293 scsi_qla_host_t *ha = to_qla_host(host);
294
295 if (timeout)
296 ha->port_down_retry_count = timeout;
297 else
298 ha->port_down_retry_count = 1;
299
300 rport->dev_loss_tmo = ha->port_down_retry_count + 5;
301}
302
303static struct fc_function_template qla2xxx_transport_functions = {
304
305 .show_host_node_name = 1,
306 .show_host_port_name = 1,
307 .get_host_port_id = qla2x00_get_host_port_id,
308 .show_host_port_id = 1,
309
310 .dd_fcrport_size = sizeof(struct fc_port *),
311
312 .get_starget_node_name = qla2x00_get_starget_node_name,
313 .show_starget_node_name = 1,
314 .get_starget_port_name = qla2x00_get_starget_port_name,
315 .show_starget_port_name = 1,
316 .get_starget_port_id = qla2x00_get_starget_port_id,
317 .show_starget_port_id = 1,
318
319 .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
320 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
321 .show_rport_dev_loss_tmo = 1,
322
323};
324
325struct scsi_transport_template *
326qla2x00_alloc_transport_tmpl(void)
327{
328 return (fc_attach_transport(&qla2xxx_transport_functions));
329}
330
331void
332qla2x00_init_host_attr(scsi_qla_host_t *ha)
333{
334 fc_host_node_name(ha->host) =
335 be64_to_cpu(*(uint64_t *)ha->init_cb->node_name);
336 fc_host_port_name(ha->host) =
337 be64_to_cpu(*(uint64_t *)ha->init_cb->port_name);
338}
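qla2x00_init_host_attr() treats the 8-byte wire-format (big-endian) node and port names as raw u64s and swaps them to host order. A standalone sketch using glibc's be64toh() in place of the kernel's be64_to_cpu(); the memcpy also sidesteps the alignment risk of the pointer cast used in the driver, and the WWN bytes are made up:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t wwn[8] = { 0x20, 0x00, 0x00, 0xe0, 0x8b, 0x01, 0x02, 0x03 };
	uint64_t raw;

	memcpy(&raw, wwn, sizeof(raw));	/* wire bytes into a u64 */
	printf("wwn = %016llx\n", (unsigned long long)be64toh(raw));
	return 0;
}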
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 0e8ebbc56e81..c4cd4ac414c4 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1065,11 +1065,6 @@ qla2x00_print_scsi_cmd(struct scsi_cmnd * cmd)
1065 printk(" sp flags=0x%x\n", sp->flags); 1065 printk(" sp flags=0x%x\n", sp->flags);
1066 printk(" r_start=0x%lx, u_start=0x%lx, f_start=0x%lx, state=%d\n", 1066 printk(" r_start=0x%lx, u_start=0x%lx, f_start=0x%lx, state=%d\n",
1067 sp->r_start, sp->u_start, sp->f_start, sp->state); 1067 sp->r_start, sp->u_start, sp->f_start, sp->state);
1068
1069 printk(" e_start= 0x%lx, ext_history=%d, fo retry=%d, loopid=%x, "
1070 "port path=%d\n", sp->e_start, sp->ext_history, sp->fo_retry_cnt,
1071 sp->lun_queue->fclun->fcport->loop_id,
1072 sp->lun_queue->fclun->fcport->cur_path);
1073} 1068}
1074 1069
1075#if defined(QL_DEBUG_ROUTINES) 1070#if defined(QL_DEBUG_ROUTINES)
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 36ae03173a5e..7d47b8d92047 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -219,7 +219,7 @@
219/* 219/*
220 * Timeout timer counts in seconds 220 * Timeout timer counts in seconds
221 */ 221 */
222#define PORT_RETRY_TIME 2 222#define PORT_RETRY_TIME 1
223#define LOOP_DOWN_TIMEOUT 60 223#define LOOP_DOWN_TIMEOUT 60
224#define LOOP_DOWN_TIME 255 /* 240 */ 224#define LOOP_DOWN_TIME 255 /* 240 */
225#define LOOP_DOWN_RESET (LOOP_DOWN_TIME - 30) 225#define LOOP_DOWN_RESET (LOOP_DOWN_TIME - 30)
@@ -241,6 +241,7 @@ typedef struct srb {
241 struct list_head list; 241 struct list_head list;
242 242
243 struct scsi_qla_host *ha; /* HA the SP is queued on */ 243 struct scsi_qla_host *ha; /* HA the SP is queued on */
244 struct fc_port *fcport;
244 245
245 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ 246 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
246 247
@@ -251,11 +252,6 @@ typedef struct srb {
251 /* Request state */ 252 /* Request state */
252 uint16_t state; 253 uint16_t state;
253 254
254 /* Target/LUN queue pointers. */
255 struct os_tgt *tgt_queue; /* ptr to visible ha's target */
256 struct os_lun *lun_queue; /* ptr to visible ha's lun */
257 struct fc_lun *fclun; /* FC LUN context pointer. */
258
259 /* Timing counts. */ 255 /* Timing counts. */
260 unsigned long e_start; /* Start of extend timeout */ 256 unsigned long e_start; /* Start of extend timeout */
261 unsigned long r_start; /* Start of request */ 257 unsigned long r_start; /* Start of request */
@@ -1603,73 +1599,6 @@ typedef struct {
1603} rpt_lun_cmd_rsp_t; 1599} rpt_lun_cmd_rsp_t;
1604 1600
1605/* 1601/*
1606 * SCSI Target Queue structure
1607 */
1608typedef struct os_tgt {
1609 struct os_lun *olun[MAX_LUNS]; /* LUN context pointer. */
1610 struct fc_port *fcport;
1611 unsigned long flags;
1612 uint8_t port_down_retry_count;
1613 uint32_t down_timer;
1614 struct scsi_qla_host *ha;
1615
1616 /* Persistent binding information */
1617 port_id_t d_id;
1618 uint8_t node_name[WWN_SIZE];
1619 uint8_t port_name[WWN_SIZE];
1620} os_tgt_t;
1621
1622/*
1623 * SCSI Target Queue flags
1624 */
1625#define TQF_ONLINE 0 /* Device online to OS. */
1626#define TQF_SUSPENDED 1
1627#define TQF_RETRY_CMDS 2
1628
1629/*
1630 * SCSI LUN Queue structure
1631 */
1632typedef struct os_lun {
1633 struct fc_lun *fclun; /* FC LUN context pointer. */
1634 spinlock_t q_lock; /* Lun Lock */
1635
1636 unsigned long q_flag;
1637#define LUN_MPIO_RESET_CNTS 1 /* Lun */
1638#define LUN_MPIO_BUSY 2 /* Lun is changing paths */
1639#define LUN_EXEC_DELAYED 7 /* Lun execution is delayed */
1640
1641 u_long q_timeout; /* total command timeouts */
1642 atomic_t q_timer; /* suspend timer */
1643 uint32_t q_count; /* current count */
1644 	uint32_t q_max; /* maximum count lun can be suspended */
1645 uint8_t q_state; /* lun State */
1646#define LUN_STATE_READY 1 /* lun is ready for i/o */
1647#define LUN_STATE_RUN 2 /* lun has a timer running */
1648#define LUN_STATE_WAIT 3 /* lun is suspended */
1649#define LUN_STATE_TIMEOUT 4 /* lun has timed out */
1650
1651 u_long io_cnt; /* total xfer count since boot */
1652 u_long out_cnt; /* total outstanding IO count */
1653 u_long w_cnt; /* total writes */
1654 u_long r_cnt; /* total reads */
1655 u_long avg_time; /* */
1656} os_lun_t;
1657
1658
1659/* LUN BitMask structure definition, array of 32bit words,
1660 * 1 bit per lun. When bit == 1, the lun is masked.
1661 * Most significant bit of mask[0] is lun 0, bit 24 is lun 7.
1662 */
1663typedef struct lun_bit_mask {
1664 	/* Must allocate at least enough bits to accommodate all LUNs */
1665#if ((MAX_FIBRE_LUNS & 0x7) == 0)
1666 uint8_t mask[MAX_FIBRE_LUNS >> 3];
1667#else
1668 uint8_t mask[(MAX_FIBRE_LUNS + 8) >> 3];
1669#endif
1670} lun_bit_mask_t;
1671
1672/*
1673 * Fibre channel port type. 1602 * Fibre channel port type.
1674 */ 1603 */
1675 typedef enum { 1604 typedef enum {
@@ -1686,8 +1615,6 @@ typedef struct lun_bit_mask {
1686 */ 1615 */
1687typedef struct fc_port { 1616typedef struct fc_port {
1688 struct list_head list; 1617 struct list_head list;
1689 struct list_head fcluns;
1690
1691 struct scsi_qla_host *ha; 1618 struct scsi_qla_host *ha;
1692 struct scsi_qla_host *vis_ha; /* only used when suspending lun */ 1619 struct scsi_qla_host *vis_ha; /* only used when suspending lun */
1693 1620
@@ -1702,8 +1629,7 @@ typedef struct fc_port {
1702 atomic_t state; 1629 atomic_t state;
1703 uint32_t flags; 1630 uint32_t flags;
1704 1631
1705 os_tgt_t *tgt_queue; 1632 unsigned int os_target_id;
1706 uint16_t os_target_id;
1707 1633
1708 uint16_t iodesc_idx_sent; 1634 uint16_t iodesc_idx_sent;
1709 1635
@@ -1717,7 +1643,7 @@ typedef struct fc_port {
1717 uint8_t mp_byte; /* multi-path byte (not used) */ 1643 uint8_t mp_byte; /* multi-path byte (not used) */
1718 uint8_t cur_path; /* current path id */ 1644 uint8_t cur_path; /* current path id */
1719 1645
1720 lun_bit_mask_t lun_mask; 1646 struct fc_rport *rport;
1721} fc_port_t; 1647} fc_port_t;
1722 1648
1723/* 1649/*
@@ -1764,25 +1690,6 @@ typedef struct fc_port {
1764#define FC_NO_LOOP_ID 0x1000 1690#define FC_NO_LOOP_ID 0x1000
1765 1691
1766/* 1692/*
1767 * Fibre channel LUN structure.
1768 */
1769typedef struct fc_lun {
1770 struct list_head list;
1771
1772 fc_port_t *fcport;
1773 fc_port_t *o_fcport;
1774 uint16_t lun;
1775 atomic_t state;
1776 uint8_t device_type;
1777
1778 uint8_t max_path_retries;
1779 uint32_t flags;
1780} fc_lun_t;
1781
1782#define FLF_VISIBLE_LUN BIT_0
1783#define FLF_ACTIVE_LUN BIT_1
1784
1785/*
1786 * FC-CT interface 1693 * FC-CT interface
1787 * 1694 *
1788 * NOTE: All structures are big-endian in form. 1695 * NOTE: All structures are big-endian in form.
@@ -2175,27 +2082,6 @@ typedef struct scsi_qla_host {
2175 uint32_t current_outstanding_cmd; 2082 uint32_t current_outstanding_cmd;
2176 srb_t *status_srb; /* Status continuation entry. */ 2083 srb_t *status_srb; /* Status continuation entry. */
2177 2084
2178 /*
2179 * Need to hold the list_lock with irq's disabled in order to access
2180 * the following list.
2181 *
2182 * This list_lock is of lower priority than the host_lock.
2183 */
2184 spinlock_t list_lock ____cacheline_aligned;
2185 /* lock to guard lists which
2186 * hold srb_t's */
2187 struct list_head retry_queue; /* watchdog queue */
2188 struct list_head done_queue; /* job on done queue */
2189 struct list_head failover_queue; /* failover list link. */
2190 struct list_head scsi_retry_queue; /* SCSI retry queue */
2191 struct list_head pending_queue; /* SCSI command pending queue */
2192
2193 unsigned long done_q_cnt;
2194 unsigned long pending_in_q;
2195 uint32_t retry_q_cnt;
2196 uint32_t scsi_retry_q_cnt;
2197 uint32_t failover_cnt;
2198
2199 unsigned long last_irq_cpu; /* cpu where we got our last irq */ 2085 unsigned long last_irq_cpu; /* cpu where we got our last irq */
2200 2086
2201 uint16_t revision; 2087 uint16_t revision;
@@ -2273,9 +2159,6 @@ typedef struct scsi_qla_host {
2273 struct io_descriptor io_descriptors[MAX_IO_DESCRIPTORS]; 2159 struct io_descriptor io_descriptors[MAX_IO_DESCRIPTORS];
2274 uint16_t iodesc_signature; 2160 uint16_t iodesc_signature;
2275 2161
2276 /* OS target queue pointers. */
2277 os_tgt_t *otgt[MAX_FIBRE_DEVICES];
2278
2279 /* RSCN queue. */ 2162 /* RSCN queue. */
2280 uint32_t rscn_queue[MAX_RSCN_COUNT]; 2163 uint32_t rscn_queue[MAX_RSCN_COUNT];
2281 uint8_t rscn_in_ptr; 2164 uint8_t rscn_in_ptr;
@@ -2420,8 +2303,6 @@ typedef struct scsi_qla_host {
2420#define LOOP_RDY(ha) (!LOOP_NOT_READY(ha)) 2303#define LOOP_RDY(ha) (!LOOP_NOT_READY(ha))
2421 2304
2422#define TGT_Q(ha, t) (ha->otgt[t]) 2305#define TGT_Q(ha, t) (ha->otgt[t])
2423#define LUN_Q(ha, t, l) (TGT_Q(ha, t)->olun[l])
2424#define GET_LU_Q(ha, t, l) ((TGT_Q(ha,t) != NULL)? TGT_Q(ha, t)->olun[l] : NULL)
2425 2306
2426#define to_qla_host(x) ((scsi_qla_host_t *) (x)->hostdata) 2307#define to_qla_host(x) ((scsi_qla_host_t *) (x)->hostdata)
2427 2308
@@ -2479,7 +2360,6 @@ struct _qla2x00stats {
2479#include "qla_gbl.h" 2360#include "qla_gbl.h"
2480#include "qla_dbg.h" 2361#include "qla_dbg.h"
2481#include "qla_inline.h" 2362#include "qla_inline.h"
2482#include "qla_listops.h"
2483 2363
2484/* 2364/*
2485* String arrays 2365* String arrays
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 5adf2af7ba64..e4bfe4d5bbe4 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -24,6 +24,7 @@
24#define __QLA_GBL_H 24#define __QLA_GBL_H
25 25
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <scsi/scsi_transport.h>
27 28
28extern void qla2x00_remove_one(struct pci_dev *); 29extern void qla2x00_remove_one(struct pci_dev *);
29extern int qla2x00_probe_one(struct pci_dev *, struct qla_board_info *); 30extern int qla2x00_probe_one(struct pci_dev *, struct qla_board_info *);
@@ -44,10 +45,10 @@ extern void qla2x00_restart_queues(scsi_qla_host_t *, uint8_t);
44 45
45extern void qla2x00_rescan_fcports(scsi_qla_host_t *); 46extern void qla2x00_rescan_fcports(scsi_qla_host_t *);
46 47
47extern void qla2x00_tgt_free(scsi_qla_host_t *ha, uint16_t t);
48
49extern int qla2x00_abort_isp(scsi_qla_host_t *); 48extern int qla2x00_abort_isp(scsi_qla_host_t *);
50 49
50extern void qla2x00_reg_remote_port(scsi_qla_host_t *, fc_port_t *);
51
51/* 52/*
52 * Global Data in qla_os.c source file. 53 * Global Data in qla_os.c source file.
53 */ 54 */
@@ -74,25 +75,15 @@ extern int ql2xsuspendcount;
74#if defined(MODULE) 75#if defined(MODULE)
75extern char *ql2xopts; 76extern char *ql2xopts;
76#endif 77#endif
78extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *);
77 79
78extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); 80extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
79 81
80extern void qla2x00_cmd_timeout(srb_t *); 82extern void qla2x00_cmd_timeout(srb_t *);
81 83
82extern int __qla2x00_suspend_lun(scsi_qla_host_t *, os_lun_t *, int, int, int);
83
84extern void qla2x00_done(scsi_qla_host_t *);
85extern void qla2x00_next(scsi_qla_host_t *);
86extern void qla2x00_flush_failover_q(scsi_qla_host_t *, os_lun_t *);
87extern void qla2x00_reset_lun_fo_counts(scsi_qla_host_t *, os_lun_t *);
88
89extern void qla2x00_extend_timeout(struct scsi_cmnd *, int);
90
91extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int); 84extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int);
92extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *); 85extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *);
93 86
94extern void qla2x00_abort_queues(scsi_qla_host_t *, uint8_t);
95
96extern void qla2x00_blink_led(scsi_qla_host_t *); 87extern void qla2x00_blink_led(scsi_qla_host_t *);
97 88
98extern int qla2x00_down_timeout(struct semaphore *, unsigned long); 89extern int qla2x00_down_timeout(struct semaphore *, unsigned long);
@@ -150,7 +141,7 @@ qla2x00_abort_target(fc_port_t *fcport);
150#endif 141#endif
151 142
152extern int 143extern int
153qla2x00_target_reset(scsi_qla_host_t *, uint16_t, uint16_t); 144qla2x00_target_reset(scsi_qla_host_t *, struct fc_port *);
154 145
155extern int 146extern int
156qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, 147qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
@@ -254,4 +245,13 @@ extern void qla2x00_cancel_io_descriptors(scsi_qla_host_t *);
254#define qla2x00_alloc_ioctl_mem(ha) (0) 245#define qla2x00_alloc_ioctl_mem(ha) (0)
255#define qla2x00_free_ioctl_mem(ha) do { } while (0) 246#define qla2x00_free_ioctl_mem(ha) do { } while (0)
256 247
248/*
249 * Global Function Prototypes in qla_attr.c source file.
250 */
251extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
252extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
253extern struct scsi_transport_template *qla2x00_alloc_transport_tmpl(void);
254extern void qla2x00_init_host_attr(scsi_qla_host_t *);
255extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
256extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
257#endif /* _QLA_GBL_H */ 257#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1ab5d92c3868..0387005fcb6d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -19,6 +19,7 @@
19#include "qla_def.h" 19#include "qla_def.h"
20 20
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <scsi/scsi_transport_fc.h>
22 23
23#include "qla_devtbl.h" 24#include "qla_devtbl.h"
24 25
@@ -44,34 +45,17 @@ static int qla2x00_init_rings(scsi_qla_host_t *);
44static int qla2x00_fw_ready(scsi_qla_host_t *); 45static int qla2x00_fw_ready(scsi_qla_host_t *);
45static int qla2x00_configure_hba(scsi_qla_host_t *); 46static int qla2x00_configure_hba(scsi_qla_host_t *);
46static int qla2x00_nvram_config(scsi_qla_host_t *); 47static int qla2x00_nvram_config(scsi_qla_host_t *);
47static void qla2x00_init_tgt_map(scsi_qla_host_t *);
48static int qla2x00_configure_loop(scsi_qla_host_t *); 48static int qla2x00_configure_loop(scsi_qla_host_t *);
49static int qla2x00_configure_local_loop(scsi_qla_host_t *); 49static int qla2x00_configure_local_loop(scsi_qla_host_t *);
50static void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *); 50static void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
51static void qla2x00_lun_discovery(scsi_qla_host_t *, fc_port_t *);
52static int qla2x00_rpt_lun_discovery(scsi_qla_host_t *, fc_port_t *,
53 inq_cmd_rsp_t *, dma_addr_t);
54static int qla2x00_report_lun(scsi_qla_host_t *, fc_port_t *);
55static fc_lun_t *qla2x00_cfg_lun(scsi_qla_host_t *, fc_port_t *, uint16_t,
56 inq_cmd_rsp_t *, dma_addr_t);
57static fc_lun_t * qla2x00_add_lun(fc_port_t *, uint16_t);
58static int qla2x00_inquiry(scsi_qla_host_t *, fc_port_t *, uint16_t,
59 inq_cmd_rsp_t *, dma_addr_t);
60static int qla2x00_configure_fabric(scsi_qla_host_t *); 51static int qla2x00_configure_fabric(scsi_qla_host_t *);
61static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *); 52static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
62static int qla2x00_device_resync(scsi_qla_host_t *); 53static int qla2x00_device_resync(scsi_qla_host_t *);
63static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *, 54static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
64 uint16_t *); 55 uint16_t *);
65static void qla2x00_config_os(scsi_qla_host_t *ha);
66static uint16_t qla2x00_fcport_bind(scsi_qla_host_t *ha, fc_port_t *fcport);
67static os_lun_t * qla2x00_fclun_bind(scsi_qla_host_t *, fc_port_t *,
68 fc_lun_t *);
69static void qla2x00_lun_free(scsi_qla_host_t *, uint16_t, uint16_t);
70 56
71static int qla2x00_restart_isp(scsi_qla_host_t *); 57static int qla2x00_restart_isp(scsi_qla_host_t *);
72static void qla2x00_reset_adapter(scsi_qla_host_t *); 58static void qla2x00_reset_adapter(scsi_qla_host_t *);
73static os_tgt_t *qla2x00_tgt_alloc(scsi_qla_host_t *, uint16_t);
74static os_lun_t *qla2x00_lun_alloc(scsi_qla_host_t *, uint16_t, uint16_t);
75 59
76/****************************************************************************/ 60/****************************************************************************/
77/* QLogic ISP2x00 Hardware Support Functions. */ 61/* QLogic ISP2x00 Hardware Support Functions. */
@@ -119,9 +103,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
119 103
120 qla2x00_reset_chip(ha); 104 qla2x00_reset_chip(ha);
121 105
122 /* Initialize target map database. */
123 qla2x00_init_tgt_map(ha);
124
125 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 106 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
126 qla2x00_nvram_config(ha); 107 qla2x00_nvram_config(ha);
127 108
@@ -1529,25 +1510,6 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1529 return (rval); 1510 return (rval);
1530} 1511}
1531 1512
1532/*
1533* qla2x00_init_tgt_map
1534* Initializes target map.
1535*
1536* Input:
1537* ha = adapter block pointer.
1538*
1539* Output:
1540* TGT_Q initialized
1541*/
1542static void
1543qla2x00_init_tgt_map(scsi_qla_host_t *ha)
1544{
1545 uint32_t t;
1546
1547 for (t = 0; t < MAX_TARGETS; t++)
1548 TGT_Q(ha, t) = (os_tgt_t *)NULL;
1549}
1550
1551/** 1513/**
1552 * qla2x00_alloc_fcport() - Allocate a generic fcport. 1514 * qla2x00_alloc_fcport() - Allocate a generic fcport.
1553 * @ha: HA context 1515 * @ha: HA context
@@ -1572,7 +1534,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, int flags)
1572 fcport->iodesc_idx_sent = IODESC_INVALID_INDEX; 1534 fcport->iodesc_idx_sent = IODESC_INVALID_INDEX;
1573 atomic_set(&fcport->state, FCS_UNCONFIGURED); 1535 atomic_set(&fcport->state, FCS_UNCONFIGURED);
1574 fcport->flags = FCF_RLC_SUPPORT; 1536 fcport->flags = FCF_RLC_SUPPORT;
1575 INIT_LIST_HEAD(&fcport->fcluns);
1576 1537
1577 return (fcport); 1538 return (fcport);
1578} 1539}
@@ -1662,7 +1623,6 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
1662 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 1623 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
1663 rval = QLA_FUNCTION_FAILED; 1624 rval = QLA_FUNCTION_FAILED;
1664 } else { 1625 } else {
1665 qla2x00_config_os(ha);
1666 atomic_set(&ha->loop_state, LOOP_READY); 1626 atomic_set(&ha->loop_state, LOOP_READY);
1667 1627
1668 DEBUG(printk("scsi(%ld): LOOP READY\n", ha->host_no)); 1628 DEBUG(printk("scsi(%ld): LOOP READY\n", ha->host_no));
@@ -1907,8 +1867,11 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
1907 if (fcport->flags & FCF_TAPE_PRESENT) { 1867 if (fcport->flags & FCF_TAPE_PRESENT) {
1908 spin_lock_irqsave(&ha->hardware_lock, flags); 1868 spin_lock_irqsave(&ha->hardware_lock, flags);
1909 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { 1869 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1870 fc_port_t *sfcp;
1871
1910 if ((sp = ha->outstanding_cmds[index]) != 0) { 1872 if ((sp = ha->outstanding_cmds[index]) != 0) {
1911 if (sp->fclun->fcport == fcport) { 1873 sfcp = sp->fcport;
1874 if (sfcp == fcport) {
1912 atomic_set(&fcport->state, FCS_ONLINE); 1875 atomic_set(&fcport->state, FCS_ONLINE);
1913 spin_unlock_irqrestore( 1876 spin_unlock_irqrestore(
1914 &ha->hardware_lock, flags); 1877 &ha->hardware_lock, flags);
@@ -1919,423 +1882,48 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
1919 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1882 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1920 } 1883 }
1921 1884
1922 /* Do LUN discovery. */
1923 if (fcport->port_type == FCT_INITIATOR || 1885 if (fcport->port_type == FCT_INITIATOR ||
1924 fcport->port_type == FCT_BROADCAST) { 1886 fcport->port_type == FCT_BROADCAST)
1925 fcport->device_type = TYPE_PROCESSOR; 1887 fcport->device_type = TYPE_PROCESSOR;
1926 } else {
1927 qla2x00_lun_discovery(ha, fcport);
1928 }
1929 atomic_set(&fcport->state, FCS_ONLINE);
1930}
1931 1888
1932/*
1933 * qla2x00_lun_discovery
1934 * Issue SCSI inquiry command for LUN discovery.
1935 *
1936 * Input:
1937 * ha: adapter state pointer.
1938 * fcport: FC port structure pointer.
1939 *
1940 * Context:
1941 * Kernel context.
1942 */
1943static void
1944qla2x00_lun_discovery(scsi_qla_host_t *ha, fc_port_t *fcport)
1945{
1946 inq_cmd_rsp_t *inq;
1947 dma_addr_t inq_dma;
1948 uint16_t lun;
1949
1950 inq = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &inq_dma);
1951 if (inq == NULL) {
1952 qla_printk(KERN_WARNING, ha,
1953 "Memory Allocation failed - INQ\n");
1954 return;
1955 }
1956
1957 /* Always add a fc_lun_t structure for lun 0 -- mid-layer requirement */
1958 qla2x00_add_lun(fcport, 0);
1959
1960 /* If report LUN works, exit. */
1961 if (qla2x00_rpt_lun_discovery(ha, fcport, inq, inq_dma) !=
1962 QLA_SUCCESS) {
1963 for (lun = 0; lun < ha->max_probe_luns; lun++) {
1964 /* Configure LUN. */
1965 qla2x00_cfg_lun(ha, fcport, lun, inq, inq_dma);
1966 }
1967 }
1968
1969 dma_pool_free(ha->s_dma_pool, inq, inq_dma);
1970}
1971
1972/*
1973 * qla2x00_rpt_lun_discovery
1974 * Issue SCSI report LUN command for LUN discovery.
1975 *
1976 * Input:
1977 * ha: adapter state pointer.
1978 * fcport: FC port structure pointer.
1979 *
1980 * Returns:
1981 * qla2x00 local function return status code.
1982 *
1983 * Context:
1984 * Kernel context.
1985 */
1986static int
1987qla2x00_rpt_lun_discovery(scsi_qla_host_t *ha, fc_port_t *fcport,
1988 inq_cmd_rsp_t *inq, dma_addr_t inq_dma)
1989{
1990 int rval;
1991 uint32_t len, cnt;
1992 uint16_t lun;
1993
1994 /* Assume a failed status */
1995 rval = QLA_FUNCTION_FAILED;
1996
1997 /* No point in continuing if the device doesn't support RLC */
1998 if ((fcport->flags & FCF_RLC_SUPPORT) == 0)
1999 return (rval);
2000
2001 rval = qla2x00_report_lun(ha, fcport);
2002 if (rval != QLA_SUCCESS)
2003 return (rval);
2004
2005 /* Configure LUN list. */
2006 len = be32_to_cpu(ha->rlc_rsp->list.hdr.len);
2007 len /= 8;
2008 for (cnt = 0; cnt < len; cnt++) {
2009 lun = CHAR_TO_SHORT(ha->rlc_rsp->list.lst[cnt].lsb,
2010 ha->rlc_rsp->list.lst[cnt].msb.b);
2011
2012 DEBUG3(printk("scsi(%ld): RLC lun = (%d)\n", ha->host_no, lun));
2013
2014 /* We only support 0 through MAX_LUNS-1 range */
2015 if (lun < MAX_LUNS) {
2016 qla2x00_cfg_lun(ha, fcport, lun, inq, inq_dma);
2017 }
2018 }
2019 atomic_set(&fcport->state, FCS_ONLINE); 1889 atomic_set(&fcport->state, FCS_ONLINE);
2020 1890
2021 return (rval); 1891 if (ha->flags.init_done)
2022} 1892 qla2x00_reg_remote_port(ha, fcport);
2023
2024/*
2025 * qla2x00_report_lun
2026 * Issue SCSI report LUN command.
2027 *
2028 * Input:
2029 * ha: adapter state pointer.
2030 * fcport: FC port structure pointer.
2031 *
2032 * Returns:
2033 * qla2x00 local function return status code.
2034 *
2035 * Context:
2036 * Kernel context.
2037 */
2038static int
2039qla2x00_report_lun(scsi_qla_host_t *ha, fc_port_t *fcport)
2040{
2041 int rval;
2042 uint16_t retries;
2043 uint16_t comp_status;
2044 uint16_t scsi_status;
2045 rpt_lun_cmd_rsp_t *rlc;
2046 dma_addr_t rlc_dma;
2047
2048 rval = QLA_FUNCTION_FAILED;
2049 rlc = ha->rlc_rsp;
2050 rlc_dma = ha->rlc_rsp_dma;
2051
2052 for (retries = 3; retries; retries--) {
2053 memset(rlc, 0, sizeof(rpt_lun_cmd_rsp_t));
2054 rlc->p.cmd.entry_type = COMMAND_A64_TYPE;
2055 rlc->p.cmd.entry_count = 1;
2056 SET_TARGET_ID(ha, rlc->p.cmd.target, fcport->loop_id);
2057 rlc->p.cmd.control_flags =
2058 __constant_cpu_to_le16(CF_READ | CF_SIMPLE_TAG);
2059 rlc->p.cmd.scsi_cdb[0] = REPORT_LUNS;
2060 rlc->p.cmd.scsi_cdb[8] = MSB(sizeof(rpt_lun_lst_t));
2061 rlc->p.cmd.scsi_cdb[9] = LSB(sizeof(rpt_lun_lst_t));
2062 rlc->p.cmd.dseg_count = __constant_cpu_to_le16(1);
2063 rlc->p.cmd.timeout = __constant_cpu_to_le16(10);
2064 rlc->p.cmd.byte_count =
2065 __constant_cpu_to_le32(sizeof(rpt_lun_lst_t));
2066 rlc->p.cmd.dseg_0_address[0] = cpu_to_le32(
2067 LSD(rlc_dma + sizeof(sts_entry_t)));
2068 rlc->p.cmd.dseg_0_address[1] = cpu_to_le32(
2069 MSD(rlc_dma + sizeof(sts_entry_t)));
2070 rlc->p.cmd.dseg_0_length =
2071 __constant_cpu_to_le32(sizeof(rpt_lun_lst_t));
2072
2073 rval = qla2x00_issue_iocb(ha, rlc, rlc_dma,
2074 sizeof(rpt_lun_cmd_rsp_t));
2075
2076 comp_status = le16_to_cpu(rlc->p.rsp.comp_status);
2077 scsi_status = le16_to_cpu(rlc->p.rsp.scsi_status);
2078
2079 if (rval != QLA_SUCCESS || comp_status != CS_COMPLETE ||
2080 scsi_status & SS_CHECK_CONDITION) {
2081
2082 /* Device underrun, treat as OK. */
2083 if (rval == QLA_SUCCESS &&
2084 comp_status == CS_DATA_UNDERRUN &&
2085 scsi_status & SS_RESIDUAL_UNDER) {
2086
2087 rval = QLA_SUCCESS;
2088 break;
2089 }
2090
2091 DEBUG(printk("scsi(%ld): RLC failed to issue iocb! "
2092 "fcport=[%04x/%p] rval=%x cs=%x ss=%x\n",
2093 ha->host_no, fcport->loop_id, fcport, rval,
2094 comp_status, scsi_status));
2095
2096 rval = QLA_FUNCTION_FAILED;
2097 if (scsi_status & SS_CHECK_CONDITION) {
2098 DEBUG2(printk("scsi(%ld): RLC "
2099 "SS_CHECK_CONDITION Sense Data "
2100 "%02x %02x %02x %02x %02x %02x %02x %02x\n",
2101 ha->host_no,
2102 rlc->p.rsp.req_sense_data[0],
2103 rlc->p.rsp.req_sense_data[1],
2104 rlc->p.rsp.req_sense_data[2],
2105 rlc->p.rsp.req_sense_data[3],
2106 rlc->p.rsp.req_sense_data[4],
2107 rlc->p.rsp.req_sense_data[5],
2108 rlc->p.rsp.req_sense_data[6],
2109 rlc->p.rsp.req_sense_data[7]));
2110 if (rlc->p.rsp.req_sense_data[2] ==
2111 ILLEGAL_REQUEST) {
2112 fcport->flags &= ~(FCF_RLC_SUPPORT);
2113 break;
2114 }
2115 }
2116 } else {
2117 break;
2118 }
2119 }
2120
2121 return (rval);
2122} 1893}
2123 1894
2124/* 1895void
2125 * qla2x00_cfg_lun 1896qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
2126 * Configures LUN into fcport LUN list.
2127 *
2128 * Input:
2129 * fcport: FC port structure pointer.
2130 * lun: LUN number.
2131 *
2132 * Context:
2133 * Kernel context.
2134 */
2135static fc_lun_t *
2136qla2x00_cfg_lun(scsi_qla_host_t *ha, fc_port_t *fcport, uint16_t lun,
2137 inq_cmd_rsp_t *inq, dma_addr_t inq_dma)
2138{
2139 fc_lun_t *fclun;
2140 uint8_t device_type;
2141
2142 /* Bypass LUNs that failed. */
2143 if (qla2x00_inquiry(ha, fcport, lun, inq, inq_dma) != QLA_SUCCESS) {
2144 DEBUG2(printk("scsi(%ld): Failed inquiry - loop id=0x%04x "
2145 "lun=%d\n", ha->host_no, fcport->loop_id, lun));
2146
2147 return (NULL);
2148 }
2149 device_type = (inq->inq[0] & 0x1f);
2150 switch (device_type) {
2151 case TYPE_DISK:
2152 case TYPE_PROCESSOR:
2153 case TYPE_WORM:
2154 case TYPE_ROM:
2155 case TYPE_SCANNER:
2156 case TYPE_MOD:
2157 case TYPE_MEDIUM_CHANGER:
2158 case TYPE_ENCLOSURE:
2159 case 0x20:
2160 case 0x0C:
2161 break;
2162 case TYPE_TAPE:
2163 fcport->flags |= FCF_TAPE_PRESENT;
2164 break;
2165 default:
2166 DEBUG2(printk("scsi(%ld): Unsupported lun type -- "
2167 "loop id=0x%04x lun=%d type=%x\n",
2168 ha->host_no, fcport->loop_id, lun, device_type));
2169 return (NULL);
2170 }
2171
2172 fcport->device_type = device_type;
2173 fclun = qla2x00_add_lun(fcport, lun);
2174
2175 if (fclun != NULL) {
2176 atomic_set(&fcport->state, FCS_ONLINE);
2177 }
2178
2179 return (fclun);
2180}
2181
2182/*
2183 * qla2x00_add_lun
2184 * Adds LUN to database
2185 *
2186 * Input:
2187 * fcport: FC port structure pointer.
2188 * lun: LUN number.
2189 *
2190 * Context:
2191 * Kernel context.
2192 */
2193static fc_lun_t *
2194qla2x00_add_lun(fc_port_t *fcport, uint16_t lun)
2195{ 1897{
2196 int found; 1898 struct fc_rport_identifiers rport_ids;
2197 fc_lun_t *fclun; 1899 struct fc_rport *rport;
2198 1900
2199 if (fcport == NULL) { 1901 if (fcport->rport) {
2200 DEBUG(printk("scsi: Unable to add lun to NULL port\n")); 1902 fc_remote_port_unblock(fcport->rport);
2201 return (NULL); 1903 return;
2202 }
2203
2204 /* Allocate LUN if not already allocated. */
2205 found = 0;
2206 list_for_each_entry(fclun, &fcport->fcluns, list) {
2207 if (fclun->lun == lun) {
2208 found++;
2209 break;
2210 }
2211 }
2212 if (found)
2213 return (NULL);
2214
2215 fclun = kmalloc(sizeof(fc_lun_t), GFP_ATOMIC);
2216 if (fclun == NULL) {
2217 printk(KERN_WARNING
2218 "%s(): Memory Allocation failed - FCLUN\n",
2219 __func__);
2220 return (NULL);
2221 } 1904 }
2222 1905
2223 /* Setup LUN structure. */ 1906 rport_ids.node_name = be64_to_cpu(*(uint64_t *)fcport->node_name);
2224 memset(fclun, 0, sizeof(fc_lun_t)); 1907 rport_ids.port_name = be64_to_cpu(*(uint64_t *)fcport->port_name);
2225 fclun->lun = lun; 1908 rport_ids.port_id = fcport->d_id.b.domain << 16 |
2226 fclun->fcport = fcport; 1909 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2227 fclun->o_fcport = fcport; 1910 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2228 fclun->device_type = fcport->device_type; 1911 if (fcport->port_type == FCT_INITIATOR)
2229 atomic_set(&fcport->state, FCS_UNCONFIGURED); 1912 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
2230 1913 if (fcport->port_type == FCT_TARGET)
2231 list_add_tail(&fclun->list, &fcport->fcluns); 1914 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
2232
2233 return (fclun);
2234}
2235
2236/*
2237 * qla2x00_inquiry
2238 * Issue SCSI inquiry command.
2239 *
2240 * Input:
2241 * ha = adapter block pointer.
2242 * fcport = FC port structure pointer.
2243 *
2244 * Return:
2245 * 0 - Success
2246 * BIT_0 - error
2247 *
2248 * Context:
2249 * Kernel context.
2250 */
2251static int
2252qla2x00_inquiry(scsi_qla_host_t *ha,
2253 fc_port_t *fcport, uint16_t lun, inq_cmd_rsp_t *inq, dma_addr_t inq_dma)
2254{
2255 int rval;
2256 uint16_t retries;
2257 uint16_t comp_status;
2258 uint16_t scsi_status;
2259
2260 rval = QLA_FUNCTION_FAILED;
2261 1915
2262 for (retries = 3; retries; retries--) { 1916 fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
2263 memset(inq, 0, sizeof(inq_cmd_rsp_t)); 1917 if (!rport)
2264 inq->p.cmd.entry_type = COMMAND_A64_TYPE; 1918 qla_printk(KERN_WARNING, ha,
2265 inq->p.cmd.entry_count = 1; 1919 "Unable to allocate fc remote port!\n");
2266 inq->p.cmd.lun = cpu_to_le16(lun);
2267 SET_TARGET_ID(ha, inq->p.cmd.target, fcport->loop_id);
2268 inq->p.cmd.control_flags =
2269 __constant_cpu_to_le16(CF_READ | CF_SIMPLE_TAG);
2270 inq->p.cmd.scsi_cdb[0] = INQUIRY;
2271 inq->p.cmd.scsi_cdb[4] = INQ_DATA_SIZE;
2272 inq->p.cmd.dseg_count = __constant_cpu_to_le16(1);
2273 inq->p.cmd.timeout = __constant_cpu_to_le16(10);
2274 inq->p.cmd.byte_count =
2275 __constant_cpu_to_le32(INQ_DATA_SIZE);
2276 inq->p.cmd.dseg_0_address[0] = cpu_to_le32(
2277 LSD(inq_dma + sizeof(sts_entry_t)));
2278 inq->p.cmd.dseg_0_address[1] = cpu_to_le32(
2279 MSD(inq_dma + sizeof(sts_entry_t)));
2280 inq->p.cmd.dseg_0_length =
2281 __constant_cpu_to_le32(INQ_DATA_SIZE);
2282
2283 DEBUG5(printk("scsi(%ld): Lun Inquiry - fcport=[%04x/%p],"
2284 " lun (%d)\n",
2285 ha->host_no, fcport->loop_id, fcport, lun));
2286
2287 rval = qla2x00_issue_iocb(ha, inq, inq_dma,
2288 sizeof(inq_cmd_rsp_t));
2289
2290 comp_status = le16_to_cpu(inq->p.rsp.comp_status);
2291 scsi_status = le16_to_cpu(inq->p.rsp.scsi_status);
2292
2293 DEBUG5(printk("scsi(%ld): lun (%d) inquiry - "
2294 "inq[0]= 0x%x, comp status 0x%x, scsi status 0x%x, "
2295 "rval=%d\n",
2296 ha->host_no, lun, inq->inq[0], comp_status, scsi_status,
2297 rval));
2298
2299 if (rval != QLA_SUCCESS || comp_status != CS_COMPLETE ||
2300 scsi_status & SS_CHECK_CONDITION) {
2301
2302 DEBUG(printk("scsi(%ld): INQ failed to issue iocb! "
2303 "fcport=[%04x/%p] rval=%x cs=%x ss=%x\n",
2304 ha->host_no, fcport->loop_id, fcport, rval,
2305 comp_status, scsi_status));
2306
2307 if (rval == QLA_SUCCESS)
2308 rval = QLA_FUNCTION_FAILED;
2309
2310 if (scsi_status & SS_CHECK_CONDITION) {
2311 DEBUG2(printk("scsi(%ld): INQ "
2312 "SS_CHECK_CONDITION Sense Data "
2313 "%02x %02x %02x %02x %02x %02x %02x %02x\n",
2314 ha->host_no,
2315 inq->p.rsp.req_sense_data[0],
2316 inq->p.rsp.req_sense_data[1],
2317 inq->p.rsp.req_sense_data[2],
2318 inq->p.rsp.req_sense_data[3],
2319 inq->p.rsp.req_sense_data[4],
2320 inq->p.rsp.req_sense_data[5],
2321 inq->p.rsp.req_sense_data[6],
2322 inq->p.rsp.req_sense_data[7]));
2323 }
2324 1920
2325 /* Device underrun drop LUN. */ 1921 if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
2326 if (comp_status == CS_DATA_UNDERRUN && 1922 fcport->os_target_id = rport->scsi_target_id;
2327 scsi_status & SS_RESIDUAL_UNDER) {
2328 break;
2329 }
2330 } else {
2331 break;
2332 }
2333 }
2334 1923
2335 return (rval); 1924 rport->dd_data = fcport;
2336} 1925}
2337 1926
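Note on qla2x00_reg_remote_port() above: the WWNs are stored as 8-byte big-endian arrays in the fcport, so the be64_to_cpu() casts produce the host-order u64 values the transport expects. A byte-wise equivalent that avoids the unaligned dereference (hypothetical helper, not part of this patch):

	static u64 qla2x00_wwn_to_u64(uint8_t *wwn)
	{
		return ((u64)wwn[0] << 56) | ((u64)wwn[1] << 48) |
		       ((u64)wwn[2] << 40) | ((u64)wwn[3] << 32) |
		       ((u64)wwn[4] << 24) | ((u64)wwn[5] << 16) |
		       ((u64)wwn[6] <<  8) | (u64)wwn[7];
	}

The rport->dd_data assignment is the back-pointer that lets transport callbacks recover the driver's fcport later.
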
2338
2339/* 1927/*
2340 * qla2x00_configure_fabric 1928 * qla2x00_configure_fabric
2341 * Setup SNS devices with loop ID's. 1929 * Setup SNS devices with loop ID's.
@@ -2486,12 +2074,12 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2486 break; 2074 break;
2487 } 2075 }
2488 2076
2489 /* Login and update database */
2490 qla2x00_fabric_dev_login(ha, fcport, &next_loopid);
2491
2492 /* Remove device from the new list and add it to DB */ 2077 /* Remove device from the new list and add it to DB */
2493 list_del(&fcport->list); 2078 list_del(&fcport->list);
2494 list_add_tail(&fcport->list, &ha->fcports); 2079 list_add_tail(&fcport->list, &ha->fcports);
2080
2081 /* Login and update database */
2082 qla2x00_fabric_dev_login(ha, fcport, &next_loopid);
2495 } 2083 }
2496 } while (0); 2084 } while (0);
2497 2085
@@ -2895,8 +2483,7 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2895 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2483 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2896 if (format != 3 || 2484 if (format != 3 ||
2897 fcport->port_type != FCT_INITIATOR) { 2485 fcport->port_type != FCT_INITIATOR) {
2898 atomic_set(&fcport->state, 2486 qla2x00_mark_device_lost(ha, fcport, 0);
2899 FCS_DEVICE_LOST);
2900 } 2487 }
2901 } 2488 }
2902 fcport->flags &= ~FCF_FARP_DONE; 2489 fcport->flags &= ~FCF_FARP_DONE;
@@ -3146,7 +2733,6 @@ qla2x00_loop_resync(scsi_qla_host_t *ha)
3146 wait_time && 2733 wait_time &&
3147 (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))); 2734 (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)));
3148 } 2735 }
3149 qla2x00_restart_queues(ha, 1);
3150 } 2736 }
3151 2737
3152 if (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) { 2738 if (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) {
@@ -3160,87 +2746,6 @@ qla2x00_loop_resync(scsi_qla_host_t *ha)
3160 return (rval); 2746 return (rval);
3161} 2747}
3162 2748
3163/*
3164 * qla2x00_restart_queues
3165 * Restart device queues.
3166 *
3167 * Input:
3168 * ha = adapter block pointer.
3169 *
3170 * Context:
3171 * Kernel/Interrupt context.
3172 */
3173void
3174qla2x00_restart_queues(scsi_qla_host_t *ha, uint8_t flush)
3175{
3176 srb_t *sp;
3177 int retry_q_cnt = 0;
3178 int pending_q_cnt = 0;
3179 struct list_head *list, *temp;
3180 unsigned long flags = 0;
3181
3182 clear_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags);
3183
3184 /* start pending queue */
3185 pending_q_cnt = ha->qthreads;
3186 if (flush) {
3187 spin_lock_irqsave(&ha->list_lock,flags);
3188 list_for_each_safe(list, temp, &ha->pending_queue) {
3189 sp = list_entry(list, srb_t, list);
3190
3191 if ((sp->flags & SRB_TAPE))
3192 continue;
3193
3194 /*
3195 * When time expire return request back to OS as BUSY
3196 */
3197 __del_from_pending_queue(ha, sp);
3198 sp->cmd->result = DID_BUS_BUSY << 16;
3199 sp->cmd->host_scribble = (unsigned char *)NULL;
3200 __add_to_done_queue(ha, sp);
3201 }
3202 spin_unlock_irqrestore(&ha->list_lock, flags);
3203 } else {
3204 if (!list_empty(&ha->pending_queue))
3205 qla2x00_next(ha);
3206 }
3207
3208 /*
3209 * Clear out our retry queue
3210 */
3211 if (flush) {
3212 spin_lock_irqsave(&ha->list_lock, flags);
3213 retry_q_cnt = ha->retry_q_cnt;
3214 list_for_each_safe(list, temp, &ha->retry_queue) {
3215 sp = list_entry(list, srb_t, list);
3216 /* when time expire return request back to OS as BUSY */
3217 __del_from_retry_queue(ha, sp);
3218 sp->cmd->result = DID_BUS_BUSY << 16;
3219 sp->cmd->host_scribble = (unsigned char *)NULL;
3220 __add_to_done_queue(ha, sp);
3221 }
3222 spin_unlock_irqrestore(&ha->list_lock, flags);
3223
3224 DEBUG2(printk("%s(%ld): callback %d commands.\n",
3225 __func__,
3226 ha->host_no,
3227 retry_q_cnt);)
3228 }
3229
3230 DEBUG2(printk("%s(%ld): active=%ld, retry=%d, pending=%d, "
3231 "done=%ld, scsi retry=%d commands.\n",
3232 __func__,
3233 ha->host_no,
3234 ha->actthreads,
3235 ha->retry_q_cnt,
3236 pending_q_cnt,
3237 ha->done_q_cnt,
3238 ha->scsi_retry_q_cnt);)
3239
3240 if (!list_empty(&ha->done_queue))
3241 qla2x00_done(ha);
3242}
3243
3244void 2749void
3245qla2x00_rescan_fcports(scsi_qla_host_t *ha) 2750qla2x00_rescan_fcports(scsi_qla_host_t *ha)
3246{ 2751{
@@ -3258,396 +2763,6 @@ qla2x00_rescan_fcports(scsi_qla_host_t *ha)
3258 rescan_done = 1; 2763 rescan_done = 1;
3259 } 2764 }
3260 qla2x00_probe_for_all_luns(ha); 2765 qla2x00_probe_for_all_luns(ha);
3261
3262 /* Update OS target and lun structures if necessary. */
3263 if (rescan_done) {
3264 qla2x00_config_os(ha);
3265 }
3266}
3267
3268
3269/*
3270 * qla2x00_config_os
3271 * Setup OS target and LUN structures.
3272 *
3273 * Input:
3274 * ha = adapter state pointer.
3275 *
3276 * Context:
3277 * Kernel context.
3278 */
3279static void
3280qla2x00_config_os(scsi_qla_host_t *ha)
3281{
3282 fc_port_t *fcport;
3283 fc_lun_t *fclun;
3284 os_tgt_t *tq;
3285 uint16_t tgt;
3286
3287
3288 for (tgt = 0; tgt < MAX_TARGETS; tgt++) {
3289 if ((tq = TGT_Q(ha, tgt)) == NULL)
3290 continue;
3291
3292 clear_bit(TQF_ONLINE, &tq->flags);
3293 }
3294
3295 list_for_each_entry(fcport, &ha->fcports, list) {
3296 if (atomic_read(&fcport->state) != FCS_ONLINE ||
3297 fcport->port_type == FCT_INITIATOR ||
3298 fcport->port_type == FCT_BROADCAST) {
3299 fcport->os_target_id = MAX_TARGETS;
3300 continue;
3301 }
3302
3303 if (fcport->flags & FCF_FO_MASKED) {
3304 continue;
3305 }
3306
3307 /* Bind FC port to OS target number. */
3308 if (qla2x00_fcport_bind(ha, fcport) == MAX_TARGETS) {
3309 continue;
3310 }
3311
3312 /* Bind FC LUN to OS LUN number. */
3313 list_for_each_entry(fclun, &fcport->fcluns, list) {
3314 qla2x00_fclun_bind(ha, fcport, fclun);
3315 }
3316 }
3317}
3318
3319/*
3320 * qla2x00_fcport_bind
3321 * Locates a target number for FC port.
3322 *
3323 * Input:
3324 * ha = adapter state pointer.
3325 * fcport = FC port structure pointer.
3326 *
3327 * Returns:
3328 * target number
3329 *
3330 * Context:
3331 * Kernel context.
3332 */
3333static uint16_t
3334qla2x00_fcport_bind(scsi_qla_host_t *ha, fc_port_t *fcport)
3335{
3336 int found;
3337 uint16_t tgt;
3338 os_tgt_t *tq;
3339
3340 /* Check for persistent binding. */
3341 for (tgt = 0; tgt < MAX_TARGETS; tgt++) {
3342 if ((tq = TGT_Q(ha, tgt)) == NULL)
3343 continue;
3344
3345 found = 0;
3346 switch (ha->binding_type) {
3347 case BIND_BY_PORT_ID:
3348 if (fcport->d_id.b24 == tq->d_id.b24) {
3349 memcpy(tq->node_name, fcport->node_name,
3350 WWN_SIZE);
3351 memcpy(tq->port_name, fcport->port_name,
3352 WWN_SIZE);
3353 found++;
3354 }
3355 break;
3356 case BIND_BY_PORT_NAME:
3357 if (memcmp(fcport->port_name, tq->port_name,
3358 WWN_SIZE) == 0) {
3359 /*
3360 * In case of persistent binding, update the
3361 * WWNN.
3362 */
3363 memcpy(tq->node_name, fcport->node_name,
3364 WWN_SIZE);
3365 found++;
3366 }
3367 break;
3368 }
3369 if (found)
3370 break;
3371 }
3372
3373 /* TODO: honor the ConfigRequired flag */
3374 if (tgt == MAX_TARGETS) {
3375 /* Check if targetID 0 available. */
3376 tgt = 0;
3377
3378 if (TGT_Q(ha, tgt) != NULL) {
3379 /* Locate first free target for device. */
3380 for (tgt = 0; tgt < MAX_TARGETS; tgt++) {
3381 if (TGT_Q(ha, tgt) == NULL) {
3382 break;
3383 }
3384 }
3385 }
3386 if (tgt != MAX_TARGETS) {
3387 if ((tq = qla2x00_tgt_alloc(ha, tgt)) != NULL) {
3388 memcpy(tq->node_name, fcport->node_name,
3389 WWN_SIZE);
3390 memcpy(tq->port_name, fcport->port_name,
3391 WWN_SIZE);
3392 tq->d_id.b24 = fcport->d_id.b24;
3393 }
3394 }
3395 }
3396
3397 /* Reset target numbers incase it changed. */
3398 fcport->os_target_id = tgt;
3399 if (tgt != MAX_TARGETS && tq != NULL) {
3400 DEBUG2(printk("scsi(%ld): Assigning target ID=%02d @ %p to "
3401 "loop id=0x%04x, port state=0x%x, port down retry=%d\n",
3402 ha->host_no, tgt, tq, fcport->loop_id,
3403 atomic_read(&fcport->state),
3404 atomic_read(&fcport->port_down_timer)));
3405
3406 fcport->tgt_queue = tq;
3407 fcport->flags |= FCF_PERSISTENT_BOUND;
3408 tq->fcport = fcport;
3409 set_bit(TQF_ONLINE, &tq->flags);
3410 tq->port_down_retry_count = ha->port_down_retry_count;
3411 }
3412
3413 if (tgt == MAX_TARGETS) {
3414 qla_printk(KERN_WARNING, ha,
3415 "Unable to bind fcport, loop_id=%x\n", fcport->loop_id);
3416 }
3417
3418 return (tgt);
3419}
3420
3421/*
3422 * qla2x00_fclun_bind
3423 * Binds all FC device LUNS to OS LUNS.
3424 *
3425 * Input:
3426 * ha: adapter state pointer.
3427 * fcport: FC port structure pointer.
3428 *
3429 * Returns:
3430 * target number
3431 *
3432 * Context:
3433 * Kernel context.
3434 */
3435static os_lun_t *
3436qla2x00_fclun_bind(scsi_qla_host_t *ha, fc_port_t *fcport, fc_lun_t *fclun)
3437{
3438 os_lun_t *lq;
3439 uint16_t tgt;
3440 uint16_t lun;
3441
3442 tgt = fcport->os_target_id;
3443 lun = fclun->lun;
3444
3445 /* Allocate LUNs */
3446 if (lun >= MAX_LUNS) {
3447 DEBUG2(printk("scsi(%ld): Unable to bind lun, invalid "
3448 "lun=(%x).\n", ha->host_no, lun));
3449 return (NULL);
3450 }
3451
3452 /* Always alloc LUN 0 so kernel will scan past LUN 0. */
3453 if (lun != 0 && (EXT_IS_LUN_BIT_SET(&(fcport->lun_mask), lun))) {
3454 return (NULL);
3455 }
3456
3457 if ((lq = qla2x00_lun_alloc(ha, tgt, lun)) == NULL) {
3458 qla_printk(KERN_WARNING, ha,
3459 "Unable to bind fclun, loop_id=%x lun=%x\n",
3460 fcport->loop_id, lun);
3461 return (NULL);
3462 }
3463
3464 lq->fclun = fclun;
3465
3466 return (lq);
3467}
3468
3469/*
3470 * qla2x00_tgt_alloc
3471 * Allocate and pre-initialize target queue.
3472 *
3473 * Input:
3474 * ha = adapter block pointer.
3475 * t = SCSI target number.
3476 *
3477 * Returns:
3478 * NULL = failure
3479 *
3480 * Context:
3481 * Kernel context.
3482 */
3483static os_tgt_t *
3484qla2x00_tgt_alloc(scsi_qla_host_t *ha, uint16_t tgt)
3485{
3486 os_tgt_t *tq;
3487
3488 /*
3489 * If SCSI addressing OK, allocate TGT queue and lock.
3490 */
3491 if (tgt >= MAX_TARGETS) {
3492 DEBUG2(printk("scsi(%ld): Unable to allocate target, invalid "
3493 "target number %d.\n", ha->host_no, tgt));
3494 return (NULL);
3495 }
3496
3497 tq = TGT_Q(ha, tgt);
3498 if (tq == NULL) {
3499 tq = kmalloc(sizeof(os_tgt_t), GFP_ATOMIC);
3500 if (tq != NULL) {
3501 DEBUG2(printk("scsi(%ld): Alloc Target %d @ %p\n",
3502 ha->host_no, tgt, tq));
3503
3504 memset(tq, 0, sizeof(os_tgt_t));
3505 tq->ha = ha;
3506
3507 TGT_Q(ha, tgt) = tq;
3508 }
3509 }
3510 if (tq != NULL) {
3511 tq->port_down_retry_count = ha->port_down_retry_count;
3512 } else {
3513 qla_printk(KERN_WARNING, ha,
3514 "Unable to allocate target.\n");
3515 ha->mem_err++;
3516 }
3517
3518 return (tq);
3519}
3520
3521/*
3522 * qla2x00_tgt_free
3523 * Frees target and LUN queues.
3524 *
3525 * Input:
3526 * ha = adapter block pointer.
3527 * t = SCSI target number.
3528 *
3529 * Context:
3530 * Kernel context.
3531 */
3532void
3533qla2x00_tgt_free(scsi_qla_host_t *ha, uint16_t tgt)
3534{
3535 os_tgt_t *tq;
3536 uint16_t lun;
3537
3538 /*
3539 * If SCSI addressing OK, allocate TGT queue and lock.
3540 */
3541 if (tgt >= MAX_TARGETS) {
3542 DEBUG2(printk("scsi(%ld): Unable to de-allocate target, "
3543 "invalid target number %d.\n", ha->host_no, tgt));
3544
3545 return;
3546 }
3547
3548 tq = TGT_Q(ha, tgt);
3549 if (tq != NULL) {
3550 TGT_Q(ha, tgt) = NULL;
3551
3552 /* Free LUN structures. */
3553 for (lun = 0; lun < MAX_LUNS; lun++)
3554 qla2x00_lun_free(ha, tgt, lun);
3555
3556 kfree(tq);
3557 }
3558
3559 return;
3560}
3561
3562/*
3563 * qla2x00_lun_alloc
3564 * Allocate and initialize LUN queue.
3565 *
3566 * Input:
3567 * ha = adapter block pointer.
3568 * t = SCSI target number.
3569 * l = LUN number.
3570 *
3571 * Returns:
3572 * NULL = failure
3573 *
3574 * Context:
3575 * Kernel context.
3576 */
3577static os_lun_t *
3578qla2x00_lun_alloc(scsi_qla_host_t *ha, uint16_t tgt, uint16_t lun)
3579{
3580 os_lun_t *lq;
3581
3582 /*
3583 * If SCSI addressing OK, allocate LUN queue.
3584 */
3585 if (tgt >= MAX_TARGETS || lun >= MAX_LUNS || TGT_Q(ha, tgt) == NULL) {
3586 DEBUG2(printk("scsi(%ld): Unable to allocate lun, invalid "
3587 "parameter.\n", ha->host_no));
3588
3589 return (NULL);
3590 }
3591
3592 lq = LUN_Q(ha, tgt, lun);
3593 if (lq == NULL) {
3594 lq = kmalloc(sizeof(os_lun_t), GFP_ATOMIC);
3595 if (lq != NULL) {
3596 DEBUG2(printk("scsi(%ld): Alloc Lun %d @ tgt %d.\n",
3597 ha->host_no, lun, tgt));
3598
3599 memset(lq, 0, sizeof(os_lun_t));
3600 LUN_Q(ha, tgt, lun) = lq;
3601
3602 /*
3603 * The following lun queue initialization code
3604 * must be duplicated in alloc_ioctl_mem function
3605 * for ioctl_lq.
3606 */
3607 lq->q_state = LUN_STATE_READY;
3608 spin_lock_init(&lq->q_lock);
3609 }
3610 }
3611
3612 if (lq == NULL) {
3613 qla_printk(KERN_WARNING, ha, "Unable to allocate lun.\n");
3614 }
3615
3616 return (lq);
3617}
3618
3619/*
3620 * qla2x00_lun_free
3621 * Frees LUN queue.
3622 *
3623 * Input:
3624 * ha = adapter block pointer.
3625 * t = SCSI target number.
3626 *
3627 * Context:
3628 * Kernel context.
3629 */
3630static void
3631qla2x00_lun_free(scsi_qla_host_t *ha, uint16_t tgt, uint16_t lun)
3632{
3633 os_lun_t *lq;
3634
3635 /*
3636 * If SCSI addressing OK, allocate TGT queue and lock.
3637 */
3638 if (tgt >= MAX_TARGETS || lun >= MAX_LUNS) {
3639 DEBUG2(printk("scsi(%ld): Unable to deallocate lun, invalid "
3640 "parameter.\n", ha->host_no));
3641
3642 return;
3643 }
3644
3645 if (TGT_Q(ha, tgt) != NULL && (lq = LUN_Q(ha, tgt, lun)) != NULL) {
3646 LUN_Q(ha, tgt, lun) = NULL;
3647 kfree(lq);
3648 }
3649
3650 return;
3651} 2766}
3652 2767
3653/* 2768/*
@@ -3697,26 +2812,10 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3697 ha->outstanding_cmds[cnt] = NULL; 2812 ha->outstanding_cmds[cnt] = NULL;
3698 if (ha->actthreads) 2813 if (ha->actthreads)
3699 ha->actthreads--; 2814 ha->actthreads--;
3700 sp->lun_queue->out_cnt--;
3701
3702 /*
3703 * Set the cmd host_byte status depending on
3704 * whether the scsi_error_handler is
3705 * active or not.
3706 */
3707 if (sp->flags & SRB_TAPE) {
3708 sp->cmd->result = DID_NO_CONNECT << 16;
3709 } else {
3710 if (ha->host->eh_active != EH_ACTIVE)
3711 sp->cmd->result =
3712 DID_BUS_BUSY << 16;
3713 else
3714 sp->cmd->result =
3715 DID_RESET << 16;
3716 }
3717 sp->flags = 0; 2815 sp->flags = 0;
2816 sp->cmd->result = DID_RESET << 16;
3718 sp->cmd->host_scribble = (unsigned char *)NULL; 2817 sp->cmd->host_scribble = (unsigned char *)NULL;
3719 add_to_done_queue(ha, sp); 2818 qla2x00_sp_compl(ha, sp);
3720 } 2819 }
3721 } 2820 }
3722 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2821 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3739,11 +2838,6 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3739 /* Enable ISP interrupts. */ 2838 /* Enable ISP interrupts. */
3740 qla2x00_enable_intrs(ha); 2839 qla2x00_enable_intrs(ha);
3741 2840
3742 /* v2.19.5b6 Return all commands */
3743 qla2x00_abort_queues(ha, 1);
3744
3745 /* Restart queues that may have been stopped. */
3746 qla2x00_restart_queues(ha, 1);
3747 ha->isp_abort_cnt = 0; 2841 ha->isp_abort_cnt = 0;
3748 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 2842 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
3749 } else { /* failed the ISP abort */ 2843 } else { /* failed the ISP abort */
@@ -3758,7 +2852,6 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3758 * completely. 2852 * completely.
3759 */ 2853 */
3760 qla2x00_reset_adapter(ha); 2854 qla2x00_reset_adapter(ha);
3761 qla2x00_abort_queues(ha, 0);
3762 ha->flags.online = 0; 2855 ha->flags.online = 0;
3763 clear_bit(ISP_ABORT_RETRY, 2856 clear_bit(ISP_ABORT_RETRY,
3764 &ha->dpc_flags); 2857 &ha->dpc_flags);
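Across this file the add_to_done_queue()/qla2x00_restart_queues() machinery is replaced by direct completion through qla2x00_sp_compl(), which lives in qla_os.c and is not shown in this diff. A rough sketch of the shape such a helper would take, assuming it now owns the DMA unmap that previously happened during done-queue processing:

	void
	qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
	{
		struct scsi_cmnd *cmd = sp->cmd;

		if (sp->flags & SRB_DMA_VALID) {
			if (cmd->use_sg)
				pci_unmap_sg(ha->pdev,
				    (struct scatterlist *)cmd->request_buffer,
				    cmd->use_sg, cmd->sc_data_direction);
			else if (cmd->request_bufflen)
				pci_unmap_single(ha->pdev, sp->dma_handle,
				    cmd->request_bufflen,
				    cmd->sc_data_direction);
			sp->flags &= ~SRB_DMA_VALID;
		}
		mempool_free(sp, ha->srb_mempool);
		cmd->scsi_done(cmd);
	}

Likewise, with the target/LUN binding code deleted above, target numbers now come from the transport (rport->scsi_target_id) and LUN enumeration is left to the SCSI midlayer's own REPORT LUNS/INQUIRY scan.
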
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 07c11330f9a3..6a05d1b8d48a 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -187,23 +187,6 @@ qla2x00_is_wwn_zero(uint8_t *wwn)
187 return (0); 187 return (0);
188} 188}
189 189
190static __inline__ uint8_t
191qla2x00_suspend_lun(scsi_qla_host_t *, os_lun_t *, int, int);
192static __inline__ uint8_t
193qla2x00_delay_lun(scsi_qla_host_t *, os_lun_t *, int);
194
195static __inline__ uint8_t
196qla2x00_suspend_lun(scsi_qla_host_t *ha, os_lun_t *lq, int time, int count)
197{
198 return (__qla2x00_suspend_lun(ha, lq, time, count, 0));
199}
200
201static __inline__ uint8_t
202qla2x00_delay_lun(scsi_qla_host_t *ha, os_lun_t *lq, int time)
203{
204 return (__qla2x00_suspend_lun(ha, lq, time, 1, 1));
205}
206
207static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *); 190static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *);
208/* 191/*
209 * This routine will wait for fabric devices for 192 * This routine will wait for fabric devices for
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index ec066074c722..af964bb3d870 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -216,18 +216,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
216 cur_seg++; 216 cur_seg++;
217 } 217 }
218 } else { 218 } else {
219 dma_addr_t req_dma; 219 *cur_dsd++ = cpu_to_le32(sp->dma_handle);
220 struct page *page;
221 unsigned long offset;
222
223 page = virt_to_page(cmd->request_buffer);
224 offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
225 req_dma = pci_map_page(ha->pdev, page, offset,
226 cmd->request_bufflen, cmd->sc_data_direction);
227
228 sp->dma_handle = req_dma;
229
230 *cur_dsd++ = cpu_to_le32(req_dma);
231 *cur_dsd++ = cpu_to_le32(cmd->request_bufflen); 220 *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
232 } 221 }
233} 222}
@@ -299,19 +288,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
299 cur_seg++; 288 cur_seg++;
300 } 289 }
301 } else { 290 } else {
302 dma_addr_t req_dma; 291 *cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
303 struct page *page; 292 *cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
304 unsigned long offset;
305
306 page = virt_to_page(cmd->request_buffer);
307 offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
308 req_dma = pci_map_page(ha->pdev, page, offset,
309 cmd->request_bufflen, cmd->sc_data_direction);
310
311 sp->dma_handle = req_dma;
312
313 *cur_dsd++ = cpu_to_le32(LSD(req_dma));
314 *cur_dsd++ = cpu_to_le32(MSD(req_dma));
315 *cur_dsd++ = cpu_to_le32(cmd->request_bufflen); 293 *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
316 } 294 }
317} 295}
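Both IOCB builders now consume the premapped sp->dma_handle instead of mapping a page inside the builder. The LSD()/MSD() macros split a 64-bit dma_addr_t into the low and high 32-bit words the ISP expects; roughly, as defined in qla_def.h:

	#define LSD(x)	((uint32_t)((uint64_t)(x) & 0xffffffff))
	#define MSD(x)	((uint32_t)(((uint64_t)(x)) >> 32))
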
@@ -328,13 +306,11 @@ qla2x00_start_scsi(srb_t *sp)
328 int ret; 306 int ret;
329 unsigned long flags; 307 unsigned long flags;
330 scsi_qla_host_t *ha; 308 scsi_qla_host_t *ha;
331 fc_lun_t *fclun;
332 struct scsi_cmnd *cmd; 309 struct scsi_cmnd *cmd;
333 uint32_t *clr_ptr; 310 uint32_t *clr_ptr;
334 uint32_t index; 311 uint32_t index;
335 uint32_t handle; 312 uint32_t handle;
336 cmd_entry_t *cmd_pkt; 313 cmd_entry_t *cmd_pkt;
337 uint32_t timeout;
338 struct scatterlist *sg; 314 struct scatterlist *sg;
339 uint16_t cnt; 315 uint16_t cnt;
340 uint16_t req_cnt; 316 uint16_t req_cnt;
@@ -344,10 +320,11 @@ qla2x00_start_scsi(srb_t *sp)
344 320
345 /* Setup device pointers. */ 321 /* Setup device pointers. */
346 ret = 0; 322 ret = 0;
347 fclun = sp->lun_queue->fclun; 323 ha = sp->ha;
348 ha = fclun->fcport->ha;
349 reg = ha->iobase; 324 reg = ha->iobase;
350 cmd = sp->cmd; 325 cmd = sp->cmd;
326 /* So we know we haven't pci_map'ed anything yet */
327 tot_dsds = 0;
351 328
352 /* Send marker if required */ 329 /* Send marker if required */
353 if (ha->marker_needed != 0) { 330 if (ha->marker_needed != 0) {
@@ -372,8 +349,27 @@ qla2x00_start_scsi(srb_t *sp)
372 if (index == MAX_OUTSTANDING_COMMANDS) 349 if (index == MAX_OUTSTANDING_COMMANDS)
373 goto queuing_error; 350 goto queuing_error;
374 351
352 /* Map the sg table so we have an accurate count of sg entries needed */
353 if (cmd->use_sg) {
354 sg = (struct scatterlist *) cmd->request_buffer;
355 tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
356 cmd->sc_data_direction);
357 if (tot_dsds == 0)
358 goto queuing_error;
359 } else if (cmd->request_bufflen) {
360 dma_addr_t req_dma;
361
362 req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
363 cmd->request_bufflen, cmd->sc_data_direction);
364 if (dma_mapping_error(req_dma))
365 goto queuing_error;
366
367 sp->dma_handle = req_dma;
368 tot_dsds = 1;
369 }
370
375 /* Calculate the number of request entries needed. */ 371 /* Calculate the number of request entries needed. */
376 req_cnt = (ha->calc_request_entries)(cmd->request->nr_hw_segments); 372 req_cnt = (ha->calc_request_entries)(tot_dsds);
377 if (ha->req_q_cnt < (req_cnt + 2)) { 373 if (ha->req_q_cnt < (req_cnt + 2)) {
378 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg)); 374 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
379 if (ha->req_ring_index < cnt) 375 if (ha->req_ring_index < cnt)
@@ -385,19 +381,6 @@ qla2x00_start_scsi(srb_t *sp)
385 if (ha->req_q_cnt < (req_cnt + 2)) 381 if (ha->req_q_cnt < (req_cnt + 2))
386 goto queuing_error; 382 goto queuing_error;
387 383
388 /* Finally, we have enough space, now perform mappings. */
389 tot_dsds = 0;
390 if (cmd->use_sg) {
391 sg = (struct scatterlist *) cmd->request_buffer;
392 tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
393 cmd->sc_data_direction);
394 if (tot_dsds == 0)
395 goto queuing_error;
396 } else if (cmd->request_bufflen) {
397 tot_dsds++;
398 }
399 req_cnt = (ha->calc_request_entries)(tot_dsds);
400
401 /* Build command packet */ 384 /* Build command packet */
402 ha->current_outstanding_cmd = handle; 385 ha->current_outstanding_cmd = handle;
403 ha->outstanding_cmds[handle] = sp; 386 ha->outstanding_cmds[handle] = sp;
@@ -412,11 +395,9 @@ qla2x00_start_scsi(srb_t *sp)
412 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 395 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
413 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 396 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
414 397
415 /* Set target ID */ 398 /* Set target ID and LUN number*/
416 SET_TARGET_ID(ha, cmd_pkt->target, fclun->fcport->loop_id); 399 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
417 400 cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
418 /* Set LUN number*/
419 cmd_pkt->lun = cpu_to_le16(fclun->lun);
420 401
421 /* Update tagged queuing modifier */ 402 /* Update tagged queuing modifier */
422 cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG); 403 cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
@@ -433,18 +414,6 @@ qla2x00_start_scsi(srb_t *sp)
433 } 414 }
434 } 415 }
435 416
436 /*
437 * Allocate at least 5 (+ QLA_CMD_TIMER_DELTA) seconds for RISC timeout.
438 */
439 timeout = (uint32_t)(cmd->timeout_per_command / HZ);
440 if (timeout > 65535)
441 cmd_pkt->timeout = __constant_cpu_to_le16(0);
442 else if (timeout > 25)
443 cmd_pkt->timeout = cpu_to_le16((uint16_t)timeout -
444 (5 + QLA_CMD_TIMER_DELTA));
445 else
446 cmd_pkt->timeout = cpu_to_le16((uint16_t)timeout);
447
448 /* Load SCSI command packet. */ 417 /* Load SCSI command packet. */
449 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len); 418 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
450 cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen); 419 cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
@@ -466,7 +435,6 @@ qla2x00_start_scsi(srb_t *sp)
466 435
467 ha->actthreads++; 436 ha->actthreads++;
468 ha->total_ios++; 437 ha->total_ios++;
469 sp->lun_queue->out_cnt++;
470 sp->flags |= SRB_DMA_VALID; 438 sp->flags |= SRB_DMA_VALID;
471 sp->state = SRB_ACTIVE_STATE; 439 sp->state = SRB_ACTIVE_STATE;
472 sp->u_start = jiffies; 440 sp->u_start = jiffies;
@@ -479,6 +447,14 @@ qla2x00_start_scsi(srb_t *sp)
479 return (QLA_SUCCESS); 447 return (QLA_SUCCESS);
480 448
481queuing_error: 449queuing_error:
450 if (cmd->use_sg && tot_dsds) {
451 sg = (struct scatterlist *) cmd->request_buffer;
452 pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
453 cmd->sc_data_direction);
454 } else if (tot_dsds) {
455 pci_unmap_single(ha->pdev, sp->dma_handle,
456 cmd->request_bufflen, cmd->sc_data_direction);
457 }
482 spin_unlock_irqrestore(&ha->hardware_lock, flags); 458 spin_unlock_irqrestore(&ha->hardware_lock, flags);
483 459
484 return (QLA_FUNCTION_FAILED); 460 return (QLA_FUNCTION_FAILED);
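The reordering in qla2x00_start_scsi() is the usual map-then-reserve pattern: mapping happens first so tot_dsds reflects the real post-IOMMU segment count (pci_map_sg() may return fewer entries than it was given), the ring reservation is sized from that, and the queuing_error path unwinds whatever was mapped. That unwind could equally be factored out; a sketch with a hypothetical name:

	static void
	qla2x00_unmap_cmd(scsi_qla_host_t *ha, srb_t *sp, uint16_t tot_dsds)
	{
		struct scsi_cmnd *cmd = sp->cmd;

		if (tot_dsds == 0)
			return;		/* nothing was mapped */
		if (cmd->use_sg)
			pci_unmap_sg(ha->pdev,
			    (struct scatterlist *)cmd->request_buffer,
			    cmd->use_sg, cmd->sc_data_direction);
		else
			pci_unmap_single(ha->pdev, sp->dma_handle,
			    cmd->request_bufflen, cmd->sc_data_direction);
	}
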
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 603d4c683c6c..6792cfae56e2 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -27,8 +27,6 @@ static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
27static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *); 27static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
28static void qla2x00_ms_entry(scsi_qla_host_t *, ms_iocb_entry_t *); 28static void qla2x00_ms_entry(scsi_qla_host_t *, ms_iocb_entry_t *);
29 29
30static int qla2x00_check_sense(struct scsi_cmnd *cp, os_lun_t *);
31
32/** 30/**
33 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 31 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
34 * @irq: 32 * @irq:
@@ -93,7 +91,6 @@ qla2100_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
93 } 91 }
94 spin_unlock_irqrestore(&ha->hardware_lock, flags); 92 spin_unlock_irqrestore(&ha->hardware_lock, flags);
95 93
96 qla2x00_next(ha);
97 ha->last_irq_cpu = _smp_processor_id(); 94 ha->last_irq_cpu = _smp_processor_id();
98 ha->total_isr_cnt++; 95 ha->total_isr_cnt++;
99 96
@@ -107,9 +104,6 @@ qla2100_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
107 spin_unlock_irqrestore(&ha->mbx_reg_lock, flags); 104 spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
108 } 105 }
109 106
110 if (!list_empty(&ha->done_queue))
111 qla2x00_done(ha);
112
113 return (IRQ_HANDLED); 107 return (IRQ_HANDLED);
114} 108}
115 109
@@ -206,7 +200,6 @@ qla2300_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
206 } 200 }
207 spin_unlock_irqrestore(&ha->hardware_lock, flags); 201 spin_unlock_irqrestore(&ha->hardware_lock, flags);
208 202
209 qla2x00_next(ha);
210 ha->last_irq_cpu = _smp_processor_id(); 203 ha->last_irq_cpu = _smp_processor_id();
211 ha->total_isr_cnt++; 204 ha->total_isr_cnt++;
212 205
@@ -220,9 +213,6 @@ qla2300_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
220 spin_unlock_irqrestore(&ha->mbx_reg_lock, flags); 213 spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
221 } 214 }
222 215
223 if (!list_empty(&ha->done_queue))
224 qla2x00_done(ha);
225
226 return (IRQ_HANDLED); 216 return (IRQ_HANDLED);
227} 217}
228 218
@@ -707,14 +697,13 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
707 697
708 if (ha->actthreads) 698 if (ha->actthreads)
709 ha->actthreads--; 699 ha->actthreads--;
710 sp->lun_queue->out_cnt--;
711 CMD_COMPL_STATUS(sp->cmd) = 0L; 700 CMD_COMPL_STATUS(sp->cmd) = 0L;
712 CMD_SCSI_STATUS(sp->cmd) = 0L; 701 CMD_SCSI_STATUS(sp->cmd) = 0L;
713 702
714 /* Save ISP completion status */ 703 /* Save ISP completion status */
715 sp->cmd->result = DID_OK << 16; 704 sp->cmd->result = DID_OK << 16;
716 sp->fo_retry_cnt = 0; 705 sp->fo_retry_cnt = 0;
717 add_to_done_queue(ha, sp); 706 qla2x00_sp_compl(ha, sp);
718 } else { 707 } else {
719 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", 708 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
720 ha->host_no)); 709 ha->host_no));
@@ -828,11 +817,8 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
828static void 817static void
829qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt) 818qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
830{ 819{
831 int ret;
832 unsigned b, t, l; 820 unsigned b, t, l;
833 srb_t *sp; 821 srb_t *sp;
834 os_lun_t *lq;
835 os_tgt_t *tq;
836 fc_port_t *fcport; 822 fc_port_t *fcport;
837 struct scsi_cmnd *cp; 823 struct scsi_cmnd *cp;
838 uint16_t comp_status; 824 uint16_t comp_status;
@@ -882,21 +868,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
882 if (ha->actthreads) 868 if (ha->actthreads)
883 ha->actthreads--; 869 ha->actthreads--;
884 870
885 if (sp->lun_queue == NULL) {
886 DEBUG2(printk("scsi(%ld): Status Entry invalid lun pointer.\n",
887 ha->host_no));
888 qla_printk(KERN_WARNING, ha,
889 "Status Entry invalid lun pointer.\n");
890
891 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
892 if (ha->dpc_wait && !ha->dpc_active)
893 up(ha->dpc_wait);
894
895 return;
896 }
897
898 sp->lun_queue->out_cnt--;
899
900 comp_status = le16_to_cpu(pkt->comp_status); 871 comp_status = le16_to_cpu(pkt->comp_status);
901 /* Mask of reserved bits 12-15, before we examine the scsi status */ 872 /* Mask of reserved bits 12-15, before we examine the scsi status */
902 scsi_status = le16_to_cpu(pkt->scsi_status) & SS_MASK; 873 scsi_status = le16_to_cpu(pkt->scsi_status) & SS_MASK;
@@ -911,26 +882,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
911 t = cp->device->id; 882 t = cp->device->id;
912 l = cp->device->lun, 883 l = cp->device->lun,
913 884
914 tq = sp->tgt_queue; 885 fcport = sp->fcport;
915 lq = sp->lun_queue;
916
917 /*
918 * If loop is in transient state Report DID_BUS_BUSY
919 */
920 if ((comp_status != CS_COMPLETE || scsi_status != 0)) {
921 if (!(sp->flags & (SRB_IOCTL | SRB_TAPE)) &&
922 (atomic_read(&ha->loop_down_timer) ||
923 atomic_read(&ha->loop_state) != LOOP_READY)) {
924
925 DEBUG2(printk("scsi(%ld:%d:%d:%d): Loop Not Ready - "
926 "pid=%lx.\n",
927 ha->host_no, b, t, l, cp->serial_number));
928
929 qla2x00_extend_timeout(cp, EXTEND_CMD_TIMEOUT);
930 add_to_retry_queue(ha, sp);
931 return;
932 }
933 }
934 886
935 /* Check for any FCP transport errors. */ 887 /* Check for any FCP transport errors. */
936 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 888 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
@@ -945,7 +897,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
945 pkt->rsp_info[6], pkt->rsp_info[7])); 897 pkt->rsp_info[6], pkt->rsp_info[7]));
946 898
947 cp->result = DID_BUS_BUSY << 16; 899 cp->result = DID_BUS_BUSY << 16;
948 add_to_done_queue(ha, sp); 900 qla2x00_sp_compl(ha, sp);
949 return; 901 return;
950 } 902 }
951 } 903 }
@@ -964,11 +916,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
964 cp->resid = resid; 916 cp->resid = resid;
965 CMD_RESID_LEN(cp) = resid; 917 CMD_RESID_LEN(cp) = resid;
966 } 918 }
967 if (lscsi_status == SS_BUSY_CONDITION) {
968 cp->result = DID_BUS_BUSY << 16 | lscsi_status;
969 break;
970 }
971
972 cp->result = DID_OK << 16 | lscsi_status; 919 cp->result = DID_OK << 16 | lscsi_status;
973 920
974 if (lscsi_status != SS_CHECK_CONDITION) 921 if (lscsi_status != SS_CHECK_CONDITION)
@@ -1002,14 +949,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1002 if (sp->request_sense_length != 0) 949 if (sp->request_sense_length != 0)
1003 ha->status_srb = sp; 950 ha->status_srb = sp;
1004 951
1005 if (!(sp->flags & (SRB_IOCTL | SRB_TAPE)) &&
1006 qla2x00_check_sense(cp, lq) == QLA_SUCCESS) {
1007 /* Throw away status_cont if any */
1008 ha->status_srb = NULL;
1009 add_to_scsi_retry_queue(ha, sp);
1010 return;
1011 }
1012
1013 DEBUG5(printk("%s(): Check condition Sense data, " 952 DEBUG5(printk("%s(): Check condition Sense data, "
1014 "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n", 953 "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n",
1015 __func__, ha->host_no, b, t, l, cp, 954 __func__, ha->host_no, b, t, l, cp,
@@ -1035,12 +974,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1035 * Status. 974 * Status.
1036 */ 975 */
1037 if (lscsi_status != 0) { 976 if (lscsi_status != 0) {
1038 if (lscsi_status == SS_BUSY_CONDITION) {
1039 cp->result = DID_BUS_BUSY << 16 |
1040 lscsi_status;
1041 break;
1042 }
1043
1044 cp->result = DID_OK << 16 | lscsi_status; 977 cp->result = DID_OK << 16 | lscsi_status;
1045 978
1046 if (lscsi_status != SS_CHECK_CONDITION) 979 if (lscsi_status != SS_CHECK_CONDITION)
@@ -1072,12 +1005,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1072 if (sp->request_sense_length != 0) 1005 if (sp->request_sense_length != 0)
1073 ha->status_srb = sp; 1006 ha->status_srb = sp;
1074 1007
1075 if (!(sp->flags & (SRB_IOCTL | SRB_TAPE)) &&
1076 (qla2x00_check_sense(cp, lq) == QLA_SUCCESS)) {
1077 ha->status_srb = NULL;
1078 add_to_scsi_retry_queue(ha, sp);
1079 return;
1080 }
1081 DEBUG5(printk("%s(): Check condition Sense data, " 1008 DEBUG5(printk("%s(): Check condition Sense data, "
1082 "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n", 1009 "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n",
1083 __func__, ha->host_no, b, t, l, cp, 1010 __func__, ha->host_no, b, t, l, cp,
@@ -1149,30 +1076,15 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1149 * Target with DID_NO_CONNECT ELSE Queue the IOs in the 1076 * Target with DID_NO_CONNECT ELSE Queue the IOs in the
1150 * retry_queue. 1077 * retry_queue.
1151 */ 1078 */
1152 fcport = sp->fclun->fcport;
1153 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down " 1079 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1154 "pid=%ld, compl status=0x%x, port state=0x%x\n", 1080 "pid=%ld, compl status=0x%x, port state=0x%x\n",
1155 ha->host_no, t, l, cp->serial_number, comp_status, 1081 ha->host_no, t, l, cp->serial_number, comp_status,
1156 atomic_read(&fcport->state))); 1082 atomic_read(&fcport->state)));
1157 1083
1158 if ((sp->flags & (SRB_IOCTL | SRB_TAPE)) || 1084 cp->result = DID_BUS_BUSY << 16;
1159 atomic_read(&fcport->state) == FCS_DEVICE_DEAD) {
1160 cp->result = DID_NO_CONNECT << 16;
1161 if (atomic_read(&ha->loop_state) == LOOP_DOWN)
1162 sp->err_id = SRB_ERR_LOOP;
1163 else
1164 sp->err_id = SRB_ERR_PORT;
1165 add_to_done_queue(ha, sp);
1166 } else {
1167 qla2x00_extend_timeout(cp, EXTEND_CMD_TIMEOUT);
1168 add_to_retry_queue(ha, sp);
1169 }
1170
1171 if (atomic_read(&fcport->state) == FCS_ONLINE) { 1085 if (atomic_read(&fcport->state) == FCS_ONLINE) {
1172 qla2x00_mark_device_lost(ha, fcport, 1); 1086 qla2x00_mark_device_lost(ha, fcport, 1);
1173 } 1087 }
1174
1175 return;
1176 break; 1088 break;
1177 1089
1178 case CS_RESET: 1090 case CS_RESET:
@@ -1180,13 +1092,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1180 "scsi(%ld): RESET status detected 0x%x-0x%x.\n", 1092 "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1181 ha->host_no, comp_status, scsi_status)); 1093 ha->host_no, comp_status, scsi_status));
1182 1094
1183 if (sp->flags & (SRB_IOCTL | SRB_TAPE)) { 1095 cp->result = DID_RESET << 16;
1184 cp->result = DID_RESET << 16;
1185 } else {
1186 qla2x00_extend_timeout(cp, EXTEND_CMD_TIMEOUT);
1187 add_to_retry_queue(ha, sp);
1188 return;
1189 }
1190 break; 1096 break;
1191 1097
1192 case CS_ABORTED: 1098 case CS_ABORTED:
@@ -1210,8 +1116,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1210 1116
1211 cp->result = DID_BUS_BUSY << 16; 1117 cp->result = DID_BUS_BUSY << 16;
1212 1118
1213 fcport = lq->fclun->fcport;
1214
1215 /* Check to see if logout occurred */ 1119 /* Check to see if logout occurred */
1216 if ((le16_to_cpu(pkt->status_flags) & SF_LOGOUT_SENT)) { 1120 if ((le16_to_cpu(pkt->status_flags) & SF_LOGOUT_SENT)) {
1217 qla2x00_mark_device_lost(ha, fcport, 1); 1121 qla2x00_mark_device_lost(ha, fcport, 1);
@@ -1227,16 +1131,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1227 1131
1228 cp->result = DID_OK << 16 | lscsi_status; 1132 cp->result = DID_OK << 16 | lscsi_status;
1229 1133
1230 /* TODO: ??? */
1231 /* Adjust queue depth */
1232 ret = scsi_track_queue_full(cp->device,
1233 sp->lun_queue->out_cnt - 1);
1234 if (ret) {
1235 qla_printk(KERN_INFO, ha,
1236 "scsi(%ld:%d:%d:%d): Queue depth adjusted to %d.\n",
1237 ha->host_no, cp->device->channel, cp->device->id,
1238 cp->device->lun, ret);
1239 }
1240 break; 1134 break;
1241 1135
1242 default: 1136 default:
@@ -1253,7 +1147,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1253 1147
1254 /* Place command on done queue. */ 1148 /* Place command on done queue. */
1255 if (ha->status_srb == NULL) 1149 if (ha->status_srb == NULL)
1256 add_to_done_queue(ha, sp); 1150 qla2x00_sp_compl(ha, sp);
1257} 1151}
1258 1152
1259/** 1153/**
@@ -1298,8 +1192,8 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1298 1192
1299 /* Place command on done queue. */ 1193 /* Place command on done queue. */
1300 if (sp->request_sense_length == 0) { 1194 if (sp->request_sense_length == 0) {
1301 add_to_done_queue(ha, sp);
1302 ha->status_srb = NULL; 1195 ha->status_srb = NULL;
1196 qla2x00_sp_compl(ha, sp);
1303 } 1197 }
1304 } 1198 }
1305} 1199}
@@ -1341,8 +1235,6 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1341 ha->outstanding_cmds[pkt->handle] = NULL; 1235 ha->outstanding_cmds[pkt->handle] = NULL;
1342 if (ha->actthreads) 1236 if (ha->actthreads)
1343 ha->actthreads--; 1237 ha->actthreads--;
1344 sp->lun_queue->out_cnt--;
1345
1346 /* Bad payload or header */ 1238 /* Bad payload or header */
1347 if (pkt->entry_status & 1239 if (pkt->entry_status &
1348 (RF_INV_E_ORDER | RF_INV_E_COUNT | 1240 (RF_INV_E_ORDER | RF_INV_E_COUNT |
@@ -1353,8 +1245,7 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1353 } else { 1245 } else {
1354 sp->cmd->result = DID_ERROR << 16; 1246 sp->cmd->result = DID_ERROR << 16;
1355 } 1247 }
1356 /* Place command on done queue. */ 1248 qla2x00_sp_compl(ha, sp);
1357 add_to_done_queue(ha, sp);
1358 1249
1359 } else if (pkt->entry_type == COMMAND_A64_TYPE || 1250 } else if (pkt->entry_type == COMMAND_A64_TYPE ||
1360 pkt->entry_type == COMMAND_TYPE) { 1251 pkt->entry_type == COMMAND_TYPE) {
@@ -1403,62 +1294,5 @@ qla2x00_ms_entry(scsi_qla_host_t *ha, ms_iocb_entry_t *pkt)
1403 /* Free outstanding command slot. */ 1294 /* Free outstanding command slot. */
1404 ha->outstanding_cmds[pkt->handle1] = NULL; 1295 ha->outstanding_cmds[pkt->handle1] = NULL;
1405 1296
1406 add_to_done_queue(ha, sp); 1297 qla2x00_sp_compl(ha, sp);
1407}
1408
1409/**
1410 * qla2x00_check_sense() - Perform any sense data interrogation.
1411 * @cp: SCSI Command
1412 * @lq: Lun queue
1413 *
1414 * Returns QLA_SUCCESS if the lun queue is suspended, else
1415 * QLA_FUNCTION_FAILED (lun queue not suspended).
1416 */
1417static int
1418qla2x00_check_sense(struct scsi_cmnd *cp, os_lun_t *lq)
1419{
1420 scsi_qla_host_t *ha;
1421 srb_t *sp;
1422 fc_port_t *fcport;
1423
1424 ha = (scsi_qla_host_t *) cp->device->host->hostdata;
1425 if ((cp->sense_buffer[0] & 0x70) != 0x70) {
1426 return (QLA_FUNCTION_FAILED);
1427 }
1428
1429 sp = (srb_t * )CMD_SP(cp);
1430 sp->flags |= SRB_GOT_SENSE;
1431
1432 switch (cp->sense_buffer[2] & 0xf) {
1433 case RECOVERED_ERROR:
1434 cp->result = DID_OK << 16;
1435 cp->sense_buffer[0] = 0;
1436 break;
1437
1438 case NOT_READY:
1439 fcport = lq->fclun->fcport;
1440
1441 /*
1442 * Suspend the lun only for hard disk device type.
1443 */
1444 if ((fcport->flags & FCF_TAPE_PRESENT) == 0 &&
1445 lq->q_state != LUN_STATE_TIMEOUT) {
1446 /*
1447 * If target is in process of being ready then suspend
1448 * lun for 6 secs and retry all the commands.
1449 */
1450 if (cp->sense_buffer[12] == 0x4 &&
1451 cp->sense_buffer[13] == 0x1) {
1452
1453 /* Suspend the lun for 6 secs */
1454 qla2x00_suspend_lun(ha, lq, 6,
1455 ql2xsuspendcount);
1456
1457 return (QLA_SUCCESS);
1458 }
1459 }
1460 break;
1461 }
1462
1463 return (QLA_FUNCTION_FAILED);
1464} 1298}
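With this file's changes the interrupt path sheds its driver-private retry, scsi-retry and done queues: transient conditions (transport errors, port down, resets) now complete straight back with an appropriate host byte, and retry policy belongs to the midlayer and the FC transport's device-loss handling. The host byte sits in bits 23:16 of cmd->result, which is what the DID_* shifts above encode; for reference, a hypothetical helper spelling that out:

	static inline void
	qla2x00_set_host_byte(struct scsi_cmnd *cmd, int did)
	{
		/* DID_* host codes occupy bits 23:16 of cmd->result. */
		cmd->result = (cmd->result & ~(0xff << 16)) | (did << 16);
	}
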
diff --git a/drivers/scsi/qla2xxx/qla_listops.h b/drivers/scsi/qla2xxx/qla_listops.h
deleted file mode 100644
index 5da034f61afa..000000000000
--- a/drivers/scsi/qla2xxx/qla_listops.h
+++ /dev/null
@@ -1,351 +0,0 @@
1/******************************************************************************
2 * QLOGIC LINUX SOFTWARE
3 *
4 * QLogic ISP2x00 device driver for Linux 2.6.x
5 * Copyright (C) 2003-2004 QLogic Corporation
6 * (www.qlogic.com)
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2, or (at your option) any
11 * later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 ******************************************************************************/
19
20/* Management functions for various lists */
21
22/* __add_to_done_queue()
23 *
24 * Place SRB command on done queue.
25 *
26 * Input:
27 * ha = host pointer
28 * sp = srb pointer.
29 * Locking:
30 * this function assumes the ha->list_lock is already taken
31 */
32static inline void
33__add_to_done_queue(struct scsi_qla_host * ha, srb_t * sp)
34{
35 /*
36 if (sp->state != SRB_NO_QUEUE_STATE &&
37 sp->state != SRB_ACTIVE_STATE)
38 BUG();
39 */
40
41 /* Place block on done queue */
42 sp->cmd->host_scribble = (unsigned char *) NULL;
43 sp->state = SRB_DONE_STATE;
44 list_add_tail(&sp->list,&ha->done_queue);
45 ha->done_q_cnt++;
46 sp->ha = ha;
47}
48
49static inline void
50__add_to_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
51{
52 /*
53 if( sp->state != SRB_NO_QUEUE_STATE &&
54 sp->state != SRB_ACTIVE_STATE)
55 BUG();
56 */
57
58 /* Place block on retry queue */
59 list_add_tail(&sp->list,&ha->retry_queue);
60 ha->retry_q_cnt++;
61 sp->flags |= SRB_WATCHDOG;
62 sp->state = SRB_RETRY_STATE;
63 sp->ha = ha;
64}
65
66static inline void
67__add_to_scsi_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
68{
69 /*
70 if( sp->state != SRB_NO_QUEUE_STATE &&
71 sp->state != SRB_ACTIVE_STATE)
72 BUG();
73 */
74
75 /* Place block on retry queue */
76 list_add_tail(&sp->list,&ha->scsi_retry_queue);
77 ha->scsi_retry_q_cnt++;
78 sp->state = SRB_SCSI_RETRY_STATE;
79 sp->ha = ha;
80}
81
82static inline void
83add_to_done_queue(struct scsi_qla_host * ha, srb_t * sp)
84{
85 unsigned long flags;
86
87 spin_lock_irqsave(&ha->list_lock, flags);
88 __add_to_done_queue(ha,sp);
89 spin_unlock_irqrestore(&ha->list_lock, flags);
90}
91
92static inline void
93add_to_free_queue(struct scsi_qla_host * ha, srb_t * sp)
94{
95 mempool_free(sp, ha->srb_mempool);
96}
97
98static inline void
99add_to_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
100{
101 unsigned long flags;
102
103 spin_lock_irqsave(&ha->list_lock, flags);
104 __add_to_retry_queue(ha,sp);
105 spin_unlock_irqrestore(&ha->list_lock, flags);
106}
107
108static inline void
109add_to_scsi_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
110{
111 unsigned long flags;
112
113 spin_lock_irqsave(&ha->list_lock, flags);
114 __add_to_scsi_retry_queue(ha,sp);
115 spin_unlock_irqrestore(&ha->list_lock, flags);
116}
117
118/*
119 * __del_from_retry_queue
120 * Function used to remove a command block from the
121 * watchdog timer queue.
122 *
 123 * Note: Must ensure that the command is on the watchdog
124 * list before calling del_from_retry_queue
125 * if (sp->flags & SRB_WATCHDOG)
126 *
127 * Input:
128 * ha = adapter block pointer.
129 * sp = srb pointer.
130 * Locking:
131 * this function assumes the list_lock is already taken
132 */
133static inline void
134__del_from_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
135{
136 list_del_init(&sp->list);
137
138 sp->flags &= ~(SRB_WATCHDOG | SRB_BUSY);
139 sp->state = SRB_NO_QUEUE_STATE;
140 ha->retry_q_cnt--;
141}
142
143/*
144 * __del_from_scsi_retry_queue
145 * Function used to remove a command block from the
146 * scsi retry queue.
147 *
148 * Input:
149 * ha = adapter block pointer.
150 * sp = srb pointer.
151 * Locking:
152 * this function assumes the list_lock is already taken
153 */
154static inline void
155__del_from_scsi_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
156{
157 list_del_init(&sp->list);
158
159 ha->scsi_retry_q_cnt--;
160 sp->state = SRB_NO_QUEUE_STATE;
161}
162
163/*
164 * del_from_retry_queue
165 * Function used to remove a command block from the
166 * watchdog timer queue.
167 *
 168 * Note: Must ensure that the command is on the watchdog
169 * list before calling del_from_retry_queue
170 * if (sp->flags & SRB_WATCHDOG)
171 *
172 * Input:
173 * ha = adapter block pointer.
174 * sp = srb pointer.
175 * Locking:
176 * this function takes and releases the list_lock
177 */
178static inline void
179del_from_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
180{
181 unsigned long flags;
182
183 /* if (unlikely(!(sp->flags & SRB_WATCHDOG)))
184 BUG();*/
185 spin_lock_irqsave(&ha->list_lock, flags);
186
187 /* if (unlikely(list_empty(&ha->retry_queue)))
188 BUG();*/
189
190 __del_from_retry_queue(ha,sp);
191
192 spin_unlock_irqrestore(&ha->list_lock, flags);
193}
194/*
195 * del_from_scsi_retry_queue
196 * Function used to remove a command block from the
197 * scsi retry queue.
198 *
199 * Input:
200 * ha = adapter block pointer.
201 * sp = srb pointer.
202 * Locking:
203 * this function takes and releases the list_lock
204 */
205static inline void
206del_from_scsi_retry_queue(struct scsi_qla_host * ha, srb_t * sp)
207{
208 unsigned long flags;
209
210 spin_lock_irqsave(&ha->list_lock, flags);
211
212 /* if (unlikely(list_empty(&ha->scsi_retry_queue)))
213 BUG();*/
214
215 __del_from_scsi_retry_queue(ha,sp);
216
217 spin_unlock_irqrestore(&ha->list_lock, flags);
218}
219
220/*
221 * __add_to_pending_queue
 222 * Add the standard SCB job to the tail of the standard SCB command queue.
223 *
224 * Input:
225 * COMPLETE!!!
226 * q = SCSI LU pointer.
227 * sp = srb pointer.
228 * SCSI_LU_Q lock must be already obtained.
229 */
230static inline int
231__add_to_pending_queue(struct scsi_qla_host *ha, srb_t * sp)
232{
233 int empty;
234 /*
235 if( sp->state != SRB_NO_QUEUE_STATE &&
236 sp->state != SRB_FREE_STATE &&
237 sp->state != SRB_ACTIVE_STATE)
238 BUG();
239 */
240
241 empty = list_empty(&ha->pending_queue);
242 list_add_tail(&sp->list, &ha->pending_queue);
243 ha->qthreads++;
244 sp->state = SRB_PENDING_STATE;
245
246 return (empty);
247}
248
249static inline void
250__add_to_pending_queue_head(struct scsi_qla_host *ha, srb_t * sp)
251{
252 /*
253 if( sp->state != SRB_NO_QUEUE_STATE &&
254 sp->state != SRB_FREE_STATE &&
255 sp->state != SRB_ACTIVE_STATE)
256 BUG();
257 */
258
259 list_add(&sp->list, &ha->pending_queue);
260 ha->qthreads++;
261 sp->state = SRB_PENDING_STATE;
262}
263
264static inline int
265add_to_pending_queue(struct scsi_qla_host *ha, srb_t *sp)
266{
267 int empty;
268 unsigned long flags;
269
270 spin_lock_irqsave(&ha->list_lock, flags);
271 empty = __add_to_pending_queue(ha, sp);
272 spin_unlock_irqrestore(&ha->list_lock, flags);
273
274 return (empty);
275}
276static inline void
277add_to_pending_queue_head(struct scsi_qla_host *ha, srb_t *sp)
278{
279 unsigned long flags;
280
281 spin_lock_irqsave(&ha->list_lock, flags);
282 __add_to_pending_queue_head(ha, sp);
283 spin_unlock_irqrestore(&ha->list_lock, flags);
284}
285
286static inline void
287__del_from_pending_queue(struct scsi_qla_host *ha, srb_t *sp)
288{
289 list_del_init(&sp->list);
290 ha->qthreads--;
291 sp->state = SRB_NO_QUEUE_STATE;
292}
293
294/*
295 * Failover Stuff.
296 */
297static inline void
298__add_to_failover_queue(struct scsi_qla_host * ha, srb_t * sp)
299{
300 /*
301 if( sp->state != SRB_NO_QUEUE_STATE &&
302 sp->state != SRB_ACTIVE_STATE)
303 BUG();
304 */
305
306 list_add_tail(&sp->list,&ha->failover_queue);
307 ha->failover_cnt++;
308 sp->state = SRB_FAILOVER_STATE;
309 sp->ha = ha;
310}
311
312static inline void add_to_failover_queue(struct scsi_qla_host * ha, srb_t * sp)
313{
314 unsigned long flags;
315
316 spin_lock_irqsave(&ha->list_lock, flags);
317
318 __add_to_failover_queue(ha,sp);
319
320 spin_unlock_irqrestore(&ha->list_lock, flags);
321}
322static inline void __del_from_failover_queue(struct scsi_qla_host * ha, srb_t *
323 sp)
324{
325 ha->failover_cnt--;
326 list_del_init(&sp->list);
327 sp->state = SRB_NO_QUEUE_STATE;
328}
329
330static inline void del_from_failover_queue(struct scsi_qla_host * ha, srb_t * sp)
331{
332 unsigned long flags;
333
334 spin_lock_irqsave(&ha->list_lock, flags);
335
336 __del_from_failover_queue(ha,sp);
337
338 spin_unlock_irqrestore(&ha->list_lock, flags);
339}
340
341static inline void
342del_from_pending_queue(struct scsi_qla_host * ha, srb_t * sp)
343{
344 unsigned long flags;
345
346 spin_lock_irqsave(&ha->list_lock, flags);
347
348 __del_from_pending_queue(ha,sp);
349
350 spin_unlock_irqrestore(&ha->list_lock, flags);
351}
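The entire deleted header followed the standard kernel convention of pairing a double-underscore helper, which assumes the caller already holds ha->list_lock, with a plain wrapper that takes and releases the lock itself. A generic sketch of the pattern, with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct queue {
	spinlock_t lock;
	struct list_head head;
	int count;
};

/* Caller must already hold q->lock. */
static inline void __queue_add(struct queue *q, struct list_head *item)
{
	list_add_tail(item, &q->head);
	q->count++;
}

/* Locking variant for callers that do not hold q->lock. */
static inline void queue_add(struct queue *q, struct list_head *item)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__queue_add(q, item);
	spin_unlock_irqrestore(&q->lock, flags);
}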
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index c04fbcd75235..15f6acaca305 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -858,8 +858,7 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
858 858
859 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", ha->host_no);) 859 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", ha->host_no);)
860 860
861 fcport = sp->fclun->fcport; 861 fcport = sp->fcport;
862
863 if (atomic_read(&ha->loop_state) == LOOP_DOWN || 862 if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
864 atomic_read(&fcport->state) == FCS_DEVICE_LOST) { 863 atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
865 return 1; 864 return 1;
@@ -884,7 +883,7 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
884 mcp->mb[1] = fcport->loop_id << 8; 883 mcp->mb[1] = fcport->loop_id << 8;
885 mcp->mb[2] = (uint16_t)handle; 884 mcp->mb[2] = (uint16_t)handle;
886 mcp->mb[3] = (uint16_t)(handle >> 16); 885 mcp->mb[3] = (uint16_t)(handle >> 16);
887 mcp->mb[6] = (uint16_t)sp->fclun->lun; 886 mcp->mb[6] = (uint16_t)sp->cmd->device->lun;
888 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 887 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
889 mcp->in_mb = MBX_0; 888 mcp->in_mb = MBX_0;
890 mcp->tov = 30; 889 mcp->tov = 30;
@@ -980,30 +979,22 @@ qla2x00_abort_target(fc_port_t *fcport)
980 * Kernel context. 979 * Kernel context.
981 */ 980 */
982int 981int
983qla2x00_target_reset(scsi_qla_host_t *ha, uint16_t b, uint16_t t) 982qla2x00_target_reset(scsi_qla_host_t *ha, struct fc_port *fcport)
984{ 983{
985 int rval; 984 int rval;
986 mbx_cmd_t mc; 985 mbx_cmd_t mc;
987 mbx_cmd_t *mcp = &mc; 986 mbx_cmd_t *mcp = &mc;
988 os_tgt_t *tgt;
989 987
990 DEBUG11(printk("qla2x00_target_reset(%ld): entered.\n", ha->host_no);) 988 DEBUG11(printk("qla2x00_target_reset(%ld): entered.\n", ha->host_no);)
991 989
992 tgt = TGT_Q(ha, t); 990 if (atomic_read(&fcport->state) != FCS_ONLINE)
993 if (tgt->fcport == NULL) {
994 /* no target to abort */
995 return 0;
996 }
997 if (atomic_read(&tgt->fcport->state) != FCS_ONLINE) {
998 /* target not online */
999 return 0; 991 return 0;
1000 }
1001 992
1002 mcp->mb[0] = MBC_TARGET_RESET; 993 mcp->mb[0] = MBC_TARGET_RESET;
1003 if (HAS_EXTENDED_IDS(ha)) 994 if (HAS_EXTENDED_IDS(ha))
1004 mcp->mb[1] = tgt->fcport->loop_id; 995 mcp->mb[1] = fcport->loop_id;
1005 else 996 else
1006 mcp->mb[1] = tgt->fcport->loop_id << 8; 997 mcp->mb[1] = fcport->loop_id << 8;
1007 mcp->mb[2] = ha->loop_reset_delay; 998 mcp->mb[2] = ha->loop_reset_delay;
1008 mcp->out_mb = MBX_2|MBX_1|MBX_0; 999 mcp->out_mb = MBX_2|MBX_1|MBX_0;
1009 mcp->in_mb = MBX_0; 1000 mcp->in_mb = MBX_0;
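Note how the abort mailbox command above splits the 32-bit IOCB handle across two 16-bit mailbox registers, mb[2] taking the low word and mb[3] the high word. A sketch of that packing, with hypothetical helper names:

#include <stdint.h>

/* Split a 32-bit handle across two 16-bit mailbox registers. */
static void pack_handle(uint16_t mb[8], uint32_t handle)
{
	mb[2] = (uint16_t)handle;		/* low 16 bits */
	mb[3] = (uint16_t)(handle >> 16);	/* high 16 bits */
}

static uint32_t unpack_handle(const uint16_t mb[8])
{
	return (uint32_t)mb[2] | ((uint32_t)mb[3] << 16);
}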
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index b5863d8769e0..84db911318c6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -63,7 +63,7 @@ module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
63MODULE_PARM_DESC(ql2xlogintimeout, 63MODULE_PARM_DESC(ql2xlogintimeout,
64 "Login timeout value in seconds."); 64 "Login timeout value in seconds.");
65 65
66int qlport_down_retry; 66int qlport_down_retry = 30;
67module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR); 67module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
68MODULE_PARM_DESC(qlport_down_retry, 68MODULE_PARM_DESC(qlport_down_retry,
69 "Maximum number of command retries to a port that returns" 69 "Maximum number of command retries to a port that returns"
@@ -75,11 +75,6 @@ MODULE_PARM_DESC(ql2xretrycount,
75 "Maximum number of mid-layer retries allowed for a command. " 75 "Maximum number of mid-layer retries allowed for a command. "
76 "Default value is 20, "); 76 "Default value is 20, ");
77 77
78int displayConfig;
79module_param(displayConfig, int, S_IRUGO|S_IWUSR);
80MODULE_PARM_DESC(displayConfig,
81 "If 1 then display the configuration used in /etc/modprobe.conf.");
82
83int ql2xplogiabsentdevice; 78int ql2xplogiabsentdevice;
84module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR); 79module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
85MODULE_PARM_DESC(ql2xplogiabsentdevice, 80MODULE_PARM_DESC(ql2xplogiabsentdevice,
@@ -119,30 +114,11 @@ MODULE_PARM_DESC(ql2xsuspendcount,
119 "target returns a <NOT READY> status. Default is 10 " 114 "target returns a <NOT READY> status. Default is 10 "
120 "iterations."); 115 "iterations.");
121 116
122int ql2xdoinitscan = 1;
123module_param(ql2xdoinitscan, int, S_IRUGO|S_IWUSR);
124MODULE_PARM_DESC(ql2xdoinitscan,
125 "Signal mid-layer to perform scan after driver load: 0 -- no "
126 "signal sent to mid-layer.");
127
128int ql2xloginretrycount = 0; 117int ql2xloginretrycount = 0;
129module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR); 118module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
130MODULE_PARM_DESC(ql2xloginretrycount, 119MODULE_PARM_DESC(ql2xloginretrycount,
131 "Specify an alternate value for the NVRAM login retry count."); 120 "Specify an alternate value for the NVRAM login retry count.");
132 121
133/*
134 * Proc structures and functions
135 */
136struct info_str {
137 char *buffer;
138 int length;
139 off_t offset;
140 int pos;
141};
142
143static void copy_mem_info(struct info_str *, char *, int);
144static int copy_info(struct info_str *, char *, ...);
145
146static void qla2x00_free_device(scsi_qla_host_t *); 122static void qla2x00_free_device(scsi_qla_host_t *);
147 123
148static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha); 124static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
@@ -151,6 +127,8 @@ static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
151 * SCSI host template entry points 127 * SCSI host template entry points
152 */ 128 */
153static int qla2xxx_slave_configure(struct scsi_device * device); 129static int qla2xxx_slave_configure(struct scsi_device * device);
130static int qla2xxx_slave_alloc(struct scsi_device *);
131static void qla2xxx_slave_destroy(struct scsi_device *);
154static int qla2x00_queuecommand(struct scsi_cmnd *cmd, 132static int qla2x00_queuecommand(struct scsi_cmnd *cmd,
155 void (*fn)(struct scsi_cmnd *)); 133 void (*fn)(struct scsi_cmnd *));
156static int qla2xxx_eh_abort(struct scsi_cmnd *); 134static int qla2xxx_eh_abort(struct scsi_cmnd *);
@@ -160,14 +138,9 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
160static int qla2x00_loop_reset(scsi_qla_host_t *ha); 138static int qla2x00_loop_reset(scsi_qla_host_t *ha);
161static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *); 139static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *);
162 140
163static int qla2x00_proc_info(struct Scsi_Host *, char *, char **,
164 off_t, int, int);
165
166static struct scsi_host_template qla2x00_driver_template = { 141static struct scsi_host_template qla2x00_driver_template = {
167 .module = THIS_MODULE, 142 .module = THIS_MODULE,
168 .name = "qla2xxx", 143 .name = "qla2xxx",
169 .proc_name = "qla2xxx",
170 .proc_info = qla2x00_proc_info,
171 .queuecommand = qla2x00_queuecommand, 144 .queuecommand = qla2x00_queuecommand,
172 145
173 .eh_abort_handler = qla2xxx_eh_abort, 146 .eh_abort_handler = qla2xxx_eh_abort,
@@ -177,6 +150,8 @@ static struct scsi_host_template qla2x00_driver_template = {
177 150
178 .slave_configure = qla2xxx_slave_configure, 151 .slave_configure = qla2xxx_slave_configure,
179 152
153 .slave_alloc = qla2xxx_slave_alloc,
154 .slave_destroy = qla2xxx_slave_destroy,
180 .this_id = -1, 155 .this_id = -1,
181 .cmd_per_lun = 3, 156 .cmd_per_lun = 3,
182 .use_clustering = ENABLE_CLUSTERING, 157 .use_clustering = ENABLE_CLUSTERING,
@@ -191,8 +166,6 @@ static struct scsi_host_template qla2x00_driver_template = {
191 166
192static struct scsi_transport_template *qla2xxx_transport_template = NULL; 167static struct scsi_transport_template *qla2xxx_transport_template = NULL;
193 168
194static void qla2x00_display_fc_names(scsi_qla_host_t *);
195
196/* TODO Convert to inlines 169/* TODO Convert to inlines
197 * 170 *
198 * Timer routines 171 * Timer routines
@@ -230,168 +203,6 @@ qla2x00_stop_timer(scsi_qla_host_t *ha)
230 ha->timer_active = 0; 203 ha->timer_active = 0;
231} 204}
232 205
233void qla2x00_cmd_timeout(srb_t *);
234
235static __inline__ void qla2x00_callback(scsi_qla_host_t *, struct scsi_cmnd *);
236static __inline__ void sp_put(struct scsi_qla_host * ha, srb_t *sp);
237static __inline__ void sp_get(struct scsi_qla_host * ha, srb_t *sp);
238static __inline__ void
239qla2x00_delete_from_done_queue(scsi_qla_host_t *, srb_t *);
240
241/*
242* qla2x00_callback
243* Returns the completed SCSI command to LINUX.
244*
245* Input:
246* ha -- Host adapter structure
247* cmd -- SCSI mid-level command structure.
248* Returns:
249* None
 250* Note: From the failover point of view we always get the sp
 251* from the vis_ha pool in queuecommand. So when we put it
252* back to the pool it has to be the vis_ha.
253* So rely on struct scsi_cmnd to get the vis_ha and not on sp.
254*/
255static inline void
256qla2x00_callback(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
257{
258 srb_t *sp = (srb_t *) CMD_SP(cmd);
259 scsi_qla_host_t *vis_ha;
260 os_lun_t *lq;
261 int got_sense;
262 unsigned long cpu_flags = 0;
263
264 cmd->host_scribble = (unsigned char *) NULL;
265 vis_ha = (scsi_qla_host_t *) cmd->device->host->hostdata;
266
267 if (sp == NULL) {
268 qla_printk(KERN_INFO, ha,
269 "%s(): **** CMD derives a NULL SP\n",
270 __func__);
271 DEBUG2(BUG();)
272 return;
273 }
274
275 /*
276 * If command status is not DID_BUS_BUSY then go ahead and freed sp.
277 */
278 /*
279 * Cancel command timeout
280 */
281 qla2x00_delete_timer_from_cmd(sp);
282
283 /*
284 * Put SP back in the free queue
285 */
286 sp->cmd = NULL;
287 CMD_SP(cmd) = NULL;
288 lq = sp->lun_queue;
289 got_sense = (sp->flags & SRB_GOT_SENSE)? 1: 0;
290 add_to_free_queue(vis_ha, sp);
291
292 if (host_byte(cmd->result) == DID_OK) {
293 /* device ok */
294 ha->total_bytes += cmd->bufflen;
295 if (!got_sense) {
296 /* If lun was suspended then clear retry count */
297 spin_lock_irqsave(&lq->q_lock, cpu_flags);
298 if (!test_bit(LUN_EXEC_DELAYED, &lq->q_flag))
299 lq->q_state = LUN_STATE_READY;
300 spin_unlock_irqrestore(&lq->q_lock, cpu_flags);
301 }
302 } else if (host_byte(cmd->result) == DID_ERROR) {
303 /* device error */
304 ha->total_dev_errs++;
305 }
306
307 /* Call the mid-level driver interrupt handler */
308 (*(cmd)->scsi_done)(cmd);
309}
310
311/**************************************************************************
312* sp_put
313*
314* Description:
315* Decrement reference count and call the callback if we're the last
316* owner of the specified sp. Will get the host_lock before calling
317* the callback.
318*
319* Input:
320* ha - pointer to the scsi_qla_host_t where the callback is to occur.
321* sp - pointer to srb_t structure to use.
322*
323* Returns:
324*
325**************************************************************************/
326static inline void
327sp_put(struct scsi_qla_host * ha, srb_t *sp)
328{
329 if (atomic_read(&sp->ref_count) == 0) {
330 qla_printk(KERN_INFO, ha,
331 "%s(): **** SP->ref_count not zero\n",
332 __func__);
333 DEBUG2(BUG();)
334
335 return;
336 }
337
338 if (!atomic_dec_and_test(&sp->ref_count)) {
339 return;
340 }
341
342 qla2x00_callback(ha, sp->cmd);
343}
344
345/**************************************************************************
346* sp_get
347*
348* Description:
349* Increment reference count of the specified sp.
350*
351* Input:
352* sp - pointer to srb_t structure to use.
353*
354* Returns:
355*
356**************************************************************************/
357static inline void
358sp_get(struct scsi_qla_host * ha, srb_t *sp)
359{
360 atomic_inc(&sp->ref_count);
361
362 if (atomic_read(&sp->ref_count) > 2) {
363 qla_printk(KERN_INFO, ha,
364 "%s(): **** SP->ref_count greater than two\n",
365 __func__);
366 DEBUG2(BUG();)
367
368 return;
369 }
370}
371
372static inline void
373qla2x00_delete_from_done_queue(scsi_qla_host_t *dest_ha, srb_t *sp)
374{
375 /* remove command from done list */
376 list_del_init(&sp->list);
377 dest_ha->done_q_cnt--;
378 sp->state = SRB_NO_QUEUE_STATE;
379
380 if (sp->flags & SRB_DMA_VALID) {
381 sp->flags &= ~SRB_DMA_VALID;
382
383 /* Release memory used for this I/O */
384 if (sp->cmd->use_sg) {
385 pci_unmap_sg(dest_ha->pdev, sp->cmd->request_buffer,
386 sp->cmd->use_sg, sp->cmd->sc_data_direction);
387 } else if (sp->cmd->request_bufflen) {
388 pci_unmap_page(dest_ha->pdev, sp->dma_handle,
389 sp->cmd->request_bufflen,
390 sp->cmd->sc_data_direction);
391 }
392 }
393}
394
395static int qla2x00_do_dpc(void *data); 206static int qla2x00_do_dpc(void *data);
396 207
397static void qla2x00_rst_aen(scsi_qla_host_t *); 208static void qla2x00_rst_aen(scsi_qla_host_t *);
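The removed qla2x00_delete_from_done_queue() also records the 2.6-era unmap convention that the new qla2x00_sp_free_dma() presumably inherits: scatter/gather commands are unmapped with pci_unmap_sg(), single-buffer commands with pci_unmap_page(). A condensed sketch of that branch, assuming the struct scsi_cmnd fields of this kernel:

#include <linux/pci.h>
#include <scsi/scsi_cmnd.h>

/* Sketch: release the DMA mappings of a completed command (2.6-era API). */
static void release_cmd_dma(struct pci_dev *pdev, struct scsi_cmnd *cmd,
    dma_addr_t dma_handle)
{
	if (cmd->use_sg)
		pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg,
		    cmd->sc_data_direction);
	else if (cmd->request_bufflen)
		pci_unmap_page(pdev, dma_handle, cmd->request_bufflen,
		    cmd->sc_data_direction);
}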
@@ -400,186 +211,12 @@ static uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
400static void qla2x00_mem_free(scsi_qla_host_t *ha); 211static void qla2x00_mem_free(scsi_qla_host_t *ha);
401static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha); 212static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha);
402static void qla2x00_free_sp_pool(scsi_qla_host_t *ha); 213static void qla2x00_free_sp_pool(scsi_qla_host_t *ha);
403static srb_t *qla2x00_get_new_sp(scsi_qla_host_t *ha); 214static srb_t *qla2x00_get_new_sp(scsi_qla_host_t *);
404 215static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *);
405static ssize_t qla2x00_sysfs_read_fw_dump(struct kobject *, char *, loff_t, 216void qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *);
406 size_t);
407static ssize_t qla2x00_sysfs_write_fw_dump(struct kobject *, char *, loff_t,
408 size_t);
409static struct bin_attribute sysfs_fw_dump_attr = {
410 .attr = {
411 .name = "fw_dump",
412 .mode = S_IRUSR | S_IWUSR,
413 .owner = THIS_MODULE,
414 },
415 .size = 0,
416 .read = qla2x00_sysfs_read_fw_dump,
417 .write = qla2x00_sysfs_write_fw_dump,
418};
419static ssize_t qla2x00_sysfs_read_nvram(struct kobject *, char *, loff_t,
420 size_t);
421static ssize_t qla2x00_sysfs_write_nvram(struct kobject *, char *, loff_t,
422 size_t);
423static struct bin_attribute sysfs_nvram_attr = {
424 .attr = {
425 .name = "nvram",
426 .mode = S_IRUSR | S_IWUSR,
427 .owner = THIS_MODULE,
428 },
429 .size = sizeof(nvram_t),
430 .read = qla2x00_sysfs_read_nvram,
431 .write = qla2x00_sysfs_write_nvram,
432};
433 217
434/* -------------------------------------------------------------------------- */ 218/* -------------------------------------------------------------------------- */
435 219
436
437/* SysFS attributes. */
438static ssize_t qla2x00_sysfs_read_fw_dump(struct kobject *kobj, char *buf,
439 loff_t off, size_t count)
440{
441 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
442 struct device, kobj)));
443
444 if (ha->fw_dump_reading == 0)
445 return 0;
446 if (off > ha->fw_dump_buffer_len)
447 return 0;
448 if (off + count > ha->fw_dump_buffer_len)
449 count = ha->fw_dump_buffer_len - off;
450
451 memcpy(buf, &ha->fw_dump_buffer[off], count);
452
453 return (count);
454}
455
456static ssize_t qla2x00_sysfs_write_fw_dump(struct kobject *kobj, char *buf,
457 loff_t off, size_t count)
458{
459 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
460 struct device, kobj)));
461 int reading;
462 uint32_t dump_size;
463
464 if (off != 0)
465 return (0);
466
467 reading = simple_strtol(buf, NULL, 10);
468 switch (reading) {
469 case 0:
470 if (ha->fw_dump_reading == 1) {
471 qla_printk(KERN_INFO, ha,
472 "Firmware dump cleared on (%ld).\n",
473 ha->host_no);
474
475 vfree(ha->fw_dump_buffer);
476 free_pages((unsigned long)ha->fw_dump,
477 ha->fw_dump_order);
478
479 ha->fw_dump_reading = 0;
480 ha->fw_dump_buffer = NULL;
481 ha->fw_dump = NULL;
482 }
483 break;
484 case 1:
485 if (ha->fw_dump != NULL && !ha->fw_dump_reading) {
486 ha->fw_dump_reading = 1;
487
488 dump_size = FW_DUMP_SIZE_1M;
489 if (ha->fw_memory_size < 0x20000)
490 dump_size = FW_DUMP_SIZE_128K;
491 else if (ha->fw_memory_size < 0x80000)
492 dump_size = FW_DUMP_SIZE_512K;
493 ha->fw_dump_buffer = (char *)vmalloc(dump_size);
494 if (ha->fw_dump_buffer == NULL) {
495 qla_printk(KERN_WARNING, ha,
496 "Unable to allocate memory for firmware "
497 "dump buffer (%d).\n", dump_size);
498
499 ha->fw_dump_reading = 0;
500 return (count);
501 }
502 qla_printk(KERN_INFO, ha,
503 "Firmware dump ready for read on (%ld).\n",
504 ha->host_no);
505 memset(ha->fw_dump_buffer, 0, dump_size);
506 if (IS_QLA2100(ha) || IS_QLA2200(ha))
507 qla2100_ascii_fw_dump(ha);
508 else
509 qla2300_ascii_fw_dump(ha);
510 ha->fw_dump_buffer_len = strlen(ha->fw_dump_buffer);
511 }
512 break;
513 }
514 return (count);
515}
516
517static ssize_t qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf,
518 loff_t off, size_t count)
519{
520 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
521 struct device, kobj)));
522 uint16_t *witer;
523 unsigned long flags;
524 uint16_t cnt;
525
526 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != sizeof(nvram_t))
527 return 0;
528
529 /* Read NVRAM. */
530 spin_lock_irqsave(&ha->hardware_lock, flags);
531 qla2x00_lock_nvram_access(ha);
532 witer = (uint16_t *)buf;
533 for (cnt = 0; cnt < count / 2; cnt++) {
534 *witer = cpu_to_le16(qla2x00_get_nvram_word(ha,
535 cnt+ha->nvram_base));
536 witer++;
537 }
538 qla2x00_unlock_nvram_access(ha);
539 spin_unlock_irqrestore(&ha->hardware_lock, flags);
540
541 return (count);
542}
543
544static ssize_t qla2x00_sysfs_write_nvram(struct kobject *kobj, char *buf,
545 loff_t off, size_t count)
546{
547 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
548 struct device, kobj)));
549 uint8_t *iter;
550 uint16_t *witer;
551 unsigned long flags;
552 uint16_t cnt;
553 uint8_t chksum;
554
555 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != sizeof(nvram_t))
556 return 0;
557
558 /* Checksum NVRAM. */
559 iter = (uint8_t *)buf;
560 chksum = 0;
561 for (cnt = 0; cnt < count - 1; cnt++)
562 chksum += *iter++;
563 chksum = ~chksum + 1;
564 *iter = chksum;
565
566 /* Write NVRAM. */
567 spin_lock_irqsave(&ha->hardware_lock, flags);
568 qla2x00_lock_nvram_access(ha);
569 qla2x00_release_nvram_protection(ha);
570 witer = (uint16_t *)buf;
571 for (cnt = 0; cnt < count / 2; cnt++) {
572 qla2x00_write_nvram_word(ha, cnt+ha->nvram_base,
573 cpu_to_le16(*witer));
574 witer++;
575 }
576 qla2x00_unlock_nvram_access(ha);
577 spin_unlock_irqrestore(&ha->hardware_lock, flags);
578
579 return (count);
580}
581
582/* -------------------------------------------------------------------------- */
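Per the diffstat, these binary attributes move into the new qla_attr.c rather than disappearing. The general shape of a 2.6-era struct bin_attribute, sketched with a hypothetical backing store:

#include <linux/stat.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static char example_blob[64];	/* hypothetical backing store */

static ssize_t example_read(struct kobject *kobj, char *buf,
    loff_t off, size_t count)
{
	/* Clamp the window to the exported object, then copy out. */
	if (off >= sizeof(example_blob))
		return 0;
	if (off + count > sizeof(example_blob))
		count = sizeof(example_blob) - off;
	memcpy(buf, example_blob + off, count);
	return count;
}

static struct bin_attribute example_attr = {
	.attr = { .name = "example", .mode = S_IRUSR },
	.size = sizeof(example_blob),
	.read = example_read,
};
/* Registered against a kobject with sysfs_create_bin_file(). */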
583static char * 220static char *
584qla2x00_get_pci_info_str(struct scsi_qla_host *ha, char *str) 221qla2x00_get_pci_info_str(struct scsi_qla_host *ha, char *str)
585{ 222{
@@ -661,210 +298,76 @@ qla2x00_get_fw_version_str(struct scsi_qla_host *ha, char *str)
661* handling). 298* handling).
662**************************************************************************/ 299**************************************************************************/
663static int 300static int
664qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) 301qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
665{ 302{
666 fc_port_t *fcport; 303 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
667 os_lun_t *lq; 304 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
668 os_tgt_t *tq; 305 srb_t *sp;
669 scsi_qla_host_t *ha, *ha2; 306 int rval;
670 srb_t *sp;
671 struct Scsi_Host *host;
672 unsigned int b, t, l;
673 unsigned long handle;
674 int was_empty;
675
676 307
677 host = cmd->device->host; 308 if (!fcport) {
678 ha = (scsi_qla_host_t *) host->hostdata; 309 cmd->result = DID_NO_CONNECT << 16;
679 was_empty = 1; 310 goto qc_fail_command;
311 }
680 312
681 cmd->scsi_done = fn; 313 if (atomic_read(&fcport->state) != FCS_ONLINE) {
314 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
315 atomic_read(&ha->loop_state) == LOOP_DEAD) {
316 cmd->result = DID_NO_CONNECT << 16;
317 goto qc_fail_command;
318 }
319 goto qc_host_busy;
320 }
682 321
683 spin_unlock_irq(ha->host->host_lock); 322 spin_unlock_irq(ha->host->host_lock);
684 323
685 /* 324 /* Allocate a command packet from the "sp" pool. */
 686 * Allocate a command packet from the "sp" pool. If we can't get
 687 * one back, then let the scsi layer come back later.
688 */
689 if ((sp = qla2x00_get_new_sp(ha)) == NULL) { 325 if ((sp = qla2x00_get_new_sp(ha)) == NULL) {
690 qla_printk(KERN_WARNING, ha, 326 goto qc_host_busy_lock;
691 "Couldn't allocate memory for sp - retried.\n");
692
693 spin_lock_irq(ha->host->host_lock);
694
695 return (1);
696 } 327 }
697 328
329 sp->ha = ha;
330 sp->fcport = fcport;
698 sp->cmd = cmd; 331 sp->cmd = cmd;
699 CMD_SP(cmd) = (void *)sp;
700
701 sp->flags = 0; 332 sp->flags = 0;
702 if (CMD_RESID_LEN(cmd) & SRB_IOCTL) {
703 /* Need to set sp->flags */
704 sp->flags |= SRB_IOCTL;
705 CMD_RESID_LEN(cmd) = 0; /* Clear it since no more use. */
706 }
707
708 sp->fo_retry_cnt = 0;
709 sp->err_id = 0; 333 sp->err_id = 0;
710 334
711 /* Generate LU queue on bus, target, LUN */ 335 CMD_SP(cmd) = (void *)sp;
712 b = cmd->device->channel; 336 cmd->scsi_done = done;
713 t = cmd->device->id;
714 l = cmd->device->lun;
715
716 /*
717 * Start Command Timer. Typically it will be 2 seconds less than what
718 * is requested by the Host such that we can return the IO before
719 * aborts are called.
720 */
721 if ((cmd->timeout_per_command / HZ) > QLA_CMD_TIMER_DELTA)
722 qla2x00_add_timer_to_cmd(sp,
723 (cmd->timeout_per_command / HZ) - QLA_CMD_TIMER_DELTA);
724 else
725 qla2x00_add_timer_to_cmd(sp, cmd->timeout_per_command / HZ);
726
727 if (l >= ha->max_luns) {
728 cmd->result = DID_NO_CONNECT << 16;
729 sp->err_id = SRB_ERR_PORT;
730
731 spin_lock_irq(ha->host->host_lock);
732
733 sp_put(ha, sp);
734
735 return (0);
736 }
737
738 if ((tq = (os_tgt_t *) TGT_Q(ha, t)) != NULL &&
739 (lq = (os_lun_t *) LUN_Q(ha, t, l)) != NULL) {
740 fcport = lq->fclun->fcport;
741 ha2 = fcport->ha;
742 } else {
743 lq = NULL;
744 fcport = NULL;
745 ha2 = ha;
746 }
747
748 /* Set an invalid handle until we issue the command to ISP */
749 /* then we will set the real handle value. */
750 handle = INVALID_HANDLE;
751 cmd->host_scribble = (unsigned char *)handle;
752
753 /* Bookkeeping information */
 754 sp->r_start = jiffies; /* Time the request was received. */
755 sp->u_start = 0;
756
757 /* Setup device queue pointers. */
758 sp->tgt_queue = tq;
759 sp->lun_queue = lq;
760
761 /*
762 * NOTE : q is NULL
763 *
764 * 1. When device is added from persistent binding but has not been
 765 * discovered yet. The state of loopid == PORT_AVAIL.
 766 * 2. When device is never found on the bus. (loopid == UNUSED)
767 *
768 * IF Device Queue is not created, or device is not in a valid state
769 * and link down error reporting is enabled, reject IO.
770 */
771 if (fcport == NULL) {
772 DEBUG3(printk("scsi(%ld:%2d:%2d): port unavailable\n",
773 ha->host_no,t,l));
774
775 cmd->result = DID_NO_CONNECT << 16;
776 sp->err_id = SRB_ERR_PORT;
777 337
778 spin_lock_irq(ha->host->host_lock); 338 rval = qla2x00_start_scsi(sp);
339 if (rval != QLA_SUCCESS)
340 goto qc_host_busy_free_sp;
779 341
780 sp_put(ha, sp); 342 /* Manage unprocessed RIO/ZIO commands in response queue. */
343 if (ha->flags.online && ha->flags.process_response_queue &&
344 ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
345 unsigned long flags;
781 346
782 return (0); 347 spin_lock_irqsave(&ha->hardware_lock, flags);
348 qla2x00_process_response_queue(ha);
349 spin_unlock_irqrestore(&ha->hardware_lock, flags);
783 } 350 }
784 351
785 /* Only modify the allowed count if the target is a *non* tape device */ 352 spin_lock_irq(ha->host->host_lock);
786 if ((fcport->flags & FCF_TAPE_PRESENT) == 0) {
787 sp->flags &= ~SRB_TAPE;
788 if (cmd->allowed < ql2xretrycount) {
789 cmd->allowed = ql2xretrycount;
790 }
791 } else
792 sp->flags |= SRB_TAPE;
793
794 DEBUG5(printk("scsi(%ld:%2d:%2d): (queuecmd) queue sp = %p, "
795 "flags=0x%x fo retry=%d, pid=%ld\n",
796 ha->host_no, t, l, sp, sp->flags, sp->fo_retry_cnt,
797 cmd->serial_number));
798 DEBUG5(qla2x00_print_scsi_cmd(cmd));
799
800 sp->fclun = lq->fclun;
801 sp->ha = ha2;
802
803 if (cmd->sc_data_direction == DMA_BIDIRECTIONAL &&
804 cmd->request_bufflen != 0) {
805
806 DEBUG2(printk(KERN_WARNING
807 "scsi(%ld): Incorrect data direction - transfer "
808 "length=%d, direction=%d, pid=%ld, opcode=%x\n",
809 ha->host_no, cmd->request_bufflen, cmd->sc_data_direction,
810 cmd->serial_number, cmd->cmnd[0]));
811 }
812 353
813 /* Final pre-check : 354 return 0;
814 *
815 * Either PORT_DOWN_TIMER OR LINK_DOWN_TIMER Expired.
816 */
817 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
818 atomic_read(&ha2->loop_state) == LOOP_DEAD) {
819 /*
820 * Add the command to the done-queue for later failover
821 * processing.
822 */
823 cmd->result = DID_NO_CONNECT << 16;
824 if (atomic_read(&ha2->loop_state) == LOOP_DOWN)
825 sp->err_id = SRB_ERR_LOOP;
826 else
827 sp->err_id = SRB_ERR_PORT;
828 355
829 add_to_done_queue(ha, sp); 356qc_host_busy_free_sp:
830 qla2x00_done(ha); 357 qla2x00_sp_free_dma(ha, sp);
358 CMD_SP(cmd) = NULL;
359 mempool_free(sp, ha->srb_mempool);
831 360
832 spin_lock_irq(ha->host->host_lock); 361qc_host_busy_lock:
833 return (0); 362 spin_lock_irq(ha->host->host_lock);
834 }
835 363
836 if (tq && test_bit(TQF_SUSPENDED, &tq->flags) && 364qc_host_busy:
837 (sp->flags & SRB_TAPE) == 0) { 365 return SCSI_MLQUEUE_HOST_BUSY;
838 /* If target suspended put incoming I/O in retry_q. */
839 qla2x00_extend_timeout(sp->cmd, 10);
840 add_to_scsi_retry_queue(ha, sp);
841 } else
842 was_empty = add_to_pending_queue(ha, sp);
843
844 if ((IS_QLA2100(ha) || IS_QLA2200(ha)) && ha->flags.online) {
845 if (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
846 unsigned long flags;
847
848 spin_lock_irqsave(&ha->hardware_lock, flags);
849 qla2x00_process_response_queue(ha);
850 spin_unlock_irqrestore(&ha->hardware_lock, flags);
851 }
852 }
853 366
854 /* We submit to the hardware if: 367qc_fail_command:
855 * 368 done(cmd);
 856 * 1) we're on the CPU the IRQs arrive on, or
 857 * 2) there are very few I/Os outstanding.
858 *
859 * In all other cases we'll let an irq pick up our IO and submit it
860 * to the controller to improve affinity.
861 */
862 if (_smp_processor_id() == ha->last_irq_cpu || was_empty)
863 qla2x00_next(ha);
864 369
865 spin_lock_irq(ha->host->host_lock); 370 return 0;
866
867 return (0);
868} 371}
869 372
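The rewritten queuecommand drops all of the per-lun queue bookkeeping in favour of a straight-line submit with unwind labels: a dead port completes the command immediately with DID_NO_CONNECT, and every transient failure pushes back on the midlayer with SCSI_MLQUEUE_HOST_BUSY. A reduced sketch of that control flow; the predicate and submit helpers are hypothetical stand-ins, not driver functions:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Hypothetical stand-ins for the real port-state checks and HW submit. */
static int port_is_gone(struct scsi_cmnd *cmd) { return 0; }
static int submit_to_hw(struct scsi_cmnd *cmd) { return 0; }

static int queuecommand_shape(struct scsi_cmnd *cmd,
    void (*done)(struct scsi_cmnd *))
{
	if (port_is_gone(cmd)) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc_fail_command;
	}
	if (submit_to_hw(cmd) != 0)
		goto qc_host_busy;

	return 0;			/* accepted; done() runs at completion */

qc_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer requeues and retries */

qc_fail_command:
	done(cmd);			/* complete immediately */
	return 0;
}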
870/* 373/*
@@ -886,54 +389,21 @@ static int
886qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd) 389qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
887{ 390{
888#define ABORT_POLLING_PERIOD HZ 391#define ABORT_POLLING_PERIOD HZ
889#define ABORT_WAIT_TIME ((10 * HZ) / (ABORT_POLLING_PERIOD)) 392#define ABORT_WAIT_ITER ((10 * HZ) / (ABORT_POLLING_PERIOD))
890 393 unsigned long wait_iter = ABORT_WAIT_ITER;
891 int found = 0; 394 int ret = QLA_SUCCESS;
892 int done = 0;
893 srb_t *rp = NULL;
894 struct list_head *list, *temp;
895 u_long max_wait_time = ABORT_WAIT_TIME;
896
897 do {
898 /* Check on done queue */
899 spin_lock(&ha->list_lock);
900 list_for_each_safe(list, temp, &ha->done_queue) {
901 rp = list_entry(list, srb_t, list);
902
903 /*
904 * Found command. Just exit and wait for the cmd sent
905 * to OS.
906 */
907 if (cmd == rp->cmd) {
908 found++;
909 DEBUG3(printk("%s: found in done queue.\n",
910 __func__);)
911 break;
912 }
913 }
914 spin_unlock(&ha->list_lock);
915
916 /* Complete the cmd right away. */
917 if (found) {
918 qla2x00_delete_from_done_queue(ha, rp);
919 sp_put(ha, rp);
920 done++;
921 break;
922 }
923
924 spin_unlock_irq(ha->host->host_lock);
925 395
396 while (CMD_SP(cmd)) {
926 set_current_state(TASK_UNINTERRUPTIBLE); 397 set_current_state(TASK_UNINTERRUPTIBLE);
927 schedule_timeout(ABORT_POLLING_PERIOD); 398 schedule_timeout(ABORT_POLLING_PERIOD);
928 399
 929 spin_lock_irq(ha->host->host_lock); 400 if (!--wait_iter)
930 401 break;
931 } while ((max_wait_time--)); 402 }
932 403 if (CMD_SP(cmd))
933 if (done) 404 ret = QLA_FUNCTION_FAILED;
934 DEBUG2(printk(KERN_INFO "%s: found cmd=%p.\n", __func__, cmd));
935 405
936 return (done); 406 return ret;
937} 407}
938 408
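The replacement wait is a bounded poll: sleep one ABORT_POLLING_PERIOD per iteration and give up once the iteration budget runs out (hence the !--wait_iter test above, which must break on reaching zero). The idiom in isolation, with hypothetical parameters:

#include <stdbool.h>

/* Sketch: poll *cond up to max_iter times; sleep_fn() stands in for
 * schedule_timeout(ABORT_POLLING_PERIOD). Returns true if *cond cleared. */
static bool wait_bounded(volatile bool *cond, unsigned long max_iter,
    void (*sleep_fn)(void))
{
	while (*cond) {
		sleep_fn();
		if (!--max_iter)
			break;		/* budget exhausted, give up */
	}
	return !*cond;
}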
939/* 409/*
@@ -1032,245 +502,69 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
1032int 502int
1033qla2xxx_eh_abort(struct scsi_cmnd *cmd) 503qla2xxx_eh_abort(struct scsi_cmnd *cmd)
1034{ 504{
1035 int i; 505 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
1036 int return_status = FAILED; 506 srb_t *sp;
1037 os_lun_t *q; 507 int ret, i;
1038 scsi_qla_host_t *ha; 508 unsigned int id, lun;
1039 scsi_qla_host_t *vis_ha; 509 unsigned long serial;
1040 srb_t *sp;
1041 srb_t *rp;
1042 struct list_head *list, *temp;
1043 struct Scsi_Host *host;
1044 uint8_t found = 0;
1045 unsigned int b, t, l;
1046
1047 /* Get the SCSI request ptr */
1048 sp = (srb_t *) CMD_SP(cmd);
1049
1050 /*
1051 * If sp is NULL, command is already returned.
1052 * sp is NULLED just before we call back scsi_done
1053 *
1054 */
1055 if ((sp == NULL)) {
1056 /* no action - we don't have command */
1057 qla_printk(KERN_INFO, to_qla_host(cmd->device->host),
1058 "qla2xxx_eh_abort: cmd already done sp=%p\n", sp);
1059 DEBUG(printk("qla2xxx_eh_abort: cmd already done sp=%p\n", sp);)
1060 return SUCCESS;
1061 }
1062 if (sp) {
1063 DEBUG(printk("qla2xxx_eh_abort: refcount %i \n",
1064 atomic_read(&sp->ref_count));)
1065 }
1066
1067 vis_ha = (scsi_qla_host_t *) cmd->device->host->hostdata;
1068 ha = (scsi_qla_host_t *)cmd->device->host->hostdata;
1069
1070 host = ha->host;
1071
1072 /* Generate LU queue on bus, target, LUN */
1073 b = cmd->device->channel;
1074 t = cmd->device->id;
1075 l = cmd->device->lun;
1076 q = GET_LU_Q(vis_ha, t, l);
1077
1078 qla_printk(KERN_INFO, ha,
1079 "%s scsi(%ld:%d:%d:%d): cmd_timeout_in_sec=0x%x.\n", __func__,
1080 ha->host_no, (int)b, (int)t, (int)l,
1081 cmd->timeout_per_command / HZ);
1082
1083 /*
1084 * if no LUN queue then something is very wrong!!!
1085 */
1086 if (q == NULL) {
1087 qla_printk(KERN_WARNING, ha,
1088 "qla2x00: (%x:%x:%x) No LUN queue.\n", b, t, l);
1089 510
1090 /* no action - we don't have command */ 511 if (!CMD_SP(cmd))
1091 return FAILED; 512 return FAILED;
1092 }
1093 513
1094 DEBUG2(printk("scsi(%ld): ABORTing cmd=%p sp=%p jiffies = 0x%lx, " 514 ret = FAILED;
1095 "timeout=%x, dpc_flags=%lx, vis_ha->dpc_flags=%lx q->flag=%lx\n",
1096 ha->host_no, cmd, sp, jiffies, cmd->timeout_per_command / HZ,
1097 ha->dpc_flags, vis_ha->dpc_flags, q->q_flag));
1098 DEBUG2(qla2x00_print_scsi_cmd(cmd));
1099 515
1100 spin_unlock_irq(ha->host->host_lock); 516 id = cmd->device->id;
1101 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) { 517 lun = cmd->device->lun;
1102 DEBUG2(printk("%s failed:board disabled\n", __func__);) 518 serial = cmd->serial_number;
1103 spin_lock_irq(ha->host->host_lock);
1104 return FAILED;
1105 }
1106 spin_lock_irq(ha->host->host_lock);
1107 519
 1108 /* Search done queue */ 520 /* Check active list for the command. */
1109 spin_lock(&ha->list_lock); 521 spin_unlock_irq(ha->host->host_lock);
1110 list_for_each_safe(list, temp, &ha->done_queue) { 522 spin_lock(&ha->hardware_lock);
1111 rp = list_entry(list, srb_t, list); 523 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
524 sp = ha->outstanding_cmds[i];
1112 525
1113 if (cmd != rp->cmd) 526 if (sp == NULL)
1114 continue; 527 continue;
1115 528
1116 /* 529 if (sp->cmd != cmd)
1117 * Found command.Remove it from done list.
1118 * And proceed to post completion to scsi mid layer.
1119 */
1120 return_status = SUCCESS;
1121 found++;
1122 qla2x00_delete_from_done_queue(ha, sp);
1123
1124 break;
1125 } /* list_for_each_safe() */
1126 spin_unlock(&ha->list_lock);
1127
1128 /*
1129 * Return immediately if the aborted command was already in the done
1130 * queue
1131 */
1132 if (found) {
1133 qla_printk(KERN_INFO, ha,
1134 "qla2xxx_eh_abort: Returning completed command=%p sp=%p\n",
1135 cmd, sp);
1136 sp_put(ha, sp);
1137 return (return_status);
1138 }
1139
1140
1141 /*
1142 * See if this command is in the retry queue
1143 */
1144 DEBUG3(printk("qla2xxx_eh_abort: searching sp %p in retry "
1145 "queue.\n", sp);)
1146
1147 spin_lock(&ha->list_lock);
1148 list_for_each_safe(list, temp, &ha->retry_queue) {
1149 rp = list_entry(list, srb_t, list);
1150
1151 if (cmd != rp->cmd)
1152 continue; 530 continue;
1153 531
532 DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld "
533 "sp->state=%x\n", __func__, ha->host_no, sp, serial,
534 sp->state));
535 DEBUG3(qla2x00_print_scsi_cmd(cmd);)
1154 536
1155 DEBUG2(printk("qla2xxx_eh_abort: found "
1156 "in retry queue. SP=%p\n", sp);)
1157
1158 __del_from_retry_queue(ha, rp);
1159 cmd->result = DID_ABORT << 16;
1160 __add_to_done_queue(ha, rp);
1161
1162 return_status = SUCCESS;
1163 found++;
1164
1165 break;
1166
1167 }
1168 spin_unlock(&ha->list_lock);
1169
1170
1171 /*
1172 * Our SP pointer points at the command we want to remove from the
1173 * pending queue providing we haven't already sent it to the adapter.
1174 */
1175 if (!found) {
1176 DEBUG3(printk("qla2xxx_eh_abort: searching sp %p "
1177 "in pending queue.\n", sp);)
1178
1179 spin_lock(&vis_ha->list_lock);
1180 list_for_each_safe(list, temp, &vis_ha->pending_queue) {
1181 rp = list_entry(list, srb_t, list);
1182
1183 if (rp->cmd != cmd)
1184 continue;
1185
1186 /* Remove srb from LUN queue. */
1187 rp->flags |= SRB_ABORTED;
1188
1189 DEBUG2(printk("qla2xxx_eh_abort: Cmd in pending queue."
1190 " serial_number %ld.\n",
1191 sp->cmd->serial_number);)
1192
1193 __del_from_pending_queue(vis_ha, rp);
1194 cmd->result = DID_ABORT << 16;
1195
1196 __add_to_done_queue(vis_ha, rp);
1197
1198 return_status = SUCCESS;
1199
1200 found++;
1201 break;
1202 } /* list_for_each_safe() */
1203 spin_unlock(&vis_ha->list_lock);
1204 } /*End of if !found */
1205
1206 if (!found) { /* find the command in our active list */
1207 DEBUG3(printk("qla2xxx_eh_abort: searching sp %p "
1208 "in outstanding queue.\n", sp);)
1209
1210 spin_lock(&ha->hardware_lock);
1211 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
1212 sp = ha->outstanding_cmds[i];
1213
1214 if (sp == NULL)
1215 continue;
1216
1217 if (sp->cmd != cmd)
1218 continue;
1219
1220 DEBUG2(printk("qla2xxx_eh_abort(%ld): aborting sp %p "
1221 "from RISC. pid=%ld sp->state=%x q->q_flag=%lx\n",
1222 ha->host_no, sp, sp->cmd->serial_number,
1223 sp->state, q->q_flag);)
1224 DEBUG(qla2x00_print_scsi_cmd(cmd);)
1225
1226 /* Get a reference to the sp and drop the lock.*/
1227 sp_get(ha, sp);
1228
1229 spin_unlock(&ha->hardware_lock);
1230 spin_unlock_irq(ha->host->host_lock);
1231
1232 if (qla2x00_abort_command(ha, sp)) {
1233 DEBUG2(printk("qla2xxx_eh_abort: abort_command "
1234 "mbx failed.\n");)
1235 return_status = FAILED;
1236 } else {
1237 DEBUG3(printk("qla2xxx_eh_abort: abort_command "
1238 " mbx success.\n");)
1239 return_status = SUCCESS;
1240 }
1241
1242 sp_put(ha,sp);
1243
1244 spin_lock_irq(ha->host->host_lock);
1245 spin_lock(&ha->hardware_lock);
1246
1247 /*
1248 * Regardless of mailbox command status, go check on
1249 * done queue just in case the sp is already done.
1250 */
1251 break;
1252
1253 }/*End of for loop */
1254 spin_unlock(&ha->hardware_lock); 537 spin_unlock(&ha->hardware_lock);
538 if (qla2x00_abort_command(ha, sp)) {
539 DEBUG2(printk("%s(%ld): abort_command "
540 "mbx failed.\n", __func__, ha->host_no));
541 } else {
542 DEBUG3(printk("%s(%ld): abort_command "
543 "mbx success.\n", __func__, ha->host_no));
544 ret = SUCCESS;
545 }
546 spin_lock(&ha->hardware_lock);
1255 547
1256 } /*End of if !found */ 548 break;
1257
1258 /* Waiting for our command in done_queue to be returned to OS.*/
1259 if (qla2x00_eh_wait_on_command(ha, cmd) != 0) {
1260 DEBUG2(printk("qla2xxx_eh_abort: cmd returned back to OS.\n");)
1261 return_status = SUCCESS;
1262 } 549 }
1263 550
1264 if (return_status == FAILED) { 551 /* Wait for the command to be returned. */
1265 qla_printk(KERN_INFO, ha, 552 if (ret == SUCCESS) {
1266 "qla2xxx_eh_abort Exiting: status=Failed\n"); 553 spin_unlock(&ha->hardware_lock);
1267 return FAILED; 554 if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) {
555 qla_printk(KERN_ERR, ha,
556 "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
557 "%x.\n", ha->host_no, id, lun, serial, ret);
558 }
559 spin_lock(&ha->hardware_lock);
1268 } 560 }
561 spin_lock_irq(ha->host->host_lock);
1269 562
1270 DEBUG2(printk("qla2xxx_eh_abort: Exiting. return_status=0x%x.\n", 563 qla_printk(KERN_INFO, ha,
1271 return_status)); 564 "scsi(%ld:%d:%d): Abort command issued -- %lx %x.\n", ha->host_no,
565 id, lun, serial, ret);
1272 566
1273 return return_status; 567 return ret;
1274} 568}
1275 569
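The new abort handler walks the fixed outstanding_cmds[] table under hardware_lock, then drops the lock around the sleeping mailbox call and re-takes it afterwards, since a mailbox command cannot be issued while holding a spinlock. The lock-drop pattern, sketched generically with hypothetical names:

#include <linux/spinlock.h>

/* Sketch: find an entry under a spinlock, drop the lock for a blocking
 * operation on it, re-take the lock before finishing. slow_abort() is a
 * hypothetical stand-in for qla2x00_abort_command(). */
static int abort_one(spinlock_t *lock, void **slots, int nslots,
    void *target, int (*slow_abort)(void *))
{
	int i, ret = -1;

	spin_lock(lock);
	for (i = 0; i < nslots; i++) {
		if (slots[i] != target)
			continue;
		spin_unlock(lock);	/* must not sleep under a spinlock */
		ret = slow_abort(target);
		spin_lock(lock);
		break;
	}
	spin_unlock(lock);
	return ret;
}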
1276/************************************************************************** 570/**************************************************************************
@@ -1313,8 +607,7 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
1313 break; 607 break;
1314 } 608 }
1315 } 609 }
1316 } 610 } else {
1317 else {
1318 spin_unlock(&ha->hardware_lock); 611 spin_unlock(&ha->hardware_lock);
1319 } 612 }
1320 } 613 }
@@ -1344,96 +637,42 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
1344int 637int
1345qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) 638qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
1346{ 639{
1347 int return_status; 640 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
1348 unsigned int b, t, l; 641 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
1349 scsi_qla_host_t *ha; 642 srb_t *sp;
1350 os_tgt_t *tq; 643 int ret;
1351 os_lun_t *lq; 644 unsigned int id, lun;
1352 fc_port_t *fcport_to_reset; 645 unsigned long serial;
1353 srb_t *rp;
1354 struct list_head *list, *temp;
1355
1356 return_status = FAILED;
1357 if (cmd == NULL) {
1358 printk(KERN_INFO
1359 "%s(): **** SCSI mid-layer passing in NULL cmd\n",
1360 __func__);
1361
1362 return (return_status);
1363 }
1364
1365 b = cmd->device->channel;
1366 t = cmd->device->id;
1367 l = cmd->device->lun;
1368 ha = (scsi_qla_host_t *)cmd->device->host->hostdata;
1369
1370 tq = TGT_Q(ha, t);
1371 if (tq == NULL) {
1372 qla_printk(KERN_INFO, ha,
1373 "%s(): **** CMD derives a NULL TGT_Q\n", __func__);
1374 646
1375 return (return_status); 647 ret = FAILED;
1376 }
1377 lq = (os_lun_t *)LUN_Q(ha, t, l);
1378 if (lq == NULL) {
1379 printk(KERN_INFO
1380 "%s(): **** CMD derives a NULL LUN_Q\n", __func__);
1381 648
1382 return (return_status); 649 id = cmd->device->id;
1383 } 650 lun = cmd->device->lun;
1384 fcport_to_reset = lq->fclun->fcport; 651 serial = cmd->serial_number;
1385 652
1386 /* If we are coming in from the back-door, stall I/O until complete. */ 653 sp = (srb_t *) CMD_SP(cmd);
1387 if (!cmd->device->host->eh_active) 654 if (!sp || !fcport)
1388 set_bit(TQF_SUSPENDED, &tq->flags); 655 return ret;
1389 656
1390 qla_printk(KERN_INFO, ha, 657 qla_printk(KERN_INFO, ha,
1391 "scsi(%ld:%d:%d:%d): DEVICE RESET ISSUED.\n", ha->host_no, b, t, l); 658 "scsi(%ld:%d:%d): DEVICE RESET ISSUED.\n", ha->host_no, id, lun);
1392
1393 DEBUG2(printk(KERN_INFO
1394 "scsi(%ld): DEVICE_RESET cmd=%p jiffies = 0x%lx, timeout=%x, "
1395 "dpc_flags=%lx, status=%x allowed=%d cmd.state=%x\n",
1396 ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ,
1397 ha->dpc_flags, cmd->result, cmd->allowed, cmd->state));
1398
1399 /* Clear commands from the retry queue. */
1400 spin_lock(&ha->list_lock);
1401 list_for_each_safe(list, temp, &ha->retry_queue) {
1402 rp = list_entry(list, srb_t, list);
1403
1404 if (t != rp->cmd->device->id)
1405 continue;
1406
1407 DEBUG2(printk(KERN_INFO
1408 "qla2xxx_eh_reset: found in retry queue. SP=%p\n", rp));
1409
1410 __del_from_retry_queue(ha, rp);
1411 rp->cmd->result = DID_RESET << 16;
1412 __add_to_done_queue(ha, rp);
1413 }
1414 spin_unlock(&ha->list_lock);
1415 659
1416 spin_unlock_irq(ha->host->host_lock); 660 spin_unlock_irq(ha->host->host_lock);
1417 661
1418 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) { 662 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) {
1419 DEBUG2(printk(KERN_INFO
1420 "%s failed:board disabled\n",__func__));
1421
1422 spin_lock_irq(ha->host->host_lock); 663 spin_lock_irq(ha->host->host_lock);
1423 goto eh_dev_reset_done; 664 goto eh_dev_reset_done;
1424 } 665 }
1425 666
1426 if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) { 667 if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) {
1427 if (qla2x00_device_reset(ha, fcport_to_reset) == 0) { 668 if (qla2x00_device_reset(ha, fcport) == 0)
1428 return_status = SUCCESS; 669 ret = SUCCESS;
1429 }
1430 670
1431#if defined(LOGOUT_AFTER_DEVICE_RESET) 671#if defined(LOGOUT_AFTER_DEVICE_RESET)
1432 if (return_status == SUCCESS) { 672 if (ret == SUCCESS) {
1433 if (fcport_to_reset->flags & FC_FABRIC_DEVICE) { 673 if (fcport->flags & FC_FABRIC_DEVICE) {
1434 qla2x00_fabric_logout(ha, 674 qla2x00_fabric_logout(ha, fcport->loop_id);
1435 fcport_to_reset->loop_id); 675 qla2x00_mark_device_lost(ha, fcport);
1436 qla2x00_mark_device_lost(ha, fcport_to_reset);
1437 } 676 }
1438 } 677 }
1439#endif 678#endif
@@ -1442,9 +681,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
1442 "%s failed: loop not ready\n",__func__);) 681 "%s failed: loop not ready\n",__func__);)
1443 } 682 }
1444 683
1445 spin_lock_irq(ha->host->host_lock); 684 if (ret == FAILED) {
1446
1447 if (return_status == FAILED) {
1448 DEBUG3(printk("%s(%ld): device reset failed\n", 685 DEBUG3(printk("%s(%ld): device reset failed\n",
1449 __func__, ha->host_no)); 686 __func__, ha->host_no));
1450 qla_printk(KERN_INFO, ha, "%s: device reset failed\n", 687 qla_printk(KERN_INFO, ha, "%s: device reset failed\n",
@@ -1458,10 +695,10 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
1458 * complete for the device. 695 * complete for the device.
1459 */ 696 */
1460 if (cmd->device->host->eh_active) { 697 if (cmd->device->host->eh_active) {
1461 if (qla2x00_eh_wait_for_pending_target_commands(ha, t)) 698 if (qla2x00_eh_wait_for_pending_target_commands(ha, id))
1462 return_status = FAILED; 699 ret = FAILED;
1463 700
1464 if (return_status == FAILED) { 701 if (ret == FAILED) {
1465 DEBUG3(printk("%s(%ld): failed while waiting for " 702 DEBUG3(printk("%s(%ld): failed while waiting for "
1466 "commands\n", __func__, ha->host_no)); 703 "commands\n", __func__, ha->host_no));
1467 qla_printk(KERN_INFO, ha, 704 qla_printk(KERN_INFO, ha,
@@ -1473,15 +710,12 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
1473 } 710 }
1474 711
1475 qla_printk(KERN_INFO, ha, 712 qla_printk(KERN_INFO, ha,
1476 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n", 713 "scsi(%ld:%d:%d): DEVICE RESET SUCCEEDED.\n", ha->host_no, id, lun);
1477 ha->host_no, b, t, l);
1478 714
1479eh_dev_reset_done: 715eh_dev_reset_done:
716 spin_lock_irq(ha->host->host_lock);
1480 717
1481 if (!cmd->device->host->eh_active) 718 return ret;
1482 clear_bit(TQF_SUSPENDED, &tq->flags);
1483
1484 return (return_status);
1485} 719}
1486 720
1487/************************************************************************** 721/**************************************************************************
@@ -1549,44 +783,52 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha)
1549int 783int
1550qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) 784qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1551{ 785{
1552 scsi_qla_host_t *ha; 786 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
787 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
1553 srb_t *sp; 788 srb_t *sp;
1554 int rval = FAILED; 789 int ret;
790 unsigned int id, lun;
791 unsigned long serial;
792
793 ret = FAILED;
794
795 id = cmd->device->id;
796 lun = cmd->device->lun;
797 serial = cmd->serial_number;
1555 798
1556 ha = (scsi_qla_host_t *) cmd->device->host->hostdata;
1557 sp = (srb_t *) CMD_SP(cmd); 799 sp = (srb_t *) CMD_SP(cmd);
800 if (!sp || !fcport)
801 return ret;
1558 802
1559 qla_printk(KERN_INFO, ha, 803 qla_printk(KERN_INFO, ha,
1560 "scsi(%ld:%d:%d:%d): LOOP RESET ISSUED.\n", ha->host_no, 804 "scsi(%ld:%d:%d): LOOP RESET ISSUED.\n", ha->host_no, id, lun);
1561 cmd->device->channel, cmd->device->id, cmd->device->lun);
1562 805
1563 spin_unlock_irq(ha->host->host_lock); 806 spin_unlock_irq(ha->host->host_lock);
1564 807
1565 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) { 808 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) {
1566 DEBUG2(printk("%s failed:board disabled\n",__func__)); 809 DEBUG2(printk("%s failed:board disabled\n",__func__));
1567 spin_lock_irq(ha->host->host_lock); 810 goto eh_bus_reset_done;
1568 return FAILED;
1569 } 811 }
1570 812
1571 if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) { 813 if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) {
1572 if (qla2x00_loop_reset(ha) == QLA_SUCCESS) 814 if (qla2x00_loop_reset(ha) == QLA_SUCCESS)
1573 rval = SUCCESS; 815 ret = SUCCESS;
1574 } 816 }
1575 817 if (ret == FAILED)
1576 spin_lock_irq(ha->host->host_lock); 818 goto eh_bus_reset_done;
1577 if (rval == FAILED)
1578 goto out;
1579 819
 1580 /* Waiting for our command in done_queue to be returned to the OS. */ 820 /* Waiting for our command in done_queue to be returned to the OS. */
1581 if (cmd->device->host->eh_active) 821 if (cmd->device->host->eh_active)
1582 if (!qla2x00_eh_wait_for_pending_commands(ha)) 822 if (!qla2x00_eh_wait_for_pending_commands(ha))
1583 rval = FAILED; 823 ret = FAILED;
1584 824
1585 out: 825eh_bus_reset_done:
1586 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 826 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
 1587 (rval == FAILED) ? "failed" : "succeeded"); 827 (ret == FAILED) ? "failed" : "succeeded");
1588 828
1589 return rval; 829 spin_lock_irq(ha->host->host_lock);
830
831 return ret;
1590} 832}
1591 833
1592/************************************************************************** 834/**************************************************************************
@@ -1607,24 +849,30 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1607int 849int
1608qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) 850qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1609{ 851{
1610 scsi_qla_host_t *ha = (scsi_qla_host_t *)cmd->device->host->hostdata; 852 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
1611 int rval = SUCCESS; 853 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
854 srb_t *sp;
855 int ret;
856 unsigned int id, lun;
857 unsigned long serial;
1612 858
1613 /* Display which one we're actually resetting for debug. */ 859 ret = FAILED;
1614 DEBUG(printk("qla2xxx_eh_host_reset:Resetting scsi(%ld).\n", 860
1615 ha->host_no)); 861 id = cmd->device->id;
862 lun = cmd->device->lun;
863 serial = cmd->serial_number;
864
865 sp = (srb_t *) CMD_SP(cmd);
866 if (!sp || !fcport)
867 return ret;
1616 868
1617 /*
1618 * Now issue reset.
1619 */
1620 qla_printk(KERN_INFO, ha, 869 qla_printk(KERN_INFO, ha,
1621 "scsi(%ld:%d:%d:%d): ADAPTER RESET issued.\n", ha->host_no, 870 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, id, lun);
1622 cmd->device->channel, cmd->device->id, cmd->device->lun);
1623 871
1624 spin_unlock_irq(ha->host->host_lock); 872 spin_unlock_irq(ha->host->host_lock);
1625 873
1626 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 874 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
1627 goto board_disabled; 875 goto eh_host_reset_lock;
1628 876
1629 /* 877 /*
1630 * Fixme-may be dpc thread is active and processing 878 * Fixme-may be dpc thread is active and processing
@@ -1634,7 +882,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1634 * devices as lost kicking of the port_down_timer 882 * devices as lost kicking of the port_down_timer
1635 * while dpc is stuck for the mailbox to complete. 883 * while dpc is stuck for the mailbox to complete.
1636 */ 884 */
1637 /* Blocking call-Does context switching if loop is Not Ready */
1638 qla2x00_wait_for_loop_ready(ha); 885 qla2x00_wait_for_loop_ready(ha);
1639 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 886 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
1640 if (qla2x00_abort_isp(ha)) { 887 if (qla2x00_abort_isp(ha)) {
@@ -1643,32 +890,22 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1643 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 890 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1644 891
1645 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 892 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
1646 goto board_disabled; 893 goto eh_host_reset_lock;
1647 } 894 }
1648
1649 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 895 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
1650 896
1651 spin_lock_irq(ha->host->host_lock);
1652 if (rval == FAILED)
1653 goto out;
1654
 1655 /* Waiting for our command in done_queue to be returned to the OS. */ 897 /* Waiting for our command in done_queue to be returned to the OS. */
1656 if (!qla2x00_eh_wait_for_pending_commands(ha)) 898 if (qla2x00_eh_wait_for_pending_commands(ha))
1657 rval = FAILED; 899 ret = SUCCESS;
1658
1659 out:
1660 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
1661 (rval == FAILED) ? "failed" : "succeded");
1662
1663 return rval;
1664 900
1665 board_disabled: 901eh_host_reset_lock:
1666 spin_lock_irq(ha->host->host_lock); 902 spin_lock_irq(ha->host->host_lock);
1667 903
1668 qla_printk(KERN_INFO, ha, "%s: failed:board disabled\n", __func__); 904 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
1669 return FAILED; 905 (ret == FAILED) ? "failed" : "succeeded");
1670}
1671 906
907 return ret;
908}
1672 909
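The rewritten handler drops host_lock before its blocking waits and retakes it on every exit path; the midlayer of this era invokes the eh_* entry points with that lock held, which is why the unlock/lock pair brackets the body above. A minimal sketch of the shape, with a hypothetical helper standing in for qla2x00_wait_for_hba_online():

static int example_eh_handler(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *shost = cmd->device->host;
	int ret = FAILED;

	spin_unlock_irq(shost->host_lock);	/* about to sleep */

	if (example_wait_for_hba(shost) == 0)	/* hypothetical blocking wait */
		ret = SUCCESS;

	spin_lock_irq(shost->host_lock);	/* retaken on the single exit */
	return ret;
}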
1673/* 910/*
1674* qla2x00_loop_reset 911* qla2x00_loop_reset
@@ -1684,25 +921,20 @@ static int
1684qla2x00_loop_reset(scsi_qla_host_t *ha) 921qla2x00_loop_reset(scsi_qla_host_t *ha)
1685{ 922{
1686 int status = QLA_SUCCESS; 923 int status = QLA_SUCCESS;
1687 uint16_t t; 924 struct fc_port *fcport;
1688 os_tgt_t *tq;
1689 925
1690 if (ha->flags.enable_lip_reset) { 926 if (ha->flags.enable_lip_reset) {
1691 status = qla2x00_lip_reset(ha); 927 status = qla2x00_lip_reset(ha);
1692 } 928 }
1693 929
1694 if (status == QLA_SUCCESS && ha->flags.enable_target_reset) { 930 if (status == QLA_SUCCESS && ha->flags.enable_target_reset) {
1695 for (t = 0; t < MAX_FIBRE_DEVICES; t++) { 931 list_for_each_entry(fcport, &ha->fcports, list) {
1696 if ((tq = TGT_Q(ha, t)) == NULL) 932 if (fcport->port_type != FCT_TARGET)
1697 continue;
1698
1699 if (tq->fcport == NULL)
1700 continue; 933 continue;
1701 934
1702 status = qla2x00_target_reset(ha, 0, t); 935 status = qla2x00_target_reset(ha, fcport);
1703 if (status != QLA_SUCCESS) { 936 if (status != QLA_SUCCESS)
1704 break; 937 break;
1705 }
1706 } 938 }
1707 } 939 }
1708 940
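list_for_each_entry() above walks the fcport list by way of a struct list_head embedded in each fc_port_t; the third argument names that member. The idiom in isolation, with an illustrative type:

struct example_port {
	struct list_head list;		/* linkage within the ports list */
	int port_type;
};

static int example_count_targets(struct list_head *ports)
{
	struct example_port *p;
	int n = 0;

	list_for_each_entry(p, ports, list)	/* 'list' = member name */
		if (p->port_type == FCT_TARGET)
			n++;
	return n;
}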
@@ -1752,41 +984,53 @@ qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport)
1752 return qla2x00_abort_target(reset_fcport); 984 return qla2x00_abort_target(reset_fcport);
1753} 985}
1754 986
1755/************************************************************************** 987static int
1756* qla2xxx_slave_configure 988qla2xxx_slave_alloc(struct scsi_device *sdev)
1757*
1758* Description:
1759**************************************************************************/
1760int
1761qla2xxx_slave_configure(struct scsi_device *sdev)
1762{ 989{
1763 scsi_qla_host_t *ha = to_qla_host(sdev->host); 990 scsi_qla_host_t *ha = to_qla_host(sdev->host);
1764 int queue_depth; 991 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
992 fc_port_t *fcport;
993 int found;
1765 994
1766 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 995 if (!rport)
1767 queue_depth = 16; 996 return -ENXIO;
1768 else
1769 queue_depth = 32;
1770 997
1771 if (sdev->tagged_supported) { 998 found = 0;
1772 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) 999 list_for_each_entry(fcport, &ha->fcports, list) {
1773 queue_depth = ql2xmaxqdepth; 1000 if (rport->port_name ==
1001 be64_to_cpu(*(uint64_t *)fcport->port_name)) {
1002 found++;
1003 break;
1004 }
1005 }
1006 if (!found)
1007 return -ENXIO;
1774 1008
1775 ql2xmaxqdepth = queue_depth; 1009 sdev->hostdata = fcport;
1776 1010
1777 scsi_activate_tcq(sdev, queue_depth); 1011 return 0;
1012}
1778 1013
1779 qla_printk(KERN_INFO, ha, 1014static int
1780 "scsi(%d:%d:%d:%d): Enabled tagged queuing, queue " 1015qla2xxx_slave_configure(struct scsi_device *sdev)
1781 "depth %d.\n", 1016{
1782 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun, 1017 scsi_qla_host_t *ha = to_qla_host(sdev->host);
1783 sdev->queue_depth); 1018 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1784 } else {
1785 scsi_adjust_queue_depth(sdev, 0 /* TCQ off */,
1786 sdev->host->hostt->cmd_per_lun /* 3 */);
1787 }
1788 1019
1789 return (0); 1020 if (sdev->tagged_supported)
1021 scsi_activate_tcq(sdev, 32);
1022 else
1023 scsi_deactivate_tcq(sdev, 32);
1024
1025 rport->dev_loss_tmo = ha->port_down_retry_count + 5;
1026
1027 return 0;
1028}
1029
1030static void
1031qla2xxx_slave_destroy(struct scsi_device *sdev)
1032{
1033 sdev->hostdata = NULL;
1790} 1034}
1791 1035
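The match in qla2xxx_slave_alloc() compares the transport's host-order u64 rport->port_name against the driver's 8-byte wire-order name array, hence the be64_to_cpu() over the raw bytes. The conversion in isolation (helper name is illustrative; it assumes the array is 8-byte aligned, as it is inside fc_port_t):

static inline u64 example_wwn_to_u64(const uint8_t name[8])
{
	/* the array stores the WWN big-endian, as it appears on the wire */
	return be64_to_cpu(*(const uint64_t *)name);
}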
1792/** 1036/**
@@ -1912,6 +1156,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1912 unsigned long wait_switch = 0; 1156 unsigned long wait_switch = 0;
1913 char pci_info[20]; 1157 char pci_info[20];
1914 char fw_str[30]; 1158 char fw_str[30];
1159 fc_port_t *fcport;
1915 1160
1916 if (pci_enable_device(pdev)) 1161 if (pci_enable_device(pdev))
1917 return -1; 1162 return -1;
@@ -1937,7 +1182,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1937 /* Configure PCI I/O space */ 1182 /* Configure PCI I/O space */
1938 ret = qla2x00_iospace_config(ha); 1183 ret = qla2x00_iospace_config(ha);
1939 if (ret != 0) { 1184 if (ret != 0) {
1940 goto probe_failed; 1185 goto probe_alloc_failed;
1941 } 1186 }
1942 1187
1943 /* Sanitize the information from PCI BIOS. */ 1188 /* Sanitize the information from PCI BIOS. */
@@ -1993,10 +1238,6 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1993 INIT_LIST_HEAD(&ha->list); 1238 INIT_LIST_HEAD(&ha->list);
1994 INIT_LIST_HEAD(&ha->fcports); 1239 INIT_LIST_HEAD(&ha->fcports);
1995 INIT_LIST_HEAD(&ha->rscn_fcports); 1240 INIT_LIST_HEAD(&ha->rscn_fcports);
1996 INIT_LIST_HEAD(&ha->done_queue);
1997 INIT_LIST_HEAD(&ha->retry_queue);
1998 INIT_LIST_HEAD(&ha->scsi_retry_queue);
1999 INIT_LIST_HEAD(&ha->pending_queue);
2000 1241
2001 /* 1242 /*
2002 * These locks are used to prevent more than one CPU 1243 * These locks are used to prevent more than one CPU
@@ -2005,7 +1246,6 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
2005 * contention for these locks. 1246 * contention for these locks.
2006 */ 1247 */
2007 spin_lock_init(&ha->mbx_reg_lock); 1248 spin_lock_init(&ha->mbx_reg_lock);
2008 spin_lock_init(&ha->list_lock);
2009 1249
2010 ha->dpc_pid = -1; 1250 ha->dpc_pid = -1;
2011 init_completion(&ha->dpc_inited); 1251 init_completion(&ha->dpc_inited);
@@ -2016,9 +1256,23 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
2016 qla_printk(KERN_WARNING, ha, 1256 qla_printk(KERN_WARNING, ha,
2017 "[ERROR] Failed to allocate memory for adapter\n"); 1257 "[ERROR] Failed to allocate memory for adapter\n");
2018 1258
2019 goto probe_failed; 1259 goto probe_alloc_failed;
2020 } 1260 }
2021 1261
1262 pci_set_drvdata(pdev, ha);
1263 host->this_id = 255;
1264 host->cmd_per_lun = 3;
1265 host->unique_id = ha->instance;
1266 host->max_cmd_len = MAX_CMDSZ;
1267 host->max_channel = ha->ports - 1;
1268 host->max_id = ha->max_targets;
1269 host->max_lun = ha->max_luns;
1270 host->transportt = qla2xxx_transport_template;
1271 if (scsi_add_host(host, &pdev->dev))
1272 goto probe_alloc_failed;
1273
1274 qla2x00_alloc_sysfs_attr(ha);
1275
2022 if (qla2x00_initialize_adapter(ha) && 1276 if (qla2x00_initialize_adapter(ha) &&
2023 !(ha->device_flags & DFLG_NO_CABLE)) { 1277 !(ha->device_flags & DFLG_NO_CABLE)) {
2024 1278
@@ -2032,6 +1286,8 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
2032 goto probe_failed; 1286 goto probe_failed;
2033 } 1287 }
2034 1288
1289 qla2x00_init_host_attr(ha);
1290
2035 /* 1291 /*
2036 * Startup the kernel thread for this host adapter 1292 * Startup the kernel thread for this host adapter
2037 */ 1293 */
@@ -2045,16 +1301,6 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
2045 } 1301 }
2046 wait_for_completion(&ha->dpc_inited); 1302 wait_for_completion(&ha->dpc_inited);
2047 1303
2048 host->this_id = 255;
2049 host->cmd_per_lun = 3;
2050 host->max_cmd_len = MAX_CMDSZ;
2051 host->max_channel = ha->ports - 1;
2052 host->max_lun = ha->max_luns;
2053 BUG_ON(qla2xxx_transport_template == NULL);
2054 host->transportt = qla2xxx_transport_template;
2055 host->unique_id = ha->instance;
2056 host->max_id = ha->max_targets;
2057
2058 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1304 if (IS_QLA2100(ha) || IS_QLA2200(ha))
2059 ret = request_irq(host->irq, qla2100_intr_handler, 1305 ret = request_irq(host->irq, qla2100_intr_handler,
2060 SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha); 1306 SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
@@ -2115,21 +1361,9 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
2115 msleep(10); 1361 msleep(10);
2116 } 1362 }
2117 1363
2118 pci_set_drvdata(pdev, ha);
2119 ha->flags.init_done = 1; 1364 ha->flags.init_done = 1;
2120 num_hosts++; 1365 num_hosts++;
2121 1366
2122 /* List the target we have found */
2123 if (displayConfig) {
2124 qla2x00_display_fc_names(ha);
2125 }
2126
2127 if (scsi_add_host(host, &pdev->dev))
2128 goto probe_failed;
2129
2130 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr);
2131 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
2132
2133 qla_printk(KERN_INFO, ha, "\n" 1367 qla_printk(KERN_INFO, ha, "\n"
2134 " QLogic Fibre Channel HBA Driver: %s\n" 1368 " QLogic Fibre Channel HBA Driver: %s\n"
2135 " QLogic %s - %s\n" 1369 " QLogic %s - %s\n"
@@ -2139,12 +1373,18 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
2139 pci_name(ha->pdev), ha->flags.enable_64bit_addressing ? '+': '-', 1373 pci_name(ha->pdev), ha->flags.enable_64bit_addressing ? '+': '-',
2140 ha->host_no, qla2x00_get_fw_version_str(ha, fw_str)); 1374 ha->host_no, qla2x00_get_fw_version_str(ha, fw_str));
2141 1375
2142 if (ql2xdoinitscan) 1376 /* Go with fc_rport registration. */
2143 scsi_scan_host(host); 1377 list_for_each_entry(fcport, &ha->fcports, list)
1378 qla2x00_reg_remote_port(ha, fcport);
2144 1379
2145 return 0; 1380 return 0;
2146 1381
2147probe_failed: 1382probe_failed:
1383 fc_remove_host(ha->host);
1384
1385 scsi_remove_host(host);
1386
1387probe_alloc_failed:
2148 qla2x00_free_device(ha); 1388 qla2x00_free_device(ha);
2149 1389
2150 scsi_host_put(host); 1390 scsi_host_put(host);
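The probe_failed/probe_alloc_failed pair introduced above is the usual staged-unwind pattern: failures before scsi_add_host() skip the host teardown, later failures do both. Skeleton form with hypothetical helpers:

static int example_probe(struct pci_dev *pdev)
{
	if (example_setup_resources(pdev))	/* hypothetical */
		goto probe_alloc_failed;
	if (example_add_host(pdev))		/* hypothetical scsi_add_host step */
		goto probe_alloc_failed;
	if (example_start_adapter(pdev))	/* hypothetical */
		goto probe_failed;
	return 0;

probe_failed:				/* host registered: tear it down too */
	example_remove_host(pdev);
probe_alloc_failed:			/* only driver resources to release */
	example_free_resources(pdev);
	return -ENODEV;
}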
@@ -2162,9 +1402,9 @@ void qla2x00_remove_one(struct pci_dev *pdev)
2162 1402
2163 ha = pci_get_drvdata(pdev); 1403 ha = pci_get_drvdata(pdev);
2164 1404
2165 sysfs_remove_bin_file(&ha->host->shost_gendev.kobj, 1405 qla2x00_free_sysfs_attr(ha);
2166 &sysfs_fw_dump_attr); 1406
2167 sysfs_remove_bin_file(&ha->host->shost_gendev.kobj, &sysfs_nvram_attr); 1407 fc_remove_host(ha->host);
2168 1408
2169 scsi_remove_host(ha->host); 1409 scsi_remove_host(ha->host);
2170 1410
@@ -2225,500 +1465,6 @@ qla2x00_free_device(scsi_qla_host_t *ha)
2225 pci_disable_device(ha->pdev); 1465 pci_disable_device(ha->pdev);
2226} 1466}
2227 1467
2228
2229/*
2230 * The following support functions are adapted to handle
2231 * the re-entrant qla2x00_proc_info correctly.
2232 */
2233static void
2234copy_mem_info(struct info_str *info, char *data, int len)
2235{
2236 if (info->pos + len > info->offset + info->length)
2237 len = info->offset + info->length - info->pos;
2238
2239 if (info->pos + len < info->offset) {
2240 info->pos += len;
2241 return;
2242 }
2243
2244 if (info->pos < info->offset) {
2245 off_t partial;
2246
2247 partial = info->offset - info->pos;
2248 data += partial;
2249 info->pos += partial;
2250 len -= partial;
2251 }
2252
2253 if (len > 0) {
2254 memcpy(info->buffer, data, len);
2255 info->pos += len;
2256 info->buffer += len;
2257 }
2258}
2259
2260static int
2261copy_info(struct info_str *info, char *fmt, ...)
2262{
2263 va_list args;
2264 char buf[256];
2265 int len;
2266
2267 va_start(args, fmt);
2268 len = vsprintf(buf, fmt, args);
2269 va_end(args);
2270
2271 copy_mem_info(info, buf, len);
2272
2273 return (len);
2274}
2275
2276/*************************************************************************
2277* qla2x00_proc_info
2278*
2279* Description:
2280* Return information to handle /proc support for the driver.
2281*
2282* inout : decides the direction of the dataflow and the meaning of the
2283* variables
2284* buffer: If inout==0 data is being written to it else read from it
2285* (ptr to a page buffer)
2286* *start: If inout==0 start of the valid data in the buffer
2287* offset: If inout==0 starting offset from the beginning of all
2288* possible data to return.
2289* length: If inout==0 max number of bytes to be written into the buffer
2290* else number of bytes in "buffer"
2291* Returns:
2292* < 0: error. errno value.
2293* >= 0: sizeof data returned.
2294*************************************************************************/
2295int
2296qla2x00_proc_info(struct Scsi_Host *shost, char *buffer,
2297 char **start, off_t offset, int length, int inout)
2298{
2299 struct info_str info;
2300 int retval = -EINVAL;
2301 os_lun_t *up;
2302 os_tgt_t *tq;
2303 unsigned int t, l;
2304 uint32_t tmp_sn;
2305 uint32_t *flags;
2306 uint8_t *loop_state;
2307 scsi_qla_host_t *ha;
2308 char fw_info[30];
2309
2310 DEBUG3(printk(KERN_INFO
2311 "Entering proc_info buff_in=%p, offset=0x%lx, length=0x%x\n",
2312 buffer, offset, length);)
2313
2314 ha = (scsi_qla_host_t *) shost->hostdata;
2315
2316 if (inout) {
2317 /* Has data been written to the file? */
2318 DEBUG3(printk(
2319 "%s: has data been written to the file. \n",
2320 __func__);)
2321
2322 return -ENOSYS;
2323 }
2324
2325 if (start) {
2326 *start = buffer;
2327 }
2328
2329 info.buffer = buffer;
2330 info.length = length;
2331 info.offset = offset;
2332 info.pos = 0;
2333
2334 /* start building the print buffer */
2335 copy_info(&info,
2336 "QLogic PCI to Fibre Channel Host Adapter for %s:\n"
2337 " Firmware version %s, ",
2338 ha->model_number, qla2x00_get_fw_version_str(ha, fw_info));
2339
2340 copy_info(&info, "Driver version %s\n", qla2x00_version_str);
2341
2342 tmp_sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) |
2343 ha->serial1;
2344 copy_info(&info, "ISP: %s, Serial# %c%05d\n",
2345 ha->brd_info->isp_name, ('A' + tmp_sn/100000), (tmp_sn%100000));
2346
2347 copy_info(&info,
2348 "Request Queue = 0x%llx, Response Queue = 0x%llx\n",
2349 (unsigned long long)ha->request_dma,
2350 (unsigned long long)ha->response_dma);
2351
2352 copy_info(&info,
2353 "Request Queue count = %d, Response Queue count = %d\n",
2354 ha->request_q_length, ha->response_q_length);
2355
2356 copy_info(&info,
2357 "Total number of active commands = %ld\n",
2358 ha->actthreads);
2359
2360 copy_info(&info,
2361 "Total number of interrupts = %ld\n",
2362 (long)ha->total_isr_cnt);
2363
2364 copy_info(&info,
2365 " Device queue depth = 0x%x\n",
2366 (ql2xmaxqdepth == 0) ? 16 : ql2xmaxqdepth);
2367
2368 copy_info(&info,
2369 "Number of free request entries = %d\n", ha->req_q_cnt);
2370
2371 copy_info(&info,
2372 "Number of mailbox timeouts = %ld\n", ha->total_mbx_timeout);
2373
2374 copy_info(&info,
2375 "Number of ISP aborts = %ld\n", ha->total_isp_aborts);
2376
2377 copy_info(&info,
2378 "Number of loop resyncs = %ld\n", ha->total_loop_resync);
2379
2380 copy_info(&info,
2381 "Number of retries for empty slots = %ld\n",
2382 qla2x00_stats.outarray_full);
2383
2384 copy_info(&info,
2385 "Number of reqs in pending_q= %ld, retry_q= %d, "
2386 "done_q= %ld, scsi_retry_q= %d\n",
2387 ha->qthreads, ha->retry_q_cnt,
2388 ha->done_q_cnt, ha->scsi_retry_q_cnt);
2389
2390
2391 flags = (uint32_t *) &ha->flags;
2392
2393 if (atomic_read(&ha->loop_state) == LOOP_DOWN) {
2394 loop_state = "DOWN";
2395 } else if (atomic_read(&ha->loop_state) == LOOP_UP) {
2396 loop_state = "UP";
2397 } else if (atomic_read(&ha->loop_state) == LOOP_READY) {
2398 loop_state = "READY";
2399 } else if (atomic_read(&ha->loop_state) == LOOP_TIMEOUT) {
2400 loop_state = "TIMEOUT";
2401 } else if (atomic_read(&ha->loop_state) == LOOP_UPDATE) {
2402 loop_state = "UPDATE";
2403 } else {
2404 loop_state = "UNKNOWN";
2405 }
2406
2407 copy_info(&info,
2408 "Host adapter:loop state = <%s>, flags = 0x%lx\n",
2409 loop_state, *flags);
2410
2411 copy_info(&info, "Dpc flags = 0x%lx\n", ha->dpc_flags);
2412
2413 copy_info(&info, "MBX flags = 0x%x\n", ha->mbx_flags);
2414
2415 copy_info(&info, "Link down Timeout = %3.3d\n",
2416 ha->link_down_timeout);
2417
2418 copy_info(&info, "Port down retry = %3.3d\n",
2419 ha->port_down_retry_count);
2420
2421 copy_info(&info, "Login retry count = %3.3d\n",
2422 ha->login_retry_count);
2423
2424 copy_info(&info,
2425 "Commands retried with dropped frame(s) = %d\n",
2426 ha->dropped_frame_error_cnt);
2427
2428 copy_info(&info,
2429 "Product ID = %04x %04x %04x %04x\n", ha->product_id[0],
2430 ha->product_id[1], ha->product_id[2], ha->product_id[3]);
2431
2432 copy_info(&info, "\n");
2433
2434 /* 2.25 node/port display to proc */
2435 /* Display the node name for adapter */
2436 copy_info(&info, "\nSCSI Device Information:\n");
2437 copy_info(&info,
2438 "scsi-qla%d-adapter-node="
2439 "%02x%02x%02x%02x%02x%02x%02x%02x;\n",
2440 (int)ha->instance,
2441 ha->init_cb->node_name[0],
2442 ha->init_cb->node_name[1],
2443 ha->init_cb->node_name[2],
2444 ha->init_cb->node_name[3],
2445 ha->init_cb->node_name[4],
2446 ha->init_cb->node_name[5],
2447 ha->init_cb->node_name[6],
2448 ha->init_cb->node_name[7]);
2449
2450 /* display the port name for adapter */
2451 copy_info(&info,
2452 "scsi-qla%d-adapter-port="
2453 "%02x%02x%02x%02x%02x%02x%02x%02x;\n",
2454 (int)ha->instance,
2455 ha->init_cb->port_name[0],
2456 ha->init_cb->port_name[1],
2457 ha->init_cb->port_name[2],
2458 ha->init_cb->port_name[3],
2459 ha->init_cb->port_name[4],
2460 ha->init_cb->port_name[5],
2461 ha->init_cb->port_name[6],
2462 ha->init_cb->port_name[7]);
2463
2464 /* Print out device port names */
2465 for (t = 0; t < MAX_FIBRE_DEVICES; t++) {
2466 if ((tq = TGT_Q(ha, t)) == NULL)
2467 continue;
2468
2469 copy_info(&info,
2470 "scsi-qla%d-target-%d="
2471 "%02x%02x%02x%02x%02x%02x%02x%02x;\n",
2472 (int)ha->instance, t,
2473 tq->port_name[0], tq->port_name[1],
2474 tq->port_name[2], tq->port_name[3],
2475 tq->port_name[4], tq->port_name[5],
2476 tq->port_name[6], tq->port_name[7]);
2477 }
2478
2479 copy_info(&info, "\nSCSI LUN Information:\n");
2480 copy_info(&info,
2481 "(Id:Lun) * - indicates lun is not registered with the OS.\n");
2482
2483 /* scan for all equipment stats */
2484 for (t = 0; t < MAX_FIBRE_DEVICES; t++) {
2485 /* scan all luns */
2486 for (l = 0; l < ha->max_luns; l++) {
2487 up = (os_lun_t *) GET_LU_Q(ha, t, l);
2488
2489 if (up == NULL) {
2490 continue;
2491 }
2492 if (up->fclun == NULL) {
2493 continue;
2494 }
2495
2496 copy_info(&info,
2497 "(%2d:%2d): Total reqs %ld,",
2498 t,l,up->io_cnt);
2499
2500 copy_info(&info,
2501 " Pending reqs %ld,",
2502 up->out_cnt);
2503
2504 if (up->io_cnt < 4) {
2505 copy_info(&info,
2506 " flags 0x%x*,",
2507 (int)up->q_flag);
2508 } else {
2509 copy_info(&info,
2510 " flags 0x%x,",
2511 (int)up->q_flag);
2512 }
2513
2514 copy_info(&info,
2515 " %ld:%d:%02x %02x",
2516 up->fclun->fcport->ha->instance,
2517 up->fclun->fcport->cur_path,
2518 up->fclun->fcport->loop_id,
2519 up->fclun->device_type);
2520
2521 copy_info(&info, "\n");
2522
2523 if (info.pos >= info.offset + info.length) {
2524 /* No need to continue */
2525 goto profile_stop;
2526 }
2527 }
2528
2529 if (info.pos >= info.offset + info.length) {
2530 /* No need to continue */
2531 break;
2532 }
2533 }
2534
2535profile_stop:
2536
2537 retval = info.pos > info.offset ? info.pos - info.offset : 0;
2538
2539 DEBUG3(printk(KERN_INFO
2540 "Exiting proc_info: info.pos=%d, offset=0x%lx, "
2541 "length=0x%x\n", info.pos, offset, length);)
2542
2543 return (retval);
2544}
2545
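The parameter block documented above is the old proc_info read contract: inout selects direction, and the driver fills buffer with the window [offset, offset+length) of its virtual output. A caller-side sketch of a read (illustrative, not the actual scsi_proc code):

static int example_proc_read(struct Scsi_Host *shost, char *page, off_t offset)
{
	char *start = page;

	/* inout == 0 selects the read direction described above */
	return qla2x00_proc_info(shost, page, &start, offset, PAGE_SIZE, 0);
}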
2546/*
2547* qla2x00_display_fc_names
2548* This routine will display the node names of the different devices found
2549* after port inquiry.
2550*
2551* Input:
2552* ha = adapter block pointer.
2553*
2554* Returns:
2555* None.
2556*/
2557static void
2558qla2x00_display_fc_names(scsi_qla_host_t *ha)
2559{
2560 uint16_t tgt;
2561 os_tgt_t *tq;
2562
2563 /* Display the node name for adapter */
2564 qla_printk(KERN_INFO, ha,
2565 "scsi-qla%d-adapter-node=%02x%02x%02x%02x%02x%02x%02x%02x\\;\n",
2566 (int)ha->instance,
2567 ha->init_cb->node_name[0],
2568 ha->init_cb->node_name[1],
2569 ha->init_cb->node_name[2],
2570 ha->init_cb->node_name[3],
2571 ha->init_cb->node_name[4],
2572 ha->init_cb->node_name[5],
2573 ha->init_cb->node_name[6],
2574 ha->init_cb->node_name[7]);
2575
2576 /* display the port name for adapter */
2577 qla_printk(KERN_INFO, ha,
2578 "scsi-qla%d-adapter-port=%02x%02x%02x%02x%02x%02x%02x%02x\\;\n",
2579 (int)ha->instance,
2580 ha->init_cb->port_name[0],
2581 ha->init_cb->port_name[1],
2582 ha->init_cb->port_name[2],
2583 ha->init_cb->port_name[3],
2584 ha->init_cb->port_name[4],
2585 ha->init_cb->port_name[5],
2586 ha->init_cb->port_name[6],
2587 ha->init_cb->port_name[7]);
2588
2589 /* Print out device port names */
2590 for (tgt = 0; tgt < MAX_TARGETS; tgt++) {
2591 if ((tq = ha->otgt[tgt]) == NULL)
2592 continue;
2593
2594 if (tq->fcport == NULL)
2595 continue;
2596
2597 switch (ha->binding_type) {
2598 case BIND_BY_PORT_NAME:
2599 qla_printk(KERN_INFO, ha,
2600 "scsi-qla%d-tgt-%d-di-0-port="
2601 "%02x%02x%02x%02x%02x%02x%02x%02x\\;\n",
2602 (int)ha->instance,
2603 tgt,
2604 tq->port_name[0],
2605 tq->port_name[1],
2606 tq->port_name[2],
2607 tq->port_name[3],
2608 tq->port_name[4],
2609 tq->port_name[5],
2610 tq->port_name[6],
2611 tq->port_name[7]);
2612
2613 break;
2614
2615 case BIND_BY_PORT_ID:
2616 qla_printk(KERN_INFO, ha,
2617 "scsi-qla%d-tgt-%d-di-0-pid="
2618 "%02x%02x%02x\\;\n",
2619 (int)ha->instance,
2620 tgt,
2621 tq->d_id.b.domain,
2622 tq->d_id.b.area,
2623 tq->d_id.b.al_pa);
2624 break;
2625 }
2626
2627#if VSA
2628 qla_printk(KERN_INFO, ha,
2629 "scsi-qla%d-target-%d-vsa=01;\n", (int)ha->instance, tgt);
2630#endif
2631 }
2632}
2633
2634/*
2635 * qla2x00_suspend_lun
2636 * Suspend lun and start port down timer
2637 *
2638 * Input:
2639 * ha = visible adapter block pointer.
2640 * lq = lun queue
2641 * cp = Scsi command pointer
2642 * time = time in seconds
2643 * count = number of times to let time expire
2644 * delay_lun = non-zero, if lun should be delayed rather than suspended
2645 *
2646 * Return:
2647 * QLA_SUCCESS -- suspended lun
2648 * QLA_FUNCTION_FAILED -- Didn't suspend lun
2649 *
2650 * Context:
2651 * Interrupt context.
2652 */
2653int
2654__qla2x00_suspend_lun(scsi_qla_host_t *ha,
2655 os_lun_t *lq, int time, int count, int delay_lun)
2656{
2657 int rval;
2658 srb_t *sp;
2659 struct list_head *list, *temp;
2660 unsigned long flags;
2661
2662 rval = QLA_SUCCESS;
2663
2664 /* if the lun_q is already suspended then don't do it again */
2665 if (lq->q_state == LUN_STATE_READY || lq->q_state == LUN_STATE_RUN) {
2666
2667 spin_lock_irqsave(&lq->q_lock, flags);
2668 if (lq->q_state == LUN_STATE_READY) {
2669 lq->q_max = count;
2670 lq->q_count = 0;
2671 }
2672 /* Set the suspend time usually 6 secs */
2673 atomic_set(&lq->q_timer, time);
2674
2675 /* now suspend the lun */
2676 lq->q_state = LUN_STATE_WAIT;
2677
2678 if (delay_lun) {
2679 set_bit(LUN_EXEC_DELAYED, &lq->q_flag);
2680 DEBUG(printk(KERN_INFO
2681 "scsi(%ld): Delay lun execution for %d secs, "
2682 "count=%d, max count=%d, state=%d\n",
2683 ha->host_no,
2684 time,
2685 lq->q_count, lq->q_max, lq->q_state));
2686 } else {
2687 DEBUG(printk(KERN_INFO
2688 "scsi(%ld): Suspend lun for %d secs, count=%d, "
2689 "max count=%d, state=%d\n",
2690 ha->host_no,
2691 time,
2692 lq->q_count, lq->q_max, lq->q_state));
2693 }
2694 spin_unlock_irqrestore(&lq->q_lock, flags);
2695
2696 /*
2697 * Remove all pending commands from request queue and put them
2698 * in the scsi_retry queue.
2699 */
2700 spin_lock_irqsave(&ha->list_lock, flags);
2701 list_for_each_safe(list, temp, &ha->pending_queue) {
2702 sp = list_entry(list, srb_t, list);
2703 if (sp->lun_queue != lq)
2704 continue;
2705
2706 __del_from_pending_queue(ha, sp);
2707
2708 if (sp->cmd->allowed < count)
2709 sp->cmd->allowed = count;
2710 __add_to_scsi_retry_queue(ha, sp);
2711
2712 } /* list_for_each_safe */
2713 spin_unlock_irqrestore(&ha->list_lock, flags);
2714 rval = QLA_SUCCESS;
2715 } else {
2716 rval = QLA_FUNCTION_FAILED;
2717 }
2718
2719 return (rval);
2720}
2721
2722/* 1468/*
2723 * qla2x00_mark_device_lost Updates fcport state when device goes offline. 1469 * qla2x00_mark_device_lost Updates fcport state when device goes offline.
2724 * 1470 *
@@ -2731,6 +1477,8 @@ __qla2x00_suspend_lun(scsi_qla_host_t *ha,
2731void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport, 1477void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
2732 int do_login) 1478 int do_login)
2733{ 1479{
1480 if (atomic_read(&fcport->state) == FCS_ONLINE && fcport->rport)
1481 fc_remote_port_block(fcport->rport);
2734 /* 1482 /*
2735 * We may need to retry the login, so don't change the state of the 1483 * We may need to retry the login, so don't change the state of the
2736 * port but do the retries. 1484 * port but do the retries.
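fc_remote_port_block() is what makes the retry window above work: while the rport is blocked the midlayer holds new I/O instead of failing it, so the relogin retries can run underneath. The guard-then-block shape added by this hunk, in isolation:

static void example_mark_port_lost(fc_port_t *fcport)
{
	/* only block a port the transport currently considers usable */
	if (atomic_read(&fcport->state) == FCS_ONLINE && fcport->rport)
		fc_remote_port_block(fcport->rport);

	atomic_set(&fcport->state, FCS_DEVICE_LOST);
}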
@@ -2790,7 +1538,8 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha)
2790 */ 1538 */
2791 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 1539 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
2792 continue; 1540 continue;
2793 1541 if (atomic_read(&fcport->state) == FCS_ONLINE && fcport->rport)
1542 fc_remote_port_block(fcport->rport);
2794 atomic_set(&fcport->state, FCS_DEVICE_LOST); 1543 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2795 } 1544 }
2796} 1545}
@@ -3007,11 +1756,8 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
3007static void 1756static void
3008qla2x00_mem_free(scsi_qla_host_t *ha) 1757qla2x00_mem_free(scsi_qla_host_t *ha)
3009{ 1758{
3010 uint32_t t;
3011 struct list_head *fcpl, *fcptemp; 1759 struct list_head *fcpl, *fcptemp;
3012 fc_port_t *fcport; 1760 fc_port_t *fcport;
3013 struct list_head *fcll, *fcltemp;
3014 fc_lun_t *fclun;
3015 unsigned long wtime;/* max wait time if mbx cmd is busy. */ 1761 unsigned long wtime;/* max wait time if mbx cmd is busy. */
3016 1762
3017 if (ha == NULL) { 1763 if (ha == NULL) {
@@ -3020,11 +1766,6 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
3020 return; 1766 return;
3021 } 1767 }
3022 1768
3023 /* Free the target queues */
3024 for (t = 0; t < MAX_TARGETS; t++) {
3025 qla2x00_tgt_free(ha, t);
3026 }
3027
3028 /* Make sure all other threads are stopped. */ 1769 /* Make sure all other threads are stopped. */
3029 wtime = 60 * HZ; 1770 wtime = 60 * HZ;
3030 while (ha->dpc_wait && wtime) { 1771 while (ha->dpc_wait && wtime) {
@@ -3103,14 +1844,6 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
3103 list_for_each_safe(fcpl, fcptemp, &ha->fcports) { 1844 list_for_each_safe(fcpl, fcptemp, &ha->fcports) {
3104 fcport = list_entry(fcpl, fc_port_t, list); 1845 fcport = list_entry(fcpl, fc_port_t, list);
3105 1846
3106 /* fc luns */
3107 list_for_each_safe(fcll, fcltemp, &fcport->fcluns) {
3108 fclun = list_entry(fcll, fc_lun_t, list);
3109
3110 list_del_init(&fclun->list);
3111 kfree(fclun);
3112 }
3113
3114 /* fc ports */ 1847 /* fc ports */
3115 list_del_init(&fcport->list); 1848 list_del_init(&fcport->list);
3116 kfree(fcport); 1849 kfree(fcport);
@@ -3188,16 +1921,8 @@ qla2x00_do_dpc(void *data)
3188 DECLARE_MUTEX_LOCKED(sem); 1921 DECLARE_MUTEX_LOCKED(sem);
3189 scsi_qla_host_t *ha; 1922 scsi_qla_host_t *ha;
3190 fc_port_t *fcport; 1923 fc_port_t *fcport;
3191 os_lun_t *q;
3192 srb_t *sp;
3193 uint8_t status; 1924 uint8_t status;
3194 unsigned long flags = 0;
3195 struct list_head *list, *templist;
3196 int dead_cnt, online_cnt;
3197 int retry_cmds = 0;
3198 uint16_t next_loopid; 1925 uint16_t next_loopid;
3199 int t;
3200 os_tgt_t *tq;
3201 1926
3202 ha = (scsi_qla_host_t *)data; 1927 ha = (scsi_qla_host_t *)data;
3203 1928
@@ -3233,139 +1958,7 @@ qla2x00_do_dpc(void *data)
3233 1958
3234 ha->dpc_active = 1; 1959 ha->dpc_active = 1;
3235 1960
3236 if (!list_empty(&ha->done_queue))
3237 qla2x00_done(ha);
3238
3239 /* Process commands in retry queue */
3240 if (test_and_clear_bit(PORT_RESTART_NEEDED, &ha->dpc_flags)) {
3241 DEBUG(printk("scsi(%ld): DPC checking retry_q. "
3242 "total=%d\n",
3243 ha->host_no, ha->retry_q_cnt));
3244
3245 spin_lock_irqsave(&ha->list_lock, flags);
3246 dead_cnt = online_cnt = 0;
3247 list_for_each_safe(list, templist, &ha->retry_queue) {
3248 sp = list_entry(list, srb_t, list);
3249 q = sp->lun_queue;
3250 DEBUG3(printk("scsi(%ld): pid=%ld sp=%p, "
3251 "spflags=0x%x, q_flag= 0x%lx\n",
3252 ha->host_no, sp->cmd->serial_number, sp,
3253 sp->flags, q->q_flag));
3254
3255 if (q == NULL)
3256 continue;
3257 fcport = q->fclun->fcport;
3258
3259 if (atomic_read(&fcport->state) ==
3260 FCS_DEVICE_DEAD ||
3261 atomic_read(&fcport->ha->loop_state) == LOOP_DEAD) {
3262
3263 __del_from_retry_queue(ha, sp);
3264 sp->cmd->result = DID_NO_CONNECT << 16;
3265 if (atomic_read(&fcport->ha->loop_state) ==
3266 LOOP_DOWN)
3267 sp->err_id = SRB_ERR_LOOP;
3268 else
3269 sp->err_id = SRB_ERR_PORT;
3270 sp->cmd->host_scribble =
3271 (unsigned char *) NULL;
3272 __add_to_done_queue(ha, sp);
3273 dead_cnt++;
3274 } else if (atomic_read(&fcport->state) !=
3275 FCS_DEVICE_LOST) {
3276
3277 __del_from_retry_queue(ha, sp);
3278 sp->cmd->result = DID_BUS_BUSY << 16;
3279 sp->cmd->host_scribble =
3280 (unsigned char *) NULL;
3281 __add_to_done_queue(ha, sp);
3282 online_cnt++;
3283 }
3284 } /* list_for_each_safe() */
3285 spin_unlock_irqrestore(&ha->list_lock, flags);
3286
3287 DEBUG(printk("scsi(%ld): done processing retry queue "
3288 "- dead=%d, online=%d\n ",
3289 ha->host_no, dead_cnt, online_cnt));
3290 }
3291
3292 /* Process commands in scsi retry queue */
3293 if (test_and_clear_bit(SCSI_RESTART_NEEDED, &ha->dpc_flags)) {
3294 /*
3295 * Any requests we want to delay for some period is put
3296 * in the scsi retry queue with a delay added. The
3297 * timer will schedule a "scsi_restart_needed" every
3298 * second as long as there are requests in the scsi
3299 * queue.
3300 */
3301 DEBUG(printk("scsi(%ld): DPC checking scsi "
3302 "retry_q.total=%d\n",
3303 ha->host_no, ha->scsi_retry_q_cnt));
3304
3305 online_cnt = 0;
3306 spin_lock_irqsave(&ha->list_lock, flags);
3307 list_for_each_safe(list, templist,
3308 &ha->scsi_retry_queue) {
3309
3310 sp = list_entry(list, srb_t, list);
3311 q = sp->lun_queue;
3312 tq = sp->tgt_queue;
3313
3314 DEBUG3(printk("scsi(%ld): scsi_retry_q: "
3315 "pid=%ld sp=%p, spflags=0x%x, "
3316 "q_flag= 0x%lx,q_state=%d\n",
3317 ha->host_no, sp->cmd->serial_number,
3318 sp, sp->flags, q->q_flag, q->q_state));
3319
3320 /* Was this lun suspended */
3321 if (q->q_state != LUN_STATE_WAIT) {
3322 online_cnt++;
3323 __del_from_scsi_retry_queue(ha, sp);
3324
3325 if (test_bit(TQF_RETRY_CMDS,
3326 &tq->flags)) {
3327 qla2x00_extend_timeout(sp->cmd,
3328 (sp->cmd->timeout_per_command / HZ) - QLA_CMD_TIMER_DELTA);
3329 __add_to_pending_queue(ha, sp);
3330 retry_cmds++;
3331 } else
3332 __add_to_retry_queue(ha, sp);
3333 }
3334
3335 /* Was this command suspended for N secs */
3336 if (sp->delay != 0) {
3337 sp->delay--;
3338 if (sp->delay == 0) {
3339 online_cnt++;
3340 __del_from_scsi_retry_queue(
3341 ha, sp);
3342 __add_to_retry_queue(ha, sp);
3343 }
3344 }
3345 }
3346 spin_unlock_irqrestore(&ha->list_lock, flags);
3347
3348 /* Clear all Target Unsuspended bits */
3349 for (t = 0; t < ha->max_targets; t++) {
3350 if ((tq = ha->otgt[t]) == NULL)
3351 continue;
3352
3353 if (test_bit(TQF_RETRY_CMDS, &tq->flags))
3354 clear_bit(TQF_RETRY_CMDS, &tq->flags);
3355 }
3356 if (retry_cmds)
3357 qla2x00_next(ha);
3358
3359 DEBUG(if (online_cnt > 0))
3360 DEBUG(printk("scsi(%ld): dpc() found scsi reqs to "
3361 "restart= %d\n",
3362 ha->host_no, online_cnt));
3363 }
3364
3365 if (ha->flags.mbox_busy) { 1961 if (ha->flags.mbox_busy) {
3366 if (!list_empty(&ha->done_queue))
3367 qla2x00_done(ha);
3368
3369 ha->dpc_active = 0; 1962 ha->dpc_active = 0;
3370 continue; 1963 continue;
3371 } 1964 }
@@ -3493,28 +2086,6 @@ qla2x00_do_dpc(void *data)
3493 ha->host_no)); 2086 ha->host_no));
3494 } 2087 }
3495 2088
3496
3497 if (test_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags)) {
3498 DEBUG(printk("scsi(%ld): qla2x00_restart_queues()\n",
3499 ha->host_no));
3500
3501 qla2x00_restart_queues(ha, 0);
3502
3503 DEBUG(printk("scsi(%ld): qla2x00_restart_queues - end\n",
3504 ha->host_no));
3505 }
3506
3507 if (test_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags)) {
3508
3509 DEBUG(printk("scsi(%ld): qla2x00_abort_queues()\n",
3510 ha->host_no));
3511
3512 qla2x00_abort_queues(ha, 0);
3513
3514 DEBUG(printk("scsi(%ld): qla2x00_abort_queues - end\n",
3515 ha->host_no));
3516 }
3517
3518 if (test_and_clear_bit(FCPORT_RESCAN_NEEDED, &ha->dpc_flags)) { 2089 if (test_and_clear_bit(FCPORT_RESCAN_NEEDED, &ha->dpc_flags)) {
3519 2090
3520 DEBUG(printk("scsi(%ld): Rescan flagged fcports...\n", 2091 DEBUG(printk("scsi(%ld): Rescan flagged fcports...\n",
@@ -3527,13 +2098,9 @@ qla2x00_do_dpc(void *data)
3527 ha->host_no)); 2098 ha->host_no));
3528 } 2099 }
3529 2100
3530
3531 if (!ha->interrupts_on) 2101 if (!ha->interrupts_on)
3532 qla2x00_enable_intrs(ha); 2102 qla2x00_enable_intrs(ha);
3533 2103
3534 if (!list_empty(&ha->done_queue))
3535 qla2x00_done(ha);
3536
3537 ha->dpc_active = 0; 2104 ha->dpc_active = 0;
3538 } /* End of while(1) */ 2105 } /* End of while(1) */
3539 2106
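The DPC loop uses the 2.6-era kernel-thread idiom: the thread parks on a semaphore declared locked and other contexts kick it with up(); in the real driver &sem is published through ha->dpc_wait so the timer can do exactly that. A skeleton, with a hypothetical exit check:

static int example_dpc_thread(void *data)
{
	DECLARE_MUTEX_LOCKED(sem);	/* starts owned, so the first down() sleeps */

	daemonize("example_dpc");	/* detach from the spawning process */

	while (1) {
		down_interruptible(&sem);	/* sleep until up(&sem) */
		if (example_should_stop(data))	/* hypothetical shutdown flag */
			break;
		/* ... service whatever dpc_flags bits are set ... */
	}
	return 0;
}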
@@ -3549,45 +2116,6 @@ qla2x00_do_dpc(void *data)
3549} 2116}
3550 2117
3551/* 2118/*
3552 * qla2x00_abort_queues
3553 * Abort all commands on queues on device
3554 *
3555 * Input:
3556 * ha = adapter block pointer.
3557 *
3558 * Context:
3559 * Interrupt context.
3560 */
3561void
3562qla2x00_abort_queues(scsi_qla_host_t *ha, uint8_t doneqflg)
3563{
3564
3565 srb_t *sp;
3566 struct list_head *list, *temp;
3567 unsigned long flags;
3568
3569 clear_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags);
3570
3571 /* Return all commands device queues. */
3572 spin_lock_irqsave(&ha->list_lock,flags);
3573 list_for_each_safe(list, temp, &ha->pending_queue) {
3574 sp = list_entry(list, srb_t, list);
3575
3576 if (sp->flags & SRB_ABORTED)
3577 continue;
3578
3579 /* Remove srb from LUN queue. */
3580 __del_from_pending_queue(ha, sp);
3581
3582 /* Set ending status. */
3583 sp->cmd->result = DID_BUS_BUSY << 16;
3584
3585 __add_to_done_queue(ha, sp);
3586 }
3587 spin_unlock_irqrestore(&ha->list_lock, flags);
3588}
3589
3590/*
3591* qla2x00_rst_aen 2119* qla2x00_rst_aen
3592* Processes asynchronous reset. 2120* Processes asynchronous reset.
3593* 2121*
@@ -3632,6 +2160,36 @@ qla2x00_get_new_sp(scsi_qla_host_t *ha)
3632 return (sp); 2160 return (sp);
3633} 2161}
3634 2162
2163static void
2164qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
2165{
2166 struct scsi_cmnd *cmd = sp->cmd;
2167
2168 if (sp->flags & SRB_DMA_VALID) {
2169 if (cmd->use_sg) {
2170 dma_unmap_sg(&ha->pdev->dev, cmd->request_buffer,
2171 cmd->use_sg, cmd->sc_data_direction);
2172 } else if (cmd->request_bufflen) {
2173 dma_unmap_single(&ha->pdev->dev, sp->dma_handle,
2174 cmd->request_bufflen, cmd->sc_data_direction);
2175 }
2176 sp->flags &= ~SRB_DMA_VALID;
2177 }
2178}
2179
2180void
2181qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
2182{
2183 struct scsi_cmnd *cmd = sp->cmd;
2184
2185 qla2x00_sp_free_dma(ha, sp);
2186
2187 CMD_SP(cmd) = NULL;
2188 mempool_free(sp, ha->srb_mempool);
2189
2190 cmd->scsi_done(cmd);
2191}
2192
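qla2x00_sp_compl() centralizes the completion path: unmap DMA, detach the srb from the command, return it to the mempool, then hand the command back through scsi_done. A sketch of an interrupt-side caller (handle lookup simplified; names illustrative):

static void example_complete_by_handle(scsi_qla_host_t *ha, uint32_t handle,
    int result)
{
	srb_t *sp = ha->outstanding_cmds[handle];

	if (!sp)
		return;			/* already reaped or a bogus handle */

	ha->outstanding_cmds[handle] = NULL;
	sp->cmd->result = result;
	qla2x00_sp_compl(ha, sp);	/* unmap, free srb, scsi_done() */
}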
3635/************************************************************************** 2193/**************************************************************************
3636* qla2x00_timer 2194* qla2x00_timer
3637* 2195*
@@ -3643,30 +2201,12 @@ qla2x00_get_new_sp(scsi_qla_host_t *ha)
3643static void 2201static void
3644qla2x00_timer(scsi_qla_host_t *ha) 2202qla2x00_timer(scsi_qla_host_t *ha)
3645{ 2203{
3646 int t,l;
3647 unsigned long cpu_flags = 0; 2204 unsigned long cpu_flags = 0;
3648 fc_port_t *fcport; 2205 fc_port_t *fcport;
3649 os_lun_t *lq;
3650 os_tgt_t *tq;
3651 int start_dpc = 0; 2206 int start_dpc = 0;
3652 int index; 2207 int index;
3653 srb_t *sp; 2208 srb_t *sp;
3654 2209 int t;
3655 /*
3656 * We try and restart any request in the retry queue every second.
3657 */
3658 if (!list_empty(&ha->retry_queue)) {
3659 set_bit(PORT_RESTART_NEEDED, &ha->dpc_flags);
3660 start_dpc++;
3661 }
3662
3663 /*
3664 * We try and restart any request in the scsi_retry queue every second.
3665 */
3666 if (!list_empty(&ha->scsi_retry_queue)) {
3667 set_bit(SCSI_RESTART_NEEDED, &ha->dpc_flags);
3668 start_dpc++;
3669 }
3670 2210
3671 /* 2211 /*
3672 * Ports - Port down timer. 2212 * Ports - Port down timer.
@@ -3696,59 +2236,6 @@ qla2x00_timer(scsi_qla_host_t *ha)
3696 t++; 2236 t++;
3697 } /* End of for fcport */ 2237 } /* End of for fcport */
3698 2238
3699 /*
3700 * LUNS - lun suspend timer.
3701 *
3702 * Whenever a lun is suspended, the timer starts decrementing its
3703 * suspend timer every second until it reaches zero. Once it reaches
3704 * zero the lun retry count is decremented.
3705 */
3706
3707 /*
3708 * FIXME(dg) - Need to convert this linear search of luns into a search
3709 * of a list of suspended luns.
3710 */
3711 for (t = 0; t < ha->max_targets; t++) {
3712 if ((tq = ha->otgt[t]) == NULL)
3713 continue;
3714
3715 for (l = 0; l < ha->max_luns; l++) {
3716 if ((lq = (os_lun_t *) tq->olun[l]) == NULL)
3717 continue;
3718
3719 spin_lock_irqsave(&lq->q_lock, cpu_flags);
3720 if (lq->q_state == LUN_STATE_WAIT &&
3721 atomic_read(&lq->q_timer) != 0) {
3722
3723 if (atomic_dec_and_test(&lq->q_timer) != 0) {
3724 /*
3725 * A delay should immediately
3726 * transition to a READY state
3727 */
3728 if (test_and_clear_bit(LUN_EXEC_DELAYED,
3729 &lq->q_flag)) {
3730 lq->q_state = LUN_STATE_READY;
3731 }
3732 else {
3733 lq->q_count++;
3734 if (lq->q_count == lq->q_max)
3735 lq->q_state =
3736 LUN_STATE_TIMEOUT;
3737 else
3738 lq->q_state =
3739 LUN_STATE_RUN;
3740 }
3741 }
3742 DEBUG3(printk("scsi(%ld): lun%d - timer %d, "
3743 "count=%d, max=%d, state=%d\n",
3744 ha->host_no,
3745 l,
3746 atomic_read(&lq->q_timer),
3747 lq->q_count, lq->q_max, lq->q_state));
3748 }
3749 spin_unlock_irqrestore(&lq->q_lock, cpu_flags);
3750 } /* End of for luns */
3751 } /* End of for targets */
3752 2239
3753 /* Loop down handler. */ 2240 /* Loop down handler. */
3754 if (atomic_read(&ha->loop_down_timer) > 0 && 2241 if (atomic_read(&ha->loop_down_timer) > 0 &&
@@ -3768,11 +2255,13 @@ qla2x00_timer(scsi_qla_host_t *ha)
3768 spin_lock_irqsave(&ha->hardware_lock, cpu_flags); 2255 spin_lock_irqsave(&ha->hardware_lock, cpu_flags);
3769 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; 2256 for (index = 1; index < MAX_OUTSTANDING_COMMANDS;
3770 index++) { 2257 index++) {
2258 fc_port_t *sfcp;
2259
3771 sp = ha->outstanding_cmds[index]; 2260 sp = ha->outstanding_cmds[index];
3772 if (!sp) 2261 if (!sp)
3773 continue; 2262 continue;
3774 if (!(sp->fclun->fcport->flags & 2263 sfcp = sp->fcport;
3775 FCF_TAPE_PRESENT)) 2264 if (!(sfcp->flags & FCF_TAPE_PRESENT))
3776 continue; 2265 continue;
3777 2266
3778 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 2267 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
@@ -3808,19 +2297,12 @@ qla2x00_timer(scsi_qla_host_t *ha)
3808 atomic_read(&ha->loop_down_timer))); 2297 atomic_read(&ha->loop_down_timer)));
3809 } 2298 }
3810 2299
3811 /*
3812 * Done Q Handler -- dgFIXME This handler will kick off doneq if we
3813 * haven't processed it in 2 seconds.
3814 */
3815 if (!list_empty(&ha->done_queue))
3816 qla2x00_done(ha);
3817
3818
3819 /* Schedule the DPC routine if needed */ 2300 /* Schedule the DPC routine if needed */
3820 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || 2301 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
3821 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || 2302 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
3822 start_dpc || 2303 start_dpc ||
3823 test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) || 2304 test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) ||
2305 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
3824 test_bit(RELOGIN_NEEDED, &ha->dpc_flags)) && 2306 test_bit(RELOGIN_NEEDED, &ha->dpc_flags)) &&
3825 ha->dpc_wait && !ha->dpc_active) { 2307 ha->dpc_wait && !ha->dpc_active) {
3826 2308
@@ -3830,496 +2312,6 @@ qla2x00_timer(scsi_qla_host_t *ha)
3830 qla2x00_restart_timer(ha, WATCH_INTERVAL); 2312 qla2x00_restart_timer(ha, WATCH_INTERVAL);
3831} 2313}
3832 2314
3833/*
3834 * qla2x00_extend_timeout
3835 * This routine will extend the timeout to the specified value.
3836 *
3837 * Input:
3838 * cmd = SCSI command structure
3839 *
3840 * Returns:
3841 * None.
3842 */
3843void
3844qla2x00_extend_timeout(struct scsi_cmnd *cmd, int timeout)
3845{
3846 srb_t *sp = (srb_t *) CMD_SP(cmd);
3847 u_long our_jiffies = (timeout * HZ) + jiffies;
3848
3849 sp->ext_history = 0;
3850 sp->e_start = jiffies;
3851 if (cmd->eh_timeout.function) {
3852 mod_timer(&cmd->eh_timeout,our_jiffies);
3853 sp->ext_history |= 1;
3854 }
3855 if (sp->timer.function != NULL) {
3856 /*
3857 * Our internal timer should timeout before the midlayer has a
3858 * chance to begin the abort process
3859 */
3860 mod_timer(&sp->timer,our_jiffies - (QLA_CMD_TIMER_DELTA * HZ));
3861
3862 sp->ext_history |= 2;
3863 }
3864}
3865
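Both branches of the removed helper reduce to pushing an armed timer forward with mod_timer(); the bare idiom:

static void example_extend_timer(struct timer_list *t, unsigned long secs)
{
	if (t->function)		/* only touch a timer that is armed */
		mod_timer(t, jiffies + secs * HZ);
}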
3866/**************************************************************************
3867* qla2x00_cmd_timeout
3868*
3869* Description:
3870* Handles the command if it times out in any state.
3871*
3872* Input:
3873* sp - pointer to validate
3874*
3875* Returns:
3876* None.
3877* Note:Need to add the support for if( sp->state == SRB_FAILOVER_STATE).
3878**************************************************************************/
3879void
3880qla2x00_cmd_timeout(srb_t *sp)
3881{
3882 int t, l;
3883 int processed;
3884 scsi_qla_host_t *vis_ha, *dest_ha;
3885 struct scsi_cmnd *cmd;
3886 unsigned long flags, cpu_flags;
3887 fc_port_t *fcport;
3888
3889 cmd = sp->cmd;
3890 vis_ha = (scsi_qla_host_t *)cmd->device->host->hostdata;
3891
3892 DEBUG3(printk("cmd_timeout: Entering sp->state = %x\n", sp->state));
3893
3894 t = cmd->device->id;
3895 l = cmd->device->lun;
3896 fcport = sp->fclun->fcport;
3897 dest_ha = sp->ha;
3898
3899 /*
3900 * If IO is found either in retry Queue
3901 * OR in Lun Queue
3902 * Return this IO back to host
3903 */
3904 spin_lock_irqsave(&vis_ha->list_lock, flags);
3905 processed = 0;
3906 if (sp->state == SRB_PENDING_STATE) {
3907 __del_from_pending_queue(vis_ha, sp);
3908 DEBUG2(printk("scsi(%ld): Found in Pending queue pid %ld, "
3909 "State = %x., fcport state=%d sjiffs=%lx njiffs=%lx\n",
3910 vis_ha->host_no, cmd->serial_number, sp->state,
3911 atomic_read(&fcport->state), sp->r_start, jiffies));
3912
3913 /*
3914 * If FC_DEVICE is marked as dead return the cmd with
3915 * DID_NO_CONNECT status. Otherwise set the host_byte to
3916 * DID_BUS_BUSY to let the OS retry this cmd.
3917 */
3918 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
3919 atomic_read(&fcport->ha->loop_state) == LOOP_DEAD) {
3920 cmd->result = DID_NO_CONNECT << 16;
3921 if (atomic_read(&fcport->ha->loop_state) == LOOP_DOWN)
3922 sp->err_id = SRB_ERR_LOOP;
3923 else
3924 sp->err_id = SRB_ERR_PORT;
3925 } else {
3926 cmd->result = DID_BUS_BUSY << 16;
3927 }
3928 __add_to_done_queue(vis_ha, sp);
3929 processed++;
3930 }
3931 spin_unlock_irqrestore(&vis_ha->list_lock, flags);
3932
3933 if (processed) {
3934 qla2x00_done(vis_ha);
3935 return;
3936 }
3937
3938 spin_lock_irqsave(&dest_ha->list_lock, flags);
3939 if ((sp->state == SRB_RETRY_STATE) ||
3940 (sp->state == SRB_SCSI_RETRY_STATE)) {
3941
3942 DEBUG2(printk("scsi(%ld): Found in (Scsi) Retry queue or "
3943 "failover Q pid %ld, State = %x., fcport state=%d "
3944 "jiffies=%lx retried=%d\n",
3945 dest_ha->host_no, cmd->serial_number, sp->state,
3946 atomic_read(&fcport->state), jiffies, cmd->retries));
3947
3948 if ((sp->state == SRB_RETRY_STATE)) {
3949 __del_from_retry_queue(dest_ha, sp);
3950 } else if ((sp->state == SRB_SCSI_RETRY_STATE)) {
3951 __del_from_scsi_retry_queue(dest_ha, sp);
3952 }
3953
3954 /*
3955 * If FC_DEVICE is marked as dead return the cmd with
3956 * DID_NO_CONNECT status. Otherwise set the host_byte to
3957 * DID_BUS_BUSY to let the OS retry this cmd.
3958 */
3959 if ((atomic_read(&fcport->state) == FCS_DEVICE_DEAD) ||
3960 atomic_read(&dest_ha->loop_state) == LOOP_DEAD) {
3961 qla2x00_extend_timeout(cmd, EXTEND_CMD_TIMEOUT);
3962 cmd->result = DID_NO_CONNECT << 16;
3963 if (atomic_read(&dest_ha->loop_state) == LOOP_DOWN)
3964 sp->err_id = SRB_ERR_LOOP;
3965 else
3966 sp->err_id = SRB_ERR_PORT;
3967 } else {
3968 cmd->result = DID_BUS_BUSY << 16;
3969 }
3970
3971 __add_to_done_queue(dest_ha, sp);
3972 processed++;
3973 }
3974 spin_unlock_irqrestore(&dest_ha->list_lock, flags);
3975
3976 if (processed) {
3977 qla2x00_done(dest_ha);
3978 return;
3979 }
3980
3981 spin_lock_irqsave(&dest_ha->list_lock, cpu_flags);
3982 if (sp->state == SRB_DONE_STATE) {
3983 /* IO in done_q -- leave it */
3984 DEBUG(printk("scsi(%ld): Found in Done queue pid %ld sp=%p.\n",
3985 dest_ha->host_no, cmd->serial_number, sp));
3986 } else if (sp->state == SRB_SUSPENDED_STATE) {
3987 DEBUG(printk("scsi(%ld): Found SP %p in suspended state "
3988 "- pid %ld:\n",
3989 dest_ha->host_no, sp, cmd->serial_number));
3990 DEBUG(qla2x00_dump_buffer((uint8_t *)sp, sizeof(srb_t));)
3991 } else if (sp->state == SRB_ACTIVE_STATE) {
3992 /*
3993 * IO is with ISP find the command in our active list.
3994 */
3995 spin_unlock_irqrestore(&dest_ha->list_lock, cpu_flags);
3996 spin_lock_irqsave(&dest_ha->hardware_lock, flags);
3997 if (sp == dest_ha->outstanding_cmds[
3998 (unsigned long)sp->cmd->host_scribble]) {
3999
4000 DEBUG(printk("cmd_timeout: Found in ISP \n"));
4001
4002 if (sp->flags & SRB_TAPE) {
4003 /*
4004 * We cannot allow the midlayer error handler
4005 * to wakeup and begin the abort process.
4006 * Extend the timer so that the firmware can
4007 * properly return the IOCB.
4008 */
4009 DEBUG(printk("cmd_timeout: Extending timeout "
4010 "of FCP2 tape command!\n"));
4011 qla2x00_extend_timeout(sp->cmd,
4012 EXTEND_CMD_TIMEOUT);
4013 }
4014 sp->state = SRB_ACTIVE_TIMEOUT_STATE;
4015 spin_unlock_irqrestore(&dest_ha->hardware_lock, flags);
4016 } else {
4017 spin_unlock_irqrestore(&dest_ha->hardware_lock, flags);
4018 printk(KERN_INFO
4019 "qla_cmd_timeout: State indicates it is with "
4020 "ISP, But not in active array\n");
4021 }
4022 spin_lock_irqsave(&dest_ha->list_lock, cpu_flags);
4023 } else if (sp->state == SRB_ACTIVE_TIMEOUT_STATE) {
4024 DEBUG(printk("qla2100%ld: Found in Active timeout state"
4025 "pid %ld, State = %x., \n",
4026 dest_ha->host_no,
4027 sp->cmd->serial_number, sp->state);)
4028 } else {
4029 /* EMPTY */
4030 DEBUG2(printk("cmd_timeout%ld: LOST command state = "
4031 "0x%x, sp=%p\n",
4032 vis_ha->host_no, sp->state, sp);)
4033
4034 qla_printk(KERN_INFO, vis_ha,
4035 "cmd_timeout: LOST command state = 0x%x\n", sp->state);
4036 }
4037 spin_unlock_irqrestore(&dest_ha->list_lock, cpu_flags);
4038
4039 DEBUG3(printk("cmd_timeout: Leaving\n");)
4040}
4041
4042/**************************************************************************
4043* qla2x00_done
4044* Process completed commands.
4045*
4046* Input:
4047* old_ha = adapter block pointer.
4048*
4049**************************************************************************/
4050void
4051qla2x00_done(scsi_qla_host_t *old_ha)
4052{
4053 os_lun_t *lq;
4054 struct scsi_cmnd *cmd;
4055 unsigned long flags = 0;
4056 scsi_qla_host_t *ha;
4057 scsi_qla_host_t *vis_ha;
4058 int send_marker_once = 0;
4059 srb_t *sp, *sptemp;
4060 LIST_HEAD(local_sp_list);
4061
4062 /*
4063 * Get into local queue such that we do not wind up calling done queue
4064 * tasklet for the same IOs from DPC or any other place.
4065 */
4066 spin_lock_irqsave(&old_ha->list_lock, flags);
4067 list_splice_init(&old_ha->done_queue, &local_sp_list);
4068 spin_unlock_irqrestore(&old_ha->list_lock, flags);
4069
4070 /*
4071 * All done commands are in the local queue, now do the call back.
4072 */
4073 list_for_each_entry_safe(sp, sptemp, &local_sp_list, list) {
4074 old_ha->done_q_cnt--;
4075 sp->state = SRB_NO_QUEUE_STATE;
4076
4077 /* remove command from local list */
4078 list_del_init(&sp->list);
4079
4080 cmd = sp->cmd;
4081 if (cmd == NULL)
4082 continue;
4083
4084 vis_ha = (scsi_qla_host_t *)cmd->device->host->hostdata;
4085 lq = sp->lun_queue;
4086 ha = sp->ha;
4087
4088 if (sp->flags & SRB_DMA_VALID) {
4089 sp->flags &= ~SRB_DMA_VALID;
4090
4091 /* Release memory used for this I/O */
4092 if (cmd->use_sg) {
4093 pci_unmap_sg(ha->pdev, cmd->request_buffer,
4094 cmd->use_sg, cmd->sc_data_direction);
4095 } else if (cmd->request_bufflen) {
4096 pci_unmap_page(ha->pdev, sp->dma_handle,
4097 cmd->request_bufflen,
4098 cmd->sc_data_direction);
4099 }
4100 }
4101
4102
4103 switch (host_byte(cmd->result)) {
4104 case DID_OK:
4105 case DID_ERROR:
4106 break;
4107
4108 case DID_RESET:
4109 /*
4110 * Set marker needed, so we don't have to
4111 * send multiple markers
4112 */
4113 if (!send_marker_once) {
4114 ha->marker_needed = 1;
4115 send_marker_once++;
4116 }
4117
4118 /*
4119 * WORKAROUND
4120 *
4121 * A backdoor device-reset requires different
4122 * error handling. This code differentiates
4123 * between normal error handling and the
4124 * backdoor method.
4125 *
4126 */
4127 if (ha->host->eh_active != EH_ACTIVE)
4128 cmd->result = DID_BUS_BUSY << 16;
4129 break;
4130
4131
4132 case DID_ABORT:
4133 sp->flags &= ~SRB_ABORT_PENDING;
4134 sp->flags |= SRB_ABORTED;
4135
4136 if (sp->flags & SRB_TIMEOUT)
4137 cmd->result = DID_TIME_OUT << 16;
4138
4139 break;
4140
4141 default:
4142 DEBUG2(printk("scsi(%ld:%d:%d) %s: did_error "
4143 "= %d, comp-scsi= 0x%x-0x%x pid=%ld.\n",
4144 vis_ha->host_no,
4145 cmd->device->id, cmd->device->lun,
4146 __func__,
4147 host_byte(cmd->result),
4148 CMD_COMPL_STATUS(cmd),
4149 CMD_SCSI_STATUS(cmd), cmd->serial_number));
4150 break;
4151 }
4152
4153 /*
4154 * Call the mid-level driver interrupt handler -- via sp_put()
4155 */
4156 sp_put(ha, sp);
4157 } /* end of while */
4158}
4159
4160/*
4161 * qla2x00_process_response_queue_in_zio_mode
4162 * Process response queue completion as fast as possible
4163 * to achieve Zero Interrupt Operations (ZIO)
4164 *
4165 * Input:
4166 * ha = adapter block pointer.
4167 *
4168 * Context:
4169 * Kernel context.
4170 */
4171static inline void
4172qla2x00_process_response_queue_in_zio_mode(scsi_qla_host_t *ha)
4173{
4174 unsigned long flags;
4175
4176 /* Check for unprocessed commands in response queue. */
4177 if (!ha->flags.process_response_queue)
4178 return;
4179 if (!ha->flags.online)
4180 return;
4181 if (ha->response_ring_ptr->signature == RESPONSE_PROCESSED)
4182 return;
4183
4184 spin_lock_irqsave(&ha->hardware_lock,flags);
4185 qla2x00_process_response_queue(ha);
4186 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4187}
4188
4189/*
4190 * qla2x00_next
4191 * Retrieve and process next job in the LUN queue.
4192 *
4193 * Input:
4194 * tq = SCSI target queue pointer.
4195 * lq = SCSI LUN queue pointer.
4196 * TGT_LOCK must be already obtained.
4197 *
4198 * Output:
4199 * Releases TGT_LOCK upon exit.
4200 *
4201 * Context:
4202 * Kernel/Interrupt context.
4203 *
4204 * Note: This routine will always try to start I/O from visible HBA.
4205 */
4206void
4207qla2x00_next(scsi_qla_host_t *vis_ha)
4208{
4209 int rval;
4210 unsigned long flags;
4211 scsi_qla_host_t *dest_ha;
4212 fc_port_t *fcport;
4213 srb_t *sp, *sptemp;
4214 LIST_HEAD(local_sp_list);
4215
4216 dest_ha = NULL;
4217
4218 spin_lock_irqsave(&vis_ha->list_lock, flags);
4219 list_splice_init(&vis_ha->pending_queue, &local_sp_list);
4220 vis_ha->qthreads = 0;
4221 spin_unlock_irqrestore(&vis_ha->list_lock, flags);
4222
4223 list_for_each_entry_safe(sp, sptemp, &local_sp_list, list) {
4224 list_del_init(&sp->list);
4225 sp->state = SRB_NO_QUEUE_STATE;
4226
4227 fcport = sp->fclun->fcport;
4228 dest_ha = fcport->ha;
4229
4230 /* If device is dead then send request back to OS */
4231 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) {
4232 sp->cmd->result = DID_NO_CONNECT << 16;
4233 if (atomic_read(&dest_ha->loop_state) == LOOP_DOWN)
4234 sp->err_id = SRB_ERR_LOOP;
4235 else
4236 sp->err_id = SRB_ERR_PORT;
4237
4238 DEBUG3(printk("scsi(%ld): loop/port is down - pid=%ld, "
4239 "sp=%p err_id=%d loopid=0x%x queued to dest HBA "
4240 "scsi%ld.\n", dest_ha->host_no,
4241 sp->cmd->serial_number, sp, sp->err_id,
4242 fcport->loop_id, dest_ha->host_no));
4243 /*
4244 * Initiate a failover - done routine will initiate.
4245 */
4246 add_to_done_queue(vis_ha, sp);
4247
4248 continue;
4249 }
4250
4251 /*
4252 * SCSI Kluge: Whenever we need to wait for an event such as
4253 * loop down (i.e. loop_down_timer) or port down (i.e. LUN
4254 * request queue is suspended) then we will recycle new
4255 * commands back to the SCSI layer. We do this because this is
4256 * normally a temporary condition and we don't want the
4257 * mid-level scsi.c driver to get upset and start aborting
4258 * commands. The timeout value is extracted from the command
4259 * minus 1 second and put on a retry queue (watchdog). Once the
4260 * command times out it is returned to the mid-level with a BUSY
4261 * status, so the mid-level will retry it. This process
4262 * continues until the LOOP DOWN time expires or the condition
4263 * goes away.
4264 */
4265 if (!(sp->flags & (SRB_IOCTL | SRB_TAPE)) &&
4266 (atomic_read(&fcport->state) != FCS_ONLINE ||
4267 test_bit(ABORT_ISP_ACTIVE, &dest_ha->dpc_flags) ||
4268 atomic_read(&dest_ha->loop_state) != LOOP_READY)) {
4269
4270 DEBUG3(printk("scsi(%ld): pid=%ld port=0x%x state=%d "
4271 "loop state=%d, loop counter=0x%x "
4272 "dpc_flags=0x%lx\n", sp->cmd->serial_number,
4273 dest_ha->host_no, fcport->loop_id,
4274 atomic_read(&fcport->state),
4275 atomic_read(&dest_ha->loop_state),
4276 atomic_read(&dest_ha->loop_down_timer),
4277 dest_ha->dpc_flags));
4278
4279 qla2x00_extend_timeout(sp->cmd, EXTEND_CMD_TIMEOUT);
4280 add_to_retry_queue(vis_ha, sp);
4281
4282 continue;
4283 }
4284
4285 /*
4286 * If this request's lun is suspended then put the request on
4287 * the scsi_retry queue.
4288 */
4289 if (!(sp->flags & (SRB_IOCTL | SRB_TAPE)) &&
4290 sp->lun_queue->q_state == LUN_STATE_WAIT) {
4291 DEBUG3(printk("scsi(%ld): lun wait state - pid=%ld, "
4292 "opcode=%d, allowed=%d, retries=%d\n",
4293 dest_ha->host_no,
4294 sp->cmd->serial_number,
4295 sp->cmd->cmnd[0],
4296 sp->cmd->allowed,
4297 sp->cmd->retries));
4298
4299 add_to_scsi_retry_queue(vis_ha, sp);
4300
4301 continue;
4302 }
4303
4304 sp->lun_queue->io_cnt++;
4305
4306 rval = qla2x00_start_scsi(sp);
4307 if (rval != QLA_SUCCESS) {
4308 /* Place request back on top of device queue */
4309 /* add to the top of queue */
4310 add_to_pending_queue_head(vis_ha, sp);
4311
4312 sp->lun_queue->io_cnt--;
4313 }
4314 }
4315
4316 if (!IS_QLA2100(vis_ha) && !IS_QLA2200(vis_ha)) {
4317 /* Process response_queue if ZIO support is enabled*/
4318 qla2x00_process_response_queue_in_zio_mode(vis_ha);
4319
4320 }
4321}
4322
4323/* XXX(hch): crude hack to emulate a down_timeout() */ 2315/* XXX(hch): crude hack to emulate a down_timeout() */
4324int 2316int
4325qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout) 2317qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
@@ -4337,67 +2329,6 @@ qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
4337 return -ETIMEDOUT; 2329 return -ETIMEDOUT;
4338} 2330}
4339 2331
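The "crude hack" noted above amounts to polling the semaphore until a deadline; a minimal sketch of such an emulation (the step size and sleep primitive are assumptions, since the body is outside this hunk):

static int example_down_timeout(struct semaphore *sema, unsigned long timeout)
{
	unsigned long deadline = jiffies + timeout;

	do {
		if (!down_trylock(sema))
			return 0;	/* acquired */
		msleep(100);		/* back off, then poll again */
	} while (time_before(jiffies, deadline));

	return -ETIMEDOUT;
}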
4340static void
4341qla2xxx_get_port_id(struct scsi_target *starget)
4342{
4343 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
4344 scsi_qla_host_t *ha = to_qla_host(shost);
4345 struct fc_port *fc;
4346
4347 list_for_each_entry(fc, &ha->fcports, list) {
4348 if (fc->os_target_id == starget->id) {
4349 fc_starget_port_id(starget) = fc->d_id.b.domain << 16 |
4350 fc->d_id.b.area << 8 |
4351 fc->d_id.b.al_pa;
4352 return;
4353 }
4354 }
4355 fc_starget_port_id(starget) = -1;
4356}
4357
4358static void
4359qla2xxx_get_port_name(struct scsi_target *starget)
4360{
4361 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
4362 scsi_qla_host_t *ha = to_qla_host(shost);
4363 struct fc_port *fc;
4364
4365 list_for_each_entry(fc, &ha->fcports, list) {
4366 if (fc->os_target_id == starget->id) {
4367 fc_starget_port_name(starget) =
4368 __be64_to_cpu(*(uint64_t *)fc->port_name);
4369 return;
4370 }
4371 }
4372 fc_starget_port_name(starget) = -1;
4373}
4374
4375static void
4376qla2xxx_get_node_name(struct scsi_target *starget)
4377{
4378 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
4379 scsi_qla_host_t *ha = to_qla_host(shost);
4380 struct fc_port *fc;
4381
4382 list_for_each_entry(fc, &ha->fcports, list) {
4383 if (fc->os_target_id == starget->id) {
4384 fc_starget_node_name(starget) =
4385 __be64_to_cpu(*(uint64_t *)fc->node_name);
4386 return;
4387 }
4388 }
4389 fc_starget_node_name(starget) = -1;
4390}
4391
4392static struct fc_function_template qla2xxx_transport_functions = {
4393 .get_starget_port_id = qla2xxx_get_port_id,
4394 .show_starget_port_id = 1,
4395 .get_starget_port_name = qla2xxx_get_port_name,
4396 .show_starget_port_name = 1,
4397 .get_starget_node_name = qla2xxx_get_node_name,
4398 .show_starget_node_name = 1,
4399};
4400
4401/** 2332/**
4402 * qla2x00_module_init - Module initialization. 2333 * qla2x00_module_init - Module initialization.
4403 **/ 2334 **/
@@ -4419,8 +2350,7 @@ qla2x00_module_init(void)
4419#if DEBUG_QLA2100 2350#if DEBUG_QLA2100
4420 strcat(qla2x00_version_str, "-debug"); 2351 strcat(qla2x00_version_str, "-debug");
4421#endif 2352#endif
4422 2353 qla2xxx_transport_template = qla2x00_alloc_transport_tmpl();
4423 qla2xxx_transport_template = fc_attach_transport(&qla2xxx_transport_functions);
4424 if (!qla2xxx_transport_template) 2354 if (!qla2xxx_transport_template)
4425 return -ENODEV; 2355 return -ENODEV;
4426 2356
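
The module-init hunk above replaces the open-coded fc_attach_transport() call with qla2x00_alloc_transport_tmpl(); the attribute handlers deleted above move out of qla_os.c (per the diffstat, qla_attr.c grows by 338 lines). A minimal sketch of what such a wrapper could look like, assuming the fc_function_template keeps the contents removed here; only the helper's name is taken from the call site, the rest is illustrative:

#include <scsi/scsi_transport_fc.h>

/* Hypothetical wrapper living in qla_attr.c; qla2xxx_transport_functions
 * is assumed to carry the get_starget_* handlers deleted above. */
struct scsi_transport_template *
qla2x00_alloc_transport_tmpl(void)
{
	/* fc_attach_transport() returns NULL on failure, which the
	 * caller in qla2x00_module_init() turns into -ENODEV. */
	return fc_attach_transport(&qla2xxx_transport_functions);
}
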
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 73ff88b834b7..98e68867261a 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -19,9 +19,9 @@
19/* 19/*
20 * Driver version 20 * Driver version
21 */ 21 */
22#define QLA2XXX_VERSION "8.00.02b4-k" 22#define QLA2XXX_VERSION "8.00.02b5-k"
23 23
24#define QLA_DRIVER_MAJOR_VER 8 24#define QLA_DRIVER_MAJOR_VER 8
25#define QLA_DRIVER_MINOR_VER 0 25#define QLA_DRIVER_MINOR_VER 0
26#define QLA_DRIVER_PATCH_VER 2 26#define QLA_DRIVER_PATCH_VER 2
27#define QLA_DRIVER_BETA_VER 4 27#define QLA_DRIVER_BETA_VER 5
diff --git a/drivers/scsi/qlogicfc.c b/drivers/scsi/qlogicfc.c
index 24c1174b0c2f..ddf0f4277ee8 100644
--- a/drivers/scsi/qlogicfc.c
+++ b/drivers/scsi/qlogicfc.c
@@ -1261,7 +1261,7 @@ static int isp2x00_queuecommand(Scsi_Cmnd * Cmnd, void (*done) (Scsi_Cmnd *))
1261 1261
1262 if (Cmnd->use_sg) { 1262 if (Cmnd->use_sg) {
1263 sg = (struct scatterlist *) Cmnd->request_buffer; 1263 sg = (struct scatterlist *) Cmnd->request_buffer;
1264 sg_count = pci_map_sg(hostdata->pci_dev, sg, Cmnd->use_sg, scsi_to_pci_dma_dir(Cmnd->sc_data_direction)); 1264 sg_count = pci_map_sg(hostdata->pci_dev, sg, Cmnd->use_sg, Cmnd->sc_data_direction);
1265 cmd->segment_cnt = cpu_to_le16(sg_count); 1265 cmd->segment_cnt = cpu_to_le16(sg_count);
1266 ds = cmd->dataseg; 1266 ds = cmd->dataseg;
1267 /* fill in first two sg entries: */ 1267 /* fill in first two sg entries: */
@@ -1307,7 +1307,7 @@ static int isp2x00_queuecommand(Scsi_Cmnd * Cmnd, void (*done) (Scsi_Cmnd *))
1307 dma_addr_t busaddr = pci_map_page(hostdata->pci_dev, 1307 dma_addr_t busaddr = pci_map_page(hostdata->pci_dev,
1308 page, offset, 1308 page, offset,
1309 Cmnd->request_bufflen, 1309 Cmnd->request_bufflen,
1310 scsi_to_pci_dma_dir(Cmnd->sc_data_direction)); 1310 Cmnd->sc_data_direction);
1311 Cmnd->SCp.dma_handle = busaddr; 1311 Cmnd->SCp.dma_handle = busaddr;
1312 1312
1313 cmd->dataseg[0].d_base = cpu_to_le32(pci64_dma_lo32(busaddr)); 1313 cmd->dataseg[0].d_base = cpu_to_le32(pci64_dma_lo32(busaddr));
@@ -1320,7 +1320,7 @@ static int isp2x00_queuecommand(Scsi_Cmnd * Cmnd, void (*done) (Scsi_Cmnd *))
1320 cmd->segment_cnt = cpu_to_le16(1); /* Shouldn't this be 0? */ 1320 cmd->segment_cnt = cpu_to_le16(1); /* Shouldn't this be 0? */
1321 } 1321 }
1322 1322
1323 if (Cmnd->sc_data_direction == SCSI_DATA_WRITE) 1323 if (Cmnd->sc_data_direction == DMA_TO_DEVICE)
1324 cmd->control_flags = cpu_to_le16(CFLAG_WRITE); 1324 cmd->control_flags = cpu_to_le16(CFLAG_WRITE);
1325 else 1325 else
1326 cmd->control_flags = cpu_to_le16(CFLAG_READ); 1326 cmd->control_flags = cpu_to_le16(CFLAG_READ);
@@ -1405,13 +1405,13 @@ static void redo_port_db(unsigned long arg)
1405 pci_unmap_sg(hostdata->pci_dev, 1405 pci_unmap_sg(hostdata->pci_dev,
1406 (struct scatterlist *)Cmnd->buffer, 1406 (struct scatterlist *)Cmnd->buffer,
1407 Cmnd->use_sg, 1407 Cmnd->use_sg,
1408 scsi_to_pci_dma_dir(Cmnd->sc_data_direction)); 1408 Cmnd->sc_data_direction);
1409 else if (Cmnd->request_bufflen && 1409 else if (Cmnd->request_bufflen &&
1410 Cmnd->sc_data_direction != PCI_DMA_NONE) { 1410 Cmnd->sc_data_direction != PCI_DMA_NONE) {
1411 pci_unmap_page(hostdata->pci_dev, 1411 pci_unmap_page(hostdata->pci_dev,
1412 Cmnd->SCp.dma_handle, 1412 Cmnd->SCp.dma_handle,
1413 Cmnd->request_bufflen, 1413 Cmnd->request_bufflen,
1414 scsi_to_pci_dma_dir(Cmnd->sc_data_direction)); 1414 Cmnd->sc_data_direction);
1415 } 1415 }
1416 1416
1417 hostdata->handle_ptrs[i]->result = DID_SOFT_ERROR << 16; 1417 hostdata->handle_ptrs[i]->result = DID_SOFT_ERROR << 16;
@@ -1515,13 +1515,13 @@ void isp2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
1515 pci_unmap_sg(hostdata->pci_dev, 1515 pci_unmap_sg(hostdata->pci_dev,
1516 (struct scatterlist *)Cmnd->buffer, 1516 (struct scatterlist *)Cmnd->buffer,
1517 Cmnd->use_sg, 1517 Cmnd->use_sg,
1518 scsi_to_pci_dma_dir(Cmnd->sc_data_direction)); 1518 Cmnd->sc_data_direction);
1519 else if (Cmnd->request_bufflen && 1519 else if (Cmnd->request_bufflen &&
1520 Cmnd->sc_data_direction != PCI_DMA_NONE) 1520 Cmnd->sc_data_direction != PCI_DMA_NONE)
1521 pci_unmap_page(hostdata->pci_dev, 1521 pci_unmap_page(hostdata->pci_dev,
1522 Cmnd->SCp.dma_handle, 1522 Cmnd->SCp.dma_handle,
1523 Cmnd->request_bufflen, 1523 Cmnd->request_bufflen,
1524 scsi_to_pci_dma_dir(Cmnd->sc_data_direction)); 1524 Cmnd->sc_data_direction);
1525 Cmnd->result = 0x0; 1525 Cmnd->result = 0x0;
1526 (*Cmnd->scsi_done) (Cmnd); 1526 (*Cmnd->scsi_done) (Cmnd);
1527 } else 1527 } else
@@ -1569,12 +1569,12 @@ void isp2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
1569 if (Cmnd->use_sg) 1569 if (Cmnd->use_sg)
1570 pci_unmap_sg(hostdata->pci_dev, 1570 pci_unmap_sg(hostdata->pci_dev,
1571 (struct scatterlist *)Cmnd->buffer, Cmnd->use_sg, 1571 (struct scatterlist *)Cmnd->buffer, Cmnd->use_sg,
1572 scsi_to_pci_dma_dir(Cmnd->sc_data_direction)); 1572 Cmnd->sc_data_direction);
1573 else if (Cmnd->request_bufflen && Cmnd->sc_data_direction != PCI_DMA_NONE) 1573 else if (Cmnd->request_bufflen && Cmnd->sc_data_direction != PCI_DMA_NONE)
1574 pci_unmap_page(hostdata->pci_dev, 1574 pci_unmap_page(hostdata->pci_dev,
1575 Cmnd->SCp.dma_handle, 1575 Cmnd->SCp.dma_handle,
1576 Cmnd->request_bufflen, 1576 Cmnd->request_bufflen,
1577 scsi_to_pci_dma_dir(Cmnd->sc_data_direction)); 1577 Cmnd->sc_data_direction);
1578 1578
1579 /* 1579 /*
1580 * if any of the following are true we do not 1580 * if any of the following are true we do not
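
One subtlety in the qlogicfc conversion: Cmnd->sc_data_direction (now a generic DMA_* value) is passed straight into pci_map_sg()/pci_unmap_page(), yet the unmap paths still compare it against PCI_DMA_NONE. That mix is only correct while the PCI_DMA_* constants share numeric values with the generic ones. A local compile-time check one could add to pin the assumption down (not part of the patch):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Compile-time assertion via the duplicate-case trick: if the constant
 * families ever diverge, the second case label becomes 0, collides with
 * "case 0:", and the build fails. Illustrative only. */
static inline void check_dma_dir_constants(void)
{
	switch (0) {
	case 0:
	case (PCI_DMA_TODEVICE == DMA_TO_DEVICE &&
	      PCI_DMA_FROMDEVICE == DMA_FROM_DEVICE &&
	      PCI_DMA_NONE == DMA_NONE):
		break;
	}
}
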
diff --git a/drivers/scsi/qlogicisp.c b/drivers/scsi/qlogicisp.c
index 71d597a9b0b0..6d29e1b864e2 100644
--- a/drivers/scsi/qlogicisp.c
+++ b/drivers/scsi/qlogicisp.c
@@ -877,7 +877,7 @@ static int isp1020_queuecommand(Scsi_Cmnd *Cmnd, void (*done)(Scsi_Cmnd *))
877 ds = cmd->dataseg; 877 ds = cmd->dataseg;
878 878
879 sg_count = pci_map_sg(hostdata->pci_dev, sg, Cmnd->use_sg, 879 sg_count = pci_map_sg(hostdata->pci_dev, sg, Cmnd->use_sg,
880 scsi_to_pci_dma_dir(Cmnd->sc_data_direction)); 880 Cmnd->sc_data_direction);
881 881
882 cmd->segment_cnt = cpu_to_le16(sg_count); 882 cmd->segment_cnt = cpu_to_le16(sg_count);
883 883
@@ -934,7 +934,7 @@ static int isp1020_queuecommand(Scsi_Cmnd *Cmnd, void (*done)(Scsi_Cmnd *))
934 dma_addr = pci_map_single(hostdata->pci_dev, 934 dma_addr = pci_map_single(hostdata->pci_dev,
935 Cmnd->request_buffer, 935 Cmnd->request_buffer,
936 Cmnd->request_bufflen, 936 Cmnd->request_bufflen,
937 scsi_to_pci_dma_dir(Cmnd->sc_data_direction)); 937 Cmnd->sc_data_direction);
938 Cmnd->SCp.ptr = (char *)(unsigned long) dma_addr; 938 Cmnd->SCp.ptr = (char *)(unsigned long) dma_addr;
939 939
940 cmd->dataseg[0].d_base = 940 cmd->dataseg[0].d_base =
@@ -1067,7 +1067,7 @@ void isp1020_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
1067 pci_unmap_sg(hostdata->pci_dev, 1067 pci_unmap_sg(hostdata->pci_dev,
1068 (struct scatterlist *)Cmnd->buffer, 1068 (struct scatterlist *)Cmnd->buffer,
1069 Cmnd->use_sg, 1069 Cmnd->use_sg,
1070 scsi_to_pci_dma_dir(Cmnd->sc_data_direction)); 1070 Cmnd->sc_data_direction);
1071 else if (Cmnd->request_bufflen) 1071 else if (Cmnd->request_bufflen)
1072 pci_unmap_single(hostdata->pci_dev, 1072 pci_unmap_single(hostdata->pci_dev,
1073#ifdef CONFIG_QL_ISP_A64 1073#ifdef CONFIG_QL_ISP_A64
@@ -1076,7 +1076,7 @@ void isp1020_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
1076 (u32)((long)Cmnd->SCp.ptr), 1076 (u32)((long)Cmnd->SCp.ptr),
1077#endif 1077#endif
1078 Cmnd->request_bufflen, 1078 Cmnd->request_bufflen,
1079 scsi_to_pci_dma_dir(Cmnd->sc_data_direction)); 1079 Cmnd->sc_data_direction);
1080 1080
1081 isp_outw(out_ptr, host, MBOX5); 1081 isp_outw(out_ptr, host, MBOX5);
1082 (*Cmnd->scsi_done)(Cmnd); 1082 (*Cmnd->scsi_done)(Cmnd);
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
index e2360c26ef01..5ee5d80a9931 100644
--- a/drivers/scsi/scsi.h
+++ b/drivers/scsi/scsi.h
@@ -45,21 +45,6 @@ struct scsi_device;
45struct scsi_target; 45struct scsi_target;
46struct scatterlist; 46struct scatterlist;
47 47
48/*
49 * Legacy dma direction interfaces.
50 *
51 * This assumes the pci/sbus dma mapping flags have the same numerical
52 * values as the generic dma-mapping ones. Currently they have but there's
53 * no way to check. Better don't use these interfaces!
54 */
55#define SCSI_DATA_UNKNOWN (DMA_BIDIRECTIONAL)
56#define SCSI_DATA_WRITE (DMA_TO_DEVICE)
57#define SCSI_DATA_READ (DMA_FROM_DEVICE)
58#define SCSI_DATA_NONE (DMA_NONE)
59
60#define scsi_to_pci_dma_dir(scsi_dir) ((int)(scsi_dir))
61#define scsi_to_sbus_dma_dir(scsi_dir) ((int)(scsi_dir))
62
63/* obsolete typedef junk. */ 48/* obsolete typedef junk. */
64#include "scsi_typedefs.h" 49#include "scsi_typedefs.h"
65 50
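
With the SCSI_DATA_* aliases and the scsi_to_pci_dma_dir()/scsi_to_sbus_dma_dir() identity casts gone, drivers hand sc_data_direction to the DMA API directly, as the qlogicfc and qlogicisp hunks above do. The resulting pattern, as a sketch (map_buffer and its parameters are illustrative, not driver code):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Illustrative sketch: the direction stored in the command is already a
 * generic DMA_* value, so it feeds the PCI DMA API with no translation
 * macro in between. */
static dma_addr_t map_buffer(struct pci_dev *pdev, void *buf, size_t len,
			     enum dma_data_direction dir)
{
	return pci_map_single(pdev, buf, len, dir);
}
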
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 203a0812508a..1a135f38e78d 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -476,7 +476,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
476 **/ 476 **/
477static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout) 477static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
478{ 478{
479 struct Scsi_Host *host = scmd->device->host; 479 struct scsi_device *sdev = scmd->device;
480 struct Scsi_Host *shost = sdev->host;
480 DECLARE_MUTEX_LOCKED(sem); 481 DECLARE_MUTEX_LOCKED(sem);
481 unsigned long flags; 482 unsigned long flags;
482 int rtn = SUCCESS; 483 int rtn = SUCCESS;
@@ -487,27 +488,27 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
487 */ 488 */
488 scmd->owner = SCSI_OWNER_LOWLEVEL; 489 scmd->owner = SCSI_OWNER_LOWLEVEL;
489 490
490 if (scmd->device->scsi_level <= SCSI_2) 491 if (sdev->scsi_level <= SCSI_2)
491 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) | 492 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
492 (scmd->device->lun << 5 & 0xe0); 493 (sdev->lun << 5 & 0xe0);
493 494
494 scsi_add_timer(scmd, timeout, scsi_eh_times_out); 495 scsi_add_timer(scmd, timeout, scsi_eh_times_out);
495 496
496 /* 497 /*
497 * set up the semaphore so we wait for the command to complete. 498 * set up the semaphore so we wait for the command to complete.
498 */ 499 */
499 scmd->device->host->eh_action = &sem; 500 shost->eh_action = &sem;
500 scmd->request->rq_status = RQ_SCSI_BUSY; 501 scmd->request->rq_status = RQ_SCSI_BUSY;
501 502
502 spin_lock_irqsave(scmd->device->host->host_lock, flags); 503 spin_lock_irqsave(shost->host_lock, flags);
503 scsi_log_send(scmd); 504 scsi_log_send(scmd);
504 host->hostt->queuecommand(scmd, scsi_eh_done); 505 shost->hostt->queuecommand(scmd, scsi_eh_done);
505 spin_unlock_irqrestore(scmd->device->host->host_lock, flags); 506 spin_unlock_irqrestore(shost->host_lock, flags);
506 507
507 down(&sem); 508 down(&sem);
508 scsi_log_completion(scmd, SUCCESS); 509 scsi_log_completion(scmd, SUCCESS);
509 510
510 scmd->device->host->eh_action = NULL; 511 shost->eh_action = NULL;
511 512
512 /* 513 /*
513 * see if timeout. if so, tell the host to forget about it. 514 * see if timeout. if so, tell the host to forget about it.
@@ -527,10 +528,10 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
527 * abort a timed out command or not. not sure how 528 * abort a timed out command or not. not sure how
528 * we should treat them differently anyways. 529 * we should treat them differently anyways.
529 */ 530 */
530 spin_lock_irqsave(scmd->device->host->host_lock, flags); 531 spin_lock_irqsave(shost->host_lock, flags);
531 if (scmd->device->host->hostt->eh_abort_handler) 532 if (shost->hostt->eh_abort_handler)
532 scmd->device->host->hostt->eh_abort_handler(scmd); 533 shost->hostt->eh_abort_handler(scmd);
533 spin_unlock_irqrestore(scmd->device->host->host_lock, flags); 534 spin_unlock_irqrestore(shost->host_lock, flags);
534 535
535 scmd->request->rq_status = RQ_SCSI_DONE; 536 scmd->request->rq_status = RQ_SCSI_DONE;
536 scmd->owner = SCSI_OWNER_ERROR_HANDLER; 537 scmd->owner = SCSI_OWNER_ERROR_HANDLER;
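
The scsi_send_eh_cmnd() rewrite above only introduces the sdev/shost locals; the completion protocol is untouched: queuecommand() is issued under host_lock with shost->eh_action pointing at an on-stack locked semaphore, and the done callback is expected to up() it. A sketch of that completing side; the body shown here is an assumption, only the eh_action handshake is taken from the hunk:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <asm/semaphore.h>

/* Sketch only: releases the waiter parked on down(&sem) above, if one
 * is still registered via shost->eh_action. */
static void scsi_eh_done_sketch(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;

	if (shost->eh_action)
		up(shost->eh_action);	/* wakes scsi_send_eh_cmnd() */
}
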
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 619d3fb7a2f0..d18da21c9c57 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -358,9 +358,9 @@ void scsi_device_unbusy(struct scsi_device *sdev)
358 shost->host_failed)) 358 shost->host_failed))
359 scsi_eh_wakeup(shost); 359 scsi_eh_wakeup(shost);
360 spin_unlock(shost->host_lock); 360 spin_unlock(shost->host_lock);
361 spin_lock(&sdev->sdev_lock); 361 spin_lock(sdev->request_queue->queue_lock);
362 sdev->device_busy--; 362 sdev->device_busy--;
363 spin_unlock_irqrestore(&sdev->sdev_lock, flags); 363 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
364} 364}
365 365
366/* 366/*
@@ -1423,7 +1423,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1423 struct Scsi_Host *shost = sdev->host; 1423 struct Scsi_Host *shost = sdev->host;
1424 struct request_queue *q; 1424 struct request_queue *q;
1425 1425
1426 q = blk_init_queue(scsi_request_fn, &sdev->sdev_lock); 1426 q = blk_init_queue(scsi_request_fn, NULL);
1427 if (!q) 1427 if (!q)
1428 return NULL; 1428 return NULL;
1429 1429
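
Both scsi_lib.c hunks stem from one change: the per-device sdev_lock is retired in favour of the block layer's per-queue lock, so scsi_alloc_queue() passes NULL and lets blk_init_queue() choose the lock, and device_busy accounting serializes on queue_lock (the scsi_scan.c hunk below drops the now-unneeded spin_lock_init()). The accounting pattern in isolation (sdev_dec_busy is a hypothetical name):

#include <linux/spinlock.h>
#include <scsi/scsi_device.h>

/* Sketch: per-device counters are now serialized by the queue's own
 * lock rather than by a lock embedded in scsi_device. */
static void sdev_dec_busy(struct scsi_device *sdev)
{
	unsigned long flags;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
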
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index a8a37a338c02..287d197a7c17 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -249,7 +249,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
249 */ 249 */
250 sdev->borken = 1; 250 sdev->borken = 1;
251 251
252 spin_lock_init(&sdev->sdev_lock);
253 sdev->request_queue = scsi_alloc_queue(sdev); 252 sdev->request_queue = scsi_alloc_queue(sdev);
254 if (!sdev->request_queue) { 253 if (!sdev->request_queue) {
255 /* release fn is set up in scsi_sysfs_device_initialise, so 254 /* release fn is set up in scsi_sysfs_device_initialise, so
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 134d3a3e4222..e75ee4671ee3 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -171,6 +171,9 @@ void scsi_device_dev_release(struct device *dev)
171 if (sdev->request_queue) { 171 if (sdev->request_queue) {
172 sdev->request_queue->queuedata = NULL; 172 sdev->request_queue->queuedata = NULL;
173 scsi_free_queue(sdev->request_queue); 173 scsi_free_queue(sdev->request_queue);
174 /* temporary expedient, try to catch use of queue lock
175 * after free of sdev */
176 sdev->request_queue = NULL;
174 } 177 }
175 178
176 scsi_target_reap(scsi_target(sdev)); 179 scsi_target_reap(scsi_target(sdev));
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cf6b1f0fb124..7936aafc3d05 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -18,8 +18,8 @@
18 * 18 *
19 */ 19 */
20 20
21static int sg_version_num = 30532; /* 2 digits for each component */ 21static int sg_version_num = 30533; /* 2 digits for each component */
22#define SG_VERSION_STR "3.5.32" 22#define SG_VERSION_STR "3.5.33"
23 23
24/* 24/*
25 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: 25 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
@@ -61,7 +61,7 @@ static int sg_version_num = 30532; /* 2 digits for each component */
61 61
62#ifdef CONFIG_SCSI_PROC_FS 62#ifdef CONFIG_SCSI_PROC_FS
63#include <linux/proc_fs.h> 63#include <linux/proc_fs.h>
64static char *sg_version_date = "20050117"; 64static char *sg_version_date = "20050328";
65 65
66static int sg_proc_init(void); 66static int sg_proc_init(void);
67static void sg_proc_cleanup(void); 67static void sg_proc_cleanup(void);
@@ -331,14 +331,13 @@ sg_release(struct inode *inode, struct file *filp)
331static ssize_t 331static ssize_t
332sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) 332sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
333{ 333{
334 int res;
335 Sg_device *sdp; 334 Sg_device *sdp;
336 Sg_fd *sfp; 335 Sg_fd *sfp;
337 Sg_request *srp; 336 Sg_request *srp;
338 int req_pack_id = -1; 337 int req_pack_id = -1;
339 struct sg_header old_hdr;
340 sg_io_hdr_t new_hdr;
341 sg_io_hdr_t *hp; 338 sg_io_hdr_t *hp;
339 struct sg_header *old_hdr = NULL;
340 int retval = 0;
342 341
343 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 342 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
344 return -ENXIO; 343 return -ENXIO;
@@ -347,98 +346,138 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
347 if (!access_ok(VERIFY_WRITE, buf, count)) 346 if (!access_ok(VERIFY_WRITE, buf, count))
348 return -EFAULT; 347 return -EFAULT;
349 if (sfp->force_packid && (count >= SZ_SG_HEADER)) { 348 if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
350 if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER)) 349 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
351 return -EFAULT; 350 if (!old_hdr)
352 if (old_hdr.reply_len < 0) { 351 return -ENOMEM;
352 if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
353 retval = -EFAULT;
354 goto free_old_hdr;
355 }
356 if (old_hdr->reply_len < 0) {
353 if (count >= SZ_SG_IO_HDR) { 357 if (count >= SZ_SG_IO_HDR) {
354 if (__copy_from_user 358 sg_io_hdr_t *new_hdr;
355 (&new_hdr, buf, SZ_SG_IO_HDR)) 359 new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
356 return -EFAULT; 360 if (!new_hdr) {
357 req_pack_id = new_hdr.pack_id; 361 retval = -ENOMEM;
362 goto free_old_hdr;
363 }
364 retval = __copy_from_user
365 (new_hdr, buf, SZ_SG_IO_HDR);
366 req_pack_id = new_hdr->pack_id;
367 kfree(new_hdr);
368 if (retval) {
369 retval = -EFAULT;
370 goto free_old_hdr;
371 }
358 } 372 }
359 } else 373 } else
360 req_pack_id = old_hdr.pack_id; 374 req_pack_id = old_hdr->pack_id;
361 } 375 }
362 srp = sg_get_rq_mark(sfp, req_pack_id); 376 srp = sg_get_rq_mark(sfp, req_pack_id);
363 if (!srp) { /* now wait on packet to arrive */ 377 if (!srp) { /* now wait on packet to arrive */
364 if (sdp->detached) 378 if (sdp->detached) {
365 return -ENODEV; 379 retval = -ENODEV;
366 if (filp->f_flags & O_NONBLOCK) 380 goto free_old_hdr;
367 return -EAGAIN; 381 }
382 if (filp->f_flags & O_NONBLOCK) {
383 retval = -EAGAIN;
384 goto free_old_hdr;
385 }
368 while (1) { 386 while (1) {
369 res = 0; /* following is a macro that beats race condition */ 387 retval = 0; /* following macro beats race condition */
370 __wait_event_interruptible(sfp->read_wait, 388 __wait_event_interruptible(sfp->read_wait,
371 (sdp->detached || (srp = sg_get_rq_mark(sfp, req_pack_id))), 389 (sdp->detached ||
372 res); 390 (srp = sg_get_rq_mark(sfp, req_pack_id))),
373 if (sdp->detached) 391 retval);
374 return -ENODEV; 392 if (sdp->detached) {
375 if (0 == res) 393 retval = -ENODEV;
394 goto free_old_hdr;
395 }
396 if (0 == retval)
376 break; 397 break;
377 return res; /* -ERESTARTSYS because signal hit process */ 398
399 /* -ERESTARTSYS as signal hit process */
400 goto free_old_hdr;
378 } 401 }
379 } 402 }
380 if (srp->header.interface_id != '\0') 403 if (srp->header.interface_id != '\0') {
381 return sg_new_read(sfp, buf, count, srp); 404 retval = sg_new_read(sfp, buf, count, srp);
405 goto free_old_hdr;
406 }
382 407
383 hp = &srp->header; 408 hp = &srp->header;
384 memset(&old_hdr, 0, SZ_SG_HEADER); 409 if (old_hdr == NULL) {
385 old_hdr.reply_len = (int) hp->timeout; 410 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
386 old_hdr.pack_len = old_hdr.reply_len; /* very old, strange behaviour */ 411 if (! old_hdr) {
387 old_hdr.pack_id = hp->pack_id; 412 retval = -ENOMEM;
388 old_hdr.twelve_byte = 413 goto free_old_hdr;
414 }
415 }
416 memset(old_hdr, 0, SZ_SG_HEADER);
417 old_hdr->reply_len = (int) hp->timeout;
418 old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
419 old_hdr->pack_id = hp->pack_id;
420 old_hdr->twelve_byte =
389 ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0; 421 ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
390 old_hdr.target_status = hp->masked_status; 422 old_hdr->target_status = hp->masked_status;
391 old_hdr.host_status = hp->host_status; 423 old_hdr->host_status = hp->host_status;
392 old_hdr.driver_status = hp->driver_status; 424 old_hdr->driver_status = hp->driver_status;
393 if ((CHECK_CONDITION & hp->masked_status) || 425 if ((CHECK_CONDITION & hp->masked_status) ||
394 (DRIVER_SENSE & hp->driver_status)) 426 (DRIVER_SENSE & hp->driver_status))
395 memcpy(old_hdr.sense_buffer, srp->sense_b, 427 memcpy(old_hdr->sense_buffer, srp->sense_b,
396 sizeof (old_hdr.sense_buffer)); 428 sizeof (old_hdr->sense_buffer));
397 switch (hp->host_status) { 429 switch (hp->host_status) {
398 /* This setup of 'result' is for backward compatibility and is best 430 /* This setup of 'result' is for backward compatibility and is best
399 ignored by the user who should use target, host + driver status */ 431 ignored by the user who should use target, host + driver status */
400 case DID_OK: 432 case DID_OK:
401 case DID_PASSTHROUGH: 433 case DID_PASSTHROUGH:
402 case DID_SOFT_ERROR: 434 case DID_SOFT_ERROR:
403 old_hdr.result = 0; 435 old_hdr->result = 0;
404 break; 436 break;
405 case DID_NO_CONNECT: 437 case DID_NO_CONNECT:
406 case DID_BUS_BUSY: 438 case DID_BUS_BUSY:
407 case DID_TIME_OUT: 439 case DID_TIME_OUT:
408 old_hdr.result = EBUSY; 440 old_hdr->result = EBUSY;
409 break; 441 break;
410 case DID_BAD_TARGET: 442 case DID_BAD_TARGET:
411 case DID_ABORT: 443 case DID_ABORT:
412 case DID_PARITY: 444 case DID_PARITY:
413 case DID_RESET: 445 case DID_RESET:
414 case DID_BAD_INTR: 446 case DID_BAD_INTR:
415 old_hdr.result = EIO; 447 old_hdr->result = EIO;
416 break; 448 break;
417 case DID_ERROR: 449 case DID_ERROR:
418 old_hdr.result = (srp->sense_b[0] == 0 && 450 old_hdr->result = (srp->sense_b[0] == 0 &&
419 hp->masked_status == GOOD) ? 0 : EIO; 451 hp->masked_status == GOOD) ? 0 : EIO;
420 break; 452 break;
421 default: 453 default:
422 old_hdr.result = EIO; 454 old_hdr->result = EIO;
423 break; 455 break;
424 } 456 }
425 457
426 /* Now copy the result back to the user buffer. */ 458 /* Now copy the result back to the user buffer. */
427 if (count >= SZ_SG_HEADER) { 459 if (count >= SZ_SG_HEADER) {
428 if (__copy_to_user(buf, &old_hdr, SZ_SG_HEADER)) 460 if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
429 return -EFAULT; 461 retval = -EFAULT;
462 goto free_old_hdr;
463 }
430 buf += SZ_SG_HEADER; 464 buf += SZ_SG_HEADER;
431 if (count > old_hdr.reply_len) 465 if (count > old_hdr->reply_len)
432 count = old_hdr.reply_len; 466 count = old_hdr->reply_len;
433 if (count > SZ_SG_HEADER) { 467 if (count > SZ_SG_HEADER) {
434 if ((res = 468 if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
435 sg_read_oxfer(srp, buf, count - SZ_SG_HEADER))) 469 retval = -EFAULT;
436 return -EFAULT; 470 goto free_old_hdr;
471 }
437 } 472 }
438 } else 473 } else
439 count = (old_hdr.result == 0) ? 0 : -EIO; 474 count = (old_hdr->result == 0) ? 0 : -EIO;
440 sg_finish_rem_req(srp); 475 sg_finish_rem_req(srp);
441 return count; 476 retval = count;
477free_old_hdr:
478 if (old_hdr)
479 kfree(old_hdr);
480 return retval;
442} 481}
443 482
444static ssize_t 483static ssize_t
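
The sg_read() rework above is a kernel-stack diet: struct sg_header and sg_io_hdr_t move from automatic storage to kmalloc(), and every early return becomes a goto to the single free_old_hdr label so the allocation cannot leak. The shape of that pattern, reduced to a self-contained sketch (read_user_header is a hypothetical name):

#include <linux/errno.h>
#include <linux/slab.h>
#include <scsi/sg.h>
#include <asm/uaccess.h>

/* Sketch: heap-allocate a header that previously sat on the stack and
 * route every exit through one cleanup label, as sg_read() now does. */
static int read_user_header(const char __user *buf, int *pack_id)
{
	struct sg_header *hdr;
	int retval = 0;

	hdr = kmalloc(sizeof(*hdr), GFP_KERNEL);
	if (!hdr)
		return -ENOMEM;
	if (copy_from_user(hdr, buf, sizeof(*hdr))) {
		retval = -EFAULT;
		goto free_hdr;
	}
	*pack_id = hdr->pack_id;
free_hdr:
	kfree(hdr);
	return retval;
}
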
@@ -708,16 +747,16 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
708 switch (hp->dxfer_direction) { 747 switch (hp->dxfer_direction) {
709 case SG_DXFER_TO_FROM_DEV: 748 case SG_DXFER_TO_FROM_DEV:
710 case SG_DXFER_FROM_DEV: 749 case SG_DXFER_FROM_DEV:
711 SRpnt->sr_data_direction = SCSI_DATA_READ; 750 SRpnt->sr_data_direction = DMA_FROM_DEVICE;
712 break; 751 break;
713 case SG_DXFER_TO_DEV: 752 case SG_DXFER_TO_DEV:
714 SRpnt->sr_data_direction = SCSI_DATA_WRITE; 753 SRpnt->sr_data_direction = DMA_TO_DEVICE;
715 break; 754 break;
716 case SG_DXFER_UNKNOWN: 755 case SG_DXFER_UNKNOWN:
717 SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN; 756 SRpnt->sr_data_direction = DMA_BIDIRECTIONAL;
718 break; 757 break;
719 default: 758 default:
720 SRpnt->sr_data_direction = SCSI_DATA_NONE; 759 SRpnt->sr_data_direction = DMA_NONE;
721 break; 760 break;
722 } 761 }
723 SRpnt->upper_private_data = srp; 762 SRpnt->upper_private_data = srp;
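
The dxfer_direction switch above maps the sg interface's transfer codes onto generic DMA directions; note that SG_DXFER_TO_FROM_DEV (write, then read back) is treated as DMA_FROM_DEVICE for mapping purposes. Restated as a standalone helper (sg_dxfer_to_dma is a hypothetical name):

#include <linux/dma-mapping.h>
#include <scsi/sg.h>

/* The switch from sg_common_write() above, factored out; the SG_DXFER_*
 * request codes come from scsi/sg.h. */
static enum dma_data_direction sg_dxfer_to_dma(int dxfer_direction)
{
	switch (dxfer_direction) {
	case SG_DXFER_TO_FROM_DEV:	/* write, then read back */
	case SG_DXFER_FROM_DEV:
		return DMA_FROM_DEVICE;
	case SG_DXFER_TO_DEV:
		return DMA_TO_DEVICE;
	case SG_DXFER_UNKNOWN:
		return DMA_BIDIRECTIONAL;
	default:			/* SG_DXFER_NONE and friends */
		return DMA_NONE;
	}
}
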
@@ -725,7 +764,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
725 srp->data.sglist_len = 0; 764 srp->data.sglist_len = 0;
726 srp->data.bufflen = 0; 765 srp->data.bufflen = 0;
727 srp->data.buffer = NULL; 766 srp->data.buffer = NULL;
728 hp->duration = jiffies; /* unit jiffies now, millisecs after done */ 767 hp->duration = jiffies_to_msecs(jiffies);
729/* Now send everything off to mid-level. The next time we hear about this 768/* Now send everything off to mid-level. The next time we hear about this
730 packet is when sg_cmd_done() is called (i.e. a callback). */ 769 packet is when sg_cmd_done() is called (i.e. a callback). */
731 scsi_do_req(SRpnt, (void *) cmnd, 770 scsi_do_req(SRpnt, (void *) cmnd,
@@ -938,8 +977,13 @@ sg_ioctl(struct inode *inode, struct file *filp,
938 if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE)) 977 if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
939 return -EFAULT; 978 return -EFAULT;
940 else { 979 else {
941 sg_req_info_t rinfo[SG_MAX_QUEUE]; 980 sg_req_info_t *rinfo;
942 Sg_request *srp; 981 unsigned int ms;
982
983 rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
984 GFP_KERNEL);
985 if (!rinfo)
986 return -ENOMEM;
943 read_lock_irqsave(&sfp->rq_list_lock, iflags); 987 read_lock_irqsave(&sfp->rq_list_lock, iflags);
944 for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE; 988 for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
945 ++val, srp = srp ? srp->nextrp : srp) { 989 ++val, srp = srp ? srp->nextrp : srp) {
@@ -950,19 +994,30 @@ sg_ioctl(struct inode *inode, struct file *filp,
950 srp->header.masked_status & 994 srp->header.masked_status &
951 srp->header.host_status & 995 srp->header.host_status &
952 srp->header.driver_status; 996 srp->header.driver_status;
953 rinfo[val].duration = 997 if (srp->done)
954 srp->done ? srp->header.duration : 998 rinfo[val].duration =
955 jiffies_to_msecs( 999 srp->header.duration;
956 jiffies - srp->header.duration); 1000 else {
1001 ms = jiffies_to_msecs(jiffies);
1002 rinfo[val].duration =
1003 (ms > srp->header.duration) ?
1004 (ms - srp->header.duration) : 0;
1005 }
957 rinfo[val].orphan = srp->orphan; 1006 rinfo[val].orphan = srp->orphan;
958 rinfo[val].sg_io_owned = srp->sg_io_owned; 1007 rinfo[val].sg_io_owned =
959 rinfo[val].pack_id = srp->header.pack_id; 1008 srp->sg_io_owned;
960 rinfo[val].usr_ptr = srp->header.usr_ptr; 1009 rinfo[val].pack_id =
1010 srp->header.pack_id;
1011 rinfo[val].usr_ptr =
1012 srp->header.usr_ptr;
961 } 1013 }
962 } 1014 }
963 read_unlock_irqrestore(&sfp->rq_list_lock, iflags); 1015 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
964 return (__copy_to_user(p, rinfo, 1016 result = __copy_to_user(p, rinfo,
965 SZ_SG_REQ_INFO * SG_MAX_QUEUE) ? -EFAULT : 0); 1017 SZ_SG_REQ_INFO * SG_MAX_QUEUE);
1018 result = result ? -EFAULT : 0;
1019 kfree(rinfo);
1020 return result;
966 } 1021 }
967 case SG_EMULATED_HOST: 1022 case SG_EMULATED_HOST:
968 if (sdp->detached) 1023 if (sdp->detached)
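
Same stack-shrinking treatment in sg_ioctl(): the on-stack sg_req_info_t rinfo[SG_MAX_QUEUE] array becomes a kmalloc() allocation. The ordering the hunk keeps is the important part: the array is filled under the irq-safe read lock, while __copy_to_user(), which may fault and sleep, runs only after the lock is dropped, and the buffer is freed on both outcomes. A compact sketch reusing sg.c's internal types (fill_req_info is a hypothetical name):

#include <linux/slab.h>
#include <linux/string.h>
#include <asm/uaccess.h>

/* Sketch built on sg.c internals (Sg_fd, sg_req_info_t, SZ_SG_REQ_INFO,
 * SG_MAX_QUEUE). Zeroing first avoids copying uninitialized kernel
 * memory to userspace for unused slots. */
static int fill_req_info(Sg_fd *sfp, sg_req_info_t __user *p)
{
	sg_req_info_t *rinfo;
	unsigned long iflags;
	int result;

	rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, GFP_KERNEL);
	if (!rinfo)
		return -ENOMEM;
	memset(rinfo, 0, SZ_SG_REQ_INFO * SG_MAX_QUEUE);

	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	/* ... walk sfp->headrp and fill rinfo[0..SG_MAX_QUEUE-1] ... */
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);

	/* copy only after the irq-safe lock is dropped */
	result = __copy_to_user(p, rinfo, SZ_SG_REQ_INFO * SG_MAX_QUEUE)
			? -EFAULT : 0;
	kfree(rinfo);
	return result;
}
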
@@ -1209,11 +1264,12 @@ static int
1209sg_mmap(struct file *filp, struct vm_area_struct *vma) 1264sg_mmap(struct file *filp, struct vm_area_struct *vma)
1210{ 1265{
1211 Sg_fd *sfp; 1266 Sg_fd *sfp;
1212 unsigned long req_sz = vma->vm_end - vma->vm_start; 1267 unsigned long req_sz;
1213 Sg_scatter_hold *rsv_schp; 1268 Sg_scatter_hold *rsv_schp;
1214 1269
1215 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) 1270 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
1216 return -ENXIO; 1271 return -ENXIO;
1272 req_sz = vma->vm_end - vma->vm_start;
1217 SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n", 1273 SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
1218 (void *) vma->vm_start, (int) req_sz)); 1274 (void *) vma->vm_start, (int) req_sz));
1219 if (vma->vm_pgoff) 1275 if (vma->vm_pgoff)
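
Besides the allocation changes, the sg_mmap() hunk fixes an ordering bug: req_sz was previously initialized from vma->vm_end - vma->vm_start in its declaration, so vma was dereferenced before the NULL check ran. The corrected ordering, in isolation (mmap_region_size is a hypothetical helper):

#include <linux/errno.h>
#include <linux/mm.h>

/* Sketch: no field of vma is read until the NULL check has passed,
 * unlike the old declaration-time initializer. */
static int mmap_region_size(struct vm_area_struct *vma, unsigned long *req_sz)
{
	if (!vma)
		return -ENXIO;
	*req_sz = vma->vm_end - vma->vm_start;
	return 0;
}
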
@@ -1260,6 +1316,7 @@ sg_cmd_done(Scsi_Cmnd * SCpnt)
1260 Sg_fd *sfp; 1316 Sg_fd *sfp;
1261 Sg_request *srp = NULL; 1317 Sg_request *srp = NULL;
1262 unsigned long iflags; 1318 unsigned long iflags;
1319 unsigned int ms;
1263 1320
1264 if (SCpnt && (SRpnt = SCpnt->sc_request)) 1321 if (SCpnt && (SRpnt = SCpnt->sc_request))
1265 srp = (Sg_request *) SRpnt->upper_private_data; 1322 srp = (Sg_request *) SRpnt->upper_private_data;
@@ -1296,9 +1353,9 @@ sg_cmd_done(Scsi_Cmnd * SCpnt)
1296 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", 1353 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1297 sdp->disk->disk_name, srp->header.pack_id, (int) SRpnt->sr_result)); 1354 sdp->disk->disk_name, srp->header.pack_id, (int) SRpnt->sr_result));
1298 srp->header.resid = SCpnt->resid; 1355 srp->header.resid = SCpnt->resid;
1299 /* N.B. unit of duration changes here from jiffies to millisecs */ 1356 ms = jiffies_to_msecs(jiffies);
1300 srp->header.duration = 1357 srp->header.duration = (ms > srp->header.duration) ?
1301 jiffies_to_msecs(jiffies - srp->header.duration); 1358 (ms - srp->header.duration) : 0;
1302 if (0 != SRpnt->sr_result) { 1359 if (0 != SRpnt->sr_result) {
1303 struct scsi_sense_hdr sshdr; 1360 struct scsi_sense_hdr sshdr;
1304 1361
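
Durations move from "store jiffies at submit, subtract at completion" to millisecond timestamps throughout: sg_add_request() below now records jiffies_to_msecs(jiffies), and both sg_cmd_done() and the /proc output compute elapsed time with a subtraction clamped at zero, since a wrapped counter can make "now" smaller than the stored start. That computation, factored out (elapsed_ms is a hypothetical helper):

#include <linux/jiffies.h>

/* Saturating elapsed-time computation in milliseconds, matching the
 * (ms > start) ? ms - start : 0 expressions in the hunks above. */
static unsigned int elapsed_ms(unsigned int start_ms)
{
	unsigned int now_ms = jiffies_to_msecs(jiffies);

	return now_ms > start_ms ? now_ms - start_ms : 0;
}
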
@@ -2396,7 +2453,7 @@ sg_add_request(Sg_fd * sfp)
2396 } 2453 }
2397 if (resp) { 2454 if (resp) {
2398 resp->nextrp = NULL; 2455 resp->nextrp = NULL;
2399 resp->header.duration = jiffies; 2456 resp->header.duration = jiffies_to_msecs(jiffies);
2400 resp->my_cmdp = NULL; 2457 resp->my_cmdp = NULL;
2401 } 2458 }
2402 write_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2459 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
@@ -2991,6 +3048,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2991 Sg_fd *fp; 3048 Sg_fd *fp;
2992 const sg_io_hdr_t *hp; 3049 const sg_io_hdr_t *hp;
2993 const char * cp; 3050 const char * cp;
3051 unsigned int ms;
2994 3052
2995 for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) { 3053 for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
2996 seq_printf(s, " FD(%d): timeout=%dms bufflen=%d " 3054 seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
@@ -3029,10 +3087,13 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
3029 srp->header.pack_id, blen); 3087 srp->header.pack_id, blen);
3030 if (srp->done) 3088 if (srp->done)
3031 seq_printf(s, " dur=%d", hp->duration); 3089 seq_printf(s, " dur=%d", hp->duration);
3032 else 3090 else {
3091 ms = jiffies_to_msecs(jiffies);
3033 seq_printf(s, " t_o/elap=%d/%d", 3092 seq_printf(s, " t_o/elap=%d/%d",
3034 new_interface ? hp->timeout : jiffies_to_msecs(fp->timeout), 3093 (new_interface ? hp->timeout :
3035 jiffies_to_msecs(hp->duration ? (jiffies - hp->duration) : 0)); 3094 jiffies_to_msecs(fp->timeout)),
3095 (ms > hp->duration ? ms - hp->duration : 0));
3096 }
3036 seq_printf(s, "ms sgat=%d op=0x%02x\n", usg, 3097 seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
3037 (int) srp->data.cmd_opcode); 3098 (int) srp->data.cmd_opcode);
3038 } 3099 }
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 63bf2aecbc57..9171788348c4 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -120,11 +120,10 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
120 } 120 }
121 121
122 /* Fill in the three required pieces of hostdata */ 122 /* Fill in the three required pieces of hostdata */
123 hostdata->base = base_addr; 123 hostdata->base = ioport_map(base_addr, 64);
124 hostdata->differential = differential; 124 hostdata->differential = differential;
125 hostdata->clock = clock; 125 hostdata->clock = clock;
126 hostdata->chip710 = 1; 126 hostdata->chip710 = 1;
127 NCR_700_set_io_mapped(hostdata);
128 127
129 /* and register the chip */ 128 /* and register the chip */
130 if((host = NCR_700_detect(&sim710_driver_template, hostdata, dev)) 129 if((host = NCR_700_detect(&sim710_driver_template, hostdata, dev))
@@ -133,6 +132,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
133 goto out_release; 132 goto out_release;
134 } 133 }
135 host->this_id = scsi_id; 134 host->this_id = scsi_id;
135 host->base = base_addr;
136 host->irq = irq; 136 host->irq = irq;
137 if (request_irq(irq, NCR_700_intr, SA_SHIRQ, "sim710", host)) { 137 if (request_irq(irq, NCR_700_intr, SA_SHIRQ, "sim710", host)) {
138 printk(KERN_ERR "sim710: request_irq failed\n"); 138 printk(KERN_ERR "sim710: request_irq failed\n");
@@ -164,6 +164,7 @@ sim710_device_remove(struct device *dev)
164 NCR_700_release(host); 164 NCR_700_release(host);
165 kfree(hostdata); 165 kfree(hostdata);
166 free_irq(host->irq, host); 166 free_irq(host->irq, host);
167 release_region(host->base, 64);
167 return 0; 168 return 0;
168} 169}
169 170
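
The sim710 hunks finish a conversion to ioport_map(): hostdata->base now carries an __iomem cookie rather than a raw port number (NCR_700_set_io_mapped() disappears, presumably because the 53c700 core now reaches the chip through ioread*/iowrite* on that cookie), while the raw port is kept in host->base so the remove path can release_region() it. A sketch of the probe/remove pairing under those assumptions (helper names are hypothetical; 64 is the window size used in the hunk):

#include <linux/ioport.h>
#include <asm/io.h>

/* Sketch: map at probe time, keep the raw port (host->base in the hunk)
 * for teardown. */
static void __iomem *sim710_map_ports(unsigned long base_addr)
{
	void __iomem *regs;

	if (!request_region(base_addr, 64, "sim710"))
		return NULL;
	regs = ioport_map(base_addr, 64);
	if (!regs)
		release_region(base_addr, 64);
	return regs;
}

static void sim710_unmap_ports(void __iomem *regs, unsigned long base_addr)
{
	ioport_unmap(regs);
	release_region(base_addr, 64);
}
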